// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
#include <net/pkt_cls.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR		8
#define DPSW_MIN_VER_MINOR		9

#define DEFAULT_VLAN_ID			1

static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
{
	return port_priv->fdb->fdb_id;
}

static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->fdbs[i].in_use)
			return &ethsw->fdbs[i];
	return NULL;
}

static struct dpaa2_switch_acl_tbl *
dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->acls[i].in_use)
			return &ethsw->acls[i];
	return NULL;
}

static int dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
				     struct net_device *bridge_dev)
{
	struct ethsw_port_priv *other_port_priv = NULL;
	struct dpaa2_switch_fdb *fdb;
	struct net_device *other_dev;
	struct list_head *iter;

	/* If we leave a bridge (bridge_dev is NULL), find an unused
	 * FDB and use that.
	 */
	if (!bridge_dev) {
		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);

		/* If there is no unused FDB, we must be the last port that
		 * leaves the last bridge, all the others are standalone. We
		 * can just keep the FDB that we already have.
		 */

		if (!fdb) {
			port_priv->fdb->bridge_dev = NULL;
			return 0;
		}

		port_priv->fdb = fdb;
		port_priv->fdb->in_use = true;
		port_priv->fdb->bridge_dev = NULL;
		return 0;
	}

	/* The below call to netdev_for_each_lower_dev() demands the RTNL lock
	 * being held. Assert on it so that it's easier to catch new code
	 * paths that reach this point without the RTNL lock.
	 */
	ASSERT_RTNL();

	/* If part of a bridge, use the FDB of the first dpaa2 switch interface
	 * to be present in that bridge
	 */
	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		if (other_dev == port_priv->netdev)
			continue;

		other_port_priv = netdev_priv(other_dev);
		break;
	}

	/* The current port is about to change its FDB to the one used by the
	 * first port that joined the bridge.
	 */
	if (other_port_priv) {
		/* The previous FDB is about to become unused, since the
		 * interface is no longer standalone.
		 */
		port_priv->fdb->in_use = false;
		port_priv->fdb->bridge_dev = NULL;

		/* Get a reference to the new FDB */
		port_priv->fdb = other_port_priv->fdb;
	}

	/* Keep track of the new upper bridge device */
	port_priv->fdb->bridge_dev = bridge_dev;

	return 0;
}

static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
					   enum dpsw_flood_type type,
					   struct dpsw_egress_flood_cfg *cfg)
{
	int i = 0, j;

	memset(cfg, 0, sizeof(*cfg));

	/* Add all the DPAA2 switch ports found in the same bridging domain to
	 * the egress flooding domain
	 */
	for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
		if (!ethsw->ports[j])
			continue;
		if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
			continue;

		if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
		else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
	}

	/* Add the CTRL interface to the egress flooding domain */
	cfg->if_id[i++] = ethsw->sw_attr.num_ifs;

	cfg->fdb_id = fdb_id;
	cfg->flood_type = type;
	cfg->num_ifs = i;
}

static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
{
	struct dpsw_egress_flood_cfg flood_cfg;
	int err;

	/* Setup broadcast flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	/* Setup unknown flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	return 0;
}

static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_vlan_cfg vcfg = {0};
	int err;

	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_vlan_add(ethsw->mc_io, 0,
			    ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;

	return 0;
}

static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
{
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return true;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	return state.up ? true : false;
}

static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_tci_cfg tci_cfg = { 0 };
	bool up;
	int err, ret;

	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
		return err;
	}

	tci_cfg.vlan_id = pvid;

	/* Interface needs to be down to change PVID */
	up = dpaa2_switch_port_is_up(port_priv);
	if (up) {
		err = dpsw_if_disable(ethsw->mc_io, 0,
				      ethsw->dpsw_handle,
				      port_priv->idx);
		if (err) {
			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
			return err;
		}
	}

	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
		goto set_tci_error;
	}

	/* Delete previous PVID info and mark the new one */
	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
	port_priv->pvid = pvid;

set_tci_error:
	if (up) {
		ret = dpsw_if_enable(ethsw->mc_io, 0,
				     ethsw->dpsw_handle,
				     port_priv->idx);
		if (ret) {
			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
			return ret;
		}
	}

	return err;
}

static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
				      u16 vid, u16 flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg = {0};
	int err;

	if (port_priv->vlans[vid]) {
		netdev_warn(netdev, "VLAN %d already configured\n", vid);
		return -EEXIST;
	}

	/* If hit, this VLAN rule will lead the packet into the FDB table
	 * specified in the vlan configuration below
	 */
	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
		return err;
	}

	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_add_if_untagged err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
	}

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = dpaa2_switch_port_set_pvid(port_priv, vid);
		if (err)
			return err;
	}

	return 0;
}

static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
{
	switch (state) {
	case BR_STATE_DISABLED:
		return DPSW_STP_STATE_DISABLED;
	case BR_STATE_LISTENING:
		return DPSW_STP_STATE_LISTENING;
	case BR_STATE_LEARNING:
		return DPSW_STP_STATE_LEARNING;
	case BR_STATE_FORWARDING:
		return DPSW_STP_STATE_FORWARDING;
	case BR_STATE_BLOCKING:
		return DPSW_STP_STATE_BLOCKING;
	default:
		return DPSW_STP_STATE_DISABLED;
	}
}

static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
{
	struct dpsw_stp_cfg stp_cfg = {0};
	int err;
	u16 vid;

	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
		return 0;	/* Nothing to do */

	stp_cfg.state = br_stp_state_to_dpsw(state);
	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
			stp_cfg.vlan_id = vid;
			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
					      port_priv->ethsw_data->dpsw_handle,
					      port_priv->idx, &stp_cfg);
			if (err) {
				netdev_err(port_priv->netdev,
					   "dpsw_if_set_stp err %d\n", err);
				return err;
			}
		}
	}

	port_priv->stp_state = state;

	return 0;
}

static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
{
	struct ethsw_port_priv *ppriv_local = NULL;
	int i, err;

	if (!ethsw->vlans[vid])
		return -ENOENT;

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = 0;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		ppriv_local = ethsw->ports[i];
		ppriv_local->vlans[vid] = 0;
	}

	return 0;
}

static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
				   port_priv->ethsw_data->dpsw_handle,
				   fdb_id, &entry);
	if (err)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_add_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
				      port_priv->ethsw_data->dpsw_handle,
				      fdb_id, &entry);
	/* Silently discard the error when the del command is called multiple times */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     fdb_id, &entry);
	/* Silently discard the error when the add command is called multiple times */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
			   err);
	return err;
}

static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
					port_priv->ethsw_data->dpsw_handle,
					fdb_id, &entry);
	/* Silently discard the error when the del command is called multiple times */
	if (err && err != -ENAVAIL)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_multicast err %d\n", err);
	return err;
}
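
/* Gather the per-interface hardware counters maintained by the MC firmware
 * into the standard rtnl_link_stats64 layout. Filtered ingress frames are
 * folded into rx_dropped on top of the ingress discard counter.
 */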
static void dpaa2_switch_port_get_stats(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	u64 tmp;
	int err;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME_DISCARD,
				  &stats->rx_dropped);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FLTR_FRAME,
				  &tmp);
	if (err)
		goto error;
	stats->rx_dropped += tmp;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME_DISCARD,
				  &stats->tx_dropped);
	if (err)
		goto error;

	return;

error:
	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
}

static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
						int attr_id)
{
	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
}

static int dpaa2_switch_port_get_offload_stats(int attr_id,
					       const struct net_device *netdev,
					       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
		return 0;
	}

	return -EINVAL;
}

static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
					   0,
					   port_priv->ethsw_data->dpsw_handle,
					   port_priv->idx,
					   (u16)ETHSW_L2_MAX_FRM(mtu));
	if (err) {
		netdev_err(netdev,
			   "dpsw_if_set_max_frame_length() err %d\n", err);
		return err;
	}

	netdev->mtu = mtu;
	return 0;
}

static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpsw_link_state state;
	int err;

	/* Interrupts are received even though no one issued an 'ifconfig up'
	 * on the switch interface. Ignore these link state update interrupts
	 */
	if (!netif_running(netdev))
		return 0;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return err;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	if (state.up != port_priv->link_state) {
		if (state.up) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
		port_priv->link_state = state.up;
	}

	return 0;
}

/* Manage all NAPI instances for the control interface.
 *
 * We only have one RX queue and one Tx Conf queue for all
 * switch ports. Therefore, we only need to enable the NAPI instance once, the
 * first time one of the switch ports runs .dev_open().
 */

static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* a new interface is using the NAPI instance */
	ethsw->napi_users++;

	/* if there is already a user of the instance, return */
	if (ethsw->napi_users > 1)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_enable(&ethsw->fq[i].napi);
}

static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* If we are not the last interface using the NAPI, return */
	ethsw->napi_users--;
	if (ethsw->napi_users)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_disable(&ethsw->fq[i].napi);
}

static int dpaa2_switch_port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* Explicitly set carrier off, otherwise
	 * netif_carrier_ok() will return true and cause 'ip link show'
	 * to report the LOWER_UP flag, even though the link
	 * notification wasn't even received.
	 */
	netif_carrier_off(netdev);

	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
			     port_priv->ethsw_data->dpsw_handle,
			     port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
		return err;
	}

	/* sync carrier state */
	err = dpaa2_switch_port_carrier_state_sync(netdev);
	if (err) {
		netdev_err(netdev,
			   "dpaa2_switch_port_carrier_state_sync err %d\n", err);
		goto err_carrier_sync;
	}

	dpaa2_switch_enable_ctrl_if_napi(ethsw);

	return 0;

err_carrier_sync:
	dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			port_priv->ethsw_data->dpsw_handle,
			port_priv->idx);
	return err;
}

static int dpaa2_switch_port_stop(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			      port_priv->ethsw_data->dpsw_handle,
			      port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
		return err;
	}

	dpaa2_switch_disable_ctrl_if_napi(ethsw);

	return 0;
}

static int dpaa2_switch_port_parent_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct ethsw_port_priv *port_priv = netdev_priv(dev);

	ppid->id_len = 1;
	ppid->id[0] = port_priv->ethsw_data->dev_id;

	return 0;
}

static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
					   size_t len)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = snprintf(name, len, "p%d", port_priv->idx);
	if (err >= len)
		return -EINVAL;

	return 0;
}

struct ethsw_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
				    struct ethsw_dump_ctx *dump)
{
	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
					     struct ethsw_port_priv *port_priv)
{
	int idx = port_priv->idx;
	int valid;

	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		valid = entry->if_info == port_priv->idx;
	else
		valid = entry->if_mask[idx / 8] & BIT(idx % 8);

	return valid;
}

static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
				    dpaa2_switch_fdb_cb_t cb, void *data)
{
	struct net_device *net_dev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct device *dev = net_dev->dev.parent;
	struct fdb_dump_entry *fdb_entries;
	struct fdb_dump_entry fdb_entry;
	dma_addr_t fdb_dump_iova;
	u16 num_fdb_entries;
	u32 fdb_dump_size;
	int err = 0, i;
	u8 *dma_mem;
	u16 fdb_id;

	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, fdb_dump_iova)) {
		netdev_err(net_dev, "dma_map_single() failed\n");
		err = -ENOMEM;
		goto err_map;
	}

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
	if (err) {
		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
		goto err_dump;
	}

	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);

	fdb_entries = (struct fdb_dump_entry *)dma_mem;
	for (i = 0; i < num_fdb_entries; i++) {
		fdb_entry = fdb_entries[i];

		err = cb(port_priv, &fdb_entry, data);
		if (err)
			goto end;
	}

end:
	kfree(dma_mem);

	return 0;

err_dump:
	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
err_map:
	kfree(dma_mem);
	return err;
}

static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}

static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
				      struct net_device *net_dev,
				      struct net_device *filter_dev, int *idx)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_dump_ctx dump = {
		.dev = net_dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
					   struct fdb_dump_entry *fdb_entry,
					   void *data __always_unused)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);

	return 0;
}

static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
{
	dpaa2_switch_fdb_iterate(port_priv,
				 dpaa2_switch_fdb_entry_fast_age, NULL);
}

static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
				      u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_add(netdev, &vlan);
}

static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
				       u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_del(netdev, &vlan);
}

static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *net_dev = port_priv->netdev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN];
	int err;

	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
		return 0;

	/* Get firmware address, if any */
	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, mac_addr);
	if (err) {
		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	}

	return 0;
}

static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
				 const struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	unsigned char *buffer_start;
	struct sk_buff **skbh, *skb;
	dma_addr_t fd_addr;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);

	skb = *skbh;
	buffer_start = (unsigned char *)skbh;

	dma_unmap_single(dev, fd_addr,
			 skb_tail_pointer(skb) - buffer_start,
			 DMA_TO_DEVICE);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
					struct sk_buff *skb,
					struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	struct sk_buff **skbh;
	dma_addr_t addr;
	u8 *buff_start;
	void *hwa;

	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
			       DPAA2_SWITCH_TX_BUF_ALIGN,
			       DPAA2_SWITCH_TX_BUF_ALIGN);

	/* Clear FAS to have consistent values for TX confirmation. It is
	 * located in the first 8 bytes of the buffer's hardware annotation
	 * area
	 */
	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
	memset(hwa, 0, 8);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	addr = dma_map_single(dev, buff_start,
			      skb_tail_pointer(skb) - buff_start,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);

	return 0;
}

static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
					struct net_device *net_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
	struct dpaa2_fd fd;
	int err;

	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
		if (unlikely(!ns)) {
			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
			goto err_free_skb;
		}
		dev_consume_skb_any(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx confirmation */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
		goto err_exit;
	}

	/* At this stage, we do not support non-linear skbs so just try to
	 * linearize the skb and if that's not working, just drop the packet.
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}

static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl,
				 struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return dpaa2_switch_cls_flower_replace(acl_tbl, f);
	case FLOW_CLS_DESTROY:
		return dpaa2_switch_cls_flower_destroy(acl_tbl, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl,
				   struct tc_cls_matchall_offload *f)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dpaa2_switch_cls_matchall_replace(acl_tbl, f);
	case TC_CLSMATCHALL_DESTROY:
		return dpaa2_switch_cls_matchall_destroy(acl_tbl, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
	case TC_SETUP_CLSMATCHALL:
		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(dpaa2_switch_block_cb_list);

static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
					  struct dpaa2_switch_acl_tbl *acl_tbl)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->acl_tbl)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      acl_tbl->id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		return err;
	}

	acl_tbl->ports |= BIT(port_priv->idx);
	port_priv->acl_tbl = acl_tbl;

	return 0;
}

static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
				 struct dpaa2_switch_acl_tbl *acl_tbl)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->acl_tbl != acl_tbl)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 acl_tbl->id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
		return err;
	}

	acl_tbl->ports &= ~BIT(port_priv->idx);
	port_priv->acl_tbl = NULL;
	return 0;
}

static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
					struct dpaa2_switch_acl_tbl *acl_tbl)
{
	struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl;
	int err;

	/* If the port is already bound to this ACL table then do nothing. This
	 * can happen when this port is the first one to join a tc block
	 */
	if (port_priv->acl_tbl == acl_tbl)
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl);
	if (err)
		return err;

	/* Mark the previous ACL table as being unused if this was the last
	 * port that was using it.
	 */
	if (old_acl_tbl->ports == 0)
		old_acl_tbl->in_use = false;

	return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
}

static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
					  struct dpaa2_switch_acl_tbl *acl_tbl)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_acl_tbl *new_acl_tbl;
	int err;

	/* We are the last port that leaves a block (an ACL table).
	 * We'll continue to use this table.
	 */
	if (acl_tbl->ports == BIT(port_priv->idx))
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl);
	if (err)
		return err;

	if (acl_tbl->ports == 0)
		acl_tbl->in_use = false;

	new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
	new_acl_tbl->in_use = true;
	return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl);
}

static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
					    struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_acl_tbl *acl_tbl;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);

	if (!block_cb) {
		/* If the ACL table is not already known, then this port must
		 * be the first to join it. In this case, we can just continue
		 * to use our private table
		 */
		acl_tbl = port_priv->acl_tbl;

		block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
					       ethsw, acl_tbl, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		register_block = true;
	} else {
		acl_tbl = flow_block_cb_priv(block_cb);
	}

	flow_block_cb_incref(block_cb);
	err = dpaa2_switch_port_block_bind(port_priv, acl_tbl);
	if (err)
		goto err_block_bind;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list,
			      &dpaa2_switch_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}

static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
					       struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_acl_tbl *acl_tbl;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);
	if (!block_cb)
		return;

	acl_tbl = flow_block_cb_priv(block_cb);
	err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
				       struct flow_block_offload *f)
{
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = &dpaa2_switch_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		return dpaa2_switch_setup_tc_block_bind(netdev, f);
	case FLOW_BLOCK_UNBIND:
		dpaa2_switch_setup_tc_block_unbind(netdev, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
				      enum tc_setup_type type,
				      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK: {
		return dpaa2_switch_setup_tc_block(netdev, type_data);
	}
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct net_device_ops dpaa2_switch_port_ops = {
	.ndo_open = dpaa2_switch_port_open,
	.ndo_stop = dpaa2_switch_port_stop,

	.ndo_set_mac_address = eth_mac_addr,
	.ndo_get_stats64 = dpaa2_switch_port_get_stats,
	.ndo_change_mtu = dpaa2_switch_port_change_mtu,
	.ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats,
	.ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats,
	.ndo_fdb_dump = dpaa2_switch_port_fdb_dump,
	.ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add,
	.ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill,

	.ndo_start_xmit = dpaa2_switch_port_tx,
	.ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
	.ndo_setup_tc = dpaa2_switch_port_setup_tc,
};

bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
{
	return netdev->netdev_ops == &dpaa2_switch_port_ops;
}

static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
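		/* Resync both the link state and the MAC address of each
		 * switch interface with what the firmware reports
		 */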
		dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
		dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
	}
}

static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);

	/* Mask the events and the if_id reserved bits to be cleared on read */
	u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
	int err;

	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, &status);
	if (err) {
		dev_err(dev, "Can't get irq status (err %d)\n", err);

		err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
					    DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
		if (err)
			dev_err(dev, "Can't clear irq status (err %d)\n", err);
		goto out;
	}

	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
		dpaa2_switch_links_state_update(ethsw);

out:
	return IRQ_HANDLED;
}

static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(sw_dev);
	if (err) {
		dev_err(dev, "MC irqs allocation failed\n");
		return err;
	}

	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
		err = -EINVAL;
		goto free_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
		goto free_irq;
	}

	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];

	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
					NULL,
					dpaa2_switch_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(dev), dev);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_irq;
	}

	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
				DPSW_IRQ_INDEX_IF, mask);
	if (err) {
		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
		goto free_devm_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 1);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
		goto free_devm_irq;
	}

	return 0;

free_devm_irq:
	devm_free_irq(dev, irq->msi_desc->irq, dev);
free_irq:
	fsl_mc_free_irqs(sw_dev);
	return err;
}

static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);

	fsl_mc_free_irqs(sw_dev);
}

static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	enum dpsw_learning_mode learn_mode;
	int err;

	if (enable)
		learn_mode = DPSW_LEARNING_MODE_HW;
	else
		learn_mode = DPSW_LEARNING_MODE_DIS;

	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, learn_mode);
	if (err)
		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);
"dpsw_if_set_learning_mode err %d\n", err); 1534 1535 if (!enable) 1536 dpaa2_switch_port_fast_age(port_priv); 1537 1538 return err; 1539 } 1540 1541 static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev, 1542 u8 state) 1543 { 1544 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1545 int err; 1546 1547 err = dpaa2_switch_port_set_stp_state(port_priv, state); 1548 if (err) 1549 return err; 1550 1551 switch (state) { 1552 case BR_STATE_DISABLED: 1553 case BR_STATE_BLOCKING: 1554 case BR_STATE_LISTENING: 1555 err = dpaa2_switch_port_set_learning(port_priv, false); 1556 break; 1557 case BR_STATE_LEARNING: 1558 case BR_STATE_FORWARDING: 1559 err = dpaa2_switch_port_set_learning(port_priv, 1560 port_priv->learn_ena); 1561 break; 1562 } 1563 1564 return err; 1565 } 1566 1567 static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv, 1568 struct switchdev_brport_flags flags) 1569 { 1570 struct ethsw_core *ethsw = port_priv->ethsw_data; 1571 1572 if (flags.mask & BR_BCAST_FLOOD) 1573 port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD); 1574 1575 if (flags.mask & BR_FLOOD) 1576 port_priv->ucast_flood = !!(flags.val & BR_FLOOD); 1577 1578 return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 1579 } 1580 1581 static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev, 1582 struct switchdev_brport_flags flags, 1583 struct netlink_ext_ack *extack) 1584 { 1585 if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD | 1586 BR_MCAST_FLOOD)) 1587 return -EINVAL; 1588 1589 if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) { 1590 bool multicast = !!(flags.val & BR_MCAST_FLOOD); 1591 bool unicast = !!(flags.val & BR_FLOOD); 1592 1593 if (unicast != multicast) { 1594 NL_SET_ERR_MSG_MOD(extack, 1595 "Cannot configure multicast flooding independently of unicast"); 1596 return -EINVAL; 1597 } 1598 } 1599 1600 return 0; 1601 } 1602 1603 static int dpaa2_switch_port_bridge_flags(struct net_device *netdev, 1604 struct switchdev_brport_flags flags, 1605 struct netlink_ext_ack *extack) 1606 { 1607 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1608 int err; 1609 1610 if (flags.mask & BR_LEARNING) { 1611 bool learn_ena = !!(flags.val & BR_LEARNING); 1612 1613 err = dpaa2_switch_port_set_learning(port_priv, learn_ena); 1614 if (err) 1615 return err; 1616 port_priv->learn_ena = learn_ena; 1617 } 1618 1619 if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) { 1620 err = dpaa2_switch_port_flood(port_priv, flags); 1621 if (err) 1622 return err; 1623 } 1624 1625 return 0; 1626 } 1627 1628 static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx, 1629 const struct switchdev_attr *attr, 1630 struct netlink_ext_ack *extack) 1631 { 1632 int err = 0; 1633 1634 switch (attr->id) { 1635 case SWITCHDEV_ATTR_ID_PORT_STP_STATE: 1636 err = dpaa2_switch_port_attr_stp_state_set(netdev, 1637 attr->u.stp_state); 1638 break; 1639 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: 1640 if (!attr->u.vlan_filtering) { 1641 NL_SET_ERR_MSG_MOD(extack, 1642 "The DPAA2 switch does not support VLAN-unaware operation"); 1643 return -EOPNOTSUPP; 1644 } 1645 break; 1646 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: 1647 err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack); 1648 break; 1649 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 1650 err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack); 1651 break; 1652 default: 1653 err = -EOPNOTSUPP; 1654 break; 1655 } 1656 1657 return err; 1658 } 1659 
int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_attr *attr = &ethsw->sw_attr;
	int err = 0;

	/* Make sure that the VLAN is not already configured
	 * on the switch port
	 */
	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
		return -EEXIST;

	/* Check if there is space for a new VLAN */
	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
		return err;
	}
	if (attr->max_vlans - attr->num_vlans < 1)
		return -ENOSPC;

	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
		/* this is a new VLAN */
		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
		if (err)
			return err;

		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
	}

	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}

static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
					    const unsigned char *addr)
{
	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	list_for_each_entry(ha, &list->list, list) {
		if (ether_addr_equal(ha->addr, addr)) {
			netif_addr_unlock_bh(netdev);
			return 1;
		}
	}
	netif_addr_unlock_bh(netdev);
	return 0;
}

static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* Check if address is already set on this port */
	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}

static int dpaa2_switch_port_obj_add(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_add(netdev,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_add(netdev,
						SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		/* If we are deleting the PVID of a port, use VLAN 4095 instead
		 * as we are sure that neither the bridge nor the 8021q module
		 * will use it
		 */
		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
	}

	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;

		/* Delete VLAN from switch if it is no longer configured on
		 * any port
		 */
		for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
				return 0; /* Found a port member in VID */

		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;

		err = dpaa2_switch_dellink(ethsw, vid);
		if (err)
			return err;
	}

	return 0;
}

int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}

static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -ENOENT;

	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_del(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_del err %d\n", err);
		return err;
	}

	return err;
}

static int dpaa2_switch_port_obj_del(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
					    struct switchdev_notifier_port_attr_info *ptr)
{
	int err;

	err = switchdev_handle_port_attr_set(netdev, ptr,
					     dpaa2_switch_port_dev_check,
					     dpaa2_switch_port_attr_set);
	return notifier_from_errno(err);
}

static struct notifier_block dpaa2_switch_port_switchdev_nb;
static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;

static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
					 struct net_device *upper_dev,
					 struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct ethsw_port_priv *other_port_priv;
	struct net_device *other_dev;
	struct list_head *iter;
	bool learn_ena;
	int err;

	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		other_port_priv = netdev_priv(other_dev);
		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interface from a different DPSW is in the bridge already");
			return -EINVAL;
		}
	}

	/* Delete the previously manually installed VLAN 1 */
	err = dpaa2_switch_port_del_vlan(port_priv, 1);
	if (err)
		return err;

	dpaa2_switch_port_set_fdb(port_priv, upper_dev);

	/* Inherit the initial bridge port learning state */
	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
	port_priv->learn_ena = learn_ena;

	/* Setup the egress flood policy (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		goto err_egress_flood;

	err = switchdev_bridge_port_offload(netdev, netdev, NULL,
					    &dpaa2_switch_port_switchdev_nb,
					    &dpaa2_switch_port_switchdev_blocking_nb,
					    extack);
	if (err)
		goto err_switchdev_offload;

	return 0;

err_switchdev_offload:
err_egress_flood:
	dpaa2_switch_port_set_fdb(port_priv, NULL);
	return err;
}

static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
}

static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
}

static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
{
	switchdev_bridge_port_unoffload(netdev, NULL,
					&dpaa2_switch_port_switchdev_nb,
					&dpaa2_switch_port_switchdev_blocking_nb);
}

static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* First of all, fast age any learned FDB addresses on this switch port */
	dpaa2_switch_port_fast_age(port_priv);

	/* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
	 * upper devices or otherwise from the FDB table that we are about to
	 * leave
	 */
	err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);

	dpaa2_switch_port_set_fdb(port_priv, NULL);

	/* Restore all RX VLANs into the new FDB table that we just joined */
	err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);

	/* Reset the flooding state to denote that this port can send any
	 * packet in standalone mode. With this, we are also ensuring that any
	 * later bridge join will have the flooding flag on.
 */
	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Setup the egress flood policy (broadcast, unknown unicast).
	 * When the port is not under a bridge, only the CTRL interface is part
	 * of the flooding domain besides the actual port
	 */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Recreate the egress flood domain of the FDB that we just left */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
	if (err)
		return err;

	/* No HW learning when not under a bridge */
	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		return err;
	port_priv->learn_ena = false;

	/* Add the VLAN 1 as PVID when not under a bridge. We need this since
	 * the dpaa2 switch interfaces are not capable of being VLAN unaware
	 */
	return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
					  BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
}

static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	/* RCU read lock not necessary because we have write-side protection
	 * (rtnl_mutex), however a non-RCU iterator does not exist.
	 */
	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
		if (is_vlan_dev(upper_dev))
			return -EOPNOTSUPP;

	return 0;
}

static int
dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
					  struct net_device *upper_dev,
					  struct netlink_ext_ack *extack)
{
	int err;

	if (!br_vlan_enabled(upper_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
		return -EOPNOTSUPP;
	}

	err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot join a bridge while VLAN uppers are present");
		return err;
	}

	return 0;
}

static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			break;

		err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
								upper_dev,
								extack);
		if (err)
			goto out;

		if (!info->linking)
			dpaa2_switch_port_pre_bridge_leave(netdev);

		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = dpaa2_switch_port_bridge_join(netdev,
								    upper_dev,
								    extack);
			else
				err = dpaa2_switch_port_bridge_leave(netdev);
		}
		break;
	}

out:
	return notifier_from_errno(err);
}

struct ethsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void dpaa2_switch_event_work(struct work_struct *work)
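/* Deferred handler for the FDB add/del events queued up by
 * dpaa2_switch_port_event(). The switchdev FDB notifiers run in atomic
 * context, while updating the hardware FDB is done under rtnl_lock(), so
 * the actual update happens here, from the ordered workqueue.
 */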
{ 2129 struct ethsw_switchdev_event_work *switchdev_work = 2130 container_of(work, struct ethsw_switchdev_event_work, work); 2131 struct net_device *dev = switchdev_work->dev; 2132 struct switchdev_notifier_fdb_info *fdb_info; 2133 int err; 2134 2135 rtnl_lock(); 2136 fdb_info = &switchdev_work->fdb_info; 2137 2138 switch (switchdev_work->event) { 2139 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2140 if (!fdb_info->added_by_user || fdb_info->is_local) 2141 break; 2142 if (is_unicast_ether_addr(fdb_info->addr)) 2143 err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), 2144 fdb_info->addr); 2145 else 2146 err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), 2147 fdb_info->addr); 2148 if (err) 2149 break; 2150 fdb_info->offloaded = true; 2151 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, 2152 &fdb_info->info, NULL); 2153 break; 2154 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2155 if (!fdb_info->added_by_user || fdb_info->is_local) 2156 break; 2157 if (is_unicast_ether_addr(fdb_info->addr)) 2158 dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); 2159 else 2160 dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr); 2161 break; 2162 } 2163 2164 rtnl_unlock(); 2165 kfree(switchdev_work->fdb_info.addr); 2166 kfree(switchdev_work); 2167 dev_put(dev); 2168 } 2169 2170 /* Called under rcu_read_lock() */ 2171 static int dpaa2_switch_port_event(struct notifier_block *nb, 2172 unsigned long event, void *ptr) 2173 { 2174 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2175 struct ethsw_port_priv *port_priv = netdev_priv(dev); 2176 struct ethsw_switchdev_event_work *switchdev_work; 2177 struct switchdev_notifier_fdb_info *fdb_info = ptr; 2178 struct ethsw_core *ethsw = port_priv->ethsw_data; 2179 2180 if (event == SWITCHDEV_PORT_ATTR_SET) 2181 return dpaa2_switch_port_attr_set_event(dev, ptr); 2182 2183 if (!dpaa2_switch_port_dev_check(dev)) 2184 return NOTIFY_DONE; 2185 2186 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 2187 if (!switchdev_work) 2188 return NOTIFY_BAD; 2189 2190 INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); 2191 switchdev_work->dev = dev; 2192 switchdev_work->event = event; 2193 2194 switch (event) { 2195 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2196 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2197 memcpy(&switchdev_work->fdb_info, ptr, 2198 sizeof(switchdev_work->fdb_info)); 2199 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 2200 if (!switchdev_work->fdb_info.addr) 2201 goto err_addr_alloc; 2202 2203 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 2204 fdb_info->addr); 2205 2206 /* Take a reference on the device to avoid being freed. 
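		 * The reference is dropped with dev_put() once
		 * dpaa2_switch_event_work() has finished processing the event.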
*/ 2207 dev_hold(dev); 2208 break; 2209 default: 2210 kfree(switchdev_work); 2211 return NOTIFY_DONE; 2212 } 2213 2214 queue_work(ethsw->workqueue, &switchdev_work->work); 2215 2216 return NOTIFY_DONE; 2217 2218 err_addr_alloc: 2219 kfree(switchdev_work); 2220 return NOTIFY_BAD; 2221 } 2222 2223 static int dpaa2_switch_port_obj_event(unsigned long event, 2224 struct net_device *netdev, 2225 struct switchdev_notifier_port_obj_info *port_obj_info) 2226 { 2227 int err = -EOPNOTSUPP; 2228 2229 if (!dpaa2_switch_port_dev_check(netdev)) 2230 return NOTIFY_DONE; 2231 2232 switch (event) { 2233 case SWITCHDEV_PORT_OBJ_ADD: 2234 err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); 2235 break; 2236 case SWITCHDEV_PORT_OBJ_DEL: 2237 err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); 2238 break; 2239 } 2240 2241 port_obj_info->handled = true; 2242 return notifier_from_errno(err); 2243 } 2244 2245 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, 2246 unsigned long event, void *ptr) 2247 { 2248 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2249 2250 switch (event) { 2251 case SWITCHDEV_PORT_OBJ_ADD: 2252 case SWITCHDEV_PORT_OBJ_DEL: 2253 return dpaa2_switch_port_obj_event(event, dev, ptr); 2254 case SWITCHDEV_PORT_ATTR_SET: 2255 return dpaa2_switch_port_attr_set_event(dev, ptr); 2256 } 2257 2258 return NOTIFY_DONE; 2259 } 2260 2261 /* Build a linear skb based on a single-buffer frame descriptor */ 2262 static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw, 2263 const struct dpaa2_fd *fd) 2264 { 2265 u16 fd_offset = dpaa2_fd_get_offset(fd); 2266 dma_addr_t addr = dpaa2_fd_get_addr(fd); 2267 u32 fd_length = dpaa2_fd_get_len(fd); 2268 struct device *dev = ethsw->dev; 2269 struct sk_buff *skb = NULL; 2270 void *fd_vaddr; 2271 2272 fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr); 2273 dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE, 2274 DMA_FROM_DEVICE); 2275 2276 skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE + 2277 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 2278 if (unlikely(!skb)) { 2279 dev_err(dev, "build_skb() failed\n"); 2280 return NULL; 2281 } 2282 2283 skb_reserve(skb, fd_offset); 2284 skb_put(skb, fd_length); 2285 2286 ethsw->buf_count--; 2287 2288 return skb; 2289 } 2290 2291 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, 2292 const struct dpaa2_fd *fd) 2293 { 2294 dpaa2_switch_free_fd(fq->ethsw, fd); 2295 } 2296 2297 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, 2298 const struct dpaa2_fd *fd) 2299 { 2300 struct ethsw_core *ethsw = fq->ethsw; 2301 struct ethsw_port_priv *port_priv; 2302 struct net_device *netdev; 2303 struct vlan_ethhdr *hdr; 2304 struct sk_buff *skb; 2305 u16 vlan_tci, vid; 2306 int if_id, err; 2307 2308 /* get switch ingress interface ID */ 2309 if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF; 2310 2311 if (if_id >= ethsw->sw_attr.num_ifs) { 2312 dev_err(ethsw->dev, "Frame received from unknown interface!\n"); 2313 goto err_free_fd; 2314 } 2315 port_priv = ethsw->ports[if_id]; 2316 netdev = port_priv->netdev; 2317 2318 /* build the SKB based on the FD received */ 2319 if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) { 2320 if (net_ratelimit()) { 2321 netdev_err(netdev, "Received invalid frame format\n"); 2322 goto err_free_fd; 2323 } 2324 } 2325 2326 skb = dpaa2_switch_build_linear_skb(ethsw, fd); 2327 if (unlikely(!skb)) 2328 goto err_free_fd; 2329 2330 skb_reset_mac_header(skb); 2331 2332 /* Remove the VLAN header if the packet that we just 
received has a vid 2333 * equal to the port PVIDs. Since the dpaa2-switch can operate only in 2334 * VLAN-aware mode and no alterations are made on the packet when it's 2335 * redirected/mirrored to the control interface, we are sure that there 2336 * will always be a VLAN header present. 2337 */ 2338 hdr = vlan_eth_hdr(skb); 2339 vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK; 2340 if (vid == port_priv->pvid) { 2341 err = __skb_vlan_pop(skb, &vlan_tci); 2342 if (err) { 2343 dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err); 2344 goto err_free_fd; 2345 } 2346 } 2347 2348 skb->dev = netdev; 2349 skb->protocol = eth_type_trans(skb, skb->dev); 2350 2351 /* Setup the offload_fwd_mark only if the port is under a bridge */ 2352 skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev); 2353 2354 netif_receive_skb(skb); 2355 2356 return; 2357 2358 err_free_fd: 2359 dpaa2_switch_free_fd(ethsw, fd); 2360 } 2361 2362 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) 2363 { 2364 ethsw->features = 0; 2365 2366 if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) 2367 ethsw->features |= ETHSW_FEATURE_MAC_ADDR; 2368 } 2369 2370 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw) 2371 { 2372 struct dpsw_ctrl_if_attr ctrl_if_attr; 2373 struct device *dev = ethsw->dev; 2374 int i = 0; 2375 int err; 2376 2377 err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2378 &ctrl_if_attr); 2379 if (err) { 2380 dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err); 2381 return err; 2382 } 2383 2384 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; 2385 ethsw->fq[i].ethsw = ethsw; 2386 ethsw->fq[i++].type = DPSW_QUEUE_RX; 2387 2388 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; 2389 ethsw->fq[i].ethsw = ethsw; 2390 ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF; 2391 2392 return 0; 2393 } 2394 2395 /* Free buffers acquired from the buffer pool or which were meant to 2396 * be released in the pool 2397 */ 2398 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count) 2399 { 2400 struct device *dev = ethsw->dev; 2401 void *vaddr; 2402 int i; 2403 2404 for (i = 0; i < count; i++) { 2405 vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]); 2406 dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE, 2407 DMA_FROM_DEVICE); 2408 free_pages((unsigned long)vaddr, 0); 2409 } 2410 } 2411 2412 /* Perform a single release command to add buffers 2413 * to the specified buffer pool 2414 */ 2415 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid) 2416 { 2417 struct device *dev = ethsw->dev; 2418 u64 buf_array[BUFS_PER_CMD]; 2419 struct page *page; 2420 int retries = 0; 2421 dma_addr_t addr; 2422 int err; 2423 int i; 2424 2425 for (i = 0; i < BUFS_PER_CMD; i++) { 2426 /* Allocate one page for each Rx buffer. WRIOP sees 2427 * the entire page except for a tailroom reserved for 2428 * skb shared info 2429 */ 2430 page = dev_alloc_pages(0); 2431 if (!page) { 2432 dev_err(dev, "buffer allocation failed\n"); 2433 goto err_alloc; 2434 } 2435 2436 addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE, 2437 DMA_FROM_DEVICE); 2438 if (dma_mapping_error(dev, addr)) { 2439 dev_err(dev, "dma_map_single() failed\n"); 2440 goto err_map; 2441 } 2442 buf_array[i] = addr; 2443 } 2444 2445 release_bufs: 2446 /* In case the portal is busy, retry until successful or 2447 * max retries hit. 
2448 */ 2449 while ((err = dpaa2_io_service_release(NULL, bpid, 2450 buf_array, i)) == -EBUSY) { 2451 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) 2452 break; 2453 2454 cpu_relax(); 2455 } 2456 2457 /* If release command failed, clean up and bail out. */ 2458 if (err) { 2459 dpaa2_switch_free_bufs(ethsw, buf_array, i); 2460 return 0; 2461 } 2462 2463 return i; 2464 2465 err_map: 2466 __free_pages(page, 0); 2467 err_alloc: 2468 /* If we managed to allocate at least some buffers, 2469 * release them to hardware 2470 */ 2471 if (i) 2472 goto release_bufs; 2473 2474 return 0; 2475 } 2476 2477 static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) 2478 { 2479 int *count = ðsw->buf_count; 2480 int new_count; 2481 int err = 0; 2482 2483 if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) { 2484 do { 2485 new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2486 if (unlikely(!new_count)) { 2487 /* Out of memory; abort for now, we'll 2488 * try later on 2489 */ 2490 break; 2491 } 2492 *count += new_count; 2493 } while (*count < DPAA2_ETHSW_NUM_BUFS); 2494 2495 if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS)) 2496 err = -ENOMEM; 2497 } 2498 2499 return err; 2500 } 2501 2502 static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) 2503 { 2504 int *count, i; 2505 2506 for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { 2507 count = ðsw->buf_count; 2508 *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2509 2510 if (unlikely(*count < BUFS_PER_CMD)) 2511 return -ENOMEM; 2512 } 2513 2514 return 0; 2515 } 2516 2517 static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw) 2518 { 2519 u64 buf_array[BUFS_PER_CMD]; 2520 int ret; 2521 2522 do { 2523 ret = dpaa2_io_service_acquire(NULL, ethsw->bpid, 2524 buf_array, BUFS_PER_CMD); 2525 if (ret < 0) { 2526 dev_err(ethsw->dev, 2527 "dpaa2_io_service_acquire() = %d\n", ret); 2528 return; 2529 } 2530 dpaa2_switch_free_bufs(ethsw, buf_array, ret); 2531 2532 } while (ret); 2533 } 2534 2535 static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) 2536 { 2537 struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 }; 2538 struct device *dev = ethsw->dev; 2539 struct fsl_mc_device *dpbp_dev; 2540 struct dpbp_attr dpbp_attrs; 2541 int err; 2542 2543 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 2544 &dpbp_dev); 2545 if (err) { 2546 if (err == -ENXIO) 2547 err = -EPROBE_DEFER; 2548 else 2549 dev_err(dev, "DPBP device allocation failed\n"); 2550 return err; 2551 } 2552 ethsw->dpbp_dev = dpbp_dev; 2553 2554 err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id, 2555 &dpbp_dev->mc_handle); 2556 if (err) { 2557 dev_err(dev, "dpbp_open() failed\n"); 2558 goto err_open; 2559 } 2560 2561 err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2562 if (err) { 2563 dev_err(dev, "dpbp_reset() failed\n"); 2564 goto err_reset; 2565 } 2566 2567 err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2568 if (err) { 2569 dev_err(dev, "dpbp_enable() failed\n"); 2570 goto err_enable; 2571 } 2572 2573 err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle, 2574 &dpbp_attrs); 2575 if (err) { 2576 dev_err(dev, "dpbp_get_attributes() failed\n"); 2577 goto err_get_attr; 2578 } 2579 2580 dpsw_ctrl_if_pools_cfg.num_dpbp = 1; 2581 dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id; 2582 dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE; 2583 dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0; 2584 2585 err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle, 2586 &dpsw_ctrl_if_pools_cfg); 2587 if (err) { 
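		/* The DPBP is already enabled at this point; the error labels
		 * below disable and close it before freeing the object.
		 */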
2588 dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); 2589 goto err_get_attr; 2590 } 2591 ethsw->bpid = dpbp_attrs.id; 2592 2593 return 0; 2594 2595 err_get_attr: 2596 dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2597 err_enable: 2598 err_reset: 2599 dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2600 err_open: 2601 fsl_mc_object_free(dpbp_dev); 2602 return err; 2603 } 2604 2605 static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw) 2606 { 2607 dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2608 dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2609 fsl_mc_object_free(ethsw->dpbp_dev); 2610 } 2611 2612 static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw) 2613 { 2614 int i; 2615 2616 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2617 ethsw->fq[i].store = 2618 dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE, 2619 ethsw->dev); 2620 if (!ethsw->fq[i].store) { 2621 dev_err(ethsw->dev, "dpaa2_io_store_create failed\n"); 2622 while (--i >= 0) 2623 dpaa2_io_store_destroy(ethsw->fq[i].store); 2624 return -ENOMEM; 2625 } 2626 } 2627 2628 return 0; 2629 } 2630 2631 static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw) 2632 { 2633 int i; 2634 2635 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2636 dpaa2_io_store_destroy(ethsw->fq[i].store); 2637 } 2638 2639 static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq) 2640 { 2641 int err, retries = 0; 2642 2643 /* Try to pull from the FQ while the portal is busy and we didn't hit 2644 * the maximum number fo retries 2645 */ 2646 do { 2647 err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store); 2648 cpu_relax(); 2649 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2650 2651 if (unlikely(err)) 2652 dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err); 2653 2654 return err; 2655 } 2656 2657 /* Consume all frames pull-dequeued into the store */ 2658 static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq) 2659 { 2660 struct ethsw_core *ethsw = fq->ethsw; 2661 int cleaned = 0, is_last; 2662 struct dpaa2_dq *dq; 2663 int retries = 0; 2664 2665 do { 2666 /* Get the next available FD from the store */ 2667 dq = dpaa2_io_store_next(fq->store, &is_last); 2668 if (unlikely(!dq)) { 2669 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) { 2670 dev_err_once(ethsw->dev, 2671 "No valid dequeue response\n"); 2672 return -ETIMEDOUT; 2673 } 2674 continue; 2675 } 2676 2677 if (fq->type == DPSW_QUEUE_RX) 2678 dpaa2_switch_rx(fq, dpaa2_dq_fd(dq)); 2679 else 2680 dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq)); 2681 cleaned++; 2682 2683 } while (!is_last); 2684 2685 return cleaned; 2686 } 2687 2688 /* NAPI poll routine */ 2689 static int dpaa2_switch_poll(struct napi_struct *napi, int budget) 2690 { 2691 int err, cleaned = 0, store_cleaned, work_done; 2692 struct dpaa2_switch_fq *fq; 2693 int retries = 0; 2694 2695 fq = container_of(napi, struct dpaa2_switch_fq, napi); 2696 2697 do { 2698 err = dpaa2_switch_pull_fq(fq); 2699 if (unlikely(err)) 2700 break; 2701 2702 /* Refill pool if appropriate */ 2703 dpaa2_switch_refill_bp(fq->ethsw); 2704 2705 store_cleaned = dpaa2_switch_store_consume(fq); 2706 cleaned += store_cleaned; 2707 2708 if (cleaned >= budget) { 2709 work_done = budget; 2710 goto out; 2711 } 2712 2713 } while (store_cleaned); 2714 2715 /* We didn't consume the entire budget, so finish napi and re-enable 2716 * data availability notifications 2717 */ 2718 napi_complete_done(napi, cleaned); 2719 do { 2720 err = dpaa2_io_service_rearm(NULL, &fq->nctx); 2721 cpu_relax(); 2722 } 
while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2723 2724 work_done = max(cleaned, 1); 2725 out: 2726 2727 return work_done; 2728 } 2729 2730 static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) 2731 { 2732 struct dpaa2_switch_fq *fq; 2733 2734 fq = container_of(nctx, struct dpaa2_switch_fq, nctx); 2735 2736 napi_schedule(&fq->napi); 2737 } 2738 2739 static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw) 2740 { 2741 struct dpsw_ctrl_if_queue_cfg queue_cfg; 2742 struct dpaa2_io_notification_ctx *nctx; 2743 int err, i, j; 2744 2745 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2746 nctx = ðsw->fq[i].nctx; 2747 2748 /* Register a new software context for the FQID. 2749 * By using NULL as the first parameter, we specify that we do 2750 * not care on which cpu are interrupts received for this queue 2751 */ 2752 nctx->is_cdan = 0; 2753 nctx->id = ethsw->fq[i].fqid; 2754 nctx->desired_cpu = DPAA2_IO_ANY_CPU; 2755 nctx->cb = dpaa2_switch_fqdan_cb; 2756 err = dpaa2_io_service_register(NULL, nctx, ethsw->dev); 2757 if (err) { 2758 err = -EPROBE_DEFER; 2759 goto err_register; 2760 } 2761 2762 queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST | 2763 DPSW_CTRL_IF_QUEUE_OPT_USER_CTX; 2764 queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO; 2765 queue_cfg.dest_cfg.dest_id = nctx->dpio_id; 2766 queue_cfg.dest_cfg.priority = 0; 2767 queue_cfg.user_ctx = nctx->qman64; 2768 2769 err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0, 2770 ethsw->dpsw_handle, 2771 ethsw->fq[i].type, 2772 &queue_cfg); 2773 if (err) 2774 goto err_set_queue; 2775 } 2776 2777 return 0; 2778 2779 err_set_queue: 2780 dpaa2_io_service_deregister(NULL, nctx, ethsw->dev); 2781 err_register: 2782 for (j = 0; j < i; j++) 2783 dpaa2_io_service_deregister(NULL, ðsw->fq[j].nctx, 2784 ethsw->dev); 2785 2786 return err; 2787 } 2788 2789 static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw) 2790 { 2791 int i; 2792 2793 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2794 dpaa2_io_service_deregister(NULL, ðsw->fq[i].nctx, 2795 ethsw->dev); 2796 } 2797 2798 static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) 2799 { 2800 int err; 2801 2802 /* setup FQs for Rx and Tx Conf */ 2803 err = dpaa2_switch_setup_fqs(ethsw); 2804 if (err) 2805 return err; 2806 2807 /* setup the buffer pool needed on the Rx path */ 2808 err = dpaa2_switch_setup_dpbp(ethsw); 2809 if (err) 2810 return err; 2811 2812 err = dpaa2_switch_seed_bp(ethsw); 2813 if (err) 2814 goto err_free_dpbp; 2815 2816 err = dpaa2_switch_alloc_rings(ethsw); 2817 if (err) 2818 goto err_drain_dpbp; 2819 2820 err = dpaa2_switch_setup_dpio(ethsw); 2821 if (err) 2822 goto err_destroy_rings; 2823 2824 err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); 2825 if (err) { 2826 dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); 2827 goto err_deregister_dpio; 2828 } 2829 2830 return 0; 2831 2832 err_deregister_dpio: 2833 dpaa2_switch_free_dpio(ethsw); 2834 err_destroy_rings: 2835 dpaa2_switch_destroy_rings(ethsw); 2836 err_drain_dpbp: 2837 dpaa2_switch_drain_bp(ethsw); 2838 err_free_dpbp: 2839 dpaa2_switch_free_dpbp(ethsw); 2840 2841 return err; 2842 } 2843 2844 static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) 2845 { 2846 struct device *dev = &sw_dev->dev; 2847 struct ethsw_core *ethsw = dev_get_drvdata(dev); 2848 struct dpsw_vlan_if_cfg vcfg = {0}; 2849 struct dpsw_tci_cfg tci_cfg = {0}; 2850 struct dpsw_stp_cfg stp_cfg; 2851 int err; 2852 u16 i; 2853 2854 ethsw->dev_id = sw_dev->obj_desc.id; 2855 2856 err = 
dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, ðsw->dpsw_handle); 2857 if (err) { 2858 dev_err(dev, "dpsw_open err %d\n", err); 2859 return err; 2860 } 2861 2862 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2863 ðsw->sw_attr); 2864 if (err) { 2865 dev_err(dev, "dpsw_get_attributes err %d\n", err); 2866 goto err_close; 2867 } 2868 2869 err = dpsw_get_api_version(ethsw->mc_io, 0, 2870 ðsw->major, 2871 ðsw->minor); 2872 if (err) { 2873 dev_err(dev, "dpsw_get_api_version err %d\n", err); 2874 goto err_close; 2875 } 2876 2877 /* Minimum supported DPSW version check */ 2878 if (ethsw->major < DPSW_MIN_VER_MAJOR || 2879 (ethsw->major == DPSW_MIN_VER_MAJOR && 2880 ethsw->minor < DPSW_MIN_VER_MINOR)) { 2881 dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n", 2882 ethsw->major, ethsw->minor); 2883 err = -EOPNOTSUPP; 2884 goto err_close; 2885 } 2886 2887 if (!dpaa2_switch_supports_cpu_traffic(ethsw)) { 2888 err = -EOPNOTSUPP; 2889 goto err_close; 2890 } 2891 2892 dpaa2_switch_detect_features(ethsw); 2893 2894 err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle); 2895 if (err) { 2896 dev_err(dev, "dpsw_reset err %d\n", err); 2897 goto err_close; 2898 } 2899 2900 stp_cfg.vlan_id = DEFAULT_VLAN_ID; 2901 stp_cfg.state = DPSW_STP_STATE_FORWARDING; 2902 2903 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 2904 err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i); 2905 if (err) { 2906 dev_err(dev, "dpsw_if_disable err %d\n", err); 2907 goto err_close; 2908 } 2909 2910 err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i, 2911 &stp_cfg); 2912 if (err) { 2913 dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", 2914 err, i); 2915 goto err_close; 2916 } 2917 2918 /* Switch starts with all ports configured to VLAN 1. 
Need to 2919 * remove this setting to allow configuration at bridge join 2920 */ 2921 vcfg.num_ifs = 1; 2922 vcfg.if_id[0] = i; 2923 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle, 2924 DEFAULT_VLAN_ID, &vcfg); 2925 if (err) { 2926 dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n", 2927 err); 2928 goto err_close; 2929 } 2930 2931 tci_cfg.vlan_id = 4095; 2932 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg); 2933 if (err) { 2934 dev_err(dev, "dpsw_if_set_tci err %d\n", err); 2935 goto err_close; 2936 } 2937 2938 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 2939 DEFAULT_VLAN_ID, &vcfg); 2940 if (err) { 2941 dev_err(dev, "dpsw_vlan_remove_if err %d\n", err); 2942 goto err_close; 2943 } 2944 } 2945 2946 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID); 2947 if (err) { 2948 dev_err(dev, "dpsw_vlan_remove err %d\n", err); 2949 goto err_close; 2950 } 2951 2952 ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered", 2953 WQ_MEM_RECLAIM, "ethsw", 2954 ethsw->sw_attr.id); 2955 if (!ethsw->workqueue) { 2956 err = -ENOMEM; 2957 goto err_close; 2958 } 2959 2960 err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0); 2961 if (err) 2962 goto err_destroy_ordered_workqueue; 2963 2964 err = dpaa2_switch_ctrl_if_setup(ethsw); 2965 if (err) 2966 goto err_destroy_ordered_workqueue; 2967 2968 return 0; 2969 2970 err_destroy_ordered_workqueue: 2971 destroy_workqueue(ethsw->workqueue); 2972 2973 err_close: 2974 dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 2975 return err; 2976 } 2977 2978 /* Add an ACL to redirect frames with specific destination MAC address to 2979 * control interface 2980 */ 2981 static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, 2982 const char *mac) 2983 { 2984 struct dpaa2_switch_acl_entry acl_entry = {0}; 2985 2986 /* Match on the destination MAC address */ 2987 ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac); 2988 eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac); 2989 2990 /* Trap to CPU */ 2991 acl_entry.cfg.precedence = 0; 2992 acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; 2993 2994 return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry); 2995 } 2996 2997 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) 2998 { 2999 const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; 3000 struct switchdev_obj_port_vlan vlan = { 3001 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 3002 .vid = DEFAULT_VLAN_ID, 3003 .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID, 3004 }; 3005 struct net_device *netdev = port_priv->netdev; 3006 struct ethsw_core *ethsw = port_priv->ethsw_data; 3007 struct dpaa2_switch_acl_tbl *acl_tbl; 3008 struct dpsw_fdb_cfg fdb_cfg = {0}; 3009 struct dpsw_if_attr dpsw_if_attr; 3010 struct dpaa2_switch_fdb *fdb; 3011 struct dpsw_acl_cfg acl_cfg; 3012 u16 fdb_id, acl_tbl_id; 3013 int err; 3014 3015 /* Get the Tx queue for this specific port */ 3016 err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 3017 port_priv->idx, &dpsw_if_attr); 3018 if (err) { 3019 netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err); 3020 return err; 3021 } 3022 port_priv->tx_qdid = dpsw_if_attr.qdid; 3023 3024 /* Create a FDB table for this particular switch port */ 3025 fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs; 3026 err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 3027 &fdb_id, &fdb_cfg); 3028 if (err) { 3029 netdev_err(netdev, 
"dpsw_fdb_add err %d\n", err); 3030 return err; 3031 } 3032 3033 /* Find an unused dpaa2_switch_fdb structure and use it */ 3034 fdb = dpaa2_switch_fdb_get_unused(ethsw); 3035 fdb->fdb_id = fdb_id; 3036 fdb->in_use = true; 3037 fdb->bridge_dev = NULL; 3038 port_priv->fdb = fdb; 3039 3040 /* We need to add VLAN 1 as the PVID on this port until it is under a 3041 * bridge since the DPAA2 switch is not able to handle the traffic in a 3042 * VLAN unaware fashion 3043 */ 3044 err = dpaa2_switch_port_vlans_add(netdev, &vlan); 3045 if (err) 3046 return err; 3047 3048 /* Setup the egress flooding domains (broadcast, unknown unicast */ 3049 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 3050 if (err) 3051 return err; 3052 3053 /* Create an ACL table to be used by this switch port */ 3054 acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES; 3055 err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 3056 &acl_tbl_id, &acl_cfg); 3057 if (err) { 3058 netdev_err(netdev, "dpsw_acl_add err %d\n", err); 3059 return err; 3060 } 3061 3062 acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); 3063 acl_tbl->ethsw = ethsw; 3064 acl_tbl->id = acl_tbl_id; 3065 acl_tbl->in_use = true; 3066 acl_tbl->num_rules = 0; 3067 INIT_LIST_HEAD(&acl_tbl->entries); 3068 3069 err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); 3070 if (err) 3071 return err; 3072 3073 err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa); 3074 if (err) 3075 return err; 3076 3077 return err; 3078 } 3079 3080 static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev) 3081 { 3082 struct device *dev = &sw_dev->dev; 3083 struct ethsw_core *ethsw = dev_get_drvdata(dev); 3084 int err; 3085 3086 err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 3087 if (err) 3088 dev_warn(dev, "dpsw_close err %d\n", err); 3089 } 3090 3091 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) 3092 { 3093 dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3094 dpaa2_switch_free_dpio(ethsw); 3095 dpaa2_switch_destroy_rings(ethsw); 3096 dpaa2_switch_drain_bp(ethsw); 3097 dpaa2_switch_free_dpbp(ethsw); 3098 } 3099 3100 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) 3101 { 3102 struct ethsw_port_priv *port_priv; 3103 struct ethsw_core *ethsw; 3104 struct device *dev; 3105 int i; 3106 3107 dev = &sw_dev->dev; 3108 ethsw = dev_get_drvdata(dev); 3109 3110 dpaa2_switch_ctrl_if_teardown(ethsw); 3111 3112 dpaa2_switch_teardown_irqs(sw_dev); 3113 3114 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3115 3116 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3117 port_priv = ethsw->ports[i]; 3118 unregister_netdev(port_priv->netdev); 3119 free_netdev(port_priv->netdev); 3120 } 3121 3122 kfree(ethsw->fdbs); 3123 kfree(ethsw->acls); 3124 kfree(ethsw->ports); 3125 3126 dpaa2_switch_takedown(sw_dev); 3127 3128 destroy_workqueue(ethsw->workqueue); 3129 3130 fsl_mc_portal_free(ethsw->mc_io); 3131 3132 kfree(ethsw); 3133 3134 dev_set_drvdata(dev, NULL); 3135 3136 return 0; 3137 } 3138 3139 static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, 3140 u16 port_idx) 3141 { 3142 struct ethsw_port_priv *port_priv; 3143 struct device *dev = ethsw->dev; 3144 struct net_device *port_netdev; 3145 int err; 3146 3147 port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); 3148 if (!port_netdev) { 3149 dev_err(dev, "alloc_etherdev error\n"); 3150 return -ENOMEM; 3151 } 3152 3153 port_priv = netdev_priv(port_netdev); 3154 port_priv->netdev = port_netdev; 3155 port_priv->ethsw_data = ethsw; 3156 3157 port_priv->idx 
= port_idx; 3158 port_priv->stp_state = BR_STATE_FORWARDING; 3159 3160 SET_NETDEV_DEV(port_netdev, dev); 3161 port_netdev->netdev_ops = &dpaa2_switch_port_ops; 3162 port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops; 3163 3164 port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM; 3165 3166 port_priv->bcast_flood = true; 3167 port_priv->ucast_flood = true; 3168 3169 /* Set MTU limits */ 3170 port_netdev->min_mtu = ETH_MIN_MTU; 3171 port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH; 3172 3173 /* Populate the private port structure so that later calls to 3174 * dpaa2_switch_port_init() can use it. 3175 */ 3176 ethsw->ports[port_idx] = port_priv; 3177 3178 /* The DPAA2 switch's ingress path depends on the VLAN table, 3179 * thus we are not able to disable VLAN filtering. 3180 */ 3181 port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | 3182 NETIF_F_HW_VLAN_STAG_FILTER | 3183 NETIF_F_HW_TC; 3184 3185 err = dpaa2_switch_port_init(port_priv, port_idx); 3186 if (err) 3187 goto err_port_probe; 3188 3189 err = dpaa2_switch_port_set_mac_addr(port_priv); 3190 if (err) 3191 goto err_port_probe; 3192 3193 err = dpaa2_switch_port_set_learning(port_priv, false); 3194 if (err) 3195 goto err_port_probe; 3196 port_priv->learn_ena = false; 3197 3198 return 0; 3199 3200 err_port_probe: 3201 free_netdev(port_netdev); 3202 ethsw->ports[port_idx] = NULL; 3203 3204 return err; 3205 } 3206 3207 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) 3208 { 3209 struct device *dev = &sw_dev->dev; 3210 struct ethsw_core *ethsw; 3211 int i, err; 3212 3213 /* Allocate switch core*/ 3214 ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); 3215 3216 if (!ethsw) 3217 return -ENOMEM; 3218 3219 ethsw->dev = dev; 3220 ethsw->iommu_domain = iommu_get_domain_for_dev(dev); 3221 dev_set_drvdata(dev, ethsw); 3222 3223 err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 3224 ðsw->mc_io); 3225 if (err) { 3226 if (err == -ENXIO) 3227 err = -EPROBE_DEFER; 3228 else 3229 dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); 3230 goto err_free_drvdata; 3231 } 3232 3233 err = dpaa2_switch_init(sw_dev); 3234 if (err) 3235 goto err_free_cmdport; 3236 3237 ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), 3238 GFP_KERNEL); 3239 if (!(ethsw->ports)) { 3240 err = -ENOMEM; 3241 goto err_takedown; 3242 } 3243 3244 ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), 3245 GFP_KERNEL); 3246 if (!ethsw->fdbs) { 3247 err = -ENOMEM; 3248 goto err_free_ports; 3249 } 3250 3251 ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls), 3252 GFP_KERNEL); 3253 if (!ethsw->acls) { 3254 err = -ENOMEM; 3255 goto err_free_fdbs; 3256 } 3257 3258 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3259 err = dpaa2_switch_probe_port(ethsw, i); 3260 if (err) 3261 goto err_free_netdev; 3262 } 3263 3264 /* Add a NAPI instance for each of the Rx queues. The first port's 3265 * net_device will be associated with the instances since we do not have 3266 * different queues for each switch ports. 
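	 * dpaa2_switch_rx() demultiplexes each frame to the right port
	 * net_device based on the ingress interface ID read from the frame
	 * descriptor.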
3267 */ 3268 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 3269 netif_napi_add(ethsw->ports[0]->netdev, 3270 ðsw->fq[i].napi, dpaa2_switch_poll, 3271 NAPI_POLL_WEIGHT); 3272 3273 err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3274 if (err) { 3275 dev_err(ethsw->dev, "dpsw_enable err %d\n", err); 3276 goto err_free_netdev; 3277 } 3278 3279 /* Setup IRQs */ 3280 err = dpaa2_switch_setup_irqs(sw_dev); 3281 if (err) 3282 goto err_stop; 3283 3284 /* Register the netdev only when the entire setup is done and the 3285 * switch port interfaces are ready to receive traffic 3286 */ 3287 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3288 err = register_netdev(ethsw->ports[i]->netdev); 3289 if (err < 0) { 3290 dev_err(dev, "register_netdev error %d\n", err); 3291 goto err_unregister_ports; 3292 } 3293 } 3294 3295 return 0; 3296 3297 err_unregister_ports: 3298 for (i--; i >= 0; i--) 3299 unregister_netdev(ethsw->ports[i]->netdev); 3300 dpaa2_switch_teardown_irqs(sw_dev); 3301 err_stop: 3302 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3303 err_free_netdev: 3304 for (i--; i >= 0; i--) 3305 free_netdev(ethsw->ports[i]->netdev); 3306 kfree(ethsw->acls); 3307 err_free_fdbs: 3308 kfree(ethsw->fdbs); 3309 err_free_ports: 3310 kfree(ethsw->ports); 3311 3312 err_takedown: 3313 dpaa2_switch_takedown(sw_dev); 3314 3315 err_free_cmdport: 3316 fsl_mc_portal_free(ethsw->mc_io); 3317 3318 err_free_drvdata: 3319 kfree(ethsw); 3320 dev_set_drvdata(dev, NULL); 3321 3322 return err; 3323 } 3324 3325 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { 3326 { 3327 .vendor = FSL_MC_VENDOR_FREESCALE, 3328 .obj_type = "dpsw", 3329 }, 3330 { .vendor = 0x0 } 3331 }; 3332 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); 3333 3334 static struct fsl_mc_driver dpaa2_switch_drv = { 3335 .driver = { 3336 .name = KBUILD_MODNAME, 3337 .owner = THIS_MODULE, 3338 }, 3339 .probe = dpaa2_switch_probe, 3340 .remove = dpaa2_switch_remove, 3341 .match_id_table = dpaa2_switch_match_id_table 3342 }; 3343 3344 static struct notifier_block dpaa2_switch_port_nb __read_mostly = { 3345 .notifier_call = dpaa2_switch_port_netdevice_event, 3346 }; 3347 3348 static struct notifier_block dpaa2_switch_port_switchdev_nb = { 3349 .notifier_call = dpaa2_switch_port_event, 3350 }; 3351 3352 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = { 3353 .notifier_call = dpaa2_switch_port_blocking_event, 3354 }; 3355 3356 static int dpaa2_switch_register_notifiers(void) 3357 { 3358 int err; 3359 3360 err = register_netdevice_notifier(&dpaa2_switch_port_nb); 3361 if (err) { 3362 pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err); 3363 return err; 3364 } 3365 3366 err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3367 if (err) { 3368 pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err); 3369 goto err_switchdev_nb; 3370 } 3371 3372 err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3373 if (err) { 3374 pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err); 3375 goto err_switchdev_blocking_nb; 3376 } 3377 3378 return 0; 3379 3380 err_switchdev_blocking_nb: 3381 unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3382 err_switchdev_nb: 3383 unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3384 3385 return err; 3386 } 3387 3388 static void dpaa2_switch_unregister_notifiers(void) 3389 { 3390 int err; 3391 3392 err = 
unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3393 if (err) 3394 pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n", 3395 err); 3396 3397 err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3398 if (err) 3399 pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err); 3400 3401 err = unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3402 if (err) 3403 pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err); 3404 } 3405 3406 static int __init dpaa2_switch_driver_init(void) 3407 { 3408 int err; 3409 3410 err = fsl_mc_driver_register(&dpaa2_switch_drv); 3411 if (err) 3412 return err; 3413 3414 err = dpaa2_switch_register_notifiers(); 3415 if (err) { 3416 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3417 return err; 3418 } 3419 3420 return 0; 3421 } 3422 3423 static void __exit dpaa2_switch_driver_exit(void) 3424 { 3425 dpaa2_switch_unregister_notifiers(); 3426 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3427 } 3428 3429 module_init(dpaa2_switch_driver_init); 3430 module_exit(dpaa2_switch_driver_exit); 3431 3432 MODULE_LICENSE("GPL v2"); 3433 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver"); 3434