// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
#include <net/pkt_cls.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR	8
#define DPSW_MIN_VER_MINOR	9

#define DEFAULT_VLAN_ID		1

static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
{
	return port_priv->fdb->fdb_id;
}

static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->fdbs[i].in_use)
			return &ethsw->fdbs[i];
	return NULL;
}

static struct dpaa2_switch_filter_block *
dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->filter_blocks[i].in_use)
			return &ethsw->filter_blocks[i];
	return NULL;
}

static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
				     struct net_device *bridge_dev)
{
	struct ethsw_port_priv *other_port_priv = NULL;
	struct dpaa2_switch_fdb *fdb;
	struct net_device *other_dev;
	struct list_head *iter;

	/* If we leave a bridge (bridge_dev is NULL), find an unused
	 * FDB and use that.
	 */
	if (!bridge_dev) {
		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);

		/* If there is no unused FDB, we must be the last port that
		 * leaves the last bridge, all the others are standalone. We
		 * can just keep the FDB that we already have.
		 */

		if (!fdb) {
			port_priv->fdb->bridge_dev = NULL;
			return 0;
		}

		port_priv->fdb = fdb;
		port_priv->fdb->in_use = true;
		port_priv->fdb->bridge_dev = NULL;
		return 0;
	}

	/* The below call to netdev_for_each_lower_dev() demands the RTNL lock
	 * being held. Assert on it so that it's easier to catch new code
	 * paths that reach this point without the RTNL lock.
	 */
	ASSERT_RTNL();

	/* If part of a bridge, use the FDB of the first dpaa2 switch interface
	 * to be present in that bridge
	 */
	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		if (other_dev == port_priv->netdev)
			continue;

		other_port_priv = netdev_priv(other_dev);
		break;
	}

	/* The current port is about to change its FDB to the one used by the
	 * first port that joined the bridge.
	 */
	if (other_port_priv) {
		/* The previous FDB is about to become unused, since the
		 * interface is no longer standalone.
111 */ 112 port_priv->fdb->in_use = false; 113 port_priv->fdb->bridge_dev = NULL; 114 115 /* Get a reference to the new FDB */ 116 port_priv->fdb = other_port_priv->fdb; 117 } 118 119 /* Keep track of the new upper bridge device */ 120 port_priv->fdb->bridge_dev = bridge_dev; 121 122 return 0; 123 } 124 125 static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id, 126 enum dpsw_flood_type type, 127 struct dpsw_egress_flood_cfg *cfg) 128 { 129 int i = 0, j; 130 131 memset(cfg, 0, sizeof(*cfg)); 132 133 /* Add all the DPAA2 switch ports found in the same bridging domain to 134 * the egress flooding domain 135 */ 136 for (j = 0; j < ethsw->sw_attr.num_ifs; j++) { 137 if (!ethsw->ports[j]) 138 continue; 139 if (ethsw->ports[j]->fdb->fdb_id != fdb_id) 140 continue; 141 142 if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood) 143 cfg->if_id[i++] = ethsw->ports[j]->idx; 144 else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood) 145 cfg->if_id[i++] = ethsw->ports[j]->idx; 146 } 147 148 /* Add the CTRL interface to the egress flooding domain */ 149 cfg->if_id[i++] = ethsw->sw_attr.num_ifs; 150 151 cfg->fdb_id = fdb_id; 152 cfg->flood_type = type; 153 cfg->num_ifs = i; 154 } 155 156 static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id) 157 { 158 struct dpsw_egress_flood_cfg flood_cfg; 159 int err; 160 161 /* Setup broadcast flooding domain */ 162 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg); 163 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 164 &flood_cfg); 165 if (err) { 166 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 167 return err; 168 } 169 170 /* Setup unknown flooding domain */ 171 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg); 172 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 173 &flood_cfg); 174 if (err) { 175 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 176 return err; 177 } 178 179 return 0; 180 } 181 182 static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 183 dma_addr_t iova_addr) 184 { 185 phys_addr_t phys_addr; 186 187 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; 188 189 return phys_to_virt(phys_addr); 190 } 191 192 static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid) 193 { 194 struct ethsw_core *ethsw = port_priv->ethsw_data; 195 struct dpsw_vlan_cfg vcfg = {0}; 196 int err; 197 198 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 199 err = dpsw_vlan_add(ethsw->mc_io, 0, 200 ethsw->dpsw_handle, vid, &vcfg); 201 if (err) { 202 dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err); 203 return err; 204 } 205 ethsw->vlans[vid] = ETHSW_VLAN_MEMBER; 206 207 return 0; 208 } 209 210 static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv) 211 { 212 struct net_device *netdev = port_priv->netdev; 213 struct dpsw_link_state state; 214 int err; 215 216 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, 217 port_priv->ethsw_data->dpsw_handle, 218 port_priv->idx, &state); 219 if (err) { 220 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); 221 return true; 222 } 223 224 WARN_ONCE(state.up > 1, "Garbage read into link_state"); 225 226 return state.up ? 
true : false; 227 } 228 229 static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid) 230 { 231 struct ethsw_core *ethsw = port_priv->ethsw_data; 232 struct net_device *netdev = port_priv->netdev; 233 struct dpsw_tci_cfg tci_cfg = { 0 }; 234 bool up; 235 int err, ret; 236 237 err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 238 port_priv->idx, &tci_cfg); 239 if (err) { 240 netdev_err(netdev, "dpsw_if_get_tci err %d\n", err); 241 return err; 242 } 243 244 tci_cfg.vlan_id = pvid; 245 246 /* Interface needs to be down to change PVID */ 247 up = dpaa2_switch_port_is_up(port_priv); 248 if (up) { 249 err = dpsw_if_disable(ethsw->mc_io, 0, 250 ethsw->dpsw_handle, 251 port_priv->idx); 252 if (err) { 253 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 254 return err; 255 } 256 } 257 258 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 259 port_priv->idx, &tci_cfg); 260 if (err) { 261 netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); 262 goto set_tci_error; 263 } 264 265 /* Delete previous PVID info and mark the new one */ 266 port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID; 267 port_priv->vlans[pvid] |= ETHSW_VLAN_PVID; 268 port_priv->pvid = pvid; 269 270 set_tci_error: 271 if (up) { 272 ret = dpsw_if_enable(ethsw->mc_io, 0, 273 ethsw->dpsw_handle, 274 port_priv->idx); 275 if (ret) { 276 netdev_err(netdev, "dpsw_if_enable err %d\n", ret); 277 return ret; 278 } 279 } 280 281 return err; 282 } 283 284 static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv, 285 u16 vid, u16 flags) 286 { 287 struct ethsw_core *ethsw = port_priv->ethsw_data; 288 struct net_device *netdev = port_priv->netdev; 289 struct dpsw_vlan_if_cfg vcfg = {0}; 290 int err; 291 292 if (port_priv->vlans[vid]) { 293 netdev_warn(netdev, "VLAN %d already configured\n", vid); 294 return -EEXIST; 295 } 296 297 /* If hit, this VLAN rule will lead the packet into the FDB table 298 * specified in the vlan configuration below 299 */ 300 vcfg.num_ifs = 1; 301 vcfg.if_id[0] = port_priv->idx; 302 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 303 vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID; 304 err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg); 305 if (err) { 306 netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); 307 return err; 308 } 309 310 port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; 311 312 if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { 313 err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0, 314 ethsw->dpsw_handle, 315 vid, &vcfg); 316 if (err) { 317 netdev_err(netdev, 318 "dpsw_vlan_add_if_untagged err %d\n", err); 319 return err; 320 } 321 port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; 322 } 323 324 if (flags & BRIDGE_VLAN_INFO_PVID) { 325 err = dpaa2_switch_port_set_pvid(port_priv, vid); 326 if (err) 327 return err; 328 } 329 330 return 0; 331 } 332 333 static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state) 334 { 335 switch (state) { 336 case BR_STATE_DISABLED: 337 return DPSW_STP_STATE_DISABLED; 338 case BR_STATE_LISTENING: 339 return DPSW_STP_STATE_LISTENING; 340 case BR_STATE_LEARNING: 341 return DPSW_STP_STATE_LEARNING; 342 case BR_STATE_FORWARDING: 343 return DPSW_STP_STATE_FORWARDING; 344 case BR_STATE_BLOCKING: 345 return DPSW_STP_STATE_BLOCKING; 346 default: 347 return DPSW_STP_STATE_DISABLED; 348 } 349 } 350 351 static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state) 352 { 353 struct dpsw_stp_cfg stp_cfg = {0}; 354 int err; 355 u16 vid; 356 357 if (!netif_running(port_priv->netdev) || state == 
port_priv->stp_state) 358 return 0; /* Nothing to do */ 359 360 stp_cfg.state = br_stp_state_to_dpsw(state); 361 for (vid = 0; vid <= VLAN_VID_MASK; vid++) { 362 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { 363 stp_cfg.vlan_id = vid; 364 err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0, 365 port_priv->ethsw_data->dpsw_handle, 366 port_priv->idx, &stp_cfg); 367 if (err) { 368 netdev_err(port_priv->netdev, 369 "dpsw_if_set_stp err %d\n", err); 370 return err; 371 } 372 } 373 } 374 375 port_priv->stp_state = state; 376 377 return 0; 378 } 379 380 static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid) 381 { 382 struct ethsw_port_priv *ppriv_local = NULL; 383 int i, err; 384 385 if (!ethsw->vlans[vid]) 386 return -ENOENT; 387 388 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid); 389 if (err) { 390 dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err); 391 return err; 392 } 393 ethsw->vlans[vid] = 0; 394 395 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 396 ppriv_local = ethsw->ports[i]; 397 if (ppriv_local) 398 ppriv_local->vlans[vid] = 0; 399 } 400 401 return 0; 402 } 403 404 static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv, 405 const unsigned char *addr) 406 { 407 struct dpsw_fdb_unicast_cfg entry = {0}; 408 u16 fdb_id; 409 int err; 410 411 entry.if_egress = port_priv->idx; 412 entry.type = DPSW_FDB_ENTRY_STATIC; 413 ether_addr_copy(entry.mac_addr, addr); 414 415 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 416 err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0, 417 port_priv->ethsw_data->dpsw_handle, 418 fdb_id, &entry); 419 if (err) 420 netdev_err(port_priv->netdev, 421 "dpsw_fdb_add_unicast err %d\n", err); 422 return err; 423 } 424 425 static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv, 426 const unsigned char *addr) 427 { 428 struct dpsw_fdb_unicast_cfg entry = {0}; 429 u16 fdb_id; 430 int err; 431 432 entry.if_egress = port_priv->idx; 433 entry.type = DPSW_FDB_ENTRY_STATIC; 434 ether_addr_copy(entry.mac_addr, addr); 435 436 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 437 err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0, 438 port_priv->ethsw_data->dpsw_handle, 439 fdb_id, &entry); 440 /* Silently discard error for calling multiple times the del command */ 441 if (err && err != -ENXIO) 442 netdev_err(port_priv->netdev, 443 "dpsw_fdb_remove_unicast err %d\n", err); 444 return err; 445 } 446 447 static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv, 448 const unsigned char *addr) 449 { 450 struct dpsw_fdb_multicast_cfg entry = {0}; 451 u16 fdb_id; 452 int err; 453 454 ether_addr_copy(entry.mac_addr, addr); 455 entry.type = DPSW_FDB_ENTRY_STATIC; 456 entry.num_ifs = 1; 457 entry.if_id[0] = port_priv->idx; 458 459 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 460 err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0, 461 port_priv->ethsw_data->dpsw_handle, 462 fdb_id, &entry); 463 /* Silently discard error for calling multiple times the add command */ 464 if (err && err != -ENXIO) 465 netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n", 466 err); 467 return err; 468 } 469 470 static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv, 471 const unsigned char *addr) 472 { 473 struct dpsw_fdb_multicast_cfg entry = {0}; 474 u16 fdb_id; 475 int err; 476 477 ether_addr_copy(entry.mac_addr, addr); 478 entry.type = DPSW_FDB_ENTRY_STATIC; 479 entry.num_ifs = 1; 480 entry.if_id[0] = port_priv->idx; 481 482 fdb_id = 
dpaa2_switch_port_get_fdb_id(port_priv); 483 err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0, 484 port_priv->ethsw_data->dpsw_handle, 485 fdb_id, &entry); 486 /* Silently discard error for calling multiple times the del command */ 487 if (err && err != -ENAVAIL) 488 netdev_err(port_priv->netdev, 489 "dpsw_fdb_remove_multicast err %d\n", err); 490 return err; 491 } 492 493 static void dpaa2_switch_port_get_stats(struct net_device *netdev, 494 struct rtnl_link_stats64 *stats) 495 { 496 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 497 u64 tmp; 498 int err; 499 500 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 501 port_priv->ethsw_data->dpsw_handle, 502 port_priv->idx, 503 DPSW_CNT_ING_FRAME, &stats->rx_packets); 504 if (err) 505 goto error; 506 507 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 508 port_priv->ethsw_data->dpsw_handle, 509 port_priv->idx, 510 DPSW_CNT_EGR_FRAME, &stats->tx_packets); 511 if (err) 512 goto error; 513 514 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 515 port_priv->ethsw_data->dpsw_handle, 516 port_priv->idx, 517 DPSW_CNT_ING_BYTE, &stats->rx_bytes); 518 if (err) 519 goto error; 520 521 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 522 port_priv->ethsw_data->dpsw_handle, 523 port_priv->idx, 524 DPSW_CNT_EGR_BYTE, &stats->tx_bytes); 525 if (err) 526 goto error; 527 528 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 529 port_priv->ethsw_data->dpsw_handle, 530 port_priv->idx, 531 DPSW_CNT_ING_FRAME_DISCARD, 532 &stats->rx_dropped); 533 if (err) 534 goto error; 535 536 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 537 port_priv->ethsw_data->dpsw_handle, 538 port_priv->idx, 539 DPSW_CNT_ING_FLTR_FRAME, 540 &tmp); 541 if (err) 542 goto error; 543 stats->rx_dropped += tmp; 544 545 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 546 port_priv->ethsw_data->dpsw_handle, 547 port_priv->idx, 548 DPSW_CNT_EGR_FRAME_DISCARD, 549 &stats->tx_dropped); 550 if (err) 551 goto error; 552 553 return; 554 555 error: 556 netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); 557 } 558 559 static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev, 560 int attr_id) 561 { 562 return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT); 563 } 564 565 static int dpaa2_switch_port_get_offload_stats(int attr_id, 566 const struct net_device *netdev, 567 void *sp) 568 { 569 switch (attr_id) { 570 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 571 dpaa2_switch_port_get_stats((struct net_device *)netdev, sp); 572 return 0; 573 } 574 575 return -EINVAL; 576 } 577 578 static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) 579 { 580 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 581 int err; 582 583 err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io, 584 0, 585 port_priv->ethsw_data->dpsw_handle, 586 port_priv->idx, 587 (u16)ETHSW_L2_MAX_FRM(mtu)); 588 if (err) { 589 netdev_err(netdev, 590 "dpsw_if_set_max_frame_length() err %d\n", err); 591 return err; 592 } 593 594 netdev->mtu = mtu; 595 return 0; 596 } 597 598 static int dpaa2_switch_port_link_state_update(struct net_device *netdev) 599 { 600 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 601 struct dpsw_link_state state; 602 int err; 603 604 /* When we manage the MAC/PHY using phylink there is no need 605 * to manually update the netif_carrier. 
	 */
	if (dpaa2_switch_port_is_type_phy(port_priv))
		return 0;

	/* Interrupts are received even though no one issued an 'ifconfig up'
	 * on the switch interface. Ignore these link state update interrupts
	 */
	if (!netif_running(netdev))
		return 0;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return err;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	if (state.up != port_priv->link_state) {
		if (state.up) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
		port_priv->link_state = state.up;
	}

	return 0;
}

/* Manage all NAPI instances for the control interface.
 *
 * We only have one RX queue and one Tx Conf queue for all
 * switch ports. Therefore, we only need to enable the NAPI instance once, the
 * first time one of the switch ports runs .dev_open().
 */

static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* a new interface is using the NAPI instance */
	ethsw->napi_users++;

	/* if there is already a user of the instance, return */
	if (ethsw->napi_users > 1)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_enable(&ethsw->fq[i].napi);
}

static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* If we are not the last interface using the NAPI, return */
	ethsw->napi_users--;
	if (ethsw->napi_users)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_disable(&ethsw->fq[i].napi);
}

static int dpaa2_switch_port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	if (!dpaa2_switch_port_is_type_phy(port_priv)) {
		/* Explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
692 */ 693 netif_carrier_off(netdev); 694 } 695 696 err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, 697 port_priv->ethsw_data->dpsw_handle, 698 port_priv->idx); 699 if (err) { 700 netdev_err(netdev, "dpsw_if_enable err %d\n", err); 701 return err; 702 } 703 704 dpaa2_switch_enable_ctrl_if_napi(ethsw); 705 706 if (dpaa2_switch_port_is_type_phy(port_priv)) 707 phylink_start(port_priv->mac->phylink); 708 709 return 0; 710 } 711 712 static int dpaa2_switch_port_stop(struct net_device *netdev) 713 { 714 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 715 struct ethsw_core *ethsw = port_priv->ethsw_data; 716 int err; 717 718 if (dpaa2_switch_port_is_type_phy(port_priv)) { 719 phylink_stop(port_priv->mac->phylink); 720 } else { 721 netif_tx_stop_all_queues(netdev); 722 netif_carrier_off(netdev); 723 } 724 725 err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, 726 port_priv->ethsw_data->dpsw_handle, 727 port_priv->idx); 728 if (err) { 729 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 730 return err; 731 } 732 733 dpaa2_switch_disable_ctrl_if_napi(ethsw); 734 735 return 0; 736 } 737 738 static int dpaa2_switch_port_parent_id(struct net_device *dev, 739 struct netdev_phys_item_id *ppid) 740 { 741 struct ethsw_port_priv *port_priv = netdev_priv(dev); 742 743 ppid->id_len = 1; 744 ppid->id[0] = port_priv->ethsw_data->dev_id; 745 746 return 0; 747 } 748 749 static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name, 750 size_t len) 751 { 752 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 753 int err; 754 755 err = snprintf(name, len, "p%d", port_priv->idx); 756 if (err >= len) 757 return -EINVAL; 758 759 return 0; 760 } 761 762 struct ethsw_dump_ctx { 763 struct net_device *dev; 764 struct sk_buff *skb; 765 struct netlink_callback *cb; 766 int idx; 767 }; 768 769 static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, 770 struct ethsw_dump_ctx *dump) 771 { 772 int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; 773 u32 portid = NETLINK_CB(dump->cb->skb).portid; 774 u32 seq = dump->cb->nlh->nlmsg_seq; 775 struct nlmsghdr *nlh; 776 struct ndmsg *ndm; 777 778 if (dump->idx < dump->cb->args[2]) 779 goto skip; 780 781 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, 782 sizeof(*ndm), NLM_F_MULTI); 783 if (!nlh) 784 return -EMSGSIZE; 785 786 ndm = nlmsg_data(nlh); 787 ndm->ndm_family = AF_BRIDGE; 788 ndm->ndm_pad1 = 0; 789 ndm->ndm_pad2 = 0; 790 ndm->ndm_flags = NTF_SELF; 791 ndm->ndm_type = 0; 792 ndm->ndm_ifindex = dump->dev->ifindex; 793 ndm->ndm_state = is_dynamic ? 
					 NUD_REACHABLE : NUD_NOARP;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
					     struct ethsw_port_priv *port_priv)
{
	int idx = port_priv->idx;
	int valid;

	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		valid = entry->if_info == port_priv->idx;
	else
		valid = entry->if_mask[idx / 8] & BIT(idx % 8);

	return valid;
}

static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
				    dpaa2_switch_fdb_cb_t cb, void *data)
{
	struct net_device *net_dev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct device *dev = net_dev->dev.parent;
	struct fdb_dump_entry *fdb_entries;
	struct fdb_dump_entry fdb_entry;
	dma_addr_t fdb_dump_iova;
	u16 num_fdb_entries;
	u32 fdb_dump_size;
	int err = 0, i;
	u8 *dma_mem;
	u16 fdb_id;

	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, fdb_dump_iova)) {
		netdev_err(net_dev, "dma_map_single() failed\n");
		err = -ENOMEM;
		goto err_map;
	}

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
	if (err) {
		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
		goto err_dump;
	}

	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);

	fdb_entries = (struct fdb_dump_entry *)dma_mem;
	for (i = 0; i < num_fdb_entries; i++) {
		fdb_entry = fdb_entries[i];

		err = cb(port_priv, &fdb_entry, data);
		if (err)
			goto end;
	}

end:
	kfree(dma_mem);

	return 0;

err_dump:
	/* Unmap with the same direction that was used for the mapping */
	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
err_map:
	kfree(dma_mem);
	return err;
}

static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}

static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
				      struct net_device *net_dev,
				      struct net_device *filter_dev, int *idx)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_dump_ctx dump = {
		.dev = net_dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
					   struct fdb_dump_entry *fdb_entry,
					   void *data __always_unused)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr); 925 926 return 0; 927 } 928 929 static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv) 930 { 931 dpaa2_switch_fdb_iterate(port_priv, 932 dpaa2_switch_fdb_entry_fast_age, NULL); 933 } 934 935 static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto, 936 u16 vid) 937 { 938 struct switchdev_obj_port_vlan vlan = { 939 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 940 .vid = vid, 941 .obj.orig_dev = netdev, 942 /* This API only allows programming tagged, non-PVID VIDs */ 943 .flags = 0, 944 }; 945 946 return dpaa2_switch_port_vlans_add(netdev, &vlan); 947 } 948 949 static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto, 950 u16 vid) 951 { 952 struct switchdev_obj_port_vlan vlan = { 953 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 954 .vid = vid, 955 .obj.orig_dev = netdev, 956 /* This API only allows programming tagged, non-PVID VIDs */ 957 .flags = 0, 958 }; 959 960 return dpaa2_switch_port_vlans_del(netdev, &vlan); 961 } 962 963 static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) 964 { 965 struct ethsw_core *ethsw = port_priv->ethsw_data; 966 struct net_device *net_dev = port_priv->netdev; 967 struct device *dev = net_dev->dev.parent; 968 u8 mac_addr[ETH_ALEN]; 969 int err; 970 971 if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR)) 972 return 0; 973 974 /* Get firmware address, if any */ 975 err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle, 976 port_priv->idx, mac_addr); 977 if (err) { 978 dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n"); 979 return err; 980 } 981 982 /* First check if firmware has any address configured by bootloader */ 983 if (!is_zero_ether_addr(mac_addr)) { 984 eth_hw_addr_set(net_dev, mac_addr); 985 } else { 986 /* No MAC address configured, fill in net_dev->dev_addr 987 * with a random one 988 */ 989 eth_hw_addr_random(net_dev); 990 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); 991 992 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all 993 * practical purposes, this will be our "permanent" mac address, 994 * at least until the next reboot. This move will also permit 995 * register_netdevice() to properly fill up net_dev->perm_addr. 996 */ 997 net_dev->addr_assign_type = NET_ADDR_PERM; 998 } 999 1000 return 0; 1001 } 1002 1003 static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw, 1004 const struct dpaa2_fd *fd) 1005 { 1006 struct device *dev = ethsw->dev; 1007 unsigned char *buffer_start; 1008 struct sk_buff **skbh, *skb; 1009 dma_addr_t fd_addr; 1010 1011 fd_addr = dpaa2_fd_get_addr(fd); 1012 skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr); 1013 1014 skb = *skbh; 1015 buffer_start = (unsigned char *)skbh; 1016 1017 dma_unmap_single(dev, fd_addr, 1018 skb_tail_pointer(skb) - buffer_start, 1019 DMA_TO_DEVICE); 1020 1021 /* Move on with skb release */ 1022 dev_kfree_skb(skb); 1023 } 1024 1025 static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw, 1026 struct sk_buff *skb, 1027 struct dpaa2_fd *fd) 1028 { 1029 struct device *dev = ethsw->dev; 1030 struct sk_buff **skbh; 1031 dma_addr_t addr; 1032 u8 *buff_start; 1033 void *hwa; 1034 1035 buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET - 1036 DPAA2_SWITCH_TX_BUF_ALIGN, 1037 DPAA2_SWITCH_TX_BUF_ALIGN); 1038 1039 /* Clear FAS to have consistent values for TX confirmation. 
It is 1040 * located in the first 8 bytes of the buffer's hardware annotation 1041 * area 1042 */ 1043 hwa = buff_start + DPAA2_SWITCH_SWA_SIZE; 1044 memset(hwa, 0, 8); 1045 1046 /* Store a backpointer to the skb at the beginning of the buffer 1047 * (in the private data area) such that we can release it 1048 * on Tx confirm 1049 */ 1050 skbh = (struct sk_buff **)buff_start; 1051 *skbh = skb; 1052 1053 addr = dma_map_single(dev, buff_start, 1054 skb_tail_pointer(skb) - buff_start, 1055 DMA_TO_DEVICE); 1056 if (unlikely(dma_mapping_error(dev, addr))) 1057 return -ENOMEM; 1058 1059 /* Setup the FD fields */ 1060 memset(fd, 0, sizeof(*fd)); 1061 1062 dpaa2_fd_set_addr(fd, addr); 1063 dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start)); 1064 dpaa2_fd_set_len(fd, skb->len); 1065 dpaa2_fd_set_format(fd, dpaa2_fd_single); 1066 1067 return 0; 1068 } 1069 1070 static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb, 1071 struct net_device *net_dev) 1072 { 1073 struct ethsw_port_priv *port_priv = netdev_priv(net_dev); 1074 struct ethsw_core *ethsw = port_priv->ethsw_data; 1075 int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES; 1076 struct dpaa2_fd fd; 1077 int err; 1078 1079 if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) { 1080 struct sk_buff *ns; 1081 1082 ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM); 1083 if (unlikely(!ns)) { 1084 net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name); 1085 goto err_free_skb; 1086 } 1087 dev_consume_skb_any(skb); 1088 skb = ns; 1089 } 1090 1091 /* We'll be holding a back-reference to the skb until Tx confirmation */ 1092 skb = skb_unshare(skb, GFP_ATOMIC); 1093 if (unlikely(!skb)) { 1094 /* skb_unshare() has already freed the skb */ 1095 net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name); 1096 goto err_exit; 1097 } 1098 1099 /* At this stage, we do not support non-linear skbs so just try to 1100 * linearize the skb and if that's not working, just drop the packet. 
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}

static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
				 struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return dpaa2_switch_cls_flower_replace(filter_block, f);
	case FLOW_CLS_DESTROY:
		return dpaa2_switch_cls_flower_destroy(filter_block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
				   struct tc_cls_matchall_offload *f)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dpaa2_switch_cls_matchall_replace(block, f);
	case TC_CLSMATCHALL_DESTROY:
		return dpaa2_switch_cls_matchall_destroy(block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
	case TC_SETUP_CLSMATCHALL:
		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(dpaa2_switch_block_cb_list);

static int
dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		return err;
	}

	block->ports |= BIT(port_priv->idx);
	port_priv->filter_block = block;

	return 0;
}

static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
				 struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block != block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
		return err;
	}

	block->ports &= ~BIT(port_priv->idx);
	port_priv->filter_block = NULL;
	return 0;
}

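/* Bind a switch port to a tc filter block. Ports that share the same tc
 * block are backed by the same hardware ACL table, so when a port joins a
 * block we first replay the block's mirror entries on it, then detach the
 * port from its previous (possibly now unused) ACL table and attach it to
 * the block's table.
 */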
static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv, 1232 struct dpaa2_switch_filter_block *block) 1233 { 1234 struct dpaa2_switch_filter_block *old_block = port_priv->filter_block; 1235 int err; 1236 1237 /* Offload all the mirror entries found in the block on this new port 1238 * joining it. 1239 */ 1240 err = dpaa2_switch_block_offload_mirror(block, port_priv); 1241 if (err) 1242 return err; 1243 1244 /* If the port is already bound to this ACL table then do nothing. This 1245 * can happen when this port is the first one to join a tc block 1246 */ 1247 if (port_priv->filter_block == block) 1248 return 0; 1249 1250 err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block); 1251 if (err) 1252 return err; 1253 1254 /* Mark the previous ACL table as being unused if this was the last 1255 * port that was using it. 1256 */ 1257 if (old_block->ports == 0) 1258 old_block->in_use = false; 1259 1260 return dpaa2_switch_port_acl_tbl_bind(port_priv, block); 1261 } 1262 1263 static int 1264 dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, 1265 struct dpaa2_switch_filter_block *block) 1266 { 1267 struct ethsw_core *ethsw = port_priv->ethsw_data; 1268 struct dpaa2_switch_filter_block *new_block; 1269 int err; 1270 1271 /* Unoffload all the mirror entries found in the block from the 1272 * port leaving it. 1273 */ 1274 err = dpaa2_switch_block_unoffload_mirror(block, port_priv); 1275 if (err) 1276 return err; 1277 1278 /* We are the last port that leaves a block (an ACL table). 1279 * We'll continue to use this table. 1280 */ 1281 if (block->ports == BIT(port_priv->idx)) 1282 return 0; 1283 1284 err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block); 1285 if (err) 1286 return err; 1287 1288 if (block->ports == 0) 1289 block->in_use = false; 1290 1291 new_block = dpaa2_switch_filter_block_get_unused(ethsw); 1292 new_block->in_use = true; 1293 return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block); 1294 } 1295 1296 static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, 1297 struct flow_block_offload *f) 1298 { 1299 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1300 struct ethsw_core *ethsw = port_priv->ethsw_data; 1301 struct dpaa2_switch_filter_block *filter_block; 1302 struct flow_block_cb *block_cb; 1303 bool register_block = false; 1304 int err; 1305 1306 block_cb = flow_block_cb_lookup(f->block, 1307 dpaa2_switch_port_setup_tc_block_cb_ig, 1308 ethsw); 1309 1310 if (!block_cb) { 1311 /* If the filter block is not already known, then this port 1312 * must be the first to join it. 
In this case, we can just 1313 * continue to use our private table 1314 */ 1315 filter_block = port_priv->filter_block; 1316 1317 block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig, 1318 ethsw, filter_block, NULL); 1319 if (IS_ERR(block_cb)) 1320 return PTR_ERR(block_cb); 1321 1322 register_block = true; 1323 } else { 1324 filter_block = flow_block_cb_priv(block_cb); 1325 } 1326 1327 flow_block_cb_incref(block_cb); 1328 err = dpaa2_switch_port_block_bind(port_priv, filter_block); 1329 if (err) 1330 goto err_block_bind; 1331 1332 if (register_block) { 1333 flow_block_cb_add(block_cb, f); 1334 list_add_tail(&block_cb->driver_list, 1335 &dpaa2_switch_block_cb_list); 1336 } 1337 1338 return 0; 1339 1340 err_block_bind: 1341 if (!flow_block_cb_decref(block_cb)) 1342 flow_block_cb_free(block_cb); 1343 return err; 1344 } 1345 1346 static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, 1347 struct flow_block_offload *f) 1348 { 1349 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1350 struct ethsw_core *ethsw = port_priv->ethsw_data; 1351 struct dpaa2_switch_filter_block *filter_block; 1352 struct flow_block_cb *block_cb; 1353 int err; 1354 1355 block_cb = flow_block_cb_lookup(f->block, 1356 dpaa2_switch_port_setup_tc_block_cb_ig, 1357 ethsw); 1358 if (!block_cb) 1359 return; 1360 1361 filter_block = flow_block_cb_priv(block_cb); 1362 err = dpaa2_switch_port_block_unbind(port_priv, filter_block); 1363 if (!err && !flow_block_cb_decref(block_cb)) { 1364 flow_block_cb_remove(block_cb, f); 1365 list_del(&block_cb->driver_list); 1366 } 1367 } 1368 1369 static int dpaa2_switch_setup_tc_block(struct net_device *netdev, 1370 struct flow_block_offload *f) 1371 { 1372 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 1373 return -EOPNOTSUPP; 1374 1375 f->driver_block_list = &dpaa2_switch_block_cb_list; 1376 1377 switch (f->command) { 1378 case FLOW_BLOCK_BIND: 1379 return dpaa2_switch_setup_tc_block_bind(netdev, f); 1380 case FLOW_BLOCK_UNBIND: 1381 dpaa2_switch_setup_tc_block_unbind(netdev, f); 1382 return 0; 1383 default: 1384 return -EOPNOTSUPP; 1385 } 1386 } 1387 1388 static int dpaa2_switch_port_setup_tc(struct net_device *netdev, 1389 enum tc_setup_type type, 1390 void *type_data) 1391 { 1392 switch (type) { 1393 case TC_SETUP_BLOCK: { 1394 return dpaa2_switch_setup_tc_block(netdev, type_data); 1395 } 1396 default: 1397 return -EOPNOTSUPP; 1398 } 1399 1400 return 0; 1401 } 1402 1403 static const struct net_device_ops dpaa2_switch_port_ops = { 1404 .ndo_open = dpaa2_switch_port_open, 1405 .ndo_stop = dpaa2_switch_port_stop, 1406 1407 .ndo_set_mac_address = eth_mac_addr, 1408 .ndo_get_stats64 = dpaa2_switch_port_get_stats, 1409 .ndo_change_mtu = dpaa2_switch_port_change_mtu, 1410 .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats, 1411 .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats, 1412 .ndo_fdb_dump = dpaa2_switch_port_fdb_dump, 1413 .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add, 1414 .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill, 1415 1416 .ndo_start_xmit = dpaa2_switch_port_tx, 1417 .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, 1418 .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, 1419 .ndo_setup_tc = dpaa2_switch_port_setup_tc, 1420 }; 1421 1422 bool dpaa2_switch_port_dev_check(const struct net_device *netdev) 1423 { 1424 return netdev->netdev_ops == &dpaa2_switch_port_ops; 1425 } 1426 1427 static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) 1428 { 1429 struct 
fsl_mc_device *dpsw_port_dev, *dpmac_dev; 1430 struct dpaa2_mac *mac; 1431 int err; 1432 1433 dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent); 1434 dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx); 1435 1436 if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) 1437 return PTR_ERR(dpmac_dev); 1438 1439 if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) 1440 return 0; 1441 1442 mac = kzalloc(sizeof(*mac), GFP_KERNEL); 1443 if (!mac) 1444 return -ENOMEM; 1445 1446 mac->mc_dev = dpmac_dev; 1447 mac->mc_io = port_priv->ethsw_data->mc_io; 1448 mac->net_dev = port_priv->netdev; 1449 1450 err = dpaa2_mac_open(mac); 1451 if (err) 1452 goto err_free_mac; 1453 port_priv->mac = mac; 1454 1455 if (dpaa2_switch_port_is_type_phy(port_priv)) { 1456 err = dpaa2_mac_connect(mac); 1457 if (err) { 1458 netdev_err(port_priv->netdev, 1459 "Error connecting to the MAC endpoint %pe\n", 1460 ERR_PTR(err)); 1461 goto err_close_mac; 1462 } 1463 } 1464 1465 return 0; 1466 1467 err_close_mac: 1468 dpaa2_mac_close(mac); 1469 port_priv->mac = NULL; 1470 err_free_mac: 1471 kfree(mac); 1472 return err; 1473 } 1474 1475 static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv) 1476 { 1477 if (dpaa2_switch_port_is_type_phy(port_priv)) 1478 dpaa2_mac_disconnect(port_priv->mac); 1479 1480 if (!dpaa2_switch_port_has_mac(port_priv)) 1481 return; 1482 1483 dpaa2_mac_close(port_priv->mac); 1484 kfree(port_priv->mac); 1485 port_priv->mac = NULL; 1486 } 1487 1488 static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) 1489 { 1490 struct device *dev = (struct device *)arg; 1491 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1492 struct ethsw_port_priv *port_priv; 1493 u32 status = ~0; 1494 int err, if_id; 1495 1496 err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, 1497 DPSW_IRQ_INDEX_IF, &status); 1498 if (err) { 1499 dev_err(dev, "Can't get irq status (err %d)\n", err); 1500 goto out; 1501 } 1502 1503 if_id = (status & 0xFFFF0000) >> 16; 1504 port_priv = ethsw->ports[if_id]; 1505 1506 if (status & DPSW_IRQ_EVENT_LINK_CHANGED) { 1507 dpaa2_switch_port_link_state_update(port_priv->netdev); 1508 dpaa2_switch_port_set_mac_addr(port_priv); 1509 } 1510 1511 if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) { 1512 rtnl_lock(); 1513 if (dpaa2_switch_port_has_mac(port_priv)) 1514 dpaa2_switch_port_disconnect_mac(port_priv); 1515 else 1516 dpaa2_switch_port_connect_mac(port_priv); 1517 rtnl_unlock(); 1518 } 1519 1520 out: 1521 err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, 1522 DPSW_IRQ_INDEX_IF, status); 1523 if (err) 1524 dev_err(dev, "Can't clear irq status (err %d)\n", err); 1525 1526 return IRQ_HANDLED; 1527 } 1528 1529 static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) 1530 { 1531 struct device *dev = &sw_dev->dev; 1532 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1533 u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED; 1534 struct fsl_mc_device_irq *irq; 1535 int err; 1536 1537 err = fsl_mc_allocate_irqs(sw_dev); 1538 if (err) { 1539 dev_err(dev, "MC irqs allocation failed\n"); 1540 return err; 1541 } 1542 1543 if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) { 1544 err = -EINVAL; 1545 goto free_irq; 1546 } 1547 1548 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1549 DPSW_IRQ_INDEX_IF, 0); 1550 if (err) { 1551 dev_err(dev, "dpsw_set_irq_enable err %d\n", err); 1552 goto free_irq; 1553 } 1554 1555 irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF]; 1556 1557 err = devm_request_threaded_irq(dev, irq->virq, 
NULL, 1558 dpaa2_switch_irq0_handler_thread, 1559 IRQF_NO_SUSPEND | IRQF_ONESHOT, 1560 dev_name(dev), dev); 1561 if (err) { 1562 dev_err(dev, "devm_request_threaded_irq(): %d\n", err); 1563 goto free_irq; 1564 } 1565 1566 err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle, 1567 DPSW_IRQ_INDEX_IF, mask); 1568 if (err) { 1569 dev_err(dev, "dpsw_set_irq_mask(): %d\n", err); 1570 goto free_devm_irq; 1571 } 1572 1573 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1574 DPSW_IRQ_INDEX_IF, 1); 1575 if (err) { 1576 dev_err(dev, "dpsw_set_irq_enable(): %d\n", err); 1577 goto free_devm_irq; 1578 } 1579 1580 return 0; 1581 1582 free_devm_irq: 1583 devm_free_irq(dev, irq->virq, dev); 1584 free_irq: 1585 fsl_mc_free_irqs(sw_dev); 1586 return err; 1587 } 1588 1589 static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev) 1590 { 1591 struct device *dev = &sw_dev->dev; 1592 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1593 int err; 1594 1595 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1596 DPSW_IRQ_INDEX_IF, 0); 1597 if (err) 1598 dev_err(dev, "dpsw_set_irq_enable err %d\n", err); 1599 1600 fsl_mc_free_irqs(sw_dev); 1601 } 1602 1603 static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable) 1604 { 1605 struct ethsw_core *ethsw = port_priv->ethsw_data; 1606 enum dpsw_learning_mode learn_mode; 1607 int err; 1608 1609 if (enable) 1610 learn_mode = DPSW_LEARNING_MODE_HW; 1611 else 1612 learn_mode = DPSW_LEARNING_MODE_DIS; 1613 1614 err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 1615 port_priv->idx, learn_mode); 1616 if (err) 1617 netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err); 1618 1619 if (!enable) 1620 dpaa2_switch_port_fast_age(port_priv); 1621 1622 return err; 1623 } 1624 1625 static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev, 1626 u8 state) 1627 { 1628 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1629 int err; 1630 1631 err = dpaa2_switch_port_set_stp_state(port_priv, state); 1632 if (err) 1633 return err; 1634 1635 switch (state) { 1636 case BR_STATE_DISABLED: 1637 case BR_STATE_BLOCKING: 1638 case BR_STATE_LISTENING: 1639 err = dpaa2_switch_port_set_learning(port_priv, false); 1640 break; 1641 case BR_STATE_LEARNING: 1642 case BR_STATE_FORWARDING: 1643 err = dpaa2_switch_port_set_learning(port_priv, 1644 port_priv->learn_ena); 1645 break; 1646 } 1647 1648 return err; 1649 } 1650 1651 static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv, 1652 struct switchdev_brport_flags flags) 1653 { 1654 struct ethsw_core *ethsw = port_priv->ethsw_data; 1655 1656 if (flags.mask & BR_BCAST_FLOOD) 1657 port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD); 1658 1659 if (flags.mask & BR_FLOOD) 1660 port_priv->ucast_flood = !!(flags.val & BR_FLOOD); 1661 1662 return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 1663 } 1664 1665 static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev, 1666 struct switchdev_brport_flags flags, 1667 struct netlink_ext_ack *extack) 1668 { 1669 if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD | 1670 BR_MCAST_FLOOD)) 1671 return -EINVAL; 1672 1673 if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) { 1674 bool multicast = !!(flags.val & BR_MCAST_FLOOD); 1675 bool unicast = !!(flags.val & BR_FLOOD); 1676 1677 if (unicast != multicast) { 1678 NL_SET_ERR_MSG_MOD(extack, 1679 "Cannot configure multicast flooding independently of unicast"); 1680 return -EINVAL; 1681 } 
	}

	return 0;
}

static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
					  struct switchdev_brport_flags flags,
					  struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
		if (err)
			return err;
		port_priv->learn_ena = learn_ena;
	}

	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
		err = dpaa2_switch_port_flood(port_priv, flags);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = dpaa2_switch_port_attr_stp_state_set(netdev,
							   attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!attr->u.vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "The DPAA2 switch does not support VLAN-unaware operation");
			return -EOPNOTSUPP;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_attr *attr = &ethsw->sw_attr;
	int err = 0;

	/* Make sure that the VLAN is not already configured
	 * on the switch port
	 */
	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
		return -EEXIST;

	/* Check if there is space for a new VLAN */
	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
		return err;
	}
	if (attr->max_vlans - attr->num_vlans < 1)
		return -ENOSPC;

	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
		/* this is a new VLAN */
		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
		if (err)
			return err;

		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
	}

	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}

static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
					    const unsigned char *addr)
{
	struct netdev_hw_addr_list *list = (is_uc) ?
&netdev->uc : &netdev->mc; 1794 struct netdev_hw_addr *ha; 1795 1796 netif_addr_lock_bh(netdev); 1797 list_for_each_entry(ha, &list->list, list) { 1798 if (ether_addr_equal(ha->addr, addr)) { 1799 netif_addr_unlock_bh(netdev); 1800 return 1; 1801 } 1802 } 1803 netif_addr_unlock_bh(netdev); 1804 return 0; 1805 } 1806 1807 static int dpaa2_switch_port_mdb_add(struct net_device *netdev, 1808 const struct switchdev_obj_port_mdb *mdb) 1809 { 1810 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1811 int err; 1812 1813 /* Check if address is already set on this port */ 1814 if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) 1815 return -EEXIST; 1816 1817 err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr); 1818 if (err) 1819 return err; 1820 1821 err = dev_mc_add(netdev, mdb->addr); 1822 if (err) { 1823 netdev_err(netdev, "dev_mc_add err %d\n", err); 1824 dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); 1825 } 1826 1827 return err; 1828 } 1829 1830 static int dpaa2_switch_port_obj_add(struct net_device *netdev, 1831 const struct switchdev_obj *obj) 1832 { 1833 int err; 1834 1835 switch (obj->id) { 1836 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1837 err = dpaa2_switch_port_vlans_add(netdev, 1838 SWITCHDEV_OBJ_PORT_VLAN(obj)); 1839 break; 1840 case SWITCHDEV_OBJ_ID_PORT_MDB: 1841 err = dpaa2_switch_port_mdb_add(netdev, 1842 SWITCHDEV_OBJ_PORT_MDB(obj)); 1843 break; 1844 default: 1845 err = -EOPNOTSUPP; 1846 break; 1847 } 1848 1849 return err; 1850 } 1851 1852 static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid) 1853 { 1854 struct ethsw_core *ethsw = port_priv->ethsw_data; 1855 struct net_device *netdev = port_priv->netdev; 1856 struct dpsw_vlan_if_cfg vcfg; 1857 int i, err; 1858 1859 if (!port_priv->vlans[vid]) 1860 return -ENOENT; 1861 1862 if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) { 1863 /* If we are deleting the PVID of a port, use VLAN 4095 instead 1864 * as we are sure that neither the bridge nor the 8021q module 1865 * will use it 1866 */ 1867 err = dpaa2_switch_port_set_pvid(port_priv, 4095); 1868 if (err) 1869 return err; 1870 } 1871 1872 vcfg.num_ifs = 1; 1873 vcfg.if_id[0] = port_priv->idx; 1874 if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) { 1875 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, 1876 ethsw->dpsw_handle, 1877 vid, &vcfg); 1878 if (err) { 1879 netdev_err(netdev, 1880 "dpsw_vlan_remove_if_untagged err %d\n", 1881 err); 1882 } 1883 port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED; 1884 } 1885 1886 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { 1887 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 1888 vid, &vcfg); 1889 if (err) { 1890 netdev_err(netdev, 1891 "dpsw_vlan_remove_if err %d\n", err); 1892 return err; 1893 } 1894 port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER; 1895 1896 /* Delete VLAN from switch if it is no longer configured on 1897 * any port 1898 */ 1899 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 1900 if (ethsw->ports[i] && 1901 ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) 1902 return 0; /* Found a port member in VID */ 1903 } 1904 1905 ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL; 1906 1907 err = dpaa2_switch_dellink(ethsw, vid); 1908 if (err) 1909 return err; 1910 } 1911 1912 return 0; 1913 } 1914 1915 int dpaa2_switch_port_vlans_del(struct net_device *netdev, 1916 const struct switchdev_obj_port_vlan *vlan) 1917 { 1918 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1919 1920 if (netif_is_bridge_master(vlan->obj.orig_dev)) 1921 return -EOPNOTSUPP; 1922 1923 return 
dpaa2_switch_port_del_vlan(port_priv, vlan->vid); 1924 } 1925 1926 static int dpaa2_switch_port_mdb_del(struct net_device *netdev, 1927 const struct switchdev_obj_port_mdb *mdb) 1928 { 1929 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1930 int err; 1931 1932 if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) 1933 return -ENOENT; 1934 1935 err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); 1936 if (err) 1937 return err; 1938 1939 err = dev_mc_del(netdev, mdb->addr); 1940 if (err) { 1941 netdev_err(netdev, "dev_mc_del err %d\n", err); 1942 return err; 1943 } 1944 1945 return err; 1946 } 1947 1948 static int dpaa2_switch_port_obj_del(struct net_device *netdev, 1949 const struct switchdev_obj *obj) 1950 { 1951 int err; 1952 1953 switch (obj->id) { 1954 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1955 err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj)); 1956 break; 1957 case SWITCHDEV_OBJ_ID_PORT_MDB: 1958 err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj)); 1959 break; 1960 default: 1961 err = -EOPNOTSUPP; 1962 break; 1963 } 1964 return err; 1965 } 1966 1967 static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, 1968 struct switchdev_notifier_port_attr_info *ptr) 1969 { 1970 int err; 1971 1972 err = switchdev_handle_port_attr_set(netdev, ptr, 1973 dpaa2_switch_port_dev_check, 1974 dpaa2_switch_port_attr_set); 1975 return notifier_from_errno(err); 1976 } 1977 1978 static struct notifier_block dpaa2_switch_port_switchdev_nb; 1979 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb; 1980 1981 static int dpaa2_switch_port_bridge_join(struct net_device *netdev, 1982 struct net_device *upper_dev, 1983 struct netlink_ext_ack *extack) 1984 { 1985 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1986 struct ethsw_core *ethsw = port_priv->ethsw_data; 1987 struct ethsw_port_priv *other_port_priv; 1988 struct net_device *other_dev; 1989 struct list_head *iter; 1990 bool learn_ena; 1991 int err; 1992 1993 netdev_for_each_lower_dev(upper_dev, other_dev, iter) { 1994 if (!dpaa2_switch_port_dev_check(other_dev)) 1995 continue; 1996 1997 other_port_priv = netdev_priv(other_dev); 1998 if (other_port_priv->ethsw_data != port_priv->ethsw_data) { 1999 NL_SET_ERR_MSG_MOD(extack, 2000 "Interface from a different DPSW is in the bridge already"); 2001 return -EINVAL; 2002 } 2003 } 2004 2005 /* Delete the previously manually installed VLAN 1 */ 2006 err = dpaa2_switch_port_del_vlan(port_priv, 1); 2007 if (err) 2008 return err; 2009 2010 dpaa2_switch_port_set_fdb(port_priv, upper_dev); 2011 2012 /* Inherit the initial bridge port learning state */ 2013 learn_ena = br_port_flag_is_set(netdev, BR_LEARNING); 2014 err = dpaa2_switch_port_set_learning(port_priv, learn_ena); 2015 port_priv->learn_ena = learn_ena; 2016 2017 /* Setup the egress flood policy (broadcast, unknown unicast) */ 2018 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 2019 if (err) 2020 goto err_egress_flood; 2021 2022 err = switchdev_bridge_port_offload(netdev, netdev, NULL, 2023 &dpaa2_switch_port_switchdev_nb, 2024 &dpaa2_switch_port_switchdev_blocking_nb, 2025 false, extack); 2026 if (err) 2027 goto err_switchdev_offload; 2028 2029 return 0; 2030 2031 err_switchdev_offload: 2032 err_egress_flood: 2033 dpaa2_switch_port_set_fdb(port_priv, NULL); 2034 return err; 2035 } 2036 2037 static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg) 2038 { 2039 __be16 vlan_proto = htons(ETH_P_8021Q); 2040 2041 if (vdev) 
2042 vlan_proto = vlan_dev_vlan_proto(vdev); 2043 2044 return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid); 2045 } 2046 2047 static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg) 2048 { 2049 __be16 vlan_proto = htons(ETH_P_8021Q); 2050 2051 if (vdev) 2052 vlan_proto = vlan_dev_vlan_proto(vdev); 2053 2054 return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid); 2055 } 2056 2057 static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev) 2058 { 2059 switchdev_bridge_port_unoffload(netdev, NULL, 2060 &dpaa2_switch_port_switchdev_nb, 2061 &dpaa2_switch_port_switchdev_blocking_nb); 2062 } 2063 2064 static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) 2065 { 2066 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 2067 struct dpaa2_switch_fdb *old_fdb = port_priv->fdb; 2068 struct ethsw_core *ethsw = port_priv->ethsw_data; 2069 int err; 2070 2071 /* First of all, fast age any learn FDB addresses on this switch port */ 2072 dpaa2_switch_port_fast_age(port_priv); 2073 2074 /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN 2075 * upper devices or otherwise from the FDB table that we are about to 2076 * leave 2077 */ 2078 err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev); 2079 if (err) 2080 netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err); 2081 2082 dpaa2_switch_port_set_fdb(port_priv, NULL); 2083 2084 /* Restore all RX VLANs into the new FDB table that we just joined */ 2085 err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev); 2086 if (err) 2087 netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err); 2088 2089 /* Reset the flooding state to denote that this port can send any 2090 * packet in standalone mode. With this, we are also ensuring that any 2091 * later bridge join will have the flooding flag on. 2092 */ 2093 port_priv->bcast_flood = true; 2094 port_priv->ucast_flood = true; 2095 2096 /* Setup the egress flood policy (broadcast, unknown unicast). 2097 * When the port is not under a bridge, only the CTRL interface is part 2098 * of the flooding domain besides the actual port 2099 */ 2100 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 2101 if (err) 2102 return err; 2103 2104 /* Recreate the egress flood domain of the FDB that we just left */ 2105 err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id); 2106 if (err) 2107 return err; 2108 2109 /* No HW learning when not under a bridge */ 2110 err = dpaa2_switch_port_set_learning(port_priv, false); 2111 if (err) 2112 return err; 2113 port_priv->learn_ena = false; 2114 2115 /* Add the VLAN 1 as PVID when not under a bridge. We need this since 2116 * the dpaa2 switch interfaces are not capable to be VLAN unaware 2117 */ 2118 return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID, 2119 BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID); 2120 } 2121 2122 static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev) 2123 { 2124 struct net_device *upper_dev; 2125 struct list_head *iter; 2126 2127 /* RCU read lock not necessary because we have write-side protection 2128 * (rtnl_mutex), however a non-rcu iterator does not exist. 
2129 */ 2130 netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter) 2131 if (is_vlan_dev(upper_dev)) 2132 return -EOPNOTSUPP; 2133 2134 return 0; 2135 } 2136 2137 static int 2138 dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev, 2139 struct net_device *upper_dev, 2140 struct netlink_ext_ack *extack) 2141 { 2142 int err; 2143 2144 if (!br_vlan_enabled(upper_dev)) { 2145 NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); 2146 return -EOPNOTSUPP; 2147 } 2148 2149 err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); 2150 if (err) { 2151 NL_SET_ERR_MSG_MOD(extack, 2152 "Cannot join a bridge while VLAN uppers are present"); 2153 return err; 2154 } 2155 2156 return 0; 2157 } 2158 2159 static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, 2160 unsigned long event, void *ptr) 2161 { 2162 struct net_device *netdev = netdev_notifier_info_to_dev(ptr); 2163 struct netdev_notifier_changeupper_info *info = ptr; 2164 struct netlink_ext_ack *extack; 2165 struct net_device *upper_dev; 2166 int err = 0; 2167 2168 if (!dpaa2_switch_port_dev_check(netdev)) 2169 return NOTIFY_DONE; 2170 2171 extack = netdev_notifier_info_to_extack(&info->info); 2172 2173 switch (event) { 2174 case NETDEV_PRECHANGEUPPER: 2175 upper_dev = info->upper_dev; 2176 if (!netif_is_bridge_master(upper_dev)) 2177 break; 2178 2179 err = dpaa2_switch_prechangeupper_sanity_checks(netdev, 2180 upper_dev, 2181 extack); 2182 if (err) 2183 goto out; 2184 2185 if (!info->linking) 2186 dpaa2_switch_port_pre_bridge_leave(netdev); 2187 2188 break; 2189 case NETDEV_CHANGEUPPER: 2190 upper_dev = info->upper_dev; 2191 if (netif_is_bridge_master(upper_dev)) { 2192 if (info->linking) 2193 err = dpaa2_switch_port_bridge_join(netdev, 2194 upper_dev, 2195 extack); 2196 else 2197 err = dpaa2_switch_port_bridge_leave(netdev); 2198 } 2199 break; 2200 } 2201 2202 out: 2203 return notifier_from_errno(err); 2204 } 2205 2206 struct ethsw_switchdev_event_work { 2207 struct work_struct work; 2208 struct switchdev_notifier_fdb_info fdb_info; 2209 struct net_device *dev; 2210 unsigned long event; 2211 }; 2212 2213 static void dpaa2_switch_event_work(struct work_struct *work) 2214 { 2215 struct ethsw_switchdev_event_work *switchdev_work = 2216 container_of(work, struct ethsw_switchdev_event_work, work); 2217 struct net_device *dev = switchdev_work->dev; 2218 struct switchdev_notifier_fdb_info *fdb_info; 2219 int err; 2220 2221 rtnl_lock(); 2222 fdb_info = &switchdev_work->fdb_info; 2223 2224 switch (switchdev_work->event) { 2225 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2226 if (!fdb_info->added_by_user || fdb_info->is_local) 2227 break; 2228 if (is_unicast_ether_addr(fdb_info->addr)) 2229 err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), 2230 fdb_info->addr); 2231 else 2232 err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), 2233 fdb_info->addr); 2234 if (err) 2235 break; 2236 fdb_info->offloaded = true; 2237 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, 2238 &fdb_info->info, NULL); 2239 break; 2240 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2241 if (!fdb_info->added_by_user || fdb_info->is_local) 2242 break; 2243 if (is_unicast_ether_addr(fdb_info->addr)) 2244 dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); 2245 else 2246 dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr); 2247 break; 2248 } 2249 2250 rtnl_unlock(); 2251 kfree(switchdev_work->fdb_info.addr); 2252 kfree(switchdev_work); 2253 dev_put(dev); 2254 } 2255 2256 /* Called under rcu_read_lock() */ 2257 static int
dpaa2_switch_port_event(struct notifier_block *nb, 2258 unsigned long event, void *ptr) 2259 { 2260 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2261 struct ethsw_port_priv *port_priv = netdev_priv(dev); 2262 struct ethsw_switchdev_event_work *switchdev_work; 2263 struct switchdev_notifier_fdb_info *fdb_info = ptr; 2264 struct ethsw_core *ethsw = port_priv->ethsw_data; 2265 2266 if (event == SWITCHDEV_PORT_ATTR_SET) 2267 return dpaa2_switch_port_attr_set_event(dev, ptr); 2268 2269 if (!dpaa2_switch_port_dev_check(dev)) 2270 return NOTIFY_DONE; 2271 2272 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 2273 if (!switchdev_work) 2274 return NOTIFY_BAD; 2275 2276 INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); 2277 switchdev_work->dev = dev; 2278 switchdev_work->event = event; 2279 2280 switch (event) { 2281 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2282 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2283 memcpy(&switchdev_work->fdb_info, ptr, 2284 sizeof(switchdev_work->fdb_info)); 2285 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 2286 if (!switchdev_work->fdb_info.addr) 2287 goto err_addr_alloc; 2288 2289 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 2290 fdb_info->addr); 2291 2292 /* Take a reference on the device to avoid being freed. */ 2293 dev_hold(dev); 2294 break; 2295 default: 2296 kfree(switchdev_work); 2297 return NOTIFY_DONE; 2298 } 2299 2300 queue_work(ethsw->workqueue, &switchdev_work->work); 2301 2302 return NOTIFY_DONE; 2303 2304 err_addr_alloc: 2305 kfree(switchdev_work); 2306 return NOTIFY_BAD; 2307 } 2308 2309 static int dpaa2_switch_port_obj_event(unsigned long event, 2310 struct net_device *netdev, 2311 struct switchdev_notifier_port_obj_info *port_obj_info) 2312 { 2313 int err = -EOPNOTSUPP; 2314 2315 if (!dpaa2_switch_port_dev_check(netdev)) 2316 return NOTIFY_DONE; 2317 2318 switch (event) { 2319 case SWITCHDEV_PORT_OBJ_ADD: 2320 err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); 2321 break; 2322 case SWITCHDEV_PORT_OBJ_DEL: 2323 err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); 2324 break; 2325 } 2326 2327 port_obj_info->handled = true; 2328 return notifier_from_errno(err); 2329 } 2330 2331 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, 2332 unsigned long event, void *ptr) 2333 { 2334 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2335 2336 switch (event) { 2337 case SWITCHDEV_PORT_OBJ_ADD: 2338 case SWITCHDEV_PORT_OBJ_DEL: 2339 return dpaa2_switch_port_obj_event(event, dev, ptr); 2340 case SWITCHDEV_PORT_ATTR_SET: 2341 return dpaa2_switch_port_attr_set_event(dev, ptr); 2342 } 2343 2344 return NOTIFY_DONE; 2345 } 2346 2347 /* Build a linear skb based on a single-buffer frame descriptor */ 2348 static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw, 2349 const struct dpaa2_fd *fd) 2350 { 2351 u16 fd_offset = dpaa2_fd_get_offset(fd); 2352 dma_addr_t addr = dpaa2_fd_get_addr(fd); 2353 u32 fd_length = dpaa2_fd_get_len(fd); 2354 struct device *dev = ethsw->dev; 2355 struct sk_buff *skb = NULL; 2356 void *fd_vaddr; 2357 2358 fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr); 2359 dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE, 2360 DMA_FROM_DEVICE); 2361 2362 skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE + 2363 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 2364 if (unlikely(!skb)) { 2365 dev_err(dev, "build_skb() failed\n"); 2366 return NULL; 2367 } 2368 2369 skb_reserve(skb, fd_offset); 2370 skb_put(skb, fd_length); 2371 2372 
ethsw->buf_count--; 2373 2374 return skb; 2375 } 2376 2377 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, 2378 const struct dpaa2_fd *fd) 2379 { 2380 dpaa2_switch_free_fd(fq->ethsw, fd); 2381 } 2382 2383 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, 2384 const struct dpaa2_fd *fd) 2385 { 2386 struct ethsw_core *ethsw = fq->ethsw; 2387 struct ethsw_port_priv *port_priv; 2388 struct net_device *netdev; 2389 struct vlan_ethhdr *hdr; 2390 struct sk_buff *skb; 2391 u16 vlan_tci, vid; 2392 int if_id, err; 2393 2394 /* get switch ingress interface ID */ 2395 if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF; 2396 2397 if (if_id >= ethsw->sw_attr.num_ifs) { 2398 dev_err(ethsw->dev, "Frame received from unknown interface!\n"); 2399 goto err_free_fd; 2400 } 2401 port_priv = ethsw->ports[if_id]; 2402 netdev = port_priv->netdev; 2403 2404 /* build the SKB based on the FD received */ 2405 if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) { 2406 if (net_ratelimit()) { 2407 netdev_err(netdev, "Received invalid frame format\n"); 2408 goto err_free_fd; 2409 } 2410 } 2411 2412 skb = dpaa2_switch_build_linear_skb(ethsw, fd); 2413 if (unlikely(!skb)) 2414 goto err_free_fd; 2415 2416 skb_reset_mac_header(skb); 2417 2418 /* Remove the VLAN header if the packet that we just received has a vid 2419 * equal to the port PVIDs. Since the dpaa2-switch can operate only in 2420 * VLAN-aware mode and no alterations are made on the packet when it's 2421 * redirected/mirrored to the control interface, we are sure that there 2422 * will always be a VLAN header present. 2423 */ 2424 hdr = vlan_eth_hdr(skb); 2425 vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK; 2426 if (vid == port_priv->pvid) { 2427 err = __skb_vlan_pop(skb, &vlan_tci); 2428 if (err) { 2429 dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err); 2430 goto err_free_fd; 2431 } 2432 } 2433 2434 skb->dev = netdev; 2435 skb->protocol = eth_type_trans(skb, skb->dev); 2436 2437 /* Setup the offload_fwd_mark only if the port is under a bridge */ 2438 skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev); 2439 2440 netif_receive_skb(skb); 2441 2442 return; 2443 2444 err_free_fd: 2445 dpaa2_switch_free_fd(ethsw, fd); 2446 } 2447 2448 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) 2449 { 2450 ethsw->features = 0; 2451 2452 if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) 2453 ethsw->features |= ETHSW_FEATURE_MAC_ADDR; 2454 } 2455 2456 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw) 2457 { 2458 struct dpsw_ctrl_if_attr ctrl_if_attr; 2459 struct device *dev = ethsw->dev; 2460 int i = 0; 2461 int err; 2462 2463 err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2464 &ctrl_if_attr); 2465 if (err) { 2466 dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err); 2467 return err; 2468 } 2469 2470 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; 2471 ethsw->fq[i].ethsw = ethsw; 2472 ethsw->fq[i++].type = DPSW_QUEUE_RX; 2473 2474 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; 2475 ethsw->fq[i].ethsw = ethsw; 2476 ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF; 2477 2478 return 0; 2479 } 2480 2481 /* Free buffers acquired from the buffer pool or which were meant to 2482 * be released in the pool 2483 */ 2484 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count) 2485 { 2486 struct device *dev = ethsw->dev; 2487 void *vaddr; 2488 int i; 2489 2490 for (i = 0; i < count; i++) { 2491 vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]); 2492 
dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE, 2493 DMA_FROM_DEVICE); 2494 free_pages((unsigned long)vaddr, 0); 2495 } 2496 } 2497 2498 /* Perform a single release command to add buffers 2499 * to the specified buffer pool 2500 */ 2501 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid) 2502 { 2503 struct device *dev = ethsw->dev; 2504 u64 buf_array[BUFS_PER_CMD]; 2505 struct page *page; 2506 int retries = 0; 2507 dma_addr_t addr; 2508 int err; 2509 int i; 2510 2511 for (i = 0; i < BUFS_PER_CMD; i++) { 2512 /* Allocate one page for each Rx buffer. WRIOP sees 2513 * the entire page except for a tailroom reserved for 2514 * skb shared info 2515 */ 2516 page = dev_alloc_pages(0); 2517 if (!page) { 2518 dev_err(dev, "buffer allocation failed\n"); 2519 goto err_alloc; 2520 } 2521 2522 addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE, 2523 DMA_FROM_DEVICE); 2524 if (dma_mapping_error(dev, addr)) { 2525 dev_err(dev, "dma_map_single() failed\n"); 2526 goto err_map; 2527 } 2528 buf_array[i] = addr; 2529 } 2530 2531 release_bufs: 2532 /* In case the portal is busy, retry until successful or 2533 * max retries hit. 2534 */ 2535 while ((err = dpaa2_io_service_release(NULL, bpid, 2536 buf_array, i)) == -EBUSY) { 2537 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) 2538 break; 2539 2540 cpu_relax(); 2541 } 2542 2543 /* If release command failed, clean up and bail out. */ 2544 if (err) { 2545 dpaa2_switch_free_bufs(ethsw, buf_array, i); 2546 return 0; 2547 } 2548 2549 return i; 2550 2551 err_map: 2552 __free_pages(page, 0); 2553 err_alloc: 2554 /* If we managed to allocate at least some buffers, 2555 * release them to hardware 2556 */ 2557 if (i) 2558 goto release_bufs; 2559 2560 return 0; 2561 } 2562 2563 static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) 2564 { 2565 int *count = ðsw->buf_count; 2566 int new_count; 2567 int err = 0; 2568 2569 if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) { 2570 do { 2571 new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2572 if (unlikely(!new_count)) { 2573 /* Out of memory; abort for now, we'll 2574 * try later on 2575 */ 2576 break; 2577 } 2578 *count += new_count; 2579 } while (*count < DPAA2_ETHSW_NUM_BUFS); 2580 2581 if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS)) 2582 err = -ENOMEM; 2583 } 2584 2585 return err; 2586 } 2587 2588 static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) 2589 { 2590 int *count, i; 2591 2592 for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { 2593 count = ðsw->buf_count; 2594 *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2595 2596 if (unlikely(*count < BUFS_PER_CMD)) 2597 return -ENOMEM; 2598 } 2599 2600 return 0; 2601 } 2602 2603 static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw) 2604 { 2605 u64 buf_array[BUFS_PER_CMD]; 2606 int ret; 2607 2608 do { 2609 ret = dpaa2_io_service_acquire(NULL, ethsw->bpid, 2610 buf_array, BUFS_PER_CMD); 2611 if (ret < 0) { 2612 dev_err(ethsw->dev, 2613 "dpaa2_io_service_acquire() = %d\n", ret); 2614 return; 2615 } 2616 dpaa2_switch_free_bufs(ethsw, buf_array, ret); 2617 2618 } while (ret); 2619 } 2620 2621 static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) 2622 { 2623 struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 }; 2624 struct device *dev = ethsw->dev; 2625 struct fsl_mc_device *dpbp_dev; 2626 struct dpbp_attr dpbp_attrs; 2627 int err; 2628 2629 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 2630 &dpbp_dev); 2631 if (err) { 2632 if (err == -ENXIO) 2633 err = -EPROBE_DEFER; 2634 
else 2635 dev_err(dev, "DPBP device allocation failed\n"); 2636 return err; 2637 } 2638 ethsw->dpbp_dev = dpbp_dev; 2639 2640 err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id, 2641 &dpbp_dev->mc_handle); 2642 if (err) { 2643 dev_err(dev, "dpbp_open() failed\n"); 2644 goto err_open; 2645 } 2646 2647 err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2648 if (err) { 2649 dev_err(dev, "dpbp_reset() failed\n"); 2650 goto err_reset; 2651 } 2652 2653 err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2654 if (err) { 2655 dev_err(dev, "dpbp_enable() failed\n"); 2656 goto err_enable; 2657 } 2658 2659 err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle, 2660 &dpbp_attrs); 2661 if (err) { 2662 dev_err(dev, "dpbp_get_attributes() failed\n"); 2663 goto err_get_attr; 2664 } 2665 2666 dpsw_ctrl_if_pools_cfg.num_dpbp = 1; 2667 dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id; 2668 dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE; 2669 dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0; 2670 2671 err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle, 2672 &dpsw_ctrl_if_pools_cfg); 2673 if (err) { 2674 dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); 2675 goto err_get_attr; 2676 } 2677 ethsw->bpid = dpbp_attrs.id; 2678 2679 return 0; 2680 2681 err_get_attr: 2682 dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2683 err_enable: 2684 err_reset: 2685 dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2686 err_open: 2687 fsl_mc_object_free(dpbp_dev); 2688 return err; 2689 } 2690 2691 static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw) 2692 { 2693 dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2694 dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2695 fsl_mc_object_free(ethsw->dpbp_dev); 2696 } 2697 2698 static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw) 2699 { 2700 int i; 2701 2702 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2703 ethsw->fq[i].store = 2704 dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE, 2705 ethsw->dev); 2706 if (!ethsw->fq[i].store) { 2707 dev_err(ethsw->dev, "dpaa2_io_store_create failed\n"); 2708 while (--i >= 0) 2709 dpaa2_io_store_destroy(ethsw->fq[i].store); 2710 return -ENOMEM; 2711 } 2712 } 2713 2714 return 0; 2715 } 2716 2717 static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw) 2718 { 2719 int i; 2720 2721 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2722 dpaa2_io_store_destroy(ethsw->fq[i].store); 2723 } 2724 2725 static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq) 2726 { 2727 int err, retries = 0; 2728 2729 /* Try to pull from the FQ while the portal is busy and we didn't hit 2730 * the maximum number fo retries 2731 */ 2732 do { 2733 err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store); 2734 cpu_relax(); 2735 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2736 2737 if (unlikely(err)) 2738 dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err); 2739 2740 return err; 2741 } 2742 2743 /* Consume all frames pull-dequeued into the store */ 2744 static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq) 2745 { 2746 struct ethsw_core *ethsw = fq->ethsw; 2747 int cleaned = 0, is_last; 2748 struct dpaa2_dq *dq; 2749 int retries = 0; 2750 2751 do { 2752 /* Get the next available FD from the store */ 2753 dq = dpaa2_io_store_next(fq->store, &is_last); 2754 if (unlikely(!dq)) { 2755 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) { 2756 dev_err_once(ethsw->dev, 2757 "No valid dequeue response\n"); 2758 return -ETIMEDOUT; 
2759 } 2760 continue; 2761 } 2762 2763 if (fq->type == DPSW_QUEUE_RX) 2764 dpaa2_switch_rx(fq, dpaa2_dq_fd(dq)); 2765 else 2766 dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq)); 2767 cleaned++; 2768 2769 } while (!is_last); 2770 2771 return cleaned; 2772 } 2773 2774 /* NAPI poll routine */ 2775 static int dpaa2_switch_poll(struct napi_struct *napi, int budget) 2776 { 2777 int err, cleaned = 0, store_cleaned, work_done; 2778 struct dpaa2_switch_fq *fq; 2779 int retries = 0; 2780 2781 fq = container_of(napi, struct dpaa2_switch_fq, napi); 2782 2783 do { 2784 err = dpaa2_switch_pull_fq(fq); 2785 if (unlikely(err)) 2786 break; 2787 2788 /* Refill pool if appropriate */ 2789 dpaa2_switch_refill_bp(fq->ethsw); 2790 2791 store_cleaned = dpaa2_switch_store_consume(fq); 2792 cleaned += store_cleaned; 2793 2794 if (cleaned >= budget) { 2795 work_done = budget; 2796 goto out; 2797 } 2798 2799 } while (store_cleaned); 2800 2801 /* We didn't consume the entire budget, so finish napi and re-enable 2802 * data availability notifications 2803 */ 2804 napi_complete_done(napi, cleaned); 2805 do { 2806 err = dpaa2_io_service_rearm(NULL, &fq->nctx); 2807 cpu_relax(); 2808 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2809 2810 work_done = max(cleaned, 1); 2811 out: 2812 2813 return work_done; 2814 } 2815 2816 static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) 2817 { 2818 struct dpaa2_switch_fq *fq; 2819 2820 fq = container_of(nctx, struct dpaa2_switch_fq, nctx); 2821 2822 napi_schedule(&fq->napi); 2823 } 2824 2825 static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw) 2826 { 2827 struct dpsw_ctrl_if_queue_cfg queue_cfg; 2828 struct dpaa2_io_notification_ctx *nctx; 2829 int err, i, j; 2830 2831 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2832 nctx = ðsw->fq[i].nctx; 2833 2834 /* Register a new software context for the FQID. 
2835 * By using NULL as the first parameter, we specify that we do 2836 * not care on which cpu are interrupts received for this queue 2837 */ 2838 nctx->is_cdan = 0; 2839 nctx->id = ethsw->fq[i].fqid; 2840 nctx->desired_cpu = DPAA2_IO_ANY_CPU; 2841 nctx->cb = dpaa2_switch_fqdan_cb; 2842 err = dpaa2_io_service_register(NULL, nctx, ethsw->dev); 2843 if (err) { 2844 err = -EPROBE_DEFER; 2845 goto err_register; 2846 } 2847 2848 queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST | 2849 DPSW_CTRL_IF_QUEUE_OPT_USER_CTX; 2850 queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO; 2851 queue_cfg.dest_cfg.dest_id = nctx->dpio_id; 2852 queue_cfg.dest_cfg.priority = 0; 2853 queue_cfg.user_ctx = nctx->qman64; 2854 2855 err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0, 2856 ethsw->dpsw_handle, 2857 ethsw->fq[i].type, 2858 &queue_cfg); 2859 if (err) 2860 goto err_set_queue; 2861 } 2862 2863 return 0; 2864 2865 err_set_queue: 2866 dpaa2_io_service_deregister(NULL, nctx, ethsw->dev); 2867 err_register: 2868 for (j = 0; j < i; j++) 2869 dpaa2_io_service_deregister(NULL, ðsw->fq[j].nctx, 2870 ethsw->dev); 2871 2872 return err; 2873 } 2874 2875 static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw) 2876 { 2877 int i; 2878 2879 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2880 dpaa2_io_service_deregister(NULL, ðsw->fq[i].nctx, 2881 ethsw->dev); 2882 } 2883 2884 static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) 2885 { 2886 int err; 2887 2888 /* setup FQs for Rx and Tx Conf */ 2889 err = dpaa2_switch_setup_fqs(ethsw); 2890 if (err) 2891 return err; 2892 2893 /* setup the buffer pool needed on the Rx path */ 2894 err = dpaa2_switch_setup_dpbp(ethsw); 2895 if (err) 2896 return err; 2897 2898 err = dpaa2_switch_alloc_rings(ethsw); 2899 if (err) 2900 goto err_free_dpbp; 2901 2902 err = dpaa2_switch_setup_dpio(ethsw); 2903 if (err) 2904 goto err_destroy_rings; 2905 2906 err = dpaa2_switch_seed_bp(ethsw); 2907 if (err) 2908 goto err_deregister_dpio; 2909 2910 err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); 2911 if (err) { 2912 dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); 2913 goto err_drain_dpbp; 2914 } 2915 2916 return 0; 2917 2918 err_drain_dpbp: 2919 dpaa2_switch_drain_bp(ethsw); 2920 err_deregister_dpio: 2921 dpaa2_switch_free_dpio(ethsw); 2922 err_destroy_rings: 2923 dpaa2_switch_destroy_rings(ethsw); 2924 err_free_dpbp: 2925 dpaa2_switch_free_dpbp(ethsw); 2926 2927 return err; 2928 } 2929 2930 static void dpaa2_switch_remove_port(struct ethsw_core *ethsw, 2931 u16 port_idx) 2932 { 2933 struct ethsw_port_priv *port_priv = ethsw->ports[port_idx]; 2934 2935 rtnl_lock(); 2936 dpaa2_switch_port_disconnect_mac(port_priv); 2937 rtnl_unlock(); 2938 free_netdev(port_priv->netdev); 2939 ethsw->ports[port_idx] = NULL; 2940 } 2941 2942 static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) 2943 { 2944 struct device *dev = &sw_dev->dev; 2945 struct ethsw_core *ethsw = dev_get_drvdata(dev); 2946 struct dpsw_vlan_if_cfg vcfg = {0}; 2947 struct dpsw_tci_cfg tci_cfg = {0}; 2948 struct dpsw_stp_cfg stp_cfg; 2949 int err; 2950 u16 i; 2951 2952 ethsw->dev_id = sw_dev->obj_desc.id; 2953 2954 err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, ðsw->dpsw_handle); 2955 if (err) { 2956 dev_err(dev, "dpsw_open err %d\n", err); 2957 return err; 2958 } 2959 2960 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2961 ðsw->sw_attr); 2962 if (err) { 2963 dev_err(dev, "dpsw_get_attributes err %d\n", err); 2964 goto err_close; 2965 } 2966 2967 err = dpsw_get_api_version(ethsw->mc_io, 0, 
2968 ðsw->major, 2969 ðsw->minor); 2970 if (err) { 2971 dev_err(dev, "dpsw_get_api_version err %d\n", err); 2972 goto err_close; 2973 } 2974 2975 /* Minimum supported DPSW version check */ 2976 if (ethsw->major < DPSW_MIN_VER_MAJOR || 2977 (ethsw->major == DPSW_MIN_VER_MAJOR && 2978 ethsw->minor < DPSW_MIN_VER_MINOR)) { 2979 dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n", 2980 ethsw->major, ethsw->minor); 2981 err = -EOPNOTSUPP; 2982 goto err_close; 2983 } 2984 2985 if (!dpaa2_switch_supports_cpu_traffic(ethsw)) { 2986 err = -EOPNOTSUPP; 2987 goto err_close; 2988 } 2989 2990 dpaa2_switch_detect_features(ethsw); 2991 2992 err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle); 2993 if (err) { 2994 dev_err(dev, "dpsw_reset err %d\n", err); 2995 goto err_close; 2996 } 2997 2998 stp_cfg.vlan_id = DEFAULT_VLAN_ID; 2999 stp_cfg.state = DPSW_STP_STATE_FORWARDING; 3000 3001 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3002 err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i); 3003 if (err) { 3004 dev_err(dev, "dpsw_if_disable err %d\n", err); 3005 goto err_close; 3006 } 3007 3008 err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i, 3009 &stp_cfg); 3010 if (err) { 3011 dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", 3012 err, i); 3013 goto err_close; 3014 } 3015 3016 /* Switch starts with all ports configured to VLAN 1. Need to 3017 * remove this setting to allow configuration at bridge join 3018 */ 3019 vcfg.num_ifs = 1; 3020 vcfg.if_id[0] = i; 3021 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle, 3022 DEFAULT_VLAN_ID, &vcfg); 3023 if (err) { 3024 dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n", 3025 err); 3026 goto err_close; 3027 } 3028 3029 tci_cfg.vlan_id = 4095; 3030 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg); 3031 if (err) { 3032 dev_err(dev, "dpsw_if_set_tci err %d\n", err); 3033 goto err_close; 3034 } 3035 3036 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 3037 DEFAULT_VLAN_ID, &vcfg); 3038 if (err) { 3039 dev_err(dev, "dpsw_vlan_remove_if err %d\n", err); 3040 goto err_close; 3041 } 3042 } 3043 3044 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID); 3045 if (err) { 3046 dev_err(dev, "dpsw_vlan_remove err %d\n", err); 3047 goto err_close; 3048 } 3049 3050 ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered", 3051 WQ_MEM_RECLAIM, "ethsw", 3052 ethsw->sw_attr.id); 3053 if (!ethsw->workqueue) { 3054 err = -ENOMEM; 3055 goto err_close; 3056 } 3057 3058 err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0); 3059 if (err) 3060 goto err_destroy_ordered_workqueue; 3061 3062 err = dpaa2_switch_ctrl_if_setup(ethsw); 3063 if (err) 3064 goto err_destroy_ordered_workqueue; 3065 3066 return 0; 3067 3068 err_destroy_ordered_workqueue: 3069 destroy_workqueue(ethsw->workqueue); 3070 3071 err_close: 3072 dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 3073 return err; 3074 } 3075 3076 /* Add an ACL to redirect frames with specific destination MAC address to 3077 * control interface 3078 */ 3079 static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, 3080 const char *mac) 3081 { 3082 struct dpaa2_switch_acl_entry acl_entry = {0}; 3083 3084 /* Match on the destination MAC address */ 3085 ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac); 3086 eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac); 3087 3088 /* Trap to CPU */ 3089 acl_entry.cfg.precedence = 0; 3090 acl_entry.cfg.result.action = 
DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; 3091 3092 return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry); 3093 } 3094 3095 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) 3096 { 3097 const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; 3098 struct switchdev_obj_port_vlan vlan = { 3099 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 3100 .vid = DEFAULT_VLAN_ID, 3101 .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID, 3102 }; 3103 struct net_device *netdev = port_priv->netdev; 3104 struct ethsw_core *ethsw = port_priv->ethsw_data; 3105 struct dpaa2_switch_filter_block *filter_block; 3106 struct dpsw_fdb_cfg fdb_cfg = {0}; 3107 struct dpsw_if_attr dpsw_if_attr; 3108 struct dpaa2_switch_fdb *fdb; 3109 struct dpsw_acl_cfg acl_cfg; 3110 u16 fdb_id, acl_tbl_id; 3111 int err; 3112 3113 /* Get the Tx queue for this specific port */ 3114 err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 3115 port_priv->idx, &dpsw_if_attr); 3116 if (err) { 3117 netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err); 3118 return err; 3119 } 3120 port_priv->tx_qdid = dpsw_if_attr.qdid; 3121 3122 /* Create a FDB table for this particular switch port */ 3123 fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs; 3124 err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 3125 &fdb_id, &fdb_cfg); 3126 if (err) { 3127 netdev_err(netdev, "dpsw_fdb_add err %d\n", err); 3128 return err; 3129 } 3130 3131 /* Find an unused dpaa2_switch_fdb structure and use it */ 3132 fdb = dpaa2_switch_fdb_get_unused(ethsw); 3133 fdb->fdb_id = fdb_id; 3134 fdb->in_use = true; 3135 fdb->bridge_dev = NULL; 3136 port_priv->fdb = fdb; 3137 3138 /* We need to add VLAN 1 as the PVID on this port until it is under a 3139 * bridge since the DPAA2 switch is not able to handle the traffic in a 3140 * VLAN unaware fashion 3141 */ 3142 err = dpaa2_switch_port_vlans_add(netdev, &vlan); 3143 if (err) 3144 return err; 3145 3146 /* Setup the egress flooding domains (broadcast, unknown unicast */ 3147 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 3148 if (err) 3149 return err; 3150 3151 /* Create an ACL table to be used by this switch port */ 3152 acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES; 3153 err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 3154 &acl_tbl_id, &acl_cfg); 3155 if (err) { 3156 netdev_err(netdev, "dpsw_acl_add err %d\n", err); 3157 return err; 3158 } 3159 3160 filter_block = dpaa2_switch_filter_block_get_unused(ethsw); 3161 filter_block->ethsw = ethsw; 3162 filter_block->acl_id = acl_tbl_id; 3163 filter_block->in_use = true; 3164 filter_block->num_acl_rules = 0; 3165 INIT_LIST_HEAD(&filter_block->acl_entries); 3166 INIT_LIST_HEAD(&filter_block->mirror_entries); 3167 3168 err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block); 3169 if (err) 3170 return err; 3171 3172 err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa); 3173 if (err) 3174 return err; 3175 3176 return err; 3177 } 3178 3179 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) 3180 { 3181 dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3182 dpaa2_switch_free_dpio(ethsw); 3183 dpaa2_switch_destroy_rings(ethsw); 3184 dpaa2_switch_drain_bp(ethsw); 3185 dpaa2_switch_free_dpbp(ethsw); 3186 } 3187 3188 static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev) 3189 { 3190 struct device *dev = &sw_dev->dev; 3191 struct ethsw_core *ethsw = dev_get_drvdata(dev); 3192 int err; 3193 3194 
dpaa2_switch_ctrl_if_teardown(ethsw); 3195 3196 destroy_workqueue(ethsw->workqueue); 3197 3198 err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 3199 if (err) 3200 dev_warn(dev, "dpsw_close err %d\n", err); 3201 } 3202 3203 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) 3204 { 3205 struct ethsw_port_priv *port_priv; 3206 struct ethsw_core *ethsw; 3207 struct device *dev; 3208 int i; 3209 3210 dev = &sw_dev->dev; 3211 ethsw = dev_get_drvdata(dev); 3212 3213 dpaa2_switch_teardown_irqs(sw_dev); 3214 3215 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3216 3217 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3218 port_priv = ethsw->ports[i]; 3219 unregister_netdev(port_priv->netdev); 3220 dpaa2_switch_remove_port(ethsw, i); 3221 } 3222 3223 kfree(ethsw->fdbs); 3224 kfree(ethsw->filter_blocks); 3225 kfree(ethsw->ports); 3226 3227 dpaa2_switch_teardown(sw_dev); 3228 3229 fsl_mc_portal_free(ethsw->mc_io); 3230 3231 kfree(ethsw); 3232 3233 dev_set_drvdata(dev, NULL); 3234 3235 return 0; 3236 } 3237 3238 static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, 3239 u16 port_idx) 3240 { 3241 struct ethsw_port_priv *port_priv; 3242 struct device *dev = ethsw->dev; 3243 struct net_device *port_netdev; 3244 int err; 3245 3246 port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); 3247 if (!port_netdev) { 3248 dev_err(dev, "alloc_etherdev error\n"); 3249 return -ENOMEM; 3250 } 3251 3252 port_priv = netdev_priv(port_netdev); 3253 port_priv->netdev = port_netdev; 3254 port_priv->ethsw_data = ethsw; 3255 3256 port_priv->idx = port_idx; 3257 port_priv->stp_state = BR_STATE_FORWARDING; 3258 3259 SET_NETDEV_DEV(port_netdev, dev); 3260 port_netdev->netdev_ops = &dpaa2_switch_port_ops; 3261 port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops; 3262 3263 port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM; 3264 3265 port_priv->bcast_flood = true; 3266 port_priv->ucast_flood = true; 3267 3268 /* Set MTU limits */ 3269 port_netdev->min_mtu = ETH_MIN_MTU; 3270 port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH; 3271 3272 /* Populate the private port structure so that later calls to 3273 * dpaa2_switch_port_init() can use it. 3274 */ 3275 ethsw->ports[port_idx] = port_priv; 3276 3277 /* The DPAA2 switch's ingress path depends on the VLAN table, 3278 * thus we are not able to disable VLAN filtering. 
3279 */ 3280 port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | 3281 NETIF_F_HW_VLAN_STAG_FILTER | 3282 NETIF_F_HW_TC; 3283 3284 err = dpaa2_switch_port_init(port_priv, port_idx); 3285 if (err) 3286 goto err_port_probe; 3287 3288 err = dpaa2_switch_port_set_mac_addr(port_priv); 3289 if (err) 3290 goto err_port_probe; 3291 3292 err = dpaa2_switch_port_set_learning(port_priv, false); 3293 if (err) 3294 goto err_port_probe; 3295 port_priv->learn_ena = false; 3296 3297 err = dpaa2_switch_port_connect_mac(port_priv); 3298 if (err) 3299 goto err_port_probe; 3300 3301 return 0; 3302 3303 err_port_probe: 3304 free_netdev(port_netdev); 3305 ethsw->ports[port_idx] = NULL; 3306 3307 return err; 3308 } 3309 3310 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) 3311 { 3312 struct device *dev = &sw_dev->dev; 3313 struct ethsw_core *ethsw; 3314 int i, err; 3315 3316 /* Allocate switch core*/ 3317 ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); 3318 3319 if (!ethsw) 3320 return -ENOMEM; 3321 3322 ethsw->dev = dev; 3323 ethsw->iommu_domain = iommu_get_domain_for_dev(dev); 3324 dev_set_drvdata(dev, ethsw); 3325 3326 err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 3327 ðsw->mc_io); 3328 if (err) { 3329 if (err == -ENXIO) 3330 err = -EPROBE_DEFER; 3331 else 3332 dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); 3333 goto err_free_drvdata; 3334 } 3335 3336 err = dpaa2_switch_init(sw_dev); 3337 if (err) 3338 goto err_free_cmdport; 3339 3340 ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), 3341 GFP_KERNEL); 3342 if (!(ethsw->ports)) { 3343 err = -ENOMEM; 3344 goto err_teardown; 3345 } 3346 3347 ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), 3348 GFP_KERNEL); 3349 if (!ethsw->fdbs) { 3350 err = -ENOMEM; 3351 goto err_free_ports; 3352 } 3353 3354 ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs, 3355 sizeof(*ethsw->filter_blocks), 3356 GFP_KERNEL); 3357 if (!ethsw->filter_blocks) { 3358 err = -ENOMEM; 3359 goto err_free_fdbs; 3360 } 3361 3362 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3363 err = dpaa2_switch_probe_port(ethsw, i); 3364 if (err) 3365 goto err_free_netdev; 3366 } 3367 3368 /* Add a NAPI instance for each of the Rx queues. The first port's 3369 * net_device will be associated with the instances since we do not have 3370 * different queues for each switch ports. 3371 */ 3372 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 3373 netif_napi_add(ethsw->ports[0]->netdev, 3374 ðsw->fq[i].napi, dpaa2_switch_poll, 3375 NAPI_POLL_WEIGHT); 3376 3377 /* Setup IRQs */ 3378 err = dpaa2_switch_setup_irqs(sw_dev); 3379 if (err) 3380 goto err_stop; 3381 3382 /* By convention, if the mirror port is equal to the number of switch 3383 * interfaces, then mirroring of any kind is disabled. 
3384 */ 3385 ethsw->mirror_port = ethsw->sw_attr.num_ifs; 3386 3387 /* Register the netdev only when the entire setup is done and the 3388 * switch port interfaces are ready to receive traffic 3389 */ 3390 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3391 err = register_netdev(ethsw->ports[i]->netdev); 3392 if (err < 0) { 3393 dev_err(dev, "register_netdev error %d\n", err); 3394 goto err_unregister_ports; 3395 } 3396 } 3397 3398 return 0; 3399 3400 err_unregister_ports: 3401 for (i--; i >= 0; i--) 3402 unregister_netdev(ethsw->ports[i]->netdev); 3403 dpaa2_switch_teardown_irqs(sw_dev); 3404 err_stop: 3405 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3406 err_free_netdev: 3407 for (i--; i >= 0; i--) 3408 dpaa2_switch_remove_port(ethsw, i); 3409 kfree(ethsw->filter_blocks); 3410 err_free_fdbs: 3411 kfree(ethsw->fdbs); 3412 err_free_ports: 3413 kfree(ethsw->ports); 3414 3415 err_teardown: 3416 dpaa2_switch_teardown(sw_dev); 3417 3418 err_free_cmdport: 3419 fsl_mc_portal_free(ethsw->mc_io); 3420 3421 err_free_drvdata: 3422 kfree(ethsw); 3423 dev_set_drvdata(dev, NULL); 3424 3425 return err; 3426 } 3427 3428 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { 3429 { 3430 .vendor = FSL_MC_VENDOR_FREESCALE, 3431 .obj_type = "dpsw", 3432 }, 3433 { .vendor = 0x0 } 3434 }; 3435 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); 3436 3437 static struct fsl_mc_driver dpaa2_switch_drv = { 3438 .driver = { 3439 .name = KBUILD_MODNAME, 3440 .owner = THIS_MODULE, 3441 }, 3442 .probe = dpaa2_switch_probe, 3443 .remove = dpaa2_switch_remove, 3444 .match_id_table = dpaa2_switch_match_id_table 3445 }; 3446 3447 static struct notifier_block dpaa2_switch_port_nb __read_mostly = { 3448 .notifier_call = dpaa2_switch_port_netdevice_event, 3449 }; 3450 3451 static struct notifier_block dpaa2_switch_port_switchdev_nb = { 3452 .notifier_call = dpaa2_switch_port_event, 3453 }; 3454 3455 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = { 3456 .notifier_call = dpaa2_switch_port_blocking_event, 3457 }; 3458 3459 static int dpaa2_switch_register_notifiers(void) 3460 { 3461 int err; 3462 3463 err = register_netdevice_notifier(&dpaa2_switch_port_nb); 3464 if (err) { 3465 pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err); 3466 return err; 3467 } 3468 3469 err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3470 if (err) { 3471 pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err); 3472 goto err_switchdev_nb; 3473 } 3474 3475 err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3476 if (err) { 3477 pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err); 3478 goto err_switchdev_blocking_nb; 3479 } 3480 3481 return 0; 3482 3483 err_switchdev_blocking_nb: 3484 unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3485 err_switchdev_nb: 3486 unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3487 3488 return err; 3489 } 3490 3491 static void dpaa2_switch_unregister_notifiers(void) 3492 { 3493 int err; 3494 3495 err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3496 if (err) 3497 pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n", 3498 err); 3499 3500 err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3501 if (err) 3502 pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err); 3503 3504 err = 
unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3505 if (err) 3506 pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err); 3507 } 3508 3509 static int __init dpaa2_switch_driver_init(void) 3510 { 3511 int err; 3512 3513 err = fsl_mc_driver_register(&dpaa2_switch_drv); 3514 if (err) 3515 return err; 3516 3517 err = dpaa2_switch_register_notifiers(); 3518 if (err) { 3519 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3520 return err; 3521 } 3522 3523 return 0; 3524 } 3525 3526 static void __exit dpaa2_switch_driver_exit(void) 3527 { 3528 dpaa2_switch_unregister_notifiers(); 3529 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3530 } 3531 3532 module_init(dpaa2_switch_driver_init); 3533 module_exit(dpaa2_switch_driver_exit); 3534 3535 MODULE_LICENSE("GPL v2"); 3536 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver"); 3537