// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
#include <net/pkt_cls.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR	8
#define DPSW_MIN_VER_MINOR	9

#define DEFAULT_VLAN_ID		1

static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
{
	return port_priv->fdb->fdb_id;
}

static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->fdbs[i].in_use)
			return &ethsw->fdbs[i];
	return NULL;
}

static struct dpaa2_switch_filter_block *
dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->filter_blocks[i].in_use)
			return &ethsw->filter_blocks[i];
	return NULL;
}

static int dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
				     struct net_device *bridge_dev)
{
	struct ethsw_port_priv *other_port_priv = NULL;
	struct dpaa2_switch_fdb *fdb;
	struct net_device *other_dev;
	struct list_head *iter;

	/* If we leave a bridge (bridge_dev is NULL), find an unused
	 * FDB and use that.
	 */
	if (!bridge_dev) {
		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);

		/* If there is no unused FDB, we must be the last port that
		 * leaves the last bridge, all the others are standalone. We
		 * can just keep the FDB that we already have.
		 */

		if (!fdb) {
			port_priv->fdb->bridge_dev = NULL;
			return 0;
		}

		port_priv->fdb = fdb;
		port_priv->fdb->in_use = true;
		port_priv->fdb->bridge_dev = NULL;
		return 0;
	}

	/* The below call to netdev_for_each_lower_dev() demands the RTNL lock
	 * being held. Assert on it so that it's easier to catch new code
	 * paths that reach this point without the RTNL lock.
	 */
	ASSERT_RTNL();

	/* If part of a bridge, use the FDB of the first dpaa2 switch interface
	 * to be present in that bridge
	 */
	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		if (other_dev == port_priv->netdev)
			continue;

		other_port_priv = netdev_priv(other_dev);
		break;
	}

	/* The current port is about to change its FDB to the one used by the
	 * first port that joined the bridge.
	 */
	if (other_port_priv) {
		/* The previous FDB is about to become unused, since the
		 * interface is no longer standalone.
		 */
		port_priv->fdb->in_use = false;
		port_priv->fdb->bridge_dev = NULL;

		/* Get a reference to the new FDB */
		port_priv->fdb = other_port_priv->fdb;
	}

	/* Keep track of the new upper bridge device */
	port_priv->fdb->bridge_dev = bridge_dev;

	return 0;
}

static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
					   enum dpsw_flood_type type,
					   struct dpsw_egress_flood_cfg *cfg)
{
	int i = 0, j;

	memset(cfg, 0, sizeof(*cfg));

	/* Add all the DPAA2 switch ports found in the same bridging domain to
	 * the egress flooding domain
	 */
	for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
		if (!ethsw->ports[j])
			continue;
		if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
			continue;

		if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
		else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
	}

	/* Add the CTRL interface to the egress flooding domain */
	cfg->if_id[i++] = ethsw->sw_attr.num_ifs;

	cfg->fdb_id = fdb_id;
	cfg->flood_type = type;
	cfg->num_ifs = i;
}

static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
{
	struct dpsw_egress_flood_cfg flood_cfg;
	int err;

	/* Setup broadcast flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	/* Setup unknown flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	return 0;
}

static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_vlan_cfg vcfg = {0};
	int err;

	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_vlan_add(ethsw->mc_io, 0,
			    ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;

	return 0;
}

static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
{
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return true;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	return state.up ? true : false;
}

static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_tci_cfg tci_cfg = { 0 };
	bool up;
	int err, ret;

	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
		return err;
	}

	tci_cfg.vlan_id = pvid;

	/* Interface needs to be down to change PVID */
	up = dpaa2_switch_port_is_up(port_priv);
	if (up) {
		err = dpsw_if_disable(ethsw->mc_io, 0,
				      ethsw->dpsw_handle,
				      port_priv->idx);
		if (err) {
			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
			return err;
		}
	}

	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
		goto set_tci_error;
	}

	/* Delete previous PVID info and mark the new one */
	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
	port_priv->pvid = pvid;

set_tci_error:
	if (up) {
		ret = dpsw_if_enable(ethsw->mc_io, 0,
				     ethsw->dpsw_handle,
				     port_priv->idx);
		if (ret) {
			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
			return ret;
		}
	}

	return err;
}

static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
				      u16 vid, u16 flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg = {0};
	int err;

	if (port_priv->vlans[vid]) {
		netdev_warn(netdev, "VLAN %d already configured\n", vid);
		return -EEXIST;
	}

	/* If hit, this VLAN rule will lead the packet into the FDB table
	 * specified in the vlan configuration below
	 */
	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
		return err;
	}

	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_add_if_untagged err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
	}

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = dpaa2_switch_port_set_pvid(port_priv, vid);
		if (err)
			return err;
	}

	return 0;
}

static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
{
	switch (state) {
	case BR_STATE_DISABLED:
		return DPSW_STP_STATE_DISABLED;
	case BR_STATE_LISTENING:
		return DPSW_STP_STATE_LISTENING;
	case BR_STATE_LEARNING:
		return DPSW_STP_STATE_LEARNING;
	case BR_STATE_FORWARDING:
		return DPSW_STP_STATE_FORWARDING;
	case BR_STATE_BLOCKING:
		return DPSW_STP_STATE_BLOCKING;
	default:
		return DPSW_STP_STATE_DISABLED;
	}
}

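/* Program the bridge STP state into the MC firmware. The dpsw_if_set_stp()
 * command takes a VLAN id, so the same state is applied to every VLAN this
 * port is currently a member of.
 */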
static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
{
	struct dpsw_stp_cfg stp_cfg = {0};
	int err;
	u16 vid;

	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
		return 0;	/* Nothing to do */

	stp_cfg.state = br_stp_state_to_dpsw(state);
	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
			stp_cfg.vlan_id = vid;
			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
					      port_priv->ethsw_data->dpsw_handle,
					      port_priv->idx, &stp_cfg);
			if (err) {
				netdev_err(port_priv->netdev,
					   "dpsw_if_set_stp err %d\n", err);
				return err;
			}
		}
	}

	port_priv->stp_state = state;

	return 0;
}

static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
{
	struct ethsw_port_priv *ppriv_local = NULL;
	int i, err;

	if (!ethsw->vlans[vid])
		return -ENOENT;

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = 0;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		ppriv_local = ethsw->ports[i];
		ppriv_local->vlans[vid] = 0;
	}

	return 0;
}

static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
				   port_priv->ethsw_data->dpsw_handle,
				   fdb_id, &entry);
	if (err)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_add_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
				      port_priv->ethsw_data->dpsw_handle,
				      fdb_id, &entry);
	/* Silently discard error for calling multiple times the del command */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_unicast err %d\n", err);
	return err;
}

static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     fdb_id, &entry);
	/* Silently discard error for calling multiple times the add command */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
			   err);
	return err;
}

static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
					port_priv->ethsw_data->dpsw_handle,
					fdb_id, &entry);
	/* Silently discard error for calling multiple times the del command */
	if (err && err != -ENAVAIL)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_multicast err %d\n", err);
	return err;
}

static void dpaa2_switch_port_get_stats(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	u64 tmp;
	int err;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME_DISCARD,
				  &stats->rx_dropped);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FLTR_FRAME,
				  &tmp);
	if (err)
		goto error;
	stats->rx_dropped += tmp;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME_DISCARD,
				  &stats->tx_dropped);
	if (err)
		goto error;

	return;

error:
	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
}

static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
						int attr_id)
{
	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
}

static int dpaa2_switch_port_get_offload_stats(int attr_id,
					       const struct net_device *netdev,
					       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
		return 0;
	}

	return -EINVAL;
}

static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
					   0,
					   port_priv->ethsw_data->dpsw_handle,
					   port_priv->idx,
					   (u16)ETHSW_L2_MAX_FRM(mtu));
	if (err) {
		netdev_err(netdev,
			   "dpsw_if_set_max_frame_length() err %d\n", err);
		return err;
	}

	netdev->mtu = mtu;
	return 0;
}

static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpsw_link_state state;
	int err;

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (dpaa2_switch_port_is_type_phy(port_priv))
		return 0;

	/* Interrupts are received even though no one issued an 'ifconfig up'
	 * on the switch interface. Ignore these link state update interrupts
	 */
	if (!netif_running(netdev))
		return 0;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return err;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	if (state.up != port_priv->link_state) {
		if (state.up) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
		port_priv->link_state = state.up;
	}

	return 0;
}

/* Manage all NAPI instances for the control interface.
 *
 * We only have one RX queue and one Tx Conf queue for all
 * switch ports. Therefore, we only need to enable the NAPI instance once, the
 * first time one of the switch ports runs .dev_open().
 */

static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* a new interface is using the NAPI instance */
	ethsw->napi_users++;

	/* if there is already a user of the instance, return */
	if (ethsw->napi_users > 1)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_enable(&ethsw->fq[i].napi);
}

static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* If we are not the last interface using the NAPI, return */
	ethsw->napi_users--;
	if (ethsw->napi_users)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_disable(&ethsw->fq[i].napi);
}

static int dpaa2_switch_port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	if (!dpaa2_switch_port_is_type_phy(port_priv)) {
		/* Explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(netdev);
	}

	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
			     port_priv->ethsw_data->dpsw_handle,
			     port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
		return err;
	}

	dpaa2_switch_enable_ctrl_if_napi(ethsw);

	if (dpaa2_switch_port_is_type_phy(port_priv))
		phylink_start(port_priv->mac->phylink);

	return 0;
}

static int dpaa2_switch_port_stop(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		phylink_stop(port_priv->mac->phylink);
	} else {
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}

	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			      port_priv->ethsw_data->dpsw_handle,
			      port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
		return err;
	}

	dpaa2_switch_disable_ctrl_if_napi(ethsw);

	return 0;
}

static int dpaa2_switch_port_parent_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct ethsw_port_priv *port_priv = netdev_priv(dev);

	ppid->id_len = 1;
	ppid->id[0] = port_priv->ethsw_data->dev_id;

	return 0;
}

static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
					   size_t len)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = snprintf(name, len, "p%d", port_priv->idx);
	if (err >= len)
		return -EINVAL;

	return 0;
}

struct ethsw_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

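/* Emit one FDB dump entry as an RTM_NEWNEIGH netlink message on the
 * ndo_fdb_dump skb. Dynamically learnt entries are reported as NUD_REACHABLE,
 * static ones as NUD_NOARP.
 */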
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
				    struct ethsw_dump_ctx *dump)
{
	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
					     struct ethsw_port_priv *port_priv)
{
	int idx = port_priv->idx;
	int valid;

	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		valid = entry->if_info == port_priv->idx;
	else
		valid = entry->if_mask[idx / 8] & BIT(idx % 8);

	return valid;
}

static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
				    dpaa2_switch_fdb_cb_t cb, void *data)
{
	struct net_device *net_dev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct device *dev = net_dev->dev.parent;
	struct fdb_dump_entry *fdb_entries;
	struct fdb_dump_entry fdb_entry;
	dma_addr_t fdb_dump_iova;
	u16 num_fdb_entries;
	u32 fdb_dump_size;
	int err = 0, i;
	u8 *dma_mem;
	u16 fdb_id;

	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, fdb_dump_iova)) {
		netdev_err(net_dev, "dma_map_single() failed\n");
		err = -ENOMEM;
		goto err_map;
	}

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
	if (err) {
		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
		goto err_dump;
	}

	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);

	fdb_entries = (struct fdb_dump_entry *)dma_mem;
	for (i = 0; i < num_fdb_entries; i++) {
		fdb_entry = fdb_entries[i];

		err = cb(port_priv, &fdb_entry, data);
		if (err)
			goto end;
	}

end:
	kfree(dma_mem);

	return 0;

err_dump:
	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
err_map:
	kfree(dma_mem);
	return err;
}

static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}

static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
				      struct net_device *net_dev,
				      struct net_device *filter_dev, int *idx)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_dump_ctx dump = {
		.dev = net_dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
					   struct fdb_dump_entry *fdb_entry,
					   void *data __always_unused)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);

	return 0;
}

static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
{
	dpaa2_switch_fdb_iterate(port_priv,
				 dpaa2_switch_fdb_entry_fast_age, NULL);
}

static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
				      u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_add(netdev, &vlan);
}

static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
				       u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_del(netdev, &vlan);
}

static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *net_dev = port_priv->netdev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN];
	int err;

	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
		return 0;

	/* Get firmware address, if any */
	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, mac_addr);
	if (err) {
		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	}

	return 0;
}

static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
				 const struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	unsigned char *buffer_start;
	struct sk_buff **skbh, *skb;
	dma_addr_t fd_addr;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);

	skb = *skbh;
	buffer_start = (unsigned char *)skbh;

	dma_unmap_single(dev, fd_addr,
			 skb_tail_pointer(skb) - buffer_start,
			 DMA_TO_DEVICE);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

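/* Build a single-buffer Tx frame descriptor on top of the skb's own buffer:
 * the aligned buffer start holds a backpointer to the skb (used on Tx
 * confirmation), followed by the hardware annotation area and the frame data.
 */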
static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
					struct sk_buff *skb,
					struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	struct sk_buff **skbh;
	dma_addr_t addr;
	u8 *buff_start;
	void *hwa;

	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
			       DPAA2_SWITCH_TX_BUF_ALIGN,
			       DPAA2_SWITCH_TX_BUF_ALIGN);

	/* Clear FAS to have consistent values for TX confirmation. It is
	 * located in the first 8 bytes of the buffer's hardware annotation
	 * area
	 */
	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
	memset(hwa, 0, 8);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	addr = dma_map_single(dev, buff_start,
			      skb_tail_pointer(skb) - buff_start,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);

	return 0;
}

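/* ndo_start_xmit for a switch port: ensure the skb has enough headroom, is
 * unshared and linear, build a frame descriptor from it and enqueue the
 * descriptor to the port's Tx queuing destination, retrying for a bounded
 * number of times while the QBMan portal is busy.
 */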
static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
					struct net_device *net_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
	struct dpaa2_fd fd;
	int err;

	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
		if (unlikely(!ns)) {
			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
			goto err_free_skb;
		}
		dev_consume_skb_any(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx confirmation */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
		goto err_exit;
	}

	/* At this stage, we do not support non-linear skbs so just try to
	 * linearize the skb and if that's not working, just drop the packet.
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}

static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
				 struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return dpaa2_switch_cls_flower_replace(filter_block, f);
	case FLOW_CLS_DESTROY:
		return dpaa2_switch_cls_flower_destroy(filter_block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
				   struct tc_cls_matchall_offload *f)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dpaa2_switch_cls_matchall_replace(block, f);
	case TC_CLSMATCHALL_DESTROY:
		return dpaa2_switch_cls_matchall_destroy(block, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
	case TC_SETUP_CLSMATCHALL:
		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(dpaa2_switch_block_cb_list);

static int
dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		return err;
	}

	block->ports |= BIT(port_priv->idx);
	port_priv->filter_block = block;

	return 0;
}

static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
				 struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block != block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
		return err;
	}

	block->ports &= ~BIT(port_priv->idx);
	port_priv->filter_block = NULL;
	return 0;
}

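/* Bind the port to the filter block backing a shared tc block: offload the
 * block's mirror entries on this port, then move the port from the ACL table
 * it used so far to the one owned by the block.
 */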
static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
					struct dpaa2_switch_filter_block *block)
{
	struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
	int err;

	/* Offload all the mirror entries found in the block on this new port
	 * joining it.
	 */
	err = dpaa2_switch_block_offload_mirror(block, port_priv);
	if (err)
		return err;

	/* If the port is already bound to this ACL table then do nothing. This
	 * can happen when this port is the first one to join a tc block
	 */
	if (port_priv->filter_block == block)
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
	if (err)
		return err;

	/* Mark the previous ACL table as being unused if this was the last
	 * port that was using it.
	 */
	if (old_block->ports == 0)
		old_block->in_use = false;

	return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
}

static int
dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *new_block;
	int err;

	/* Unoffload all the mirror entries found in the block from the
	 * port leaving it.
	 */
	err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
	if (err)
		return err;

	/* We are the last port that leaves a block (an ACL table).
	 * We'll continue to use this table.
	 */
	if (block->ports == BIT(port_priv->idx))
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
	if (err)
		return err;

	if (block->ports == 0)
		block->in_use = false;

	new_block = dpaa2_switch_filter_block_get_unused(ethsw);
	new_block->in_use = true;
	return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
}

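/* FLOW_BLOCK_BIND handler: look up the flow block callback already registered
 * for this switch or allocate a new one, bind the port to the filter block
 * behind it and, for a newly allocated callback, register it with the flow
 * block infrastructure.
 */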
static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
					    struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);

	if (!block_cb) {
		/* If the filter block is not already known, then this port
		 * must be the first to join it. In this case, we can just
		 * continue to use our private table
		 */
		filter_block = port_priv->filter_block;

		block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
					       ethsw, filter_block, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		register_block = true;
	} else {
		filter_block = flow_block_cb_priv(block_cb);
	}

	flow_block_cb_incref(block_cb);
	err = dpaa2_switch_port_block_bind(port_priv, filter_block);
	if (err)
		goto err_block_bind;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list,
			      &dpaa2_switch_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}

static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
					       struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);
	if (!block_cb)
		return;

	filter_block = flow_block_cb_priv(block_cb);
	err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
				       struct flow_block_offload *f)
{
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = &dpaa2_switch_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		return dpaa2_switch_setup_tc_block_bind(netdev, f);
	case FLOW_BLOCK_UNBIND:
		dpaa2_switch_setup_tc_block_unbind(netdev, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
				      enum tc_setup_type type,
				      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK: {
		return dpaa2_switch_setup_tc_block(netdev, type_data);
	}
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct net_device_ops dpaa2_switch_port_ops = {
	.ndo_open = dpaa2_switch_port_open,
	.ndo_stop = dpaa2_switch_port_stop,

	.ndo_set_mac_address = eth_mac_addr,
	.ndo_get_stats64 = dpaa2_switch_port_get_stats,
	.ndo_change_mtu = dpaa2_switch_port_change_mtu,
	.ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats,
	.ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats,
	.ndo_fdb_dump = dpaa2_switch_port_fdb_dump,
	.ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add,
	.ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill,

	.ndo_start_xmit = dpaa2_switch_port_tx,
	.ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
	.ndo_setup_tc = dpaa2_switch_port_setup_tc,
};

bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
{
	return netdev->netdev_ops == &dpaa2_switch_port_ops;
}

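/* Find the DPMAC object connected to this switch port and open it. If the
 * port is PHY-backed, also connect it to the MAC endpoint so that phylink can
 * take over link management. A missing or non-DPMAC endpoint is not an error.
 */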
static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
{
	struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);

	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
		return PTR_ERR(dpmac_dev);

	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = port_priv->ethsw_data->mc_io;
	mac->net_dev = port_priv->netdev;

	err = dpaa2_mac_open(mac);
	if (err)
		goto err_free_mac;
	port_priv->mac = mac;

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		err = dpaa2_mac_connect(mac);
		if (err) {
			netdev_err(port_priv->netdev,
				   "Error connecting to the MAC endpoint %pe\n",
				   ERR_PTR(err));
			goto err_close_mac;
		}
	}

	return 0;

err_close_mac:
	dpaa2_mac_close(mac);
	port_priv->mac = NULL;
err_free_mac:
	kfree(mac);
	return err;
}

static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
{
	if (dpaa2_switch_port_is_type_phy(port_priv))
		dpaa2_mac_disconnect(port_priv->mac);

	if (!dpaa2_switch_port_has_mac(port_priv))
		return;

	dpaa2_mac_close(port_priv->mac);
	kfree(port_priv->mac);
	port_priv->mac = NULL;
}

static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	struct ethsw_port_priv *port_priv;
	u32 status = ~0;
	int err, if_id;

	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, &status);
	if (err) {
		dev_err(dev, "Can't get irq status (err %d)\n", err);
		goto out;
	}

	if_id = (status & 0xFFFF0000) >> 16;
	port_priv = ethsw->ports[if_id];

	if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
		dpaa2_switch_port_link_state_update(port_priv->netdev);
		dpaa2_switch_port_set_mac_addr(port_priv);
	}

	if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
		rtnl_lock();
		if (dpaa2_switch_port_has_mac(port_priv))
			dpaa2_switch_port_disconnect_mac(port_priv);
		else
			dpaa2_switch_port_connect_mac(port_priv);
		rtnl_unlock();
	}

out:
	err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    DPSW_IRQ_INDEX_IF, status);
	if (err)
		dev_err(dev, "Can't clear irq status (err %d)\n", err);

	return IRQ_HANDLED;
}

static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED | DPSW_IRQ_EVENT_ENDPOINT_CHANGED;
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(sw_dev);
	if (err) {
		dev_err(dev, "MC irqs allocation failed\n");
		return err;
	}

	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
		err = -EINVAL;
		goto free_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
		goto free_irq;
	}

	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];

	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
					NULL,
					dpaa2_switch_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(dev), dev);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_irq;
	}

	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
				DPSW_IRQ_INDEX_IF, mask);
	if (err) {
		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
		goto free_devm_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 1);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
		goto free_devm_irq;
	}

	return 0;

free_devm_irq:
	devm_free_irq(dev, irq->msi_desc->irq, dev);
free_irq:
	fsl_mc_free_irqs(sw_dev);
	return err;
}

static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);

	fsl_mc_free_irqs(sw_dev);
}

static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	enum dpsw_learning_mode learn_mode;
	int err;

	if (enable)
		learn_mode = DPSW_LEARNING_MODE_HW;
	else
		learn_mode = DPSW_LEARNING_MODE_DIS;

	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, learn_mode);
	if (err)
		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);

	if (!enable)
		dpaa2_switch_port_fast_age(port_priv);

	return err;
}

static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
						u8 state)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpaa2_switch_port_set_stp_state(port_priv, state);
	if (err)
		return err;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		err = dpaa2_switch_port_set_learning(port_priv, false);
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		err = dpaa2_switch_port_set_learning(port_priv,
						     port_priv->learn_ena);
		break;
	}

	return err;
}

static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
				   struct switchdev_brport_flags flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (flags.mask & BR_BCAST_FLOOD)
		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);

	if (flags.mask & BR_FLOOD)
		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);

	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
}

static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
					      struct switchdev_brport_flags flags,
					      struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
			   BR_MCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot configure multicast flooding independently of unicast");
unicast"); 1680 return -EINVAL; 1681 } 1682 } 1683 1684 return 0; 1685 } 1686 1687 static int dpaa2_switch_port_bridge_flags(struct net_device *netdev, 1688 struct switchdev_brport_flags flags, 1689 struct netlink_ext_ack *extack) 1690 { 1691 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1692 int err; 1693 1694 if (flags.mask & BR_LEARNING) { 1695 bool learn_ena = !!(flags.val & BR_LEARNING); 1696 1697 err = dpaa2_switch_port_set_learning(port_priv, learn_ena); 1698 if (err) 1699 return err; 1700 port_priv->learn_ena = learn_ena; 1701 } 1702 1703 if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) { 1704 err = dpaa2_switch_port_flood(port_priv, flags); 1705 if (err) 1706 return err; 1707 } 1708 1709 return 0; 1710 } 1711 1712 static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx, 1713 const struct switchdev_attr *attr, 1714 struct netlink_ext_ack *extack) 1715 { 1716 int err = 0; 1717 1718 switch (attr->id) { 1719 case SWITCHDEV_ATTR_ID_PORT_STP_STATE: 1720 err = dpaa2_switch_port_attr_stp_state_set(netdev, 1721 attr->u.stp_state); 1722 break; 1723 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: 1724 if (!attr->u.vlan_filtering) { 1725 NL_SET_ERR_MSG_MOD(extack, 1726 "The DPAA2 switch does not support VLAN-unaware operation"); 1727 return -EOPNOTSUPP; 1728 } 1729 break; 1730 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: 1731 err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack); 1732 break; 1733 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 1734 err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack); 1735 break; 1736 default: 1737 err = -EOPNOTSUPP; 1738 break; 1739 } 1740 1741 return err; 1742 } 1743 1744 int dpaa2_switch_port_vlans_add(struct net_device *netdev, 1745 const struct switchdev_obj_port_vlan *vlan) 1746 { 1747 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1748 struct ethsw_core *ethsw = port_priv->ethsw_data; 1749 struct dpsw_attr *attr = ðsw->sw_attr; 1750 int err = 0; 1751 1752 /* Make sure that the VLAN is not already configured 1753 * on the switch port 1754 */ 1755 if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) 1756 return -EEXIST; 1757 1758 /* Check if there is space for a new VLAN */ 1759 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 1760 ðsw->sw_attr); 1761 if (err) { 1762 netdev_err(netdev, "dpsw_get_attributes err %d\n", err); 1763 return err; 1764 } 1765 if (attr->max_vlans - attr->num_vlans < 1) 1766 return -ENOSPC; 1767 1768 /* Check if there is space for a new VLAN */ 1769 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 1770 ðsw->sw_attr); 1771 if (err) { 1772 netdev_err(netdev, "dpsw_get_attributes err %d\n", err); 1773 return err; 1774 } 1775 if (attr->max_vlans - attr->num_vlans < 1) 1776 return -ENOSPC; 1777 1778 if (!port_priv->ethsw_data->vlans[vlan->vid]) { 1779 /* this is a new VLAN */ 1780 err = dpaa2_switch_add_vlan(port_priv, vlan->vid); 1781 if (err) 1782 return err; 1783 1784 port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL; 1785 } 1786 1787 return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags); 1788 } 1789 1790 static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc, 1791 const unsigned char *addr) 1792 { 1793 struct netdev_hw_addr_list *list = (is_uc) ? 
static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
					    const unsigned char *addr)
{
	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	list_for_each_entry(ha, &list->list, list) {
		if (ether_addr_equal(ha->addr, addr)) {
			netif_addr_unlock_bh(netdev);
			return 1;
		}
	}
	netif_addr_unlock_bh(netdev);
	return 0;
}

static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* Check if address is already set on this port */
	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}

static int dpaa2_switch_port_obj_add(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_add(netdev,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_add(netdev,
						SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		/* If we are deleting the PVID of a port, use VLAN 4095 instead
		 * as we are sure that neither the bridge nor the 8021q module
		 * will use it
		 */
		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
	}

	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;

		/* Delete VLAN from switch if it is no longer configured on
		 * any port
		 */
		for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
				return 0; /* Found a port member in VID */

		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;

		err = dpaa2_switch_dellink(ethsw, vid);
		if (err)
			return err;
	}

	return 0;
}

int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}

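/* Remove an MDB entry: delete the multicast address from the hardware FDB
 * table and drop it from the port netdev's multicast address list.
 */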
static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -ENOENT;

	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_del(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_del err %d\n", err);
		return err;
	}

	return err;
}

static int dpaa2_switch_port_obj_del(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
					    struct switchdev_notifier_port_attr_info *ptr)
{
	int err;

	err = switchdev_handle_port_attr_set(netdev, ptr,
					     dpaa2_switch_port_dev_check,
					     dpaa2_switch_port_attr_set);
	return notifier_from_errno(err);
}

static struct notifier_block dpaa2_switch_port_switchdev_nb;
static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;

static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
					 struct net_device *upper_dev,
					 struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct ethsw_port_priv *other_port_priv;
	struct net_device *other_dev;
	struct list_head *iter;
	bool learn_ena;
	int err;

	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		other_port_priv = netdev_priv(other_dev);
		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interface from a different DPSW is in the bridge already");
			return -EINVAL;
		}
	}

	/* Delete the previously manually installed VLAN 1 */
	err = dpaa2_switch_port_del_vlan(port_priv, 1);
	if (err)
		return err;

	dpaa2_switch_port_set_fdb(port_priv, upper_dev);

	/* Inherit the initial bridge port learning state */
	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
	port_priv->learn_ena = learn_ena;

	/* Setup the egress flood policy (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		goto err_egress_flood;

	err = switchdev_bridge_port_offload(netdev, netdev, NULL,
					    &dpaa2_switch_port_switchdev_nb,
					    &dpaa2_switch_port_switchdev_blocking_nb,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	return 0;

err_switchdev_offload:
err_egress_flood:
	dpaa2_switch_port_set_fdb(port_priv, NULL);
	return err;
}

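/* vlan_for_each() callbacks used when a port moves to another FDB table on
 * bridge leave: RX VLANs installed through vlan_vid_add() are first removed
 * from the old FDB table and then re-installed against the new one.
 */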
2042 return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid); 2043 } 2044 2045 static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg) 2046 { 2047 __be16 vlan_proto = htons(ETH_P_8021Q); 2048 2049 if (vdev) 2050 vlan_proto = vlan_dev_vlan_proto(vdev); 2051 2052 return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid); 2053 } 2054 2055 static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev) 2056 { 2057 switchdev_bridge_port_unoffload(netdev, NULL, 2058 &dpaa2_switch_port_switchdev_nb, 2059 &dpaa2_switch_port_switchdev_blocking_nb); 2060 } 2061 2062 static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) 2063 { 2064 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 2065 struct dpaa2_switch_fdb *old_fdb = port_priv->fdb; 2066 struct ethsw_core *ethsw = port_priv->ethsw_data; 2067 int err; 2068 2069 /* First of all, fast age any learn FDB addresses on this switch port */ 2070 dpaa2_switch_port_fast_age(port_priv); 2071 2072 /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN 2073 * upper devices or otherwise from the FDB table that we are about to 2074 * leave 2075 */ 2076 err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev); 2077 if (err) 2078 netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err); 2079 2080 dpaa2_switch_port_set_fdb(port_priv, NULL); 2081 2082 /* Restore all RX VLANs into the new FDB table that we just joined */ 2083 err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev); 2084 if (err) 2085 netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err); 2086 2087 /* Reset the flooding state to denote that this port can send any 2088 * packet in standalone mode. With this, we are also ensuring that any 2089 * later bridge join will have the flooding flag on. 2090 */ 2091 port_priv->bcast_flood = true; 2092 port_priv->ucast_flood = true; 2093 2094 /* Setup the egress flood policy (broadcast, unknown unicast). 2095 * When the port is not under a bridge, only the CTRL interface is part 2096 * of the flooding domain besides the actual port 2097 */ 2098 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 2099 if (err) 2100 return err; 2101 2102 /* Recreate the egress flood domain of the FDB that we just left */ 2103 err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id); 2104 if (err) 2105 return err; 2106 2107 /* No HW learning when not under a bridge */ 2108 err = dpaa2_switch_port_set_learning(port_priv, false); 2109 if (err) 2110 return err; 2111 port_priv->learn_ena = false; 2112 2113 /* Add the VLAN 1 as PVID when not under a bridge. We need this since 2114 * the dpaa2 switch interfaces are not capable to be VLAN unaware 2115 */ 2116 return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID, 2117 BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID); 2118 } 2119 2120 static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev) 2121 { 2122 struct net_device *upper_dev; 2123 struct list_head *iter; 2124 2125 /* RCU read lock not necessary because we have write-side protection 2126 * (rtnl_mutex), however a non-rcu iterator does not exist. 
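	 * Iterating with the _rcu variant is therefore safe here while only
	 * holding the rtnl_mutex.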
2127 	 */
2128 	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
2129 		if (is_vlan_dev(upper_dev))
2130 			return -EOPNOTSUPP;
2131 
2132 	return 0;
2133 }
2134 
2135 static int
2136 dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
2137 					  struct net_device *upper_dev,
2138 					  struct netlink_ext_ack *extack)
2139 {
2140 	int err;
2141 
2142 	if (!br_vlan_enabled(upper_dev)) {
2143 		NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
2144 		return -EOPNOTSUPP;
2145 	}
2146 
2147 	err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
2148 	if (err) {
2149 		NL_SET_ERR_MSG_MOD(extack,
2150 				   "Cannot join a bridge while VLAN uppers are present");
2151 		return err;
2152 	}
2153 
2154 	return 0;
2155 }
2156 
2157 static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
2158 					     unsigned long event, void *ptr)
2159 {
2160 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
2161 	struct netdev_notifier_changeupper_info *info = ptr;
2162 	struct netlink_ext_ack *extack;
2163 	struct net_device *upper_dev;
2164 	int err = 0;
2165 
2166 	if (!dpaa2_switch_port_dev_check(netdev))
2167 		return NOTIFY_DONE;
2168 
2169 	extack = netdev_notifier_info_to_extack(&info->info);
2170 
2171 	switch (event) {
2172 	case NETDEV_PRECHANGEUPPER:
2173 		upper_dev = info->upper_dev;
2174 		if (!netif_is_bridge_master(upper_dev))
2175 			break;
2176 
2177 		err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
2178 								upper_dev,
2179 								extack);
2180 		if (err)
2181 			goto out;
2182 
2183 		if (!info->linking)
2184 			dpaa2_switch_port_pre_bridge_leave(netdev);
2185 
2186 		break;
2187 	case NETDEV_CHANGEUPPER:
2188 		upper_dev = info->upper_dev;
2189 		if (netif_is_bridge_master(upper_dev)) {
2190 			if (info->linking)
2191 				err = dpaa2_switch_port_bridge_join(netdev,
2192 								    upper_dev,
2193 								    extack);
2194 			else
2195 				err = dpaa2_switch_port_bridge_leave(netdev);
2196 		}
2197 		break;
2198 	}
2199 
2200 out:
2201 	return notifier_from_errno(err);
2202 }
2203 
2204 struct ethsw_switchdev_event_work {
2205 	struct work_struct work;
2206 	struct switchdev_notifier_fdb_info fdb_info;
2207 	struct net_device *dev;
2208 	unsigned long event;
2209 };
2210 
2211 static void dpaa2_switch_event_work(struct work_struct *work)
2212 {
2213 	struct ethsw_switchdev_event_work *switchdev_work =
2214 		container_of(work, struct ethsw_switchdev_event_work, work);
2215 	struct net_device *dev = switchdev_work->dev;
2216 	struct switchdev_notifier_fdb_info *fdb_info;
2217 	int err;
2218 
2219 	rtnl_lock();
2220 	fdb_info = &switchdev_work->fdb_info;
2221 
2222 	switch (switchdev_work->event) {
2223 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2224 		if (!fdb_info->added_by_user || fdb_info->is_local)
2225 			break;
2226 		if (is_unicast_ether_addr(fdb_info->addr))
2227 			err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
2228 							   fdb_info->addr);
2229 		else
2230 			err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
2231 							   fdb_info->addr);
2232 		if (err)
2233 			break;
2234 		fdb_info->offloaded = true;
2235 		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
2236 					 &fdb_info->info, NULL);
2237 		break;
2238 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2239 		if (!fdb_info->added_by_user || fdb_info->is_local)
2240 			break;
2241 		if (is_unicast_ether_addr(fdb_info->addr))
2242 			dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
2243 		else
2244 			dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
2245 		break;
2246 	}
2247 
2248 	rtnl_unlock();
2249 	kfree(switchdev_work->fdb_info.addr);
2250 	kfree(switchdev_work);
2251 	dev_put(dev);
2252 }
2253 
2254 /* Called under rcu_read_lock() */
2255 static int
dpaa2_switch_port_event(struct notifier_block *nb, 2256 unsigned long event, void *ptr) 2257 { 2258 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2259 struct ethsw_port_priv *port_priv = netdev_priv(dev); 2260 struct ethsw_switchdev_event_work *switchdev_work; 2261 struct switchdev_notifier_fdb_info *fdb_info = ptr; 2262 struct ethsw_core *ethsw = port_priv->ethsw_data; 2263 2264 if (event == SWITCHDEV_PORT_ATTR_SET) 2265 return dpaa2_switch_port_attr_set_event(dev, ptr); 2266 2267 if (!dpaa2_switch_port_dev_check(dev)) 2268 return NOTIFY_DONE; 2269 2270 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 2271 if (!switchdev_work) 2272 return NOTIFY_BAD; 2273 2274 INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); 2275 switchdev_work->dev = dev; 2276 switchdev_work->event = event; 2277 2278 switch (event) { 2279 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2280 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2281 memcpy(&switchdev_work->fdb_info, ptr, 2282 sizeof(switchdev_work->fdb_info)); 2283 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 2284 if (!switchdev_work->fdb_info.addr) 2285 goto err_addr_alloc; 2286 2287 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 2288 fdb_info->addr); 2289 2290 /* Take a reference on the device to avoid being freed. */ 2291 dev_hold(dev); 2292 break; 2293 default: 2294 kfree(switchdev_work); 2295 return NOTIFY_DONE; 2296 } 2297 2298 queue_work(ethsw->workqueue, &switchdev_work->work); 2299 2300 return NOTIFY_DONE; 2301 2302 err_addr_alloc: 2303 kfree(switchdev_work); 2304 return NOTIFY_BAD; 2305 } 2306 2307 static int dpaa2_switch_port_obj_event(unsigned long event, 2308 struct net_device *netdev, 2309 struct switchdev_notifier_port_obj_info *port_obj_info) 2310 { 2311 int err = -EOPNOTSUPP; 2312 2313 if (!dpaa2_switch_port_dev_check(netdev)) 2314 return NOTIFY_DONE; 2315 2316 switch (event) { 2317 case SWITCHDEV_PORT_OBJ_ADD: 2318 err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); 2319 break; 2320 case SWITCHDEV_PORT_OBJ_DEL: 2321 err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); 2322 break; 2323 } 2324 2325 port_obj_info->handled = true; 2326 return notifier_from_errno(err); 2327 } 2328 2329 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, 2330 unsigned long event, void *ptr) 2331 { 2332 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 2333 2334 switch (event) { 2335 case SWITCHDEV_PORT_OBJ_ADD: 2336 case SWITCHDEV_PORT_OBJ_DEL: 2337 return dpaa2_switch_port_obj_event(event, dev, ptr); 2338 case SWITCHDEV_PORT_ATTR_SET: 2339 return dpaa2_switch_port_attr_set_event(dev, ptr); 2340 } 2341 2342 return NOTIFY_DONE; 2343 } 2344 2345 /* Build a linear skb based on a single-buffer frame descriptor */ 2346 static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw, 2347 const struct dpaa2_fd *fd) 2348 { 2349 u16 fd_offset = dpaa2_fd_get_offset(fd); 2350 dma_addr_t addr = dpaa2_fd_get_addr(fd); 2351 u32 fd_length = dpaa2_fd_get_len(fd); 2352 struct device *dev = ethsw->dev; 2353 struct sk_buff *skb = NULL; 2354 void *fd_vaddr; 2355 2356 fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr); 2357 dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE, 2358 DMA_FROM_DEVICE); 2359 2360 skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE + 2361 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 2362 if (unlikely(!skb)) { 2363 dev_err(dev, "build_skb() failed\n"); 2364 return NULL; 2365 } 2366 2367 skb_reserve(skb, fd_offset); 2368 skb_put(skb, fd_length); 2369 2370 
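	/* The buffer now belongs to the skb, so account for one less buffer in
	 * the pool; dpaa2_switch_refill_bp() will top it up again later.
	 */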
ethsw->buf_count--; 2371 2372 return skb; 2373 } 2374 2375 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, 2376 const struct dpaa2_fd *fd) 2377 { 2378 dpaa2_switch_free_fd(fq->ethsw, fd); 2379 } 2380 2381 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, 2382 const struct dpaa2_fd *fd) 2383 { 2384 struct ethsw_core *ethsw = fq->ethsw; 2385 struct ethsw_port_priv *port_priv; 2386 struct net_device *netdev; 2387 struct vlan_ethhdr *hdr; 2388 struct sk_buff *skb; 2389 u16 vlan_tci, vid; 2390 int if_id, err; 2391 2392 /* get switch ingress interface ID */ 2393 if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF; 2394 2395 if (if_id >= ethsw->sw_attr.num_ifs) { 2396 dev_err(ethsw->dev, "Frame received from unknown interface!\n"); 2397 goto err_free_fd; 2398 } 2399 port_priv = ethsw->ports[if_id]; 2400 netdev = port_priv->netdev; 2401 2402 /* build the SKB based on the FD received */ 2403 if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) { 2404 if (net_ratelimit()) { 2405 netdev_err(netdev, "Received invalid frame format\n"); 2406 goto err_free_fd; 2407 } 2408 } 2409 2410 skb = dpaa2_switch_build_linear_skb(ethsw, fd); 2411 if (unlikely(!skb)) 2412 goto err_free_fd; 2413 2414 skb_reset_mac_header(skb); 2415 2416 /* Remove the VLAN header if the packet that we just received has a vid 2417 * equal to the port PVIDs. Since the dpaa2-switch can operate only in 2418 * VLAN-aware mode and no alterations are made on the packet when it's 2419 * redirected/mirrored to the control interface, we are sure that there 2420 * will always be a VLAN header present. 2421 */ 2422 hdr = vlan_eth_hdr(skb); 2423 vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK; 2424 if (vid == port_priv->pvid) { 2425 err = __skb_vlan_pop(skb, &vlan_tci); 2426 if (err) { 2427 dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err); 2428 goto err_free_fd; 2429 } 2430 } 2431 2432 skb->dev = netdev; 2433 skb->protocol = eth_type_trans(skb, skb->dev); 2434 2435 /* Setup the offload_fwd_mark only if the port is under a bridge */ 2436 skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev); 2437 2438 netif_receive_skb(skb); 2439 2440 return; 2441 2442 err_free_fd: 2443 dpaa2_switch_free_fd(ethsw, fd); 2444 } 2445 2446 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) 2447 { 2448 ethsw->features = 0; 2449 2450 if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) 2451 ethsw->features |= ETHSW_FEATURE_MAC_ADDR; 2452 } 2453 2454 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw) 2455 { 2456 struct dpsw_ctrl_if_attr ctrl_if_attr; 2457 struct device *dev = ethsw->dev; 2458 int i = 0; 2459 int err; 2460 2461 err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2462 &ctrl_if_attr); 2463 if (err) { 2464 dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err); 2465 return err; 2466 } 2467 2468 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; 2469 ethsw->fq[i].ethsw = ethsw; 2470 ethsw->fq[i++].type = DPSW_QUEUE_RX; 2471 2472 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; 2473 ethsw->fq[i].ethsw = ethsw; 2474 ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF; 2475 2476 return 0; 2477 } 2478 2479 /* Free buffers acquired from the buffer pool or which were meant to 2480 * be released in the pool 2481 */ 2482 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count) 2483 { 2484 struct device *dev = ethsw->dev; 2485 void *vaddr; 2486 int i; 2487 2488 for (i = 0; i < count; i++) { 2489 vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]); 2490 
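		/* Unmap the buffer and hand the backing page back to the allocator */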
dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE, 2491 DMA_FROM_DEVICE); 2492 free_pages((unsigned long)vaddr, 0); 2493 } 2494 } 2495 2496 /* Perform a single release command to add buffers 2497 * to the specified buffer pool 2498 */ 2499 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid) 2500 { 2501 struct device *dev = ethsw->dev; 2502 u64 buf_array[BUFS_PER_CMD]; 2503 struct page *page; 2504 int retries = 0; 2505 dma_addr_t addr; 2506 int err; 2507 int i; 2508 2509 for (i = 0; i < BUFS_PER_CMD; i++) { 2510 /* Allocate one page for each Rx buffer. WRIOP sees 2511 * the entire page except for a tailroom reserved for 2512 * skb shared info 2513 */ 2514 page = dev_alloc_pages(0); 2515 if (!page) { 2516 dev_err(dev, "buffer allocation failed\n"); 2517 goto err_alloc; 2518 } 2519 2520 addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE, 2521 DMA_FROM_DEVICE); 2522 if (dma_mapping_error(dev, addr)) { 2523 dev_err(dev, "dma_map_single() failed\n"); 2524 goto err_map; 2525 } 2526 buf_array[i] = addr; 2527 } 2528 2529 release_bufs: 2530 /* In case the portal is busy, retry until successful or 2531 * max retries hit. 2532 */ 2533 while ((err = dpaa2_io_service_release(NULL, bpid, 2534 buf_array, i)) == -EBUSY) { 2535 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) 2536 break; 2537 2538 cpu_relax(); 2539 } 2540 2541 /* If release command failed, clean up and bail out. */ 2542 if (err) { 2543 dpaa2_switch_free_bufs(ethsw, buf_array, i); 2544 return 0; 2545 } 2546 2547 return i; 2548 2549 err_map: 2550 __free_pages(page, 0); 2551 err_alloc: 2552 /* If we managed to allocate at least some buffers, 2553 * release them to hardware 2554 */ 2555 if (i) 2556 goto release_bufs; 2557 2558 return 0; 2559 } 2560 2561 static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) 2562 { 2563 int *count = ðsw->buf_count; 2564 int new_count; 2565 int err = 0; 2566 2567 if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) { 2568 do { 2569 new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2570 if (unlikely(!new_count)) { 2571 /* Out of memory; abort for now, we'll 2572 * try later on 2573 */ 2574 break; 2575 } 2576 *count += new_count; 2577 } while (*count < DPAA2_ETHSW_NUM_BUFS); 2578 2579 if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS)) 2580 err = -ENOMEM; 2581 } 2582 2583 return err; 2584 } 2585 2586 static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) 2587 { 2588 int *count, i; 2589 2590 for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { 2591 count = ðsw->buf_count; 2592 *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2593 2594 if (unlikely(*count < BUFS_PER_CMD)) 2595 return -ENOMEM; 2596 } 2597 2598 return 0; 2599 } 2600 2601 static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw) 2602 { 2603 u64 buf_array[BUFS_PER_CMD]; 2604 int ret; 2605 2606 do { 2607 ret = dpaa2_io_service_acquire(NULL, ethsw->bpid, 2608 buf_array, BUFS_PER_CMD); 2609 if (ret < 0) { 2610 dev_err(ethsw->dev, 2611 "dpaa2_io_service_acquire() = %d\n", ret); 2612 return; 2613 } 2614 dpaa2_switch_free_bufs(ethsw, buf_array, ret); 2615 2616 } while (ret); 2617 } 2618 2619 static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) 2620 { 2621 struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 }; 2622 struct device *dev = ethsw->dev; 2623 struct fsl_mc_device *dpbp_dev; 2624 struct dpbp_attr dpbp_attrs; 2625 int err; 2626 2627 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 2628 &dpbp_dev); 2629 if (err) { 2630 if (err == -ENXIO) 2631 err = -EPROBE_DEFER; 2632 
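			/* -ENXIO: no DPBP object available yet, retry the probe later */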
else 2633 dev_err(dev, "DPBP device allocation failed\n"); 2634 return err; 2635 } 2636 ethsw->dpbp_dev = dpbp_dev; 2637 2638 err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id, 2639 &dpbp_dev->mc_handle); 2640 if (err) { 2641 dev_err(dev, "dpbp_open() failed\n"); 2642 goto err_open; 2643 } 2644 2645 err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2646 if (err) { 2647 dev_err(dev, "dpbp_reset() failed\n"); 2648 goto err_reset; 2649 } 2650 2651 err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2652 if (err) { 2653 dev_err(dev, "dpbp_enable() failed\n"); 2654 goto err_enable; 2655 } 2656 2657 err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle, 2658 &dpbp_attrs); 2659 if (err) { 2660 dev_err(dev, "dpbp_get_attributes() failed\n"); 2661 goto err_get_attr; 2662 } 2663 2664 dpsw_ctrl_if_pools_cfg.num_dpbp = 1; 2665 dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id; 2666 dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE; 2667 dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0; 2668 2669 err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle, 2670 &dpsw_ctrl_if_pools_cfg); 2671 if (err) { 2672 dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); 2673 goto err_get_attr; 2674 } 2675 ethsw->bpid = dpbp_attrs.id; 2676 2677 return 0; 2678 2679 err_get_attr: 2680 dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2681 err_enable: 2682 err_reset: 2683 dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2684 err_open: 2685 fsl_mc_object_free(dpbp_dev); 2686 return err; 2687 } 2688 2689 static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw) 2690 { 2691 dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2692 dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2693 fsl_mc_object_free(ethsw->dpbp_dev); 2694 } 2695 2696 static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw) 2697 { 2698 int i; 2699 2700 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2701 ethsw->fq[i].store = 2702 dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE, 2703 ethsw->dev); 2704 if (!ethsw->fq[i].store) { 2705 dev_err(ethsw->dev, "dpaa2_io_store_create failed\n"); 2706 while (--i >= 0) 2707 dpaa2_io_store_destroy(ethsw->fq[i].store); 2708 return -ENOMEM; 2709 } 2710 } 2711 2712 return 0; 2713 } 2714 2715 static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw) 2716 { 2717 int i; 2718 2719 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2720 dpaa2_io_store_destroy(ethsw->fq[i].store); 2721 } 2722 2723 static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq) 2724 { 2725 int err, retries = 0; 2726 2727 /* Try to pull from the FQ while the portal is busy and we didn't hit 2728 * the maximum number fo retries 2729 */ 2730 do { 2731 err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store); 2732 cpu_relax(); 2733 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2734 2735 if (unlikely(err)) 2736 dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err); 2737 2738 return err; 2739 } 2740 2741 /* Consume all frames pull-dequeued into the store */ 2742 static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq) 2743 { 2744 struct ethsw_core *ethsw = fq->ethsw; 2745 int cleaned = 0, is_last; 2746 struct dpaa2_dq *dq; 2747 int retries = 0; 2748 2749 do { 2750 /* Get the next available FD from the store */ 2751 dq = dpaa2_io_store_next(fq->store, &is_last); 2752 if (unlikely(!dq)) { 2753 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) { 2754 dev_err_once(ethsw->dev, 2755 "No valid dequeue response\n"); 2756 return -ETIMEDOUT; 
2757 } 2758 continue; 2759 } 2760 2761 if (fq->type == DPSW_QUEUE_RX) 2762 dpaa2_switch_rx(fq, dpaa2_dq_fd(dq)); 2763 else 2764 dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq)); 2765 cleaned++; 2766 2767 } while (!is_last); 2768 2769 return cleaned; 2770 } 2771 2772 /* NAPI poll routine */ 2773 static int dpaa2_switch_poll(struct napi_struct *napi, int budget) 2774 { 2775 int err, cleaned = 0, store_cleaned, work_done; 2776 struct dpaa2_switch_fq *fq; 2777 int retries = 0; 2778 2779 fq = container_of(napi, struct dpaa2_switch_fq, napi); 2780 2781 do { 2782 err = dpaa2_switch_pull_fq(fq); 2783 if (unlikely(err)) 2784 break; 2785 2786 /* Refill pool if appropriate */ 2787 dpaa2_switch_refill_bp(fq->ethsw); 2788 2789 store_cleaned = dpaa2_switch_store_consume(fq); 2790 cleaned += store_cleaned; 2791 2792 if (cleaned >= budget) { 2793 work_done = budget; 2794 goto out; 2795 } 2796 2797 } while (store_cleaned); 2798 2799 /* We didn't consume the entire budget, so finish napi and re-enable 2800 * data availability notifications 2801 */ 2802 napi_complete_done(napi, cleaned); 2803 do { 2804 err = dpaa2_io_service_rearm(NULL, &fq->nctx); 2805 cpu_relax(); 2806 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2807 2808 work_done = max(cleaned, 1); 2809 out: 2810 2811 return work_done; 2812 } 2813 2814 static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) 2815 { 2816 struct dpaa2_switch_fq *fq; 2817 2818 fq = container_of(nctx, struct dpaa2_switch_fq, nctx); 2819 2820 napi_schedule(&fq->napi); 2821 } 2822 2823 static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw) 2824 { 2825 struct dpsw_ctrl_if_queue_cfg queue_cfg; 2826 struct dpaa2_io_notification_ctx *nctx; 2827 int err, i, j; 2828 2829 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2830 nctx = ðsw->fq[i].nctx; 2831 2832 /* Register a new software context for the FQID. 
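	 * The FQDAN callback registered below schedules NAPI once frames
	 * become available on the queue.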
2833 * By using NULL as the first parameter, we specify that we do 2834 * not care on which cpu are interrupts received for this queue 2835 */ 2836 nctx->is_cdan = 0; 2837 nctx->id = ethsw->fq[i].fqid; 2838 nctx->desired_cpu = DPAA2_IO_ANY_CPU; 2839 nctx->cb = dpaa2_switch_fqdan_cb; 2840 err = dpaa2_io_service_register(NULL, nctx, ethsw->dev); 2841 if (err) { 2842 err = -EPROBE_DEFER; 2843 goto err_register; 2844 } 2845 2846 queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST | 2847 DPSW_CTRL_IF_QUEUE_OPT_USER_CTX; 2848 queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO; 2849 queue_cfg.dest_cfg.dest_id = nctx->dpio_id; 2850 queue_cfg.dest_cfg.priority = 0; 2851 queue_cfg.user_ctx = nctx->qman64; 2852 2853 err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0, 2854 ethsw->dpsw_handle, 2855 ethsw->fq[i].type, 2856 &queue_cfg); 2857 if (err) 2858 goto err_set_queue; 2859 } 2860 2861 return 0; 2862 2863 err_set_queue: 2864 dpaa2_io_service_deregister(NULL, nctx, ethsw->dev); 2865 err_register: 2866 for (j = 0; j < i; j++) 2867 dpaa2_io_service_deregister(NULL, ðsw->fq[j].nctx, 2868 ethsw->dev); 2869 2870 return err; 2871 } 2872 2873 static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw) 2874 { 2875 int i; 2876 2877 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2878 dpaa2_io_service_deregister(NULL, ðsw->fq[i].nctx, 2879 ethsw->dev); 2880 } 2881 2882 static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) 2883 { 2884 int err; 2885 2886 /* setup FQs for Rx and Tx Conf */ 2887 err = dpaa2_switch_setup_fqs(ethsw); 2888 if (err) 2889 return err; 2890 2891 /* setup the buffer pool needed on the Rx path */ 2892 err = dpaa2_switch_setup_dpbp(ethsw); 2893 if (err) 2894 return err; 2895 2896 err = dpaa2_switch_alloc_rings(ethsw); 2897 if (err) 2898 goto err_free_dpbp; 2899 2900 err = dpaa2_switch_setup_dpio(ethsw); 2901 if (err) 2902 goto err_destroy_rings; 2903 2904 err = dpaa2_switch_seed_bp(ethsw); 2905 if (err) 2906 goto err_deregister_dpio; 2907 2908 err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); 2909 if (err) { 2910 dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); 2911 goto err_drain_dpbp; 2912 } 2913 2914 return 0; 2915 2916 err_drain_dpbp: 2917 dpaa2_switch_drain_bp(ethsw); 2918 err_deregister_dpio: 2919 dpaa2_switch_free_dpio(ethsw); 2920 err_destroy_rings: 2921 dpaa2_switch_destroy_rings(ethsw); 2922 err_free_dpbp: 2923 dpaa2_switch_free_dpbp(ethsw); 2924 2925 return err; 2926 } 2927 2928 static void dpaa2_switch_remove_port(struct ethsw_core *ethsw, 2929 u16 port_idx) 2930 { 2931 struct ethsw_port_priv *port_priv = ethsw->ports[port_idx]; 2932 2933 rtnl_lock(); 2934 dpaa2_switch_port_disconnect_mac(port_priv); 2935 rtnl_unlock(); 2936 free_netdev(port_priv->netdev); 2937 ethsw->ports[port_idx] = NULL; 2938 } 2939 2940 static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) 2941 { 2942 struct device *dev = &sw_dev->dev; 2943 struct ethsw_core *ethsw = dev_get_drvdata(dev); 2944 struct dpsw_vlan_if_cfg vcfg = {0}; 2945 struct dpsw_tci_cfg tci_cfg = {0}; 2946 struct dpsw_stp_cfg stp_cfg; 2947 int err; 2948 u16 i; 2949 2950 ethsw->dev_id = sw_dev->obj_desc.id; 2951 2952 err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, ðsw->dpsw_handle); 2953 if (err) { 2954 dev_err(dev, "dpsw_open err %d\n", err); 2955 return err; 2956 } 2957 2958 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2959 ðsw->sw_attr); 2960 if (err) { 2961 dev_err(dev, "dpsw_get_attributes err %d\n", err); 2962 goto err_close; 2963 } 2964 2965 err = dpsw_get_api_version(ethsw->mc_io, 0, 
2966 ðsw->major, 2967 ðsw->minor); 2968 if (err) { 2969 dev_err(dev, "dpsw_get_api_version err %d\n", err); 2970 goto err_close; 2971 } 2972 2973 /* Minimum supported DPSW version check */ 2974 if (ethsw->major < DPSW_MIN_VER_MAJOR || 2975 (ethsw->major == DPSW_MIN_VER_MAJOR && 2976 ethsw->minor < DPSW_MIN_VER_MINOR)) { 2977 dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n", 2978 ethsw->major, ethsw->minor); 2979 err = -EOPNOTSUPP; 2980 goto err_close; 2981 } 2982 2983 if (!dpaa2_switch_supports_cpu_traffic(ethsw)) { 2984 err = -EOPNOTSUPP; 2985 goto err_close; 2986 } 2987 2988 dpaa2_switch_detect_features(ethsw); 2989 2990 err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle); 2991 if (err) { 2992 dev_err(dev, "dpsw_reset err %d\n", err); 2993 goto err_close; 2994 } 2995 2996 stp_cfg.vlan_id = DEFAULT_VLAN_ID; 2997 stp_cfg.state = DPSW_STP_STATE_FORWARDING; 2998 2999 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3000 err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i); 3001 if (err) { 3002 dev_err(dev, "dpsw_if_disable err %d\n", err); 3003 goto err_close; 3004 } 3005 3006 err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i, 3007 &stp_cfg); 3008 if (err) { 3009 dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", 3010 err, i); 3011 goto err_close; 3012 } 3013 3014 /* Switch starts with all ports configured to VLAN 1. Need to 3015 * remove this setting to allow configuration at bridge join 3016 */ 3017 vcfg.num_ifs = 1; 3018 vcfg.if_id[0] = i; 3019 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle, 3020 DEFAULT_VLAN_ID, &vcfg); 3021 if (err) { 3022 dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n", 3023 err); 3024 goto err_close; 3025 } 3026 3027 tci_cfg.vlan_id = 4095; 3028 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg); 3029 if (err) { 3030 dev_err(dev, "dpsw_if_set_tci err %d\n", err); 3031 goto err_close; 3032 } 3033 3034 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 3035 DEFAULT_VLAN_ID, &vcfg); 3036 if (err) { 3037 dev_err(dev, "dpsw_vlan_remove_if err %d\n", err); 3038 goto err_close; 3039 } 3040 } 3041 3042 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID); 3043 if (err) { 3044 dev_err(dev, "dpsw_vlan_remove err %d\n", err); 3045 goto err_close; 3046 } 3047 3048 ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered", 3049 WQ_MEM_RECLAIM, "ethsw", 3050 ethsw->sw_attr.id); 3051 if (!ethsw->workqueue) { 3052 err = -ENOMEM; 3053 goto err_close; 3054 } 3055 3056 err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0); 3057 if (err) 3058 goto err_destroy_ordered_workqueue; 3059 3060 err = dpaa2_switch_ctrl_if_setup(ethsw); 3061 if (err) 3062 goto err_destroy_ordered_workqueue; 3063 3064 return 0; 3065 3066 err_destroy_ordered_workqueue: 3067 destroy_workqueue(ethsw->workqueue); 3068 3069 err_close: 3070 dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 3071 return err; 3072 } 3073 3074 /* Add an ACL to redirect frames with specific destination MAC address to 3075 * control interface 3076 */ 3077 static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, 3078 const char *mac) 3079 { 3080 struct dpaa2_switch_acl_entry acl_entry = {0}; 3081 3082 /* Match on the destination MAC address */ 3083 ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac); 3084 eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac); 3085 3086 /* Trap to CPU */ 3087 acl_entry.cfg.precedence = 0; 3088 acl_entry.cfg.result.action = 
DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; 3089 3090 return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry); 3091 } 3092 3093 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) 3094 { 3095 const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; 3096 struct switchdev_obj_port_vlan vlan = { 3097 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 3098 .vid = DEFAULT_VLAN_ID, 3099 .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID, 3100 }; 3101 struct net_device *netdev = port_priv->netdev; 3102 struct ethsw_core *ethsw = port_priv->ethsw_data; 3103 struct dpaa2_switch_filter_block *filter_block; 3104 struct dpsw_fdb_cfg fdb_cfg = {0}; 3105 struct dpsw_if_attr dpsw_if_attr; 3106 struct dpaa2_switch_fdb *fdb; 3107 struct dpsw_acl_cfg acl_cfg; 3108 u16 fdb_id, acl_tbl_id; 3109 int err; 3110 3111 /* Get the Tx queue for this specific port */ 3112 err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 3113 port_priv->idx, &dpsw_if_attr); 3114 if (err) { 3115 netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err); 3116 return err; 3117 } 3118 port_priv->tx_qdid = dpsw_if_attr.qdid; 3119 3120 /* Create a FDB table for this particular switch port */ 3121 fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs; 3122 err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 3123 &fdb_id, &fdb_cfg); 3124 if (err) { 3125 netdev_err(netdev, "dpsw_fdb_add err %d\n", err); 3126 return err; 3127 } 3128 3129 /* Find an unused dpaa2_switch_fdb structure and use it */ 3130 fdb = dpaa2_switch_fdb_get_unused(ethsw); 3131 fdb->fdb_id = fdb_id; 3132 fdb->in_use = true; 3133 fdb->bridge_dev = NULL; 3134 port_priv->fdb = fdb; 3135 3136 /* We need to add VLAN 1 as the PVID on this port until it is under a 3137 * bridge since the DPAA2 switch is not able to handle the traffic in a 3138 * VLAN unaware fashion 3139 */ 3140 err = dpaa2_switch_port_vlans_add(netdev, &vlan); 3141 if (err) 3142 return err; 3143 3144 /* Setup the egress flooding domains (broadcast, unknown unicast */ 3145 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 3146 if (err) 3147 return err; 3148 3149 /* Create an ACL table to be used by this switch port */ 3150 acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES; 3151 err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 3152 &acl_tbl_id, &acl_cfg); 3153 if (err) { 3154 netdev_err(netdev, "dpsw_acl_add err %d\n", err); 3155 return err; 3156 } 3157 3158 filter_block = dpaa2_switch_filter_block_get_unused(ethsw); 3159 filter_block->ethsw = ethsw; 3160 filter_block->acl_id = acl_tbl_id; 3161 filter_block->in_use = true; 3162 filter_block->num_acl_rules = 0; 3163 INIT_LIST_HEAD(&filter_block->acl_entries); 3164 INIT_LIST_HEAD(&filter_block->mirror_entries); 3165 3166 err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block); 3167 if (err) 3168 return err; 3169 3170 err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa); 3171 if (err) 3172 return err; 3173 3174 return err; 3175 } 3176 3177 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) 3178 { 3179 dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3180 dpaa2_switch_free_dpio(ethsw); 3181 dpaa2_switch_destroy_rings(ethsw); 3182 dpaa2_switch_drain_bp(ethsw); 3183 dpaa2_switch_free_dpbp(ethsw); 3184 } 3185 3186 static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev) 3187 { 3188 struct device *dev = &sw_dev->dev; 3189 struct ethsw_core *ethsw = dev_get_drvdata(dev); 3190 int err; 3191 3192 
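	/* Quiesce and free the control interface resources (Rx/Tx conf queues,
	 * DPIO notifications, buffer pool) before closing the DPSW object.
	 */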
dpaa2_switch_ctrl_if_teardown(ethsw); 3193 3194 destroy_workqueue(ethsw->workqueue); 3195 3196 err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 3197 if (err) 3198 dev_warn(dev, "dpsw_close err %d\n", err); 3199 } 3200 3201 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) 3202 { 3203 struct ethsw_port_priv *port_priv; 3204 struct ethsw_core *ethsw; 3205 struct device *dev; 3206 int i; 3207 3208 dev = &sw_dev->dev; 3209 ethsw = dev_get_drvdata(dev); 3210 3211 dpaa2_switch_teardown_irqs(sw_dev); 3212 3213 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3214 3215 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3216 port_priv = ethsw->ports[i]; 3217 unregister_netdev(port_priv->netdev); 3218 dpaa2_switch_remove_port(ethsw, i); 3219 } 3220 3221 kfree(ethsw->fdbs); 3222 kfree(ethsw->filter_blocks); 3223 kfree(ethsw->ports); 3224 3225 dpaa2_switch_teardown(sw_dev); 3226 3227 fsl_mc_portal_free(ethsw->mc_io); 3228 3229 kfree(ethsw); 3230 3231 dev_set_drvdata(dev, NULL); 3232 3233 return 0; 3234 } 3235 3236 static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, 3237 u16 port_idx) 3238 { 3239 struct ethsw_port_priv *port_priv; 3240 struct device *dev = ethsw->dev; 3241 struct net_device *port_netdev; 3242 int err; 3243 3244 port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); 3245 if (!port_netdev) { 3246 dev_err(dev, "alloc_etherdev error\n"); 3247 return -ENOMEM; 3248 } 3249 3250 port_priv = netdev_priv(port_netdev); 3251 port_priv->netdev = port_netdev; 3252 port_priv->ethsw_data = ethsw; 3253 3254 port_priv->idx = port_idx; 3255 port_priv->stp_state = BR_STATE_FORWARDING; 3256 3257 SET_NETDEV_DEV(port_netdev, dev); 3258 port_netdev->netdev_ops = &dpaa2_switch_port_ops; 3259 port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops; 3260 3261 port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM; 3262 3263 port_priv->bcast_flood = true; 3264 port_priv->ucast_flood = true; 3265 3266 /* Set MTU limits */ 3267 port_netdev->min_mtu = ETH_MIN_MTU; 3268 port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH; 3269 3270 /* Populate the private port structure so that later calls to 3271 * dpaa2_switch_port_init() can use it. 3272 */ 3273 ethsw->ports[port_idx] = port_priv; 3274 3275 /* The DPAA2 switch's ingress path depends on the VLAN table, 3276 * thus we are not able to disable VLAN filtering. 
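	 * These features are therefore fixed in 'features' and not exposed
	 * through 'hw_features', so user space cannot toggle them off.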
3277 */ 3278 port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | 3279 NETIF_F_HW_VLAN_STAG_FILTER | 3280 NETIF_F_HW_TC; 3281 3282 err = dpaa2_switch_port_init(port_priv, port_idx); 3283 if (err) 3284 goto err_port_probe; 3285 3286 err = dpaa2_switch_port_set_mac_addr(port_priv); 3287 if (err) 3288 goto err_port_probe; 3289 3290 err = dpaa2_switch_port_set_learning(port_priv, false); 3291 if (err) 3292 goto err_port_probe; 3293 port_priv->learn_ena = false; 3294 3295 err = dpaa2_switch_port_connect_mac(port_priv); 3296 if (err) 3297 goto err_port_probe; 3298 3299 return 0; 3300 3301 err_port_probe: 3302 free_netdev(port_netdev); 3303 ethsw->ports[port_idx] = NULL; 3304 3305 return err; 3306 } 3307 3308 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) 3309 { 3310 struct device *dev = &sw_dev->dev; 3311 struct ethsw_core *ethsw; 3312 int i, err; 3313 3314 /* Allocate switch core*/ 3315 ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); 3316 3317 if (!ethsw) 3318 return -ENOMEM; 3319 3320 ethsw->dev = dev; 3321 ethsw->iommu_domain = iommu_get_domain_for_dev(dev); 3322 dev_set_drvdata(dev, ethsw); 3323 3324 err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 3325 ðsw->mc_io); 3326 if (err) { 3327 if (err == -ENXIO) 3328 err = -EPROBE_DEFER; 3329 else 3330 dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); 3331 goto err_free_drvdata; 3332 } 3333 3334 err = dpaa2_switch_init(sw_dev); 3335 if (err) 3336 goto err_free_cmdport; 3337 3338 ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), 3339 GFP_KERNEL); 3340 if (!(ethsw->ports)) { 3341 err = -ENOMEM; 3342 goto err_teardown; 3343 } 3344 3345 ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), 3346 GFP_KERNEL); 3347 if (!ethsw->fdbs) { 3348 err = -ENOMEM; 3349 goto err_free_ports; 3350 } 3351 3352 ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs, 3353 sizeof(*ethsw->filter_blocks), 3354 GFP_KERNEL); 3355 if (!ethsw->filter_blocks) { 3356 err = -ENOMEM; 3357 goto err_free_fdbs; 3358 } 3359 3360 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3361 err = dpaa2_switch_probe_port(ethsw, i); 3362 if (err) 3363 goto err_free_netdev; 3364 } 3365 3366 /* Add a NAPI instance for each of the Rx queues. The first port's 3367 * net_device will be associated with the instances since we do not have 3368 * different queues for each switch ports. 3369 */ 3370 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 3371 netif_napi_add(ethsw->ports[0]->netdev, 3372 ðsw->fq[i].napi, dpaa2_switch_poll, 3373 NAPI_POLL_WEIGHT); 3374 3375 /* Setup IRQs */ 3376 err = dpaa2_switch_setup_irqs(sw_dev); 3377 if (err) 3378 goto err_stop; 3379 3380 /* By convention, if the mirror port is equal to the number of switch 3381 * interfaces, then mirroring of any kind is disabled. 
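	 * Start out with mirroring disabled since no mirroring rule has been
	 * configured yet at probe time.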
3382 */ 3383 ethsw->mirror_port = ethsw->sw_attr.num_ifs; 3384 3385 /* Register the netdev only when the entire setup is done and the 3386 * switch port interfaces are ready to receive traffic 3387 */ 3388 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3389 err = register_netdev(ethsw->ports[i]->netdev); 3390 if (err < 0) { 3391 dev_err(dev, "register_netdev error %d\n", err); 3392 goto err_unregister_ports; 3393 } 3394 } 3395 3396 return 0; 3397 3398 err_unregister_ports: 3399 for (i--; i >= 0; i--) 3400 unregister_netdev(ethsw->ports[i]->netdev); 3401 dpaa2_switch_teardown_irqs(sw_dev); 3402 err_stop: 3403 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3404 err_free_netdev: 3405 for (i--; i >= 0; i--) 3406 dpaa2_switch_remove_port(ethsw, i); 3407 kfree(ethsw->filter_blocks); 3408 err_free_fdbs: 3409 kfree(ethsw->fdbs); 3410 err_free_ports: 3411 kfree(ethsw->ports); 3412 3413 err_teardown: 3414 dpaa2_switch_teardown(sw_dev); 3415 3416 err_free_cmdport: 3417 fsl_mc_portal_free(ethsw->mc_io); 3418 3419 err_free_drvdata: 3420 kfree(ethsw); 3421 dev_set_drvdata(dev, NULL); 3422 3423 return err; 3424 } 3425 3426 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { 3427 { 3428 .vendor = FSL_MC_VENDOR_FREESCALE, 3429 .obj_type = "dpsw", 3430 }, 3431 { .vendor = 0x0 } 3432 }; 3433 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); 3434 3435 static struct fsl_mc_driver dpaa2_switch_drv = { 3436 .driver = { 3437 .name = KBUILD_MODNAME, 3438 .owner = THIS_MODULE, 3439 }, 3440 .probe = dpaa2_switch_probe, 3441 .remove = dpaa2_switch_remove, 3442 .match_id_table = dpaa2_switch_match_id_table 3443 }; 3444 3445 static struct notifier_block dpaa2_switch_port_nb __read_mostly = { 3446 .notifier_call = dpaa2_switch_port_netdevice_event, 3447 }; 3448 3449 static struct notifier_block dpaa2_switch_port_switchdev_nb = { 3450 .notifier_call = dpaa2_switch_port_event, 3451 }; 3452 3453 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = { 3454 .notifier_call = dpaa2_switch_port_blocking_event, 3455 }; 3456 3457 static int dpaa2_switch_register_notifiers(void) 3458 { 3459 int err; 3460 3461 err = register_netdevice_notifier(&dpaa2_switch_port_nb); 3462 if (err) { 3463 pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err); 3464 return err; 3465 } 3466 3467 err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3468 if (err) { 3469 pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err); 3470 goto err_switchdev_nb; 3471 } 3472 3473 err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3474 if (err) { 3475 pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err); 3476 goto err_switchdev_blocking_nb; 3477 } 3478 3479 return 0; 3480 3481 err_switchdev_blocking_nb: 3482 unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3483 err_switchdev_nb: 3484 unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3485 3486 return err; 3487 } 3488 3489 static void dpaa2_switch_unregister_notifiers(void) 3490 { 3491 int err; 3492 3493 err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3494 if (err) 3495 pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n", 3496 err); 3497 3498 err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3499 if (err) 3500 pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err); 3501 3502 err = 
unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3503 if (err) 3504 pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err); 3505 } 3506 3507 static int __init dpaa2_switch_driver_init(void) 3508 { 3509 int err; 3510 3511 err = fsl_mc_driver_register(&dpaa2_switch_drv); 3512 if (err) 3513 return err; 3514 3515 err = dpaa2_switch_register_notifiers(); 3516 if (err) { 3517 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3518 return err; 3519 } 3520 3521 return 0; 3522 } 3523 3524 static void __exit dpaa2_switch_driver_exit(void) 3525 { 3526 dpaa2_switch_unregister_notifiers(); 3527 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3528 } 3529 3530 module_init(dpaa2_switch_driver_init); 3531 module_exit(dpaa2_switch_driver_exit); 3532 3533 MODULE_LICENSE("GPL v2"); 3534 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver"); 3535