1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * DPAA2 Ethernet Switch driver 4 * 5 * Copyright 2014-2016 Freescale Semiconductor Inc. 6 * Copyright 2017-2021 NXP 7 * 8 */ 9 10 #include <linux/module.h> 11 12 #include <linux/interrupt.h> 13 #include <linux/msi.h> 14 #include <linux/kthread.h> 15 #include <linux/workqueue.h> 16 #include <linux/iommu.h> 17 18 #include <linux/fsl/mc.h> 19 20 #include "dpaa2-switch.h" 21 22 /* Minimal supported DPSW version */ 23 #define DPSW_MIN_VER_MAJOR 8 24 #define DPSW_MIN_VER_MINOR 9 25 26 #define DEFAULT_VLAN_ID 1 27 28 static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv) 29 { 30 return port_priv->fdb->fdb_id; 31 } 32 33 static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw) 34 { 35 int i; 36 37 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) 38 if (!ethsw->fdbs[i].in_use) 39 return ðsw->fdbs[i]; 40 return NULL; 41 } 42 43 static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv, 44 struct net_device *bridge_dev) 45 { 46 struct ethsw_port_priv *other_port_priv = NULL; 47 struct dpaa2_switch_fdb *fdb; 48 struct net_device *other_dev; 49 struct list_head *iter; 50 51 /* If we leave a bridge (bridge_dev is NULL), find an unused 52 * FDB and use that. 53 */ 54 if (!bridge_dev) { 55 fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data); 56 57 /* If there is no unused FDB, we must be the last port that 58 * leaves the last bridge, all the others are standalone. We 59 * can just keep the FDB that we already have. 60 */ 61 62 if (!fdb) { 63 port_priv->fdb->bridge_dev = NULL; 64 return 0; 65 } 66 67 port_priv->fdb = fdb; 68 port_priv->fdb->in_use = true; 69 port_priv->fdb->bridge_dev = NULL; 70 return 0; 71 } 72 73 /* The below call to netdev_for_each_lower_dev() demands the RTNL lock 74 * being held. Assert on it so that it's easier to catch new code 75 * paths that reach this point without the RTNL lock. 
76 */ 77 ASSERT_RTNL(); 78 79 /* If part of a bridge, use the FDB of the first dpaa2 switch interface 80 * to be present in that bridge 81 */ 82 netdev_for_each_lower_dev(bridge_dev, other_dev, iter) { 83 if (!dpaa2_switch_port_dev_check(other_dev)) 84 continue; 85 86 if (other_dev == port_priv->netdev) 87 continue; 88 89 other_port_priv = netdev_priv(other_dev); 90 break; 91 } 92 93 /* The current port is about to change its FDB to the one used by the 94 * first port that joined the bridge. 95 */ 96 if (other_port_priv) { 97 /* The previous FDB is about to become unused, since the 98 * interface is no longer standalone. 99 */ 100 port_priv->fdb->in_use = false; 101 port_priv->fdb->bridge_dev = NULL; 102 103 /* Get a reference to the new FDB */ 104 port_priv->fdb = other_port_priv->fdb; 105 } 106 107 /* Keep track of the new upper bridge device */ 108 port_priv->fdb->bridge_dev = bridge_dev; 109 110 return 0; 111 } 112 113 static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id, 114 enum dpsw_flood_type type, 115 struct dpsw_egress_flood_cfg *cfg) 116 { 117 int i = 0, j; 118 119 memset(cfg, 0, sizeof(*cfg)); 120 121 /* Add all the DPAA2 switch ports found in the same bridging domain to 122 * the egress flooding domain 123 */ 124 for (j = 0; j < ethsw->sw_attr.num_ifs; j++) { 125 if (!ethsw->ports[j]) 126 continue; 127 if (ethsw->ports[j]->fdb->fdb_id != fdb_id) 128 continue; 129 130 if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood) 131 cfg->if_id[i++] = ethsw->ports[j]->idx; 132 else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood) 133 cfg->if_id[i++] = ethsw->ports[j]->idx; 134 } 135 136 /* Add the CTRL interface to the egress flooding domain */ 137 cfg->if_id[i++] = ethsw->sw_attr.num_ifs; 138 139 cfg->fdb_id = fdb_id; 140 cfg->flood_type = type; 141 cfg->num_ifs = i; 142 } 143 144 static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id) 145 { 146 struct dpsw_egress_flood_cfg flood_cfg; 147 int 
err; 148 149 /* Setup broadcast flooding domain */ 150 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg); 151 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 152 &flood_cfg); 153 if (err) { 154 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 155 return err; 156 } 157 158 /* Setup unknown flooding domain */ 159 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg); 160 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 161 &flood_cfg); 162 if (err) { 163 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 164 return err; 165 } 166 167 return 0; 168 } 169 170 static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 171 dma_addr_t iova_addr) 172 { 173 phys_addr_t phys_addr; 174 175 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; 176 177 return phys_to_virt(phys_addr); 178 } 179 180 static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid) 181 { 182 struct ethsw_core *ethsw = port_priv->ethsw_data; 183 struct dpsw_vlan_cfg vcfg = {0}; 184 int err; 185 186 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 187 err = dpsw_vlan_add(ethsw->mc_io, 0, 188 ethsw->dpsw_handle, vid, &vcfg); 189 if (err) { 190 dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err); 191 return err; 192 } 193 ethsw->vlans[vid] = ETHSW_VLAN_MEMBER; 194 195 return 0; 196 } 197 198 static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv) 199 { 200 struct net_device *netdev = port_priv->netdev; 201 struct dpsw_link_state state; 202 int err; 203 204 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, 205 port_priv->ethsw_data->dpsw_handle, 206 port_priv->idx, &state); 207 if (err) { 208 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); 209 return true; 210 } 211 212 WARN_ONCE(state.up > 1, "Garbage read into link_state"); 213 214 return state.up ? 
true : false; 215 } 216 217 static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid) 218 { 219 struct ethsw_core *ethsw = port_priv->ethsw_data; 220 struct net_device *netdev = port_priv->netdev; 221 struct dpsw_tci_cfg tci_cfg = { 0 }; 222 bool up; 223 int err, ret; 224 225 err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 226 port_priv->idx, &tci_cfg); 227 if (err) { 228 netdev_err(netdev, "dpsw_if_get_tci err %d\n", err); 229 return err; 230 } 231 232 tci_cfg.vlan_id = pvid; 233 234 /* Interface needs to be down to change PVID */ 235 up = dpaa2_switch_port_is_up(port_priv); 236 if (up) { 237 err = dpsw_if_disable(ethsw->mc_io, 0, 238 ethsw->dpsw_handle, 239 port_priv->idx); 240 if (err) { 241 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 242 return err; 243 } 244 } 245 246 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 247 port_priv->idx, &tci_cfg); 248 if (err) { 249 netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); 250 goto set_tci_error; 251 } 252 253 /* Delete previous PVID info and mark the new one */ 254 port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID; 255 port_priv->vlans[pvid] |= ETHSW_VLAN_PVID; 256 port_priv->pvid = pvid; 257 258 set_tci_error: 259 if (up) { 260 ret = dpsw_if_enable(ethsw->mc_io, 0, 261 ethsw->dpsw_handle, 262 port_priv->idx); 263 if (ret) { 264 netdev_err(netdev, "dpsw_if_enable err %d\n", ret); 265 return ret; 266 } 267 } 268 269 return err; 270 } 271 272 static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv, 273 u16 vid, u16 flags) 274 { 275 struct ethsw_core *ethsw = port_priv->ethsw_data; 276 struct net_device *netdev = port_priv->netdev; 277 struct dpsw_vlan_if_cfg vcfg = {0}; 278 int err; 279 280 if (port_priv->vlans[vid]) { 281 netdev_warn(netdev, "VLAN %d already configured\n", vid); 282 return -EEXIST; 283 } 284 285 /* If hit, this VLAN rule will lead the packet into the FDB table 286 * specified in the vlan configuration below 287 */ 288 
	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
		return err;
	}

	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_add_if_untagged err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
	}

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = dpaa2_switch_port_set_pvid(port_priv, vid);
		if (err)
			return err;
	}

	return 0;
}

/* Map a bridge STP state onto the equivalent DPSW STP state; unknown states
 * fall back to disabled.
 */
static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
{
	switch (state) {
	case BR_STATE_DISABLED:
		return DPSW_STP_STATE_DISABLED;
	case BR_STATE_LISTENING:
		return DPSW_STP_STATE_LISTENING;
	case BR_STATE_LEARNING:
		return DPSW_STP_STATE_LEARNING;
	case BR_STATE_FORWARDING:
		return DPSW_STP_STATE_FORWARDING;
	case BR_STATE_BLOCKING:
		return DPSW_STP_STATE_BLOCKING;
	default:
		return DPSW_STP_STATE_DISABLED;
	}
}

/* Apply @state on every VLAN of which the port is a member, then cache it */
static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
{
	struct dpsw_stp_cfg stp_cfg = {0};
	int err;
	u16 vid;

	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
		return 0;	/* Nothing to do */

	stp_cfg.state = br_stp_state_to_dpsw(state);
	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
			stp_cfg.vlan_id = vid;
			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
					      port_priv->ethsw_data->dpsw_handle,
					      port_priv->idx, &stp_cfg);
			if (err) {
				netdev_err(port_priv->netdev,
					   "dpsw_if_set_stp err %d\n", err);
				return err;
			}
		}
	}

	port_priv->stp_state = state;

	return 0;
}

/* Remove a VLAN from the switch and clear the membership info on all ports */
static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
{
	struct ethsw_port_priv *ppriv_local = NULL;
	int i, err;

	if (!ethsw->vlans[vid])
		return -ENOENT;

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = 0;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		ppriv_local = ethsw->ports[i];
		ppriv_local->vlans[vid] = 0;
	}

	return 0;
}

/* Add a static unicast FDB entry which egresses on this port */
static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
				   port_priv->ethsw_data->dpsw_handle,
				   fdb_id, &entry);
	if (err)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_add_unicast err %d\n", err);
	return err;
}

/* Delete a static unicast FDB entry of this port */
static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
				      port_priv->ethsw_data->dpsw_handle,
				      fdb_id, &entry);
	/* Silently discard error for calling multiple times the del command */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_unicast err %d\n", err);
	return err;
}

static
int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv, 435 const unsigned char *addr) 436 { 437 struct dpsw_fdb_multicast_cfg entry = {0}; 438 u16 fdb_id; 439 int err; 440 441 ether_addr_copy(entry.mac_addr, addr); 442 entry.type = DPSW_FDB_ENTRY_STATIC; 443 entry.num_ifs = 1; 444 entry.if_id[0] = port_priv->idx; 445 446 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 447 err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0, 448 port_priv->ethsw_data->dpsw_handle, 449 fdb_id, &entry); 450 /* Silently discard error for calling multiple times the add command */ 451 if (err && err != -ENXIO) 452 netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n", 453 err); 454 return err; 455 } 456 457 static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv, 458 const unsigned char *addr) 459 { 460 struct dpsw_fdb_multicast_cfg entry = {0}; 461 u16 fdb_id; 462 int err; 463 464 ether_addr_copy(entry.mac_addr, addr); 465 entry.type = DPSW_FDB_ENTRY_STATIC; 466 entry.num_ifs = 1; 467 entry.if_id[0] = port_priv->idx; 468 469 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 470 err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0, 471 port_priv->ethsw_data->dpsw_handle, 472 fdb_id, &entry); 473 /* Silently discard error for calling multiple times the del command */ 474 if (err && err != -ENAVAIL) 475 netdev_err(port_priv->netdev, 476 "dpsw_fdb_remove_multicast err %d\n", err); 477 return err; 478 } 479 480 static void dpaa2_switch_port_get_stats(struct net_device *netdev, 481 struct rtnl_link_stats64 *stats) 482 { 483 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 484 u64 tmp; 485 int err; 486 487 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 488 port_priv->ethsw_data->dpsw_handle, 489 port_priv->idx, 490 DPSW_CNT_ING_FRAME, &stats->rx_packets); 491 if (err) 492 goto error; 493 494 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 495 port_priv->ethsw_data->dpsw_handle, 496 port_priv->idx, 497 
DPSW_CNT_EGR_FRAME, &stats->tx_packets); 498 if (err) 499 goto error; 500 501 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 502 port_priv->ethsw_data->dpsw_handle, 503 port_priv->idx, 504 DPSW_CNT_ING_BYTE, &stats->rx_bytes); 505 if (err) 506 goto error; 507 508 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 509 port_priv->ethsw_data->dpsw_handle, 510 port_priv->idx, 511 DPSW_CNT_EGR_BYTE, &stats->tx_bytes); 512 if (err) 513 goto error; 514 515 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 516 port_priv->ethsw_data->dpsw_handle, 517 port_priv->idx, 518 DPSW_CNT_ING_FRAME_DISCARD, 519 &stats->rx_dropped); 520 if (err) 521 goto error; 522 523 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 524 port_priv->ethsw_data->dpsw_handle, 525 port_priv->idx, 526 DPSW_CNT_ING_FLTR_FRAME, 527 &tmp); 528 if (err) 529 goto error; 530 stats->rx_dropped += tmp; 531 532 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 533 port_priv->ethsw_data->dpsw_handle, 534 port_priv->idx, 535 DPSW_CNT_EGR_FRAME_DISCARD, 536 &stats->tx_dropped); 537 if (err) 538 goto error; 539 540 return; 541 542 error: 543 netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); 544 } 545 546 static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev, 547 int attr_id) 548 { 549 return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT); 550 } 551 552 static int dpaa2_switch_port_get_offload_stats(int attr_id, 553 const struct net_device *netdev, 554 void *sp) 555 { 556 switch (attr_id) { 557 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 558 dpaa2_switch_port_get_stats((struct net_device *)netdev, sp); 559 return 0; 560 } 561 562 return -EINVAL; 563 } 564 565 static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) 566 { 567 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 568 int err; 569 570 err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io, 571 0, 572 port_priv->ethsw_data->dpsw_handle, 573 port_priv->idx, 574 
(u16)ETHSW_L2_MAX_FRM(mtu)); 575 if (err) { 576 netdev_err(netdev, 577 "dpsw_if_set_max_frame_length() err %d\n", err); 578 return err; 579 } 580 581 netdev->mtu = mtu; 582 return 0; 583 } 584 585 static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev) 586 { 587 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 588 struct dpsw_link_state state; 589 int err; 590 591 /* Interrupts are received even though no one issued an 'ifconfig up' 592 * on the switch interface. Ignore these link state update interrupts 593 */ 594 if (!netif_running(netdev)) 595 return 0; 596 597 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, 598 port_priv->ethsw_data->dpsw_handle, 599 port_priv->idx, &state); 600 if (err) { 601 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); 602 return err; 603 } 604 605 WARN_ONCE(state.up > 1, "Garbage read into link_state"); 606 607 if (state.up != port_priv->link_state) { 608 if (state.up) { 609 netif_carrier_on(netdev); 610 netif_tx_start_all_queues(netdev); 611 } else { 612 netif_carrier_off(netdev); 613 netif_tx_stop_all_queues(netdev); 614 } 615 port_priv->link_state = state.up; 616 } 617 618 return 0; 619 } 620 621 /* Manage all NAPI instances for the control interface. 622 * 623 * We only have one RX queue and one Tx Conf queue for all 624 * switch ports. Therefore, we only need to enable the NAPI instance once, the 625 * first time one of the switch ports runs .dev_open(). 
626 */ 627 628 static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw) 629 { 630 int i; 631 632 /* Access to the ethsw->napi_users relies on the RTNL lock */ 633 ASSERT_RTNL(); 634 635 /* a new interface is using the NAPI instance */ 636 ethsw->napi_users++; 637 638 /* if there is already a user of the instance, return */ 639 if (ethsw->napi_users > 1) 640 return; 641 642 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 643 napi_enable(ðsw->fq[i].napi); 644 } 645 646 static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw) 647 { 648 int i; 649 650 /* Access to the ethsw->napi_users relies on the RTNL lock */ 651 ASSERT_RTNL(); 652 653 /* If we are not the last interface using the NAPI, return */ 654 ethsw->napi_users--; 655 if (ethsw->napi_users) 656 return; 657 658 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 659 napi_disable(ðsw->fq[i].napi); 660 } 661 662 static int dpaa2_switch_port_open(struct net_device *netdev) 663 { 664 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 665 struct ethsw_core *ethsw = port_priv->ethsw_data; 666 int err; 667 668 /* Explicitly set carrier off, otherwise 669 * netif_carrier_ok() will return true and cause 'ip link show' 670 * to report the LOWER_UP flag, even though the link 671 * notification wasn't even received. 
672 */ 673 netif_carrier_off(netdev); 674 675 err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, 676 port_priv->ethsw_data->dpsw_handle, 677 port_priv->idx); 678 if (err) { 679 netdev_err(netdev, "dpsw_if_enable err %d\n", err); 680 return err; 681 } 682 683 /* sync carrier state */ 684 err = dpaa2_switch_port_carrier_state_sync(netdev); 685 if (err) { 686 netdev_err(netdev, 687 "dpaa2_switch_port_carrier_state_sync err %d\n", err); 688 goto err_carrier_sync; 689 } 690 691 dpaa2_switch_enable_ctrl_if_napi(ethsw); 692 693 return 0; 694 695 err_carrier_sync: 696 dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, 697 port_priv->ethsw_data->dpsw_handle, 698 port_priv->idx); 699 return err; 700 } 701 702 static int dpaa2_switch_port_stop(struct net_device *netdev) 703 { 704 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 705 struct ethsw_core *ethsw = port_priv->ethsw_data; 706 int err; 707 708 err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, 709 port_priv->ethsw_data->dpsw_handle, 710 port_priv->idx); 711 if (err) { 712 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 713 return err; 714 } 715 716 dpaa2_switch_disable_ctrl_if_napi(ethsw); 717 718 return 0; 719 } 720 721 static int dpaa2_switch_port_parent_id(struct net_device *dev, 722 struct netdev_phys_item_id *ppid) 723 { 724 struct ethsw_port_priv *port_priv = netdev_priv(dev); 725 726 ppid->id_len = 1; 727 ppid->id[0] = port_priv->ethsw_data->dev_id; 728 729 return 0; 730 } 731 732 static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name, 733 size_t len) 734 { 735 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 736 int err; 737 738 err = snprintf(name, len, "p%d", port_priv->idx); 739 if (err >= len) 740 return -EINVAL; 741 742 return 0; 743 } 744 745 struct ethsw_dump_ctx { 746 struct net_device *dev; 747 struct sk_buff *skb; 748 struct netlink_callback *cb; 749 int idx; 750 }; 751 752 static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, 753 struct 
ethsw_dump_ctx *dump) 754 { 755 int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; 756 u32 portid = NETLINK_CB(dump->cb->skb).portid; 757 u32 seq = dump->cb->nlh->nlmsg_seq; 758 struct nlmsghdr *nlh; 759 struct ndmsg *ndm; 760 761 if (dump->idx < dump->cb->args[2]) 762 goto skip; 763 764 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, 765 sizeof(*ndm), NLM_F_MULTI); 766 if (!nlh) 767 return -EMSGSIZE; 768 769 ndm = nlmsg_data(nlh); 770 ndm->ndm_family = AF_BRIDGE; 771 ndm->ndm_pad1 = 0; 772 ndm->ndm_pad2 = 0; 773 ndm->ndm_flags = NTF_SELF; 774 ndm->ndm_type = 0; 775 ndm->ndm_ifindex = dump->dev->ifindex; 776 ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP; 777 778 if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr)) 779 goto nla_put_failure; 780 781 nlmsg_end(dump->skb, nlh); 782 783 skip: 784 dump->idx++; 785 return 0; 786 787 nla_put_failure: 788 nlmsg_cancel(dump->skb, nlh); 789 return -EMSGSIZE; 790 } 791 792 static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry, 793 struct ethsw_port_priv *port_priv) 794 { 795 int idx = port_priv->idx; 796 int valid; 797 798 if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) 799 valid = entry->if_info == port_priv->idx; 800 else 801 valid = entry->if_mask[idx / 8] & BIT(idx % 8); 802 803 return valid; 804 } 805 806 static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv, 807 dpaa2_switch_fdb_cb_t cb, void *data) 808 { 809 struct net_device *net_dev = port_priv->netdev; 810 struct ethsw_core *ethsw = port_priv->ethsw_data; 811 struct device *dev = net_dev->dev.parent; 812 struct fdb_dump_entry *fdb_entries; 813 struct fdb_dump_entry fdb_entry; 814 dma_addr_t fdb_dump_iova; 815 u16 num_fdb_entries; 816 u32 fdb_dump_size; 817 int err = 0, i; 818 u8 *dma_mem; 819 u16 fdb_id; 820 821 fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry); 822 dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL); 823 if (!dma_mem) 824 return -ENOMEM; 825 826 fdb_dump_iova = 
dma_map_single(dev, dma_mem, fdb_dump_size, 827 DMA_FROM_DEVICE); 828 if (dma_mapping_error(dev, fdb_dump_iova)) { 829 netdev_err(net_dev, "dma_map_single() failed\n"); 830 err = -ENOMEM; 831 goto err_map; 832 } 833 834 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 835 err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id, 836 fdb_dump_iova, fdb_dump_size, &num_fdb_entries); 837 if (err) { 838 netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err); 839 goto err_dump; 840 } 841 842 dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE); 843 844 fdb_entries = (struct fdb_dump_entry *)dma_mem; 845 for (i = 0; i < num_fdb_entries; i++) { 846 fdb_entry = fdb_entries[i]; 847 848 err = cb(port_priv, &fdb_entry, data); 849 if (err) 850 goto end; 851 } 852 853 end: 854 kfree(dma_mem); 855 856 return 0; 857 858 err_dump: 859 dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE); 860 err_map: 861 kfree(dma_mem); 862 return err; 863 } 864 865 static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv, 866 struct fdb_dump_entry *fdb_entry, 867 void *data) 868 { 869 if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) 870 return 0; 871 872 return dpaa2_switch_fdb_dump_nl(fdb_entry, data); 873 } 874 875 static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 876 struct net_device *net_dev, 877 struct net_device *filter_dev, int *idx) 878 { 879 struct ethsw_port_priv *port_priv = netdev_priv(net_dev); 880 struct ethsw_dump_ctx dump = { 881 .dev = net_dev, 882 .skb = skb, 883 .cb = cb, 884 .idx = *idx, 885 }; 886 int err; 887 888 err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump); 889 *idx = dump.idx; 890 891 return err; 892 } 893 894 static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv, 895 struct fdb_dump_entry *fdb_entry, 896 void *data __always_unused) 897 { 898 if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) 899 return 0; 
	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);

	return 0;
}

/* Flush all dynamically learned FDB entries of this port */
static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
{
	dpaa2_switch_fdb_iterate(port_priv,
				 dpaa2_switch_fdb_entry_fast_age, NULL);
}

/* .ndo_vlan_rx_add_vid: forward to the switchdev VLAN add path */
static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
				      u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_add(netdev, &vlan);
}

/* .ndo_vlan_rx_kill_vid: forward to the switchdev VLAN del path */
static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
				       u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_del(netdev, &vlan);
}

/* Use the bootloader-provided MAC address if the firmware has one, otherwise
 * generate a random address and treat it as permanent.
 */
static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *net_dev = port_priv->netdev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN];
	int err;

	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
		return 0;

	/* Get firmware address, if any */
	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, mac_addr);
	if (err) {
		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	}

	return 0;
}

/* Release the skb backing a Tx frame descriptor: the skb pointer is stored
 * at the start of the buffer (see dpaa2_switch_build_single_fd()).
 */
static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
				 const struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	unsigned char *buffer_start;
	struct sk_buff **skbh, *skb;
	dma_addr_t fd_addr;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);

	skb = *skbh;
	buffer_start = (unsigned char *)skbh;

	dma_unmap_single(dev, fd_addr,
			 skb_tail_pointer(skb) - buffer_start,
			 DMA_TO_DEVICE);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

/* Build a single-buffer frame descriptor around @skb for transmission */
static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
					struct sk_buff *skb,
					struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	struct sk_buff **skbh;
	dma_addr_t addr;
	u8 *buff_start;
	void *hwa;

	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
			       DPAA2_SWITCH_TX_BUF_ALIGN,
			       DPAA2_SWITCH_TX_BUF_ALIGN);

	/* Clear FAS to have consistent values for TX confirmation. It is
	 * located in the first 8 bytes of the buffer's hardware annotation
	 * area
	 */
	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
	memset(hwa, 0, 8);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	addr = dma_map_single(dev, buff_start,
			      skb_tail_pointer(skb) - buff_start,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);

	return 0;
}

/* .ndo_start_xmit: linearize/unshare the skb, wrap it in a frame descriptor
 * and enqueue it to the port's Tx queuing destination.
 */
static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
					struct net_device *net_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
	struct dpaa2_fd fd;
	int err;

	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
		if (unlikely(!ns)) {
			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
			goto err_free_skb;
		}
		dev_consume_skb_any(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx confirmation */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
		goto err_exit;
	}

	/* At this stage, we do not support non-linear skbs so just try to
	 * linearize the skb and if that's not working, just drop the packet.
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}

static const struct net_device_ops dpaa2_switch_port_ops = {
	.ndo_open		= dpaa2_switch_port_open,
	.ndo_stop		= dpaa2_switch_port_stop,

	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= dpaa2_switch_port_get_stats,
	.ndo_change_mtu		= dpaa2_switch_port_change_mtu,
	.ndo_has_offload_stats	= dpaa2_switch_port_has_offload_stats,
	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
	.ndo_vlan_rx_add_vid	= dpaa2_switch_port_vlan_add,
	.ndo_vlan_rx_kill_vid	= dpaa2_switch_port_vlan_kill,

	.ndo_start_xmit		= dpaa2_switch_port_tx,
	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
};

/* A netdev belongs to this driver iff it uses our net_device_ops */
bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
{
	return netdev->netdev_ops == &dpaa2_switch_port_ops;
}

/* Re-sync carrier state and MAC address of every port (e.g. on link IRQ) */
static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
		dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
	}
}

static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) 1151 { 1152 struct device *dev = (struct device *)arg; 1153 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1154 1155 /* Mask the events and the if_id reserved bits to be cleared on read */ 1156 u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; 1157 int err; 1158 1159 err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, 1160 DPSW_IRQ_INDEX_IF, &status); 1161 if (err) { 1162 dev_err(dev, "Can't get irq status (err %d)\n", err); 1163 1164 err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, 1165 DPSW_IRQ_INDEX_IF, 0xFFFFFFFF); 1166 if (err) 1167 dev_err(dev, "Can't clear irq status (err %d)\n", err); 1168 goto out; 1169 } 1170 1171 if (status & DPSW_IRQ_EVENT_LINK_CHANGED) 1172 dpaa2_switch_links_state_update(ethsw); 1173 1174 out: 1175 return IRQ_HANDLED; 1176 } 1177 1178 static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) 1179 { 1180 struct device *dev = &sw_dev->dev; 1181 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1182 u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED; 1183 struct fsl_mc_device_irq *irq; 1184 int err; 1185 1186 err = fsl_mc_allocate_irqs(sw_dev); 1187 if (err) { 1188 dev_err(dev, "MC irqs allocation failed\n"); 1189 return err; 1190 } 1191 1192 if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) { 1193 err = -EINVAL; 1194 goto free_irq; 1195 } 1196 1197 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1198 DPSW_IRQ_INDEX_IF, 0); 1199 if (err) { 1200 dev_err(dev, "dpsw_set_irq_enable err %d\n", err); 1201 goto free_irq; 1202 } 1203 1204 irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF]; 1205 1206 err = devm_request_threaded_irq(dev, irq->msi_desc->irq, 1207 NULL, 1208 dpaa2_switch_irq0_handler_thread, 1209 IRQF_NO_SUSPEND | IRQF_ONESHOT, 1210 dev_name(dev), dev); 1211 if (err) { 1212 dev_err(dev, "devm_request_threaded_irq(): %d\n", err); 1213 goto free_irq; 1214 } 1215 1216 err = dpsw_set_irq_mask(ethsw->mc_io, 0, 
ethsw->dpsw_handle, 1217 DPSW_IRQ_INDEX_IF, mask); 1218 if (err) { 1219 dev_err(dev, "dpsw_set_irq_mask(): %d\n", err); 1220 goto free_devm_irq; 1221 } 1222 1223 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1224 DPSW_IRQ_INDEX_IF, 1); 1225 if (err) { 1226 dev_err(dev, "dpsw_set_irq_enable(): %d\n", err); 1227 goto free_devm_irq; 1228 } 1229 1230 return 0; 1231 1232 free_devm_irq: 1233 devm_free_irq(dev, irq->msi_desc->irq, dev); 1234 free_irq: 1235 fsl_mc_free_irqs(sw_dev); 1236 return err; 1237 } 1238 1239 static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev) 1240 { 1241 struct device *dev = &sw_dev->dev; 1242 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1243 int err; 1244 1245 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1246 DPSW_IRQ_INDEX_IF, 0); 1247 if (err) 1248 dev_err(dev, "dpsw_set_irq_enable err %d\n", err); 1249 1250 fsl_mc_free_irqs(sw_dev); 1251 } 1252 1253 static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable) 1254 { 1255 struct ethsw_core *ethsw = port_priv->ethsw_data; 1256 enum dpsw_learning_mode learn_mode; 1257 int err; 1258 1259 if (enable) 1260 learn_mode = DPSW_LEARNING_MODE_HW; 1261 else 1262 learn_mode = DPSW_LEARNING_MODE_DIS; 1263 1264 err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 1265 port_priv->idx, learn_mode); 1266 if (err) 1267 netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err); 1268 1269 if (!enable) 1270 dpaa2_switch_port_fast_age(port_priv); 1271 1272 return err; 1273 } 1274 1275 static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev, 1276 u8 state) 1277 { 1278 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1279 int err; 1280 1281 err = dpaa2_switch_port_set_stp_state(port_priv, state); 1282 if (err) 1283 return err; 1284 1285 switch (state) { 1286 case BR_STATE_DISABLED: 1287 case BR_STATE_BLOCKING: 1288 case BR_STATE_LISTENING: 1289 err = 
dpaa2_switch_port_set_learning(port_priv, false); 1290 break; 1291 case BR_STATE_LEARNING: 1292 case BR_STATE_FORWARDING: 1293 err = dpaa2_switch_port_set_learning(port_priv, 1294 port_priv->learn_ena); 1295 break; 1296 } 1297 1298 return err; 1299 } 1300 1301 static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv, 1302 struct switchdev_brport_flags flags) 1303 { 1304 struct ethsw_core *ethsw = port_priv->ethsw_data; 1305 1306 if (flags.mask & BR_BCAST_FLOOD) 1307 port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD); 1308 1309 if (flags.mask & BR_FLOOD) 1310 port_priv->ucast_flood = !!(flags.val & BR_FLOOD); 1311 1312 return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 1313 } 1314 1315 static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev, 1316 struct switchdev_brport_flags flags, 1317 struct netlink_ext_ack *extack) 1318 { 1319 if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD | 1320 BR_MCAST_FLOOD)) 1321 return -EINVAL; 1322 1323 if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) { 1324 bool multicast = !!(flags.val & BR_MCAST_FLOOD); 1325 bool unicast = !!(flags.val & BR_FLOOD); 1326 1327 if (unicast != multicast) { 1328 NL_SET_ERR_MSG_MOD(extack, 1329 "Cannot configure multicast flooding independently of unicast"); 1330 return -EINVAL; 1331 } 1332 } 1333 1334 return 0; 1335 } 1336 1337 static int dpaa2_switch_port_bridge_flags(struct net_device *netdev, 1338 struct switchdev_brport_flags flags, 1339 struct netlink_ext_ack *extack) 1340 { 1341 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1342 int err; 1343 1344 if (flags.mask & BR_LEARNING) { 1345 bool learn_ena = !!(flags.val & BR_LEARNING); 1346 1347 err = dpaa2_switch_port_set_learning(port_priv, learn_ena); 1348 if (err) 1349 return err; 1350 port_priv->learn_ena = learn_ena; 1351 } 1352 1353 if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) { 1354 err = dpaa2_switch_port_flood(port_priv, flags); 1355 if (err) 1356 return 
err; 1357 } 1358 1359 return 0; 1360 } 1361 1362 static int dpaa2_switch_port_attr_set(struct net_device *netdev, 1363 const struct switchdev_attr *attr, 1364 struct netlink_ext_ack *extack) 1365 { 1366 int err = 0; 1367 1368 switch (attr->id) { 1369 case SWITCHDEV_ATTR_ID_PORT_STP_STATE: 1370 err = dpaa2_switch_port_attr_stp_state_set(netdev, 1371 attr->u.stp_state); 1372 break; 1373 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: 1374 if (!attr->u.vlan_filtering) { 1375 NL_SET_ERR_MSG_MOD(extack, 1376 "The DPAA2 switch does not support VLAN-unaware operation"); 1377 return -EOPNOTSUPP; 1378 } 1379 break; 1380 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: 1381 err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack); 1382 break; 1383 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 1384 err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack); 1385 break; 1386 default: 1387 err = -EOPNOTSUPP; 1388 break; 1389 } 1390 1391 return err; 1392 } 1393 1394 int dpaa2_switch_port_vlans_add(struct net_device *netdev, 1395 const struct switchdev_obj_port_vlan *vlan) 1396 { 1397 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1398 struct ethsw_core *ethsw = port_priv->ethsw_data; 1399 struct dpsw_attr *attr = ðsw->sw_attr; 1400 int err = 0; 1401 1402 /* Make sure that the VLAN is not already configured 1403 * on the switch port 1404 */ 1405 if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) 1406 return -EEXIST; 1407 1408 /* Check if there is space for a new VLAN */ 1409 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 1410 ðsw->sw_attr); 1411 if (err) { 1412 netdev_err(netdev, "dpsw_get_attributes err %d\n", err); 1413 return err; 1414 } 1415 if (attr->max_vlans - attr->num_vlans < 1) 1416 return -ENOSPC; 1417 1418 /* Check if there is space for a new VLAN */ 1419 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 1420 ðsw->sw_attr); 1421 if (err) { 1422 netdev_err(netdev, "dpsw_get_attributes err %d\n", 
err); 1423 return err; 1424 } 1425 if (attr->max_vlans - attr->num_vlans < 1) 1426 return -ENOSPC; 1427 1428 if (!port_priv->ethsw_data->vlans[vlan->vid]) { 1429 /* this is a new VLAN */ 1430 err = dpaa2_switch_add_vlan(port_priv, vlan->vid); 1431 if (err) 1432 return err; 1433 1434 port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL; 1435 } 1436 1437 return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags); 1438 } 1439 1440 static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc, 1441 const unsigned char *addr) 1442 { 1443 struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc; 1444 struct netdev_hw_addr *ha; 1445 1446 netif_addr_lock_bh(netdev); 1447 list_for_each_entry(ha, &list->list, list) { 1448 if (ether_addr_equal(ha->addr, addr)) { 1449 netif_addr_unlock_bh(netdev); 1450 return 1; 1451 } 1452 } 1453 netif_addr_unlock_bh(netdev); 1454 return 0; 1455 } 1456 1457 static int dpaa2_switch_port_mdb_add(struct net_device *netdev, 1458 const struct switchdev_obj_port_mdb *mdb) 1459 { 1460 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1461 int err; 1462 1463 /* Check if address is already set on this port */ 1464 if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) 1465 return -EEXIST; 1466 1467 err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr); 1468 if (err) 1469 return err; 1470 1471 err = dev_mc_add(netdev, mdb->addr); 1472 if (err) { 1473 netdev_err(netdev, "dev_mc_add err %d\n", err); 1474 dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); 1475 } 1476 1477 return err; 1478 } 1479 1480 static int dpaa2_switch_port_obj_add(struct net_device *netdev, 1481 const struct switchdev_obj *obj) 1482 { 1483 int err; 1484 1485 switch (obj->id) { 1486 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1487 err = dpaa2_switch_port_vlans_add(netdev, 1488 SWITCHDEV_OBJ_PORT_VLAN(obj)); 1489 break; 1490 case SWITCHDEV_OBJ_ID_PORT_MDB: 1491 err = dpaa2_switch_port_mdb_add(netdev, 1492 
SWITCHDEV_OBJ_PORT_MDB(obj)); 1493 break; 1494 default: 1495 err = -EOPNOTSUPP; 1496 break; 1497 } 1498 1499 return err; 1500 } 1501 1502 static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid) 1503 { 1504 struct ethsw_core *ethsw = port_priv->ethsw_data; 1505 struct net_device *netdev = port_priv->netdev; 1506 struct dpsw_vlan_if_cfg vcfg; 1507 int i, err; 1508 1509 if (!port_priv->vlans[vid]) 1510 return -ENOENT; 1511 1512 if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) { 1513 /* If we are deleting the PVID of a port, use VLAN 4095 instead 1514 * as we are sure that neither the bridge nor the 8021q module 1515 * will use it 1516 */ 1517 err = dpaa2_switch_port_set_pvid(port_priv, 4095); 1518 if (err) 1519 return err; 1520 } 1521 1522 vcfg.num_ifs = 1; 1523 vcfg.if_id[0] = port_priv->idx; 1524 if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) { 1525 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, 1526 ethsw->dpsw_handle, 1527 vid, &vcfg); 1528 if (err) { 1529 netdev_err(netdev, 1530 "dpsw_vlan_remove_if_untagged err %d\n", 1531 err); 1532 } 1533 port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED; 1534 } 1535 1536 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { 1537 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 1538 vid, &vcfg); 1539 if (err) { 1540 netdev_err(netdev, 1541 "dpsw_vlan_remove_if err %d\n", err); 1542 return err; 1543 } 1544 port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER; 1545 1546 /* Delete VLAN from switch if it is no longer configured on 1547 * any port 1548 */ 1549 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) 1550 if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) 1551 return 0; /* Found a port member in VID */ 1552 1553 ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL; 1554 1555 err = dpaa2_switch_dellink(ethsw, vid); 1556 if (err) 1557 return err; 1558 } 1559 1560 return 0; 1561 } 1562 1563 int dpaa2_switch_port_vlans_del(struct net_device *netdev, 1564 const struct switchdev_obj_port_vlan *vlan) 1565 { 1566 struct 
ethsw_port_priv *port_priv = netdev_priv(netdev); 1567 1568 if (netif_is_bridge_master(vlan->obj.orig_dev)) 1569 return -EOPNOTSUPP; 1570 1571 return dpaa2_switch_port_del_vlan(port_priv, vlan->vid); 1572 } 1573 1574 static int dpaa2_switch_port_mdb_del(struct net_device *netdev, 1575 const struct switchdev_obj_port_mdb *mdb) 1576 { 1577 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1578 int err; 1579 1580 if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) 1581 return -ENOENT; 1582 1583 err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); 1584 if (err) 1585 return err; 1586 1587 err = dev_mc_del(netdev, mdb->addr); 1588 if (err) { 1589 netdev_err(netdev, "dev_mc_del err %d\n", err); 1590 return err; 1591 } 1592 1593 return err; 1594 } 1595 1596 static int dpaa2_switch_port_obj_del(struct net_device *netdev, 1597 const struct switchdev_obj *obj) 1598 { 1599 int err; 1600 1601 switch (obj->id) { 1602 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1603 err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj)); 1604 break; 1605 case SWITCHDEV_OBJ_ID_PORT_MDB: 1606 err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj)); 1607 break; 1608 default: 1609 err = -EOPNOTSUPP; 1610 break; 1611 } 1612 return err; 1613 } 1614 1615 static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, 1616 struct switchdev_notifier_port_attr_info *ptr) 1617 { 1618 int err; 1619 1620 err = switchdev_handle_port_attr_set(netdev, ptr, 1621 dpaa2_switch_port_dev_check, 1622 dpaa2_switch_port_attr_set); 1623 return notifier_from_errno(err); 1624 } 1625 1626 static int dpaa2_switch_port_bridge_join(struct net_device *netdev, 1627 struct net_device *upper_dev) 1628 { 1629 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1630 struct ethsw_core *ethsw = port_priv->ethsw_data; 1631 struct ethsw_port_priv *other_port_priv; 1632 struct net_device *other_dev; 1633 struct list_head *iter; 1634 bool learn_ena; 1635 int err; 1636 1637 
netdev_for_each_lower_dev(upper_dev, other_dev, iter) { 1638 if (!dpaa2_switch_port_dev_check(other_dev)) 1639 continue; 1640 1641 other_port_priv = netdev_priv(other_dev); 1642 if (other_port_priv->ethsw_data != port_priv->ethsw_data) { 1643 netdev_err(netdev, 1644 "Interface from a different DPSW is in the bridge already!\n"); 1645 return -EINVAL; 1646 } 1647 } 1648 1649 /* Delete the previously manually installed VLAN 1 */ 1650 err = dpaa2_switch_port_del_vlan(port_priv, 1); 1651 if (err) 1652 return err; 1653 1654 dpaa2_switch_port_set_fdb(port_priv, upper_dev); 1655 1656 /* Inherit the initial bridge port learning state */ 1657 learn_ena = br_port_flag_is_set(netdev, BR_LEARNING); 1658 err = dpaa2_switch_port_set_learning(port_priv, learn_ena); 1659 port_priv->learn_ena = learn_ena; 1660 1661 /* Setup the egress flood policy (broadcast, unknown unicast) */ 1662 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 1663 if (err) 1664 goto err_egress_flood; 1665 1666 return 0; 1667 1668 err_egress_flood: 1669 dpaa2_switch_port_set_fdb(port_priv, NULL); 1670 return err; 1671 } 1672 1673 static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg) 1674 { 1675 __be16 vlan_proto = htons(ETH_P_8021Q); 1676 1677 if (vdev) 1678 vlan_proto = vlan_dev_vlan_proto(vdev); 1679 1680 return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid); 1681 } 1682 1683 static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg) 1684 { 1685 __be16 vlan_proto = htons(ETH_P_8021Q); 1686 1687 if (vdev) 1688 vlan_proto = vlan_dev_vlan_proto(vdev); 1689 1690 return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid); 1691 } 1692 1693 static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) 1694 { 1695 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1696 struct dpaa2_switch_fdb *old_fdb = port_priv->fdb; 1697 struct ethsw_core *ethsw = port_priv->ethsw_data; 1698 int err; 1699 1700 /* First of all, fast 
age any learn FDB addresses on this switch port */ 1701 dpaa2_switch_port_fast_age(port_priv); 1702 1703 /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN 1704 * upper devices or otherwise from the FDB table that we are about to 1705 * leave 1706 */ 1707 err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev); 1708 if (err) 1709 netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err); 1710 1711 dpaa2_switch_port_set_fdb(port_priv, NULL); 1712 1713 /* Restore all RX VLANs into the new FDB table that we just joined */ 1714 err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev); 1715 if (err) 1716 netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err); 1717 1718 /* Reset the flooding state to denote that this port can send any 1719 * packet in standalone mode. With this, we are also ensuring that any 1720 * later bridge join will have the flooding flag on. 1721 */ 1722 port_priv->bcast_flood = true; 1723 port_priv->ucast_flood = true; 1724 1725 /* Setup the egress flood policy (broadcast, unknown unicast). 1726 * When the port is not under a bridge, only the CTRL interface is part 1727 * of the flooding domain besides the actual port 1728 */ 1729 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 1730 if (err) 1731 return err; 1732 1733 /* Recreate the egress flood domain of the FDB that we just left */ 1734 err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id); 1735 if (err) 1736 return err; 1737 1738 /* No HW learning when not under a bridge */ 1739 err = dpaa2_switch_port_set_learning(port_priv, false); 1740 if (err) 1741 return err; 1742 port_priv->learn_ena = false; 1743 1744 /* Add the VLAN 1 as PVID when not under a bridge. 
We need this since 1745 * the dpaa2 switch interfaces are not capable to be VLAN unaware 1746 */ 1747 return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID, 1748 BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID); 1749 } 1750 1751 static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev) 1752 { 1753 struct net_device *upper_dev; 1754 struct list_head *iter; 1755 1756 /* RCU read lock not necessary because we have write-side protection 1757 * (rtnl_mutex), however a non-rcu iterator does not exist. 1758 */ 1759 netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter) 1760 if (is_vlan_dev(upper_dev)) 1761 return -EOPNOTSUPP; 1762 1763 return 0; 1764 } 1765 1766 static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, 1767 unsigned long event, void *ptr) 1768 { 1769 struct net_device *netdev = netdev_notifier_info_to_dev(ptr); 1770 struct netdev_notifier_changeupper_info *info = ptr; 1771 struct netlink_ext_ack *extack; 1772 struct net_device *upper_dev; 1773 int err = 0; 1774 1775 if (!dpaa2_switch_port_dev_check(netdev)) 1776 return NOTIFY_DONE; 1777 1778 extack = netdev_notifier_info_to_extack(&info->info); 1779 1780 switch (event) { 1781 case NETDEV_PRECHANGEUPPER: 1782 upper_dev = info->upper_dev; 1783 if (!netif_is_bridge_master(upper_dev)) 1784 break; 1785 1786 if (!br_vlan_enabled(upper_dev)) { 1787 NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); 1788 err = -EOPNOTSUPP; 1789 goto out; 1790 } 1791 1792 err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); 1793 if (err) { 1794 NL_SET_ERR_MSG_MOD(extack, 1795 "Cannot join a bridge while VLAN uppers are present"); 1796 goto out; 1797 } 1798 1799 break; 1800 case NETDEV_CHANGEUPPER: 1801 upper_dev = info->upper_dev; 1802 if (netif_is_bridge_master(upper_dev)) { 1803 if (info->linking) 1804 err = dpaa2_switch_port_bridge_join(netdev, upper_dev); 1805 else 1806 err = dpaa2_switch_port_bridge_leave(netdev); 1807 } 1808 break; 1809 } 1810 1811 out: 
1812 return notifier_from_errno(err); 1813 } 1814 1815 struct ethsw_switchdev_event_work { 1816 struct work_struct work; 1817 struct switchdev_notifier_fdb_info fdb_info; 1818 struct net_device *dev; 1819 unsigned long event; 1820 }; 1821 1822 static void dpaa2_switch_event_work(struct work_struct *work) 1823 { 1824 struct ethsw_switchdev_event_work *switchdev_work = 1825 container_of(work, struct ethsw_switchdev_event_work, work); 1826 struct net_device *dev = switchdev_work->dev; 1827 struct switchdev_notifier_fdb_info *fdb_info; 1828 int err; 1829 1830 rtnl_lock(); 1831 fdb_info = &switchdev_work->fdb_info; 1832 1833 switch (switchdev_work->event) { 1834 case SWITCHDEV_FDB_ADD_TO_DEVICE: 1835 if (!fdb_info->added_by_user) 1836 break; 1837 if (is_unicast_ether_addr(fdb_info->addr)) 1838 err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), 1839 fdb_info->addr); 1840 else 1841 err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), 1842 fdb_info->addr); 1843 if (err) 1844 break; 1845 fdb_info->offloaded = true; 1846 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, 1847 &fdb_info->info, NULL); 1848 break; 1849 case SWITCHDEV_FDB_DEL_TO_DEVICE: 1850 if (!fdb_info->added_by_user) 1851 break; 1852 if (is_unicast_ether_addr(fdb_info->addr)) 1853 dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); 1854 else 1855 dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr); 1856 break; 1857 } 1858 1859 rtnl_unlock(); 1860 kfree(switchdev_work->fdb_info.addr); 1861 kfree(switchdev_work); 1862 dev_put(dev); 1863 } 1864 1865 /* Called under rcu_read_lock() */ 1866 static int dpaa2_switch_port_event(struct notifier_block *nb, 1867 unsigned long event, void *ptr) 1868 { 1869 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 1870 struct ethsw_port_priv *port_priv = netdev_priv(dev); 1871 struct ethsw_switchdev_event_work *switchdev_work; 1872 struct switchdev_notifier_fdb_info *fdb_info = ptr; 1873 struct ethsw_core *ethsw = port_priv->ethsw_data; 
1874 1875 if (event == SWITCHDEV_PORT_ATTR_SET) 1876 return dpaa2_switch_port_attr_set_event(dev, ptr); 1877 1878 if (!dpaa2_switch_port_dev_check(dev)) 1879 return NOTIFY_DONE; 1880 1881 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 1882 if (!switchdev_work) 1883 return NOTIFY_BAD; 1884 1885 INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); 1886 switchdev_work->dev = dev; 1887 switchdev_work->event = event; 1888 1889 switch (event) { 1890 case SWITCHDEV_FDB_ADD_TO_DEVICE: 1891 case SWITCHDEV_FDB_DEL_TO_DEVICE: 1892 memcpy(&switchdev_work->fdb_info, ptr, 1893 sizeof(switchdev_work->fdb_info)); 1894 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 1895 if (!switchdev_work->fdb_info.addr) 1896 goto err_addr_alloc; 1897 1898 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 1899 fdb_info->addr); 1900 1901 /* Take a reference on the device to avoid being freed. */ 1902 dev_hold(dev); 1903 break; 1904 default: 1905 kfree(switchdev_work); 1906 return NOTIFY_DONE; 1907 } 1908 1909 queue_work(ethsw->workqueue, &switchdev_work->work); 1910 1911 return NOTIFY_DONE; 1912 1913 err_addr_alloc: 1914 kfree(switchdev_work); 1915 return NOTIFY_BAD; 1916 } 1917 1918 static int dpaa2_switch_port_obj_event(unsigned long event, 1919 struct net_device *netdev, 1920 struct switchdev_notifier_port_obj_info *port_obj_info) 1921 { 1922 int err = -EOPNOTSUPP; 1923 1924 if (!dpaa2_switch_port_dev_check(netdev)) 1925 return NOTIFY_DONE; 1926 1927 switch (event) { 1928 case SWITCHDEV_PORT_OBJ_ADD: 1929 err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); 1930 break; 1931 case SWITCHDEV_PORT_OBJ_DEL: 1932 err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); 1933 break; 1934 } 1935 1936 port_obj_info->handled = true; 1937 return notifier_from_errno(err); 1938 } 1939 1940 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, 1941 unsigned long event, void *ptr) 1942 { 1943 struct net_device *dev = 
switchdev_notifier_info_to_dev(ptr); 1944 1945 switch (event) { 1946 case SWITCHDEV_PORT_OBJ_ADD: 1947 case SWITCHDEV_PORT_OBJ_DEL: 1948 return dpaa2_switch_port_obj_event(event, dev, ptr); 1949 case SWITCHDEV_PORT_ATTR_SET: 1950 return dpaa2_switch_port_attr_set_event(dev, ptr); 1951 } 1952 1953 return NOTIFY_DONE; 1954 } 1955 1956 /* Build a linear skb based on a single-buffer frame descriptor */ 1957 static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw, 1958 const struct dpaa2_fd *fd) 1959 { 1960 u16 fd_offset = dpaa2_fd_get_offset(fd); 1961 dma_addr_t addr = dpaa2_fd_get_addr(fd); 1962 u32 fd_length = dpaa2_fd_get_len(fd); 1963 struct device *dev = ethsw->dev; 1964 struct sk_buff *skb = NULL; 1965 void *fd_vaddr; 1966 1967 fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr); 1968 dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE, 1969 DMA_FROM_DEVICE); 1970 1971 skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE + 1972 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 1973 if (unlikely(!skb)) { 1974 dev_err(dev, "build_skb() failed\n"); 1975 return NULL; 1976 } 1977 1978 skb_reserve(skb, fd_offset); 1979 skb_put(skb, fd_length); 1980 1981 ethsw->buf_count--; 1982 1983 return skb; 1984 } 1985 1986 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, 1987 const struct dpaa2_fd *fd) 1988 { 1989 dpaa2_switch_free_fd(fq->ethsw, fd); 1990 } 1991 1992 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, 1993 const struct dpaa2_fd *fd) 1994 { 1995 struct ethsw_core *ethsw = fq->ethsw; 1996 struct ethsw_port_priv *port_priv; 1997 struct net_device *netdev; 1998 struct vlan_ethhdr *hdr; 1999 struct sk_buff *skb; 2000 u16 vlan_tci, vid; 2001 int if_id, err; 2002 2003 /* get switch ingress interface ID */ 2004 if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF; 2005 2006 if (if_id >= ethsw->sw_attr.num_ifs) { 2007 dev_err(ethsw->dev, "Frame received from unknown interface!\n"); 2008 goto err_free_fd; 2009 } 2010 port_priv = 
ethsw->ports[if_id]; 2011 netdev = port_priv->netdev; 2012 2013 /* build the SKB based on the FD received */ 2014 if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) { 2015 if (net_ratelimit()) { 2016 netdev_err(netdev, "Received invalid frame format\n"); 2017 goto err_free_fd; 2018 } 2019 } 2020 2021 skb = dpaa2_switch_build_linear_skb(ethsw, fd); 2022 if (unlikely(!skb)) 2023 goto err_free_fd; 2024 2025 skb_reset_mac_header(skb); 2026 2027 /* Remove the VLAN header if the packet that we just received has a vid 2028 * equal to the port PVIDs. Since the dpaa2-switch can operate only in 2029 * VLAN-aware mode and no alterations are made on the packet when it's 2030 * redirected/mirrored to the control interface, we are sure that there 2031 * will always be a VLAN header present. 2032 */ 2033 hdr = vlan_eth_hdr(skb); 2034 vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK; 2035 if (vid == port_priv->pvid) { 2036 err = __skb_vlan_pop(skb, &vlan_tci); 2037 if (err) { 2038 dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err); 2039 goto err_free_fd; 2040 } 2041 } 2042 2043 skb->dev = netdev; 2044 skb->protocol = eth_type_trans(skb, skb->dev); 2045 2046 /* Setup the offload_fwd_mark only if the port is under a bridge */ 2047 skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev); 2048 2049 netif_receive_skb(skb); 2050 2051 return; 2052 2053 err_free_fd: 2054 dpaa2_switch_free_fd(ethsw, fd); 2055 } 2056 2057 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) 2058 { 2059 ethsw->features = 0; 2060 2061 if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) 2062 ethsw->features |= ETHSW_FEATURE_MAC_ADDR; 2063 } 2064 2065 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw) 2066 { 2067 struct dpsw_ctrl_if_attr ctrl_if_attr; 2068 struct device *dev = ethsw->dev; 2069 int i = 0; 2070 int err; 2071 2072 err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2073 &ctrl_if_attr); 2074 if (err) { 2075 dev_err(dev, 
"dpsw_ctrl_if_get_attributes() = %d\n", err); 2076 return err; 2077 } 2078 2079 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; 2080 ethsw->fq[i].ethsw = ethsw; 2081 ethsw->fq[i++].type = DPSW_QUEUE_RX; 2082 2083 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; 2084 ethsw->fq[i].ethsw = ethsw; 2085 ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF; 2086 2087 return 0; 2088 } 2089 2090 /* Free buffers acquired from the buffer pool or which were meant to 2091 * be released in the pool 2092 */ 2093 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count) 2094 { 2095 struct device *dev = ethsw->dev; 2096 void *vaddr; 2097 int i; 2098 2099 for (i = 0; i < count; i++) { 2100 vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]); 2101 dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE, 2102 DMA_FROM_DEVICE); 2103 free_pages((unsigned long)vaddr, 0); 2104 } 2105 } 2106 2107 /* Perform a single release command to add buffers 2108 * to the specified buffer pool 2109 */ 2110 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid) 2111 { 2112 struct device *dev = ethsw->dev; 2113 u64 buf_array[BUFS_PER_CMD]; 2114 struct page *page; 2115 int retries = 0; 2116 dma_addr_t addr; 2117 int err; 2118 int i; 2119 2120 for (i = 0; i < BUFS_PER_CMD; i++) { 2121 /* Allocate one page for each Rx buffer. WRIOP sees 2122 * the entire page except for a tailroom reserved for 2123 * skb shared info 2124 */ 2125 page = dev_alloc_pages(0); 2126 if (!page) { 2127 dev_err(dev, "buffer allocation failed\n"); 2128 goto err_alloc; 2129 } 2130 2131 addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE, 2132 DMA_FROM_DEVICE); 2133 if (dma_mapping_error(dev, addr)) { 2134 dev_err(dev, "dma_map_single() failed\n"); 2135 goto err_map; 2136 } 2137 buf_array[i] = addr; 2138 } 2139 2140 release_bufs: 2141 /* In case the portal is busy, retry until successful or 2142 * max retries hit. 
2143 */ 2144 while ((err = dpaa2_io_service_release(NULL, bpid, 2145 buf_array, i)) == -EBUSY) { 2146 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) 2147 break; 2148 2149 cpu_relax(); 2150 } 2151 2152 /* If release command failed, clean up and bail out. */ 2153 if (err) { 2154 dpaa2_switch_free_bufs(ethsw, buf_array, i); 2155 return 0; 2156 } 2157 2158 return i; 2159 2160 err_map: 2161 __free_pages(page, 0); 2162 err_alloc: 2163 /* If we managed to allocate at least some buffers, 2164 * release them to hardware 2165 */ 2166 if (i) 2167 goto release_bufs; 2168 2169 return 0; 2170 } 2171 2172 static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) 2173 { 2174 int *count = ðsw->buf_count; 2175 int new_count; 2176 int err = 0; 2177 2178 if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) { 2179 do { 2180 new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2181 if (unlikely(!new_count)) { 2182 /* Out of memory; abort for now, we'll 2183 * try later on 2184 */ 2185 break; 2186 } 2187 *count += new_count; 2188 } while (*count < DPAA2_ETHSW_NUM_BUFS); 2189 2190 if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS)) 2191 err = -ENOMEM; 2192 } 2193 2194 return err; 2195 } 2196 2197 static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) 2198 { 2199 int *count, i; 2200 2201 for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { 2202 count = ðsw->buf_count; 2203 *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2204 2205 if (unlikely(*count < BUFS_PER_CMD)) 2206 return -ENOMEM; 2207 } 2208 2209 return 0; 2210 } 2211 2212 static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw) 2213 { 2214 u64 buf_array[BUFS_PER_CMD]; 2215 int ret; 2216 2217 do { 2218 ret = dpaa2_io_service_acquire(NULL, ethsw->bpid, 2219 buf_array, BUFS_PER_CMD); 2220 if (ret < 0) { 2221 dev_err(ethsw->dev, 2222 "dpaa2_io_service_acquire() = %d\n", ret); 2223 return; 2224 } 2225 dpaa2_switch_free_bufs(ethsw, buf_array, ret); 2226 2227 } while (ret); 2228 } 2229 2230 static int 
dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) 2231 { 2232 struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 }; 2233 struct device *dev = ethsw->dev; 2234 struct fsl_mc_device *dpbp_dev; 2235 struct dpbp_attr dpbp_attrs; 2236 int err; 2237 2238 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 2239 &dpbp_dev); 2240 if (err) { 2241 if (err == -ENXIO) 2242 err = -EPROBE_DEFER; 2243 else 2244 dev_err(dev, "DPBP device allocation failed\n"); 2245 return err; 2246 } 2247 ethsw->dpbp_dev = dpbp_dev; 2248 2249 err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id, 2250 &dpbp_dev->mc_handle); 2251 if (err) { 2252 dev_err(dev, "dpbp_open() failed\n"); 2253 goto err_open; 2254 } 2255 2256 err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2257 if (err) { 2258 dev_err(dev, "dpbp_reset() failed\n"); 2259 goto err_reset; 2260 } 2261 2262 err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2263 if (err) { 2264 dev_err(dev, "dpbp_enable() failed\n"); 2265 goto err_enable; 2266 } 2267 2268 err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle, 2269 &dpbp_attrs); 2270 if (err) { 2271 dev_err(dev, "dpbp_get_attributes() failed\n"); 2272 goto err_get_attr; 2273 } 2274 2275 dpsw_ctrl_if_pools_cfg.num_dpbp = 1; 2276 dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id; 2277 dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE; 2278 dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0; 2279 2280 err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle, 2281 &dpsw_ctrl_if_pools_cfg); 2282 if (err) { 2283 dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); 2284 goto err_get_attr; 2285 } 2286 ethsw->bpid = dpbp_attrs.id; 2287 2288 return 0; 2289 2290 err_get_attr: 2291 dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2292 err_enable: 2293 err_reset: 2294 dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2295 err_open: 2296 fsl_mc_object_free(dpbp_dev); 2297 return err; 2298 } 2299 2300 static void 
dpaa2_switch_free_dpbp(struct ethsw_core *ethsw) 2301 { 2302 dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2303 dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2304 fsl_mc_object_free(ethsw->dpbp_dev); 2305 } 2306 2307 static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw) 2308 { 2309 int i; 2310 2311 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2312 ethsw->fq[i].store = 2313 dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE, 2314 ethsw->dev); 2315 if (!ethsw->fq[i].store) { 2316 dev_err(ethsw->dev, "dpaa2_io_store_create failed\n"); 2317 while (--i >= 0) 2318 dpaa2_io_store_destroy(ethsw->fq[i].store); 2319 return -ENOMEM; 2320 } 2321 } 2322 2323 return 0; 2324 } 2325 2326 static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw) 2327 { 2328 int i; 2329 2330 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2331 dpaa2_io_store_destroy(ethsw->fq[i].store); 2332 } 2333 2334 static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq) 2335 { 2336 int err, retries = 0; 2337 2338 /* Try to pull from the FQ while the portal is busy and we didn't hit 2339 * the maximum number fo retries 2340 */ 2341 do { 2342 err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store); 2343 cpu_relax(); 2344 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2345 2346 if (unlikely(err)) 2347 dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err); 2348 2349 return err; 2350 } 2351 2352 /* Consume all frames pull-dequeued into the store */ 2353 static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq) 2354 { 2355 struct ethsw_core *ethsw = fq->ethsw; 2356 int cleaned = 0, is_last; 2357 struct dpaa2_dq *dq; 2358 int retries = 0; 2359 2360 do { 2361 /* Get the next available FD from the store */ 2362 dq = dpaa2_io_store_next(fq->store, &is_last); 2363 if (unlikely(!dq)) { 2364 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) { 2365 dev_err_once(ethsw->dev, 2366 "No valid dequeue response\n"); 2367 return -ETIMEDOUT; 2368 } 2369 
continue; 2370 } 2371 2372 if (fq->type == DPSW_QUEUE_RX) 2373 dpaa2_switch_rx(fq, dpaa2_dq_fd(dq)); 2374 else 2375 dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq)); 2376 cleaned++; 2377 2378 } while (!is_last); 2379 2380 return cleaned; 2381 } 2382 2383 /* NAPI poll routine */ 2384 static int dpaa2_switch_poll(struct napi_struct *napi, int budget) 2385 { 2386 int err, cleaned = 0, store_cleaned, work_done; 2387 struct dpaa2_switch_fq *fq; 2388 int retries = 0; 2389 2390 fq = container_of(napi, struct dpaa2_switch_fq, napi); 2391 2392 do { 2393 err = dpaa2_switch_pull_fq(fq); 2394 if (unlikely(err)) 2395 break; 2396 2397 /* Refill pool if appropriate */ 2398 dpaa2_switch_refill_bp(fq->ethsw); 2399 2400 store_cleaned = dpaa2_switch_store_consume(fq); 2401 cleaned += store_cleaned; 2402 2403 if (cleaned >= budget) { 2404 work_done = budget; 2405 goto out; 2406 } 2407 2408 } while (store_cleaned); 2409 2410 /* We didn't consume the entire budget, so finish napi and re-enable 2411 * data availability notifications 2412 */ 2413 napi_complete_done(napi, cleaned); 2414 do { 2415 err = dpaa2_io_service_rearm(NULL, &fq->nctx); 2416 cpu_relax(); 2417 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES); 2418 2419 work_done = max(cleaned, 1); 2420 out: 2421 2422 return work_done; 2423 } 2424 2425 static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) 2426 { 2427 struct dpaa2_switch_fq *fq; 2428 2429 fq = container_of(nctx, struct dpaa2_switch_fq, nctx); 2430 2431 napi_schedule(&fq->napi); 2432 } 2433 2434 static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw) 2435 { 2436 struct dpsw_ctrl_if_queue_cfg queue_cfg; 2437 struct dpaa2_io_notification_ctx *nctx; 2438 int err, i, j; 2439 2440 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) { 2441 nctx = ðsw->fq[i].nctx; 2442 2443 /* Register a new software context for the FQID. 
2444 * By using NULL as the first parameter, we specify that we do 2445 * not care on which cpu are interrupts received for this queue 2446 */ 2447 nctx->is_cdan = 0; 2448 nctx->id = ethsw->fq[i].fqid; 2449 nctx->desired_cpu = DPAA2_IO_ANY_CPU; 2450 nctx->cb = dpaa2_switch_fqdan_cb; 2451 err = dpaa2_io_service_register(NULL, nctx, ethsw->dev); 2452 if (err) { 2453 err = -EPROBE_DEFER; 2454 goto err_register; 2455 } 2456 2457 queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST | 2458 DPSW_CTRL_IF_QUEUE_OPT_USER_CTX; 2459 queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO; 2460 queue_cfg.dest_cfg.dest_id = nctx->dpio_id; 2461 queue_cfg.dest_cfg.priority = 0; 2462 queue_cfg.user_ctx = nctx->qman64; 2463 2464 err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0, 2465 ethsw->dpsw_handle, 2466 ethsw->fq[i].type, 2467 &queue_cfg); 2468 if (err) 2469 goto err_set_queue; 2470 } 2471 2472 return 0; 2473 2474 err_set_queue: 2475 dpaa2_io_service_deregister(NULL, nctx, ethsw->dev); 2476 err_register: 2477 for (j = 0; j < i; j++) 2478 dpaa2_io_service_deregister(NULL, ðsw->fq[j].nctx, 2479 ethsw->dev); 2480 2481 return err; 2482 } 2483 2484 static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw) 2485 { 2486 int i; 2487 2488 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2489 dpaa2_io_service_deregister(NULL, ðsw->fq[i].nctx, 2490 ethsw->dev); 2491 } 2492 2493 static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) 2494 { 2495 int err; 2496 2497 /* setup FQs for Rx and Tx Conf */ 2498 err = dpaa2_switch_setup_fqs(ethsw); 2499 if (err) 2500 return err; 2501 2502 /* setup the buffer pool needed on the Rx path */ 2503 err = dpaa2_switch_setup_dpbp(ethsw); 2504 if (err) 2505 return err; 2506 2507 err = dpaa2_switch_seed_bp(ethsw); 2508 if (err) 2509 goto err_free_dpbp; 2510 2511 err = dpaa2_switch_alloc_rings(ethsw); 2512 if (err) 2513 goto err_drain_dpbp; 2514 2515 err = dpaa2_switch_setup_dpio(ethsw); 2516 if (err) 2517 goto err_destroy_rings; 2518 2519 err = 
dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); 2520 if (err) { 2521 dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); 2522 goto err_deregister_dpio; 2523 } 2524 2525 return 0; 2526 2527 err_deregister_dpio: 2528 dpaa2_switch_free_dpio(ethsw); 2529 err_destroy_rings: 2530 dpaa2_switch_destroy_rings(ethsw); 2531 err_drain_dpbp: 2532 dpaa2_switch_drain_bp(ethsw); 2533 err_free_dpbp: 2534 dpaa2_switch_free_dpbp(ethsw); 2535 2536 return err; 2537 } 2538 2539 static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) 2540 { 2541 struct device *dev = &sw_dev->dev; 2542 struct ethsw_core *ethsw = dev_get_drvdata(dev); 2543 struct dpsw_vlan_if_cfg vcfg = {0}; 2544 struct dpsw_tci_cfg tci_cfg = {0}; 2545 struct dpsw_stp_cfg stp_cfg; 2546 int err; 2547 u16 i; 2548 2549 ethsw->dev_id = sw_dev->obj_desc.id; 2550 2551 err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, ðsw->dpsw_handle); 2552 if (err) { 2553 dev_err(dev, "dpsw_open err %d\n", err); 2554 return err; 2555 } 2556 2557 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2558 ðsw->sw_attr); 2559 if (err) { 2560 dev_err(dev, "dpsw_get_attributes err %d\n", err); 2561 goto err_close; 2562 } 2563 2564 err = dpsw_get_api_version(ethsw->mc_io, 0, 2565 ðsw->major, 2566 ðsw->minor); 2567 if (err) { 2568 dev_err(dev, "dpsw_get_api_version err %d\n", err); 2569 goto err_close; 2570 } 2571 2572 /* Minimum supported DPSW version check */ 2573 if (ethsw->major < DPSW_MIN_VER_MAJOR || 2574 (ethsw->major == DPSW_MIN_VER_MAJOR && 2575 ethsw->minor < DPSW_MIN_VER_MINOR)) { 2576 dev_err(dev, "DPSW version %d:%d not supported. 
Use firmware 10.28.0 or greater.\n", 2577 ethsw->major, ethsw->minor); 2578 err = -EOPNOTSUPP; 2579 goto err_close; 2580 } 2581 2582 if (!dpaa2_switch_supports_cpu_traffic(ethsw)) { 2583 err = -EOPNOTSUPP; 2584 goto err_close; 2585 } 2586 2587 dpaa2_switch_detect_features(ethsw); 2588 2589 err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle); 2590 if (err) { 2591 dev_err(dev, "dpsw_reset err %d\n", err); 2592 goto err_close; 2593 } 2594 2595 stp_cfg.vlan_id = DEFAULT_VLAN_ID; 2596 stp_cfg.state = DPSW_STP_STATE_FORWARDING; 2597 2598 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 2599 err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i); 2600 if (err) { 2601 dev_err(dev, "dpsw_if_disable err %d\n", err); 2602 goto err_close; 2603 } 2604 2605 err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i, 2606 &stp_cfg); 2607 if (err) { 2608 dev_err(dev, "dpsw_if_set_stp err %d for port %d\n", 2609 err, i); 2610 goto err_close; 2611 } 2612 2613 /* Switch starts with all ports configured to VLAN 1. 
Need to 2614 * remove this setting to allow configuration at bridge join 2615 */ 2616 vcfg.num_ifs = 1; 2617 vcfg.if_id[0] = i; 2618 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle, 2619 DEFAULT_VLAN_ID, &vcfg); 2620 if (err) { 2621 dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n", 2622 err); 2623 goto err_close; 2624 } 2625 2626 tci_cfg.vlan_id = 4095; 2627 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg); 2628 if (err) { 2629 dev_err(dev, "dpsw_if_set_tci err %d\n", err); 2630 goto err_close; 2631 } 2632 2633 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 2634 DEFAULT_VLAN_ID, &vcfg); 2635 if (err) { 2636 dev_err(dev, "dpsw_vlan_remove_if err %d\n", err); 2637 goto err_close; 2638 } 2639 } 2640 2641 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID); 2642 if (err) { 2643 dev_err(dev, "dpsw_vlan_remove err %d\n", err); 2644 goto err_close; 2645 } 2646 2647 ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered", 2648 WQ_MEM_RECLAIM, "ethsw", 2649 ethsw->sw_attr.id); 2650 if (!ethsw->workqueue) { 2651 err = -ENOMEM; 2652 goto err_close; 2653 } 2654 2655 err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0); 2656 if (err) 2657 goto err_destroy_ordered_workqueue; 2658 2659 err = dpaa2_switch_ctrl_if_setup(ethsw); 2660 if (err) 2661 goto err_destroy_ordered_workqueue; 2662 2663 return 0; 2664 2665 err_destroy_ordered_workqueue: 2666 destroy_workqueue(ethsw->workqueue); 2667 2668 err_close: 2669 dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 2670 return err; 2671 } 2672 2673 /* Add an ACL to redirect frames with specific destination MAC address to 2674 * control interface 2675 */ 2676 static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, 2677 const char *mac) 2678 { 2679 struct net_device *netdev = port_priv->netdev; 2680 struct dpsw_acl_entry_cfg acl_entry_cfg; 2681 struct dpsw_acl_fields *acl_h; 2682 struct dpsw_acl_fields *acl_m; 2683 
struct dpsw_acl_key acl_key; 2684 struct device *dev; 2685 u8 *cmd_buff; 2686 int err; 2687 2688 dev = port_priv->netdev->dev.parent; 2689 acl_h = &acl_key.match; 2690 acl_m = &acl_key.mask; 2691 2692 if (port_priv->acl_num_rules >= DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) { 2693 netdev_err(netdev, "ACL full\n"); 2694 return -ENOMEM; 2695 } 2696 2697 memset(&acl_entry_cfg, 0, sizeof(acl_entry_cfg)); 2698 memset(&acl_key, 0, sizeof(acl_key)); 2699 2700 /* Match on the destination MAC address */ 2701 ether_addr_copy(acl_h->l2_dest_mac, mac); 2702 eth_broadcast_addr(acl_m->l2_dest_mac); 2703 2704 cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL); 2705 if (!cmd_buff) 2706 return -ENOMEM; 2707 dpsw_acl_prepare_entry_cfg(&acl_key, cmd_buff); 2708 2709 memset(&acl_entry_cfg, 0, sizeof(acl_entry_cfg)); 2710 acl_entry_cfg.precedence = port_priv->acl_num_rules; 2711 acl_entry_cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; 2712 acl_entry_cfg.key_iova = dma_map_single(dev, cmd_buff, 2713 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, 2714 DMA_TO_DEVICE); 2715 if (unlikely(dma_mapping_error(dev, acl_entry_cfg.key_iova))) { 2716 netdev_err(netdev, "DMA mapping failed\n"); 2717 return -EFAULT; 2718 } 2719 2720 err = dpsw_acl_add_entry(port_priv->ethsw_data->mc_io, 0, 2721 port_priv->ethsw_data->dpsw_handle, 2722 port_priv->acl_tbl, &acl_entry_cfg); 2723 2724 dma_unmap_single(dev, acl_entry_cfg.key_iova, sizeof(cmd_buff), 2725 DMA_TO_DEVICE); 2726 if (err) { 2727 netdev_err(netdev, "dpsw_acl_add_entry() failed %d\n", err); 2728 return err; 2729 } 2730 2731 port_priv->acl_num_rules++; 2732 2733 return 0; 2734 } 2735 2736 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) 2737 { 2738 const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; 2739 struct switchdev_obj_port_vlan vlan = { 2740 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 2741 .vid = DEFAULT_VLAN_ID, 2742 .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID, 2743 }; 2744 struct 
net_device *netdev = port_priv->netdev; 2745 struct ethsw_core *ethsw = port_priv->ethsw_data; 2746 struct dpsw_fdb_cfg fdb_cfg = {0}; 2747 struct dpsw_acl_if_cfg acl_if_cfg; 2748 struct dpsw_if_attr dpsw_if_attr; 2749 struct dpaa2_switch_fdb *fdb; 2750 struct dpsw_acl_cfg acl_cfg; 2751 u16 fdb_id; 2752 int err; 2753 2754 /* Get the Tx queue for this specific port */ 2755 err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2756 port_priv->idx, &dpsw_if_attr); 2757 if (err) { 2758 netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err); 2759 return err; 2760 } 2761 port_priv->tx_qdid = dpsw_if_attr.qdid; 2762 2763 /* Create a FDB table for this particular switch port */ 2764 fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs; 2765 err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 2766 &fdb_id, &fdb_cfg); 2767 if (err) { 2768 netdev_err(netdev, "dpsw_fdb_add err %d\n", err); 2769 return err; 2770 } 2771 2772 /* Find an unused dpaa2_switch_fdb structure and use it */ 2773 fdb = dpaa2_switch_fdb_get_unused(ethsw); 2774 fdb->fdb_id = fdb_id; 2775 fdb->in_use = true; 2776 fdb->bridge_dev = NULL; 2777 port_priv->fdb = fdb; 2778 2779 /* We need to add VLAN 1 as the PVID on this port until it is under a 2780 * bridge since the DPAA2 switch is not able to handle the traffic in a 2781 * VLAN unaware fashion 2782 */ 2783 err = dpaa2_switch_port_vlans_add(netdev, &vlan); 2784 if (err) 2785 return err; 2786 2787 /* Setup the egress flooding domains (broadcast, unknown unicast */ 2788 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 2789 if (err) 2790 return err; 2791 2792 /* Create an ACL table to be used by this switch port */ 2793 acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES; 2794 err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle, 2795 &port_priv->acl_tbl, &acl_cfg); 2796 if (err) { 2797 netdev_err(netdev, "dpsw_acl_add err %d\n", err); 2798 return err; 2799 } 2800 2801 
acl_if_cfg.if_id[0] = port_priv->idx; 2802 acl_if_cfg.num_ifs = 1; 2803 err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 2804 port_priv->acl_tbl, &acl_if_cfg); 2805 if (err) { 2806 netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); 2807 dpsw_acl_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 2808 port_priv->acl_tbl); 2809 } 2810 2811 err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa); 2812 if (err) 2813 return err; 2814 2815 return err; 2816 } 2817 2818 static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev) 2819 { 2820 struct device *dev = &sw_dev->dev; 2821 struct ethsw_core *ethsw = dev_get_drvdata(dev); 2822 int err; 2823 2824 err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle); 2825 if (err) 2826 dev_warn(dev, "dpsw_close err %d\n", err); 2827 } 2828 2829 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw) 2830 { 2831 dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 2832 dpaa2_switch_free_dpio(ethsw); 2833 dpaa2_switch_destroy_rings(ethsw); 2834 dpaa2_switch_drain_bp(ethsw); 2835 dpaa2_switch_free_dpbp(ethsw); 2836 } 2837 2838 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) 2839 { 2840 struct ethsw_port_priv *port_priv; 2841 struct ethsw_core *ethsw; 2842 struct device *dev; 2843 int i; 2844 2845 dev = &sw_dev->dev; 2846 ethsw = dev_get_drvdata(dev); 2847 2848 dpaa2_switch_ctrl_if_teardown(ethsw); 2849 2850 dpaa2_switch_teardown_irqs(sw_dev); 2851 2852 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 2853 2854 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 2855 port_priv = ethsw->ports[i]; 2856 unregister_netdev(port_priv->netdev); 2857 free_netdev(port_priv->netdev); 2858 } 2859 2860 kfree(ethsw->fdbs); 2861 kfree(ethsw->ports); 2862 2863 dpaa2_switch_takedown(sw_dev); 2864 2865 destroy_workqueue(ethsw->workqueue); 2866 2867 fsl_mc_portal_free(ethsw->mc_io); 2868 2869 kfree(ethsw); 2870 2871 dev_set_drvdata(dev, NULL); 2872 2873 return 0; 2874 } 2875 2876 static int dpaa2_switch_probe_port(struct 
ethsw_core *ethsw, 2877 u16 port_idx) 2878 { 2879 struct ethsw_port_priv *port_priv; 2880 struct device *dev = ethsw->dev; 2881 struct net_device *port_netdev; 2882 int err; 2883 2884 port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv)); 2885 if (!port_netdev) { 2886 dev_err(dev, "alloc_etherdev error\n"); 2887 return -ENOMEM; 2888 } 2889 2890 port_priv = netdev_priv(port_netdev); 2891 port_priv->netdev = port_netdev; 2892 port_priv->ethsw_data = ethsw; 2893 2894 port_priv->idx = port_idx; 2895 port_priv->stp_state = BR_STATE_FORWARDING; 2896 2897 SET_NETDEV_DEV(port_netdev, dev); 2898 port_netdev->netdev_ops = &dpaa2_switch_port_ops; 2899 port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops; 2900 2901 port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM; 2902 2903 port_priv->bcast_flood = true; 2904 port_priv->ucast_flood = true; 2905 2906 /* Set MTU limits */ 2907 port_netdev->min_mtu = ETH_MIN_MTU; 2908 port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH; 2909 2910 /* Populate the private port structure so that later calls to 2911 * dpaa2_switch_port_init() can use it. 2912 */ 2913 ethsw->ports[port_idx] = port_priv; 2914 2915 /* The DPAA2 switch's ingress path depends on the VLAN table, 2916 * thus we are not able to disable VLAN filtering. 
2917 */ 2918 port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER; 2919 2920 err = dpaa2_switch_port_init(port_priv, port_idx); 2921 if (err) 2922 goto err_port_probe; 2923 2924 err = dpaa2_switch_port_set_mac_addr(port_priv); 2925 if (err) 2926 goto err_port_probe; 2927 2928 err = dpaa2_switch_port_set_learning(port_priv, false); 2929 if (err) 2930 goto err_port_probe; 2931 port_priv->learn_ena = false; 2932 2933 return 0; 2934 2935 err_port_probe: 2936 free_netdev(port_netdev); 2937 ethsw->ports[port_idx] = NULL; 2938 2939 return err; 2940 } 2941 2942 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) 2943 { 2944 struct device *dev = &sw_dev->dev; 2945 struct ethsw_core *ethsw; 2946 int i, err; 2947 2948 /* Allocate switch core*/ 2949 ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); 2950 2951 if (!ethsw) 2952 return -ENOMEM; 2953 2954 ethsw->dev = dev; 2955 ethsw->iommu_domain = iommu_get_domain_for_dev(dev); 2956 dev_set_drvdata(dev, ethsw); 2957 2958 err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 2959 ðsw->mc_io); 2960 if (err) { 2961 if (err == -ENXIO) 2962 err = -EPROBE_DEFER; 2963 else 2964 dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); 2965 goto err_free_drvdata; 2966 } 2967 2968 err = dpaa2_switch_init(sw_dev); 2969 if (err) 2970 goto err_free_cmdport; 2971 2972 ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), 2973 GFP_KERNEL); 2974 if (!(ethsw->ports)) { 2975 err = -ENOMEM; 2976 goto err_takedown; 2977 } 2978 2979 ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), 2980 GFP_KERNEL); 2981 if (!ethsw->fdbs) { 2982 err = -ENOMEM; 2983 goto err_free_ports; 2984 } 2985 2986 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 2987 err = dpaa2_switch_probe_port(ethsw, i); 2988 if (err) 2989 goto err_free_netdev; 2990 } 2991 2992 /* Add a NAPI instance for each of the Rx queues. 
The first port's 2993 * net_device will be associated with the instances since we do not have 2994 * different queues for each switch ports. 2995 */ 2996 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2997 netif_napi_add(ethsw->ports[0]->netdev, 2998 ðsw->fq[i].napi, dpaa2_switch_poll, 2999 NAPI_POLL_WEIGHT); 3000 3001 err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3002 if (err) { 3003 dev_err(ethsw->dev, "dpsw_enable err %d\n", err); 3004 goto err_free_netdev; 3005 } 3006 3007 /* Setup IRQs */ 3008 err = dpaa2_switch_setup_irqs(sw_dev); 3009 if (err) 3010 goto err_stop; 3011 3012 /* Register the netdev only when the entire setup is done and the 3013 * switch port interfaces are ready to receive traffic 3014 */ 3015 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 3016 err = register_netdev(ethsw->ports[i]->netdev); 3017 if (err < 0) { 3018 dev_err(dev, "register_netdev error %d\n", err); 3019 goto err_unregister_ports; 3020 } 3021 } 3022 3023 return 0; 3024 3025 err_unregister_ports: 3026 for (i--; i >= 0; i--) 3027 unregister_netdev(ethsw->ports[i]->netdev); 3028 dpaa2_switch_teardown_irqs(sw_dev); 3029 err_stop: 3030 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 3031 err_free_netdev: 3032 for (i--; i >= 0; i--) 3033 free_netdev(ethsw->ports[i]->netdev); 3034 kfree(ethsw->fdbs); 3035 err_free_ports: 3036 kfree(ethsw->ports); 3037 3038 err_takedown: 3039 dpaa2_switch_takedown(sw_dev); 3040 3041 err_free_cmdport: 3042 fsl_mc_portal_free(ethsw->mc_io); 3043 3044 err_free_drvdata: 3045 kfree(ethsw); 3046 dev_set_drvdata(dev, NULL); 3047 3048 return err; 3049 } 3050 3051 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { 3052 { 3053 .vendor = FSL_MC_VENDOR_FREESCALE, 3054 .obj_type = "dpsw", 3055 }, 3056 { .vendor = 0x0 } 3057 }; 3058 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); 3059 3060 static struct fsl_mc_driver dpaa2_switch_drv = { 3061 .driver = { 3062 .name = KBUILD_MODNAME, 3063 .owner = THIS_MODULE, 3064 }, 3065 
.probe = dpaa2_switch_probe, 3066 .remove = dpaa2_switch_remove, 3067 .match_id_table = dpaa2_switch_match_id_table 3068 }; 3069 3070 static struct notifier_block dpaa2_switch_port_nb __read_mostly = { 3071 .notifier_call = dpaa2_switch_port_netdevice_event, 3072 }; 3073 3074 static struct notifier_block dpaa2_switch_port_switchdev_nb = { 3075 .notifier_call = dpaa2_switch_port_event, 3076 }; 3077 3078 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = { 3079 .notifier_call = dpaa2_switch_port_blocking_event, 3080 }; 3081 3082 static int dpaa2_switch_register_notifiers(void) 3083 { 3084 int err; 3085 3086 err = register_netdevice_notifier(&dpaa2_switch_port_nb); 3087 if (err) { 3088 pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err); 3089 return err; 3090 } 3091 3092 err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3093 if (err) { 3094 pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err); 3095 goto err_switchdev_nb; 3096 } 3097 3098 err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3099 if (err) { 3100 pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err); 3101 goto err_switchdev_blocking_nb; 3102 } 3103 3104 return 0; 3105 3106 err_switchdev_blocking_nb: 3107 unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3108 err_switchdev_nb: 3109 unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3110 3111 return err; 3112 } 3113 3114 static void dpaa2_switch_unregister_notifiers(void) 3115 { 3116 int err; 3117 3118 err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb); 3119 if (err) 3120 pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n", 3121 err); 3122 3123 err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb); 3124 if (err) 3125 pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err); 3126 3127 err = 
unregister_netdevice_notifier(&dpaa2_switch_port_nb); 3128 if (err) 3129 pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err); 3130 } 3131 3132 static int __init dpaa2_switch_driver_init(void) 3133 { 3134 int err; 3135 3136 err = fsl_mc_driver_register(&dpaa2_switch_drv); 3137 if (err) 3138 return err; 3139 3140 err = dpaa2_switch_register_notifiers(); 3141 if (err) { 3142 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3143 return err; 3144 } 3145 3146 return 0; 3147 } 3148 3149 static void __exit dpaa2_switch_driver_exit(void) 3150 { 3151 dpaa2_switch_unregister_notifiers(); 3152 fsl_mc_driver_unregister(&dpaa2_switch_drv); 3153 } 3154 3155 module_init(dpaa2_switch_driver_init); 3156 module_exit(dpaa2_switch_driver_exit); 3157 3158 MODULE_LICENSE("GPL v2"); 3159 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver"); 3160