1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * DPAA2 Ethernet Switch driver 4 * 5 * Copyright 2014-2016 Freescale Semiconductor Inc. 6 * Copyright 2017-2021 NXP 7 * 8 */ 9 10 #include <linux/module.h> 11 12 #include <linux/interrupt.h> 13 #include <linux/msi.h> 14 #include <linux/kthread.h> 15 #include <linux/workqueue.h> 16 #include <linux/iommu.h> 17 18 #include <linux/fsl/mc.h> 19 20 #include "dpaa2-switch.h" 21 22 /* Minimal supported DPSW version */ 23 #define DPSW_MIN_VER_MAJOR 8 24 #define DPSW_MIN_VER_MINOR 9 25 26 #define DEFAULT_VLAN_ID 1 27 28 static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv) 29 { 30 return port_priv->fdb->fdb_id; 31 } 32 33 static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw) 34 { 35 int i; 36 37 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) 38 if (!ethsw->fdbs[i].in_use) 39 return ðsw->fdbs[i]; 40 return NULL; 41 } 42 43 static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv, 44 struct net_device *bridge_dev) 45 { 46 struct ethsw_port_priv *other_port_priv = NULL; 47 struct dpaa2_switch_fdb *fdb; 48 struct net_device *other_dev; 49 struct list_head *iter; 50 51 /* If we leave a bridge (bridge_dev is NULL), find an unused 52 * FDB and use that. 53 */ 54 if (!bridge_dev) { 55 fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data); 56 57 /* If there is no unused FDB, we must be the last port that 58 * leaves the last bridge, all the others are standalone. We 59 * can just keep the FDB that we already have. 60 */ 61 62 if (!fdb) { 63 port_priv->fdb->bridge_dev = NULL; 64 return 0; 65 } 66 67 port_priv->fdb = fdb; 68 port_priv->fdb->in_use = true; 69 port_priv->fdb->bridge_dev = NULL; 70 return 0; 71 } 72 73 /* The below call to netdev_for_each_lower_dev() demands the RTNL lock 74 * being held. Assert on it so that it's easier to catch new code 75 * paths that reach this point without the RTNL lock. 76 */ 77 ASSERT_RTNL(); 78 79 /* If part of a bridge, use the FDB of the first dpaa2 switch interface 80 * to be present in that bridge 81 */ 82 netdev_for_each_lower_dev(bridge_dev, other_dev, iter) { 83 if (!dpaa2_switch_port_dev_check(other_dev)) 84 continue; 85 86 if (other_dev == port_priv->netdev) 87 continue; 88 89 other_port_priv = netdev_priv(other_dev); 90 break; 91 } 92 93 /* The current port is about to change its FDB to the one used by the 94 * first port that joined the bridge. 95 */ 96 if (other_port_priv) { 97 /* The previous FDB is about to become unused, since the 98 * interface is no longer standalone. 
99 */ 100 port_priv->fdb->in_use = false; 101 port_priv->fdb->bridge_dev = NULL; 102 103 /* Get a reference to the new FDB */ 104 port_priv->fdb = other_port_priv->fdb; 105 } 106 107 /* Keep track of the new upper bridge device */ 108 port_priv->fdb->bridge_dev = bridge_dev; 109 110 return 0; 111 } 112 113 static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id, 114 enum dpsw_flood_type type, 115 struct dpsw_egress_flood_cfg *cfg) 116 { 117 int i = 0, j; 118 119 memset(cfg, 0, sizeof(*cfg)); 120 121 /* Add all the DPAA2 switch ports found in the same bridging domain to 122 * the egress flooding domain 123 */ 124 for (j = 0; j < ethsw->sw_attr.num_ifs; j++) { 125 if (!ethsw->ports[j]) 126 continue; 127 if (ethsw->ports[j]->fdb->fdb_id != fdb_id) 128 continue; 129 130 if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood) 131 cfg->if_id[i++] = ethsw->ports[j]->idx; 132 else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood) 133 cfg->if_id[i++] = ethsw->ports[j]->idx; 134 } 135 136 /* Add the CTRL interface to the egress flooding domain */ 137 cfg->if_id[i++] = ethsw->sw_attr.num_ifs; 138 139 cfg->fdb_id = fdb_id; 140 cfg->flood_type = type; 141 cfg->num_ifs = i; 142 } 143 144 static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id) 145 { 146 struct dpsw_egress_flood_cfg flood_cfg; 147 int err; 148 149 /* Setup broadcast flooding domain */ 150 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg); 151 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 152 &flood_cfg); 153 if (err) { 154 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 155 return err; 156 } 157 158 /* Setup unknown flooding domain */ 159 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg); 160 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 161 &flood_cfg); 162 if (err) { 163 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 164 return err; 165 } 166 167 return 0; 168 } 169 170 static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 171 dma_addr_t iova_addr) 172 { 173 phys_addr_t phys_addr; 174 175 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; 176 177 return phys_to_virt(phys_addr); 178 } 179 180 static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid) 181 { 182 struct ethsw_core *ethsw = port_priv->ethsw_data; 183 struct dpsw_vlan_cfg vcfg = {0}; 184 int err; 185 186 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 187 err = dpsw_vlan_add(ethsw->mc_io, 0, 188 ethsw->dpsw_handle, vid, &vcfg); 189 if (err) { 190 dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err); 191 return err; 192 } 193 ethsw->vlans[vid] = ETHSW_VLAN_MEMBER; 194 195 return 0; 196 } 197 198 static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv) 199 { 200 struct net_device *netdev = port_priv->netdev; 201 struct dpsw_link_state state; 202 int err; 203 204 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, 205 port_priv->ethsw_data->dpsw_handle, 206 port_priv->idx, &state); 207 if (err) { 208 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); 209 return true; 210 } 211 212 WARN_ONCE(state.up > 1, "Garbage read into link_state"); 213 214 return state.up ? 
true : false; 215 } 216 217 static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid) 218 { 219 struct ethsw_core *ethsw = port_priv->ethsw_data; 220 struct net_device *netdev = port_priv->netdev; 221 struct dpsw_tci_cfg tci_cfg = { 0 }; 222 bool up; 223 int err, ret; 224 225 err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 226 port_priv->idx, &tci_cfg); 227 if (err) { 228 netdev_err(netdev, "dpsw_if_get_tci err %d\n", err); 229 return err; 230 } 231 232 tci_cfg.vlan_id = pvid; 233 234 /* Interface needs to be down to change PVID */ 235 up = dpaa2_switch_port_is_up(port_priv); 236 if (up) { 237 err = dpsw_if_disable(ethsw->mc_io, 0, 238 ethsw->dpsw_handle, 239 port_priv->idx); 240 if (err) { 241 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 242 return err; 243 } 244 } 245 246 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 247 port_priv->idx, &tci_cfg); 248 if (err) { 249 netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); 250 goto set_tci_error; 251 } 252 253 /* Delete previous PVID info and mark the new one */ 254 port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID; 255 port_priv->vlans[pvid] |= ETHSW_VLAN_PVID; 256 port_priv->pvid = pvid; 257 258 set_tci_error: 259 if (up) { 260 ret = dpsw_if_enable(ethsw->mc_io, 0, 261 ethsw->dpsw_handle, 262 port_priv->idx); 263 if (ret) { 264 netdev_err(netdev, "dpsw_if_enable err %d\n", ret); 265 return ret; 266 } 267 } 268 269 return err; 270 } 271 272 static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv, 273 u16 vid, u16 flags) 274 { 275 struct ethsw_core *ethsw = port_priv->ethsw_data; 276 struct net_device *netdev = port_priv->netdev; 277 struct dpsw_vlan_if_cfg vcfg = {0}; 278 int err; 279 280 if (port_priv->vlans[vid]) { 281 netdev_warn(netdev, "VLAN %d already configured\n", vid); 282 return -EEXIST; 283 } 284 285 /* If hit, this VLAN rule will lead the packet into the FDB table 286 * specified in the vlan configuration below 287 */ 288 vcfg.num_ifs = 1; 289 vcfg.if_id[0] = port_priv->idx; 290 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 291 vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID; 292 err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg); 293 if (err) { 294 netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); 295 return err; 296 } 297 298 port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; 299 300 if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { 301 err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0, 302 ethsw->dpsw_handle, 303 vid, &vcfg); 304 if (err) { 305 netdev_err(netdev, 306 "dpsw_vlan_add_if_untagged err %d\n", err); 307 return err; 308 } 309 port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; 310 } 311 312 if (flags & BRIDGE_VLAN_INFO_PVID) { 313 err = dpaa2_switch_port_set_pvid(port_priv, vid); 314 if (err) 315 return err; 316 } 317 318 return 0; 319 } 320 321 static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state) 322 { 323 struct dpsw_stp_cfg stp_cfg = { 324 .state = state, 325 }; 326 int err; 327 u16 vid; 328 329 if (!netif_running(port_priv->netdev) || state == port_priv->stp_state) 330 return 0; /* Nothing to do */ 331 332 for (vid = 0; vid <= VLAN_VID_MASK; vid++) { 333 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { 334 stp_cfg.vlan_id = vid; 335 err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0, 336 port_priv->ethsw_data->dpsw_handle, 337 port_priv->idx, &stp_cfg); 338 if (err) { 339 netdev_err(port_priv->netdev, 340 "dpsw_if_set_stp err %d\n", err); 341 return err; 342 } 343 } 344 } 345 346 
port_priv->stp_state = state; 347 348 return 0; 349 } 350 351 static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid) 352 { 353 struct ethsw_port_priv *ppriv_local = NULL; 354 int i, err; 355 356 if (!ethsw->vlans[vid]) 357 return -ENOENT; 358 359 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid); 360 if (err) { 361 dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err); 362 return err; 363 } 364 ethsw->vlans[vid] = 0; 365 366 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 367 ppriv_local = ethsw->ports[i]; 368 ppriv_local->vlans[vid] = 0; 369 } 370 371 return 0; 372 } 373 374 static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv, 375 const unsigned char *addr) 376 { 377 struct dpsw_fdb_unicast_cfg entry = {0}; 378 u16 fdb_id; 379 int err; 380 381 entry.if_egress = port_priv->idx; 382 entry.type = DPSW_FDB_ENTRY_STATIC; 383 ether_addr_copy(entry.mac_addr, addr); 384 385 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 386 err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0, 387 port_priv->ethsw_data->dpsw_handle, 388 fdb_id, &entry); 389 if (err) 390 netdev_err(port_priv->netdev, 391 "dpsw_fdb_add_unicast err %d\n", err); 392 return err; 393 } 394 395 static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv, 396 const unsigned char *addr) 397 { 398 struct dpsw_fdb_unicast_cfg entry = {0}; 399 u16 fdb_id; 400 int err; 401 402 entry.if_egress = port_priv->idx; 403 entry.type = DPSW_FDB_ENTRY_STATIC; 404 ether_addr_copy(entry.mac_addr, addr); 405 406 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 407 err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0, 408 port_priv->ethsw_data->dpsw_handle, 409 fdb_id, &entry); 410 /* Silently discard error for calling multiple times the del command */ 411 if (err && err != -ENXIO) 412 netdev_err(port_priv->netdev, 413 "dpsw_fdb_remove_unicast err %d\n", err); 414 return err; 415 } 416 417 static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv, 418 const unsigned char *addr) 419 { 420 struct dpsw_fdb_multicast_cfg entry = {0}; 421 u16 fdb_id; 422 int err; 423 424 ether_addr_copy(entry.mac_addr, addr); 425 entry.type = DPSW_FDB_ENTRY_STATIC; 426 entry.num_ifs = 1; 427 entry.if_id[0] = port_priv->idx; 428 429 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 430 err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0, 431 port_priv->ethsw_data->dpsw_handle, 432 fdb_id, &entry); 433 /* Silently discard error for calling multiple times the add command */ 434 if (err && err != -ENXIO) 435 netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n", 436 err); 437 return err; 438 } 439 440 static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv, 441 const unsigned char *addr) 442 { 443 struct dpsw_fdb_multicast_cfg entry = {0}; 444 u16 fdb_id; 445 int err; 446 447 ether_addr_copy(entry.mac_addr, addr); 448 entry.type = DPSW_FDB_ENTRY_STATIC; 449 entry.num_ifs = 1; 450 entry.if_id[0] = port_priv->idx; 451 452 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 453 err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0, 454 port_priv->ethsw_data->dpsw_handle, 455 fdb_id, &entry); 456 /* Silently discard error for calling multiple times the del command */ 457 if (err && err != -ENAVAIL) 458 netdev_err(port_priv->netdev, 459 "dpsw_fdb_remove_multicast err %d\n", err); 460 return err; 461 } 462 463 static void dpaa2_switch_port_get_stats(struct net_device *netdev, 464 struct rtnl_link_stats64 *stats) 465 { 466 struct ethsw_port_priv 
*port_priv = netdev_priv(netdev); 467 u64 tmp; 468 int err; 469 470 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 471 port_priv->ethsw_data->dpsw_handle, 472 port_priv->idx, 473 DPSW_CNT_ING_FRAME, &stats->rx_packets); 474 if (err) 475 goto error; 476 477 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 478 port_priv->ethsw_data->dpsw_handle, 479 port_priv->idx, 480 DPSW_CNT_EGR_FRAME, &stats->tx_packets); 481 if (err) 482 goto error; 483 484 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 485 port_priv->ethsw_data->dpsw_handle, 486 port_priv->idx, 487 DPSW_CNT_ING_BYTE, &stats->rx_bytes); 488 if (err) 489 goto error; 490 491 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 492 port_priv->ethsw_data->dpsw_handle, 493 port_priv->idx, 494 DPSW_CNT_EGR_BYTE, &stats->tx_bytes); 495 if (err) 496 goto error; 497 498 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 499 port_priv->ethsw_data->dpsw_handle, 500 port_priv->idx, 501 DPSW_CNT_ING_FRAME_DISCARD, 502 &stats->rx_dropped); 503 if (err) 504 goto error; 505 506 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 507 port_priv->ethsw_data->dpsw_handle, 508 port_priv->idx, 509 DPSW_CNT_ING_FLTR_FRAME, 510 &tmp); 511 if (err) 512 goto error; 513 stats->rx_dropped += tmp; 514 515 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 516 port_priv->ethsw_data->dpsw_handle, 517 port_priv->idx, 518 DPSW_CNT_EGR_FRAME_DISCARD, 519 &stats->tx_dropped); 520 if (err) 521 goto error; 522 523 return; 524 525 error: 526 netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); 527 } 528 529 static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev, 530 int attr_id) 531 { 532 return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT); 533 } 534 535 static int dpaa2_switch_port_get_offload_stats(int attr_id, 536 const struct net_device *netdev, 537 void *sp) 538 { 539 switch (attr_id) { 540 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 541 dpaa2_switch_port_get_stats((struct net_device *)netdev, sp); 542 return 0; 543 } 544 545 return -EINVAL; 546 } 547 548 static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) 549 { 550 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 551 int err; 552 553 err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io, 554 0, 555 port_priv->ethsw_data->dpsw_handle, 556 port_priv->idx, 557 (u16)ETHSW_L2_MAX_FRM(mtu)); 558 if (err) { 559 netdev_err(netdev, 560 "dpsw_if_set_max_frame_length() err %d\n", err); 561 return err; 562 } 563 564 netdev->mtu = mtu; 565 return 0; 566 } 567 568 static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev) 569 { 570 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 571 struct dpsw_link_state state; 572 int err; 573 574 /* Interrupts are received even though no one issued an 'ifconfig up' 575 * on the switch interface. 
Ignore these link state update interrupts 576 */ 577 if (!netif_running(netdev)) 578 return 0; 579 580 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, 581 port_priv->ethsw_data->dpsw_handle, 582 port_priv->idx, &state); 583 if (err) { 584 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); 585 return err; 586 } 587 588 WARN_ONCE(state.up > 1, "Garbage read into link_state"); 589 590 if (state.up != port_priv->link_state) { 591 if (state.up) { 592 netif_carrier_on(netdev); 593 netif_tx_start_all_queues(netdev); 594 } else { 595 netif_carrier_off(netdev); 596 netif_tx_stop_all_queues(netdev); 597 } 598 port_priv->link_state = state.up; 599 } 600 601 return 0; 602 } 603 604 /* Manage all NAPI instances for the control interface. 605 * 606 * We only have one RX queue and one Tx Conf queue for all 607 * switch ports. Therefore, we only need to enable the NAPI instance once, the 608 * first time one of the switch ports runs .dev_open(). 609 */ 610 611 static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw) 612 { 613 int i; 614 615 /* Access to the ethsw->napi_users relies on the RTNL lock */ 616 ASSERT_RTNL(); 617 618 /* a new interface is using the NAPI instance */ 619 ethsw->napi_users++; 620 621 /* if there is already a user of the instance, return */ 622 if (ethsw->napi_users > 1) 623 return; 624 625 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 626 napi_enable(ðsw->fq[i].napi); 627 } 628 629 static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw) 630 { 631 int i; 632 633 /* Access to the ethsw->napi_users relies on the RTNL lock */ 634 ASSERT_RTNL(); 635 636 /* If we are not the last interface using the NAPI, return */ 637 ethsw->napi_users--; 638 if (ethsw->napi_users) 639 return; 640 641 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 642 napi_disable(ðsw->fq[i].napi); 643 } 644 645 static int dpaa2_switch_port_open(struct net_device *netdev) 646 { 647 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 648 struct ethsw_core *ethsw = port_priv->ethsw_data; 649 int err; 650 651 /* Explicitly set carrier off, otherwise 652 * netif_carrier_ok() will return true and cause 'ip link show' 653 * to report the LOWER_UP flag, even though the link 654 * notification wasn't even received. 
655 */ 656 netif_carrier_off(netdev); 657 658 err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, 659 port_priv->ethsw_data->dpsw_handle, 660 port_priv->idx); 661 if (err) { 662 netdev_err(netdev, "dpsw_if_enable err %d\n", err); 663 return err; 664 } 665 666 /* sync carrier state */ 667 err = dpaa2_switch_port_carrier_state_sync(netdev); 668 if (err) { 669 netdev_err(netdev, 670 "dpaa2_switch_port_carrier_state_sync err %d\n", err); 671 goto err_carrier_sync; 672 } 673 674 dpaa2_switch_enable_ctrl_if_napi(ethsw); 675 676 return 0; 677 678 err_carrier_sync: 679 dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, 680 port_priv->ethsw_data->dpsw_handle, 681 port_priv->idx); 682 return err; 683 } 684 685 static int dpaa2_switch_port_stop(struct net_device *netdev) 686 { 687 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 688 struct ethsw_core *ethsw = port_priv->ethsw_data; 689 int err; 690 691 err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, 692 port_priv->ethsw_data->dpsw_handle, 693 port_priv->idx); 694 if (err) { 695 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 696 return err; 697 } 698 699 dpaa2_switch_disable_ctrl_if_napi(ethsw); 700 701 return 0; 702 } 703 704 static int dpaa2_switch_port_parent_id(struct net_device *dev, 705 struct netdev_phys_item_id *ppid) 706 { 707 struct ethsw_port_priv *port_priv = netdev_priv(dev); 708 709 ppid->id_len = 1; 710 ppid->id[0] = port_priv->ethsw_data->dev_id; 711 712 return 0; 713 } 714 715 static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name, 716 size_t len) 717 { 718 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 719 int err; 720 721 err = snprintf(name, len, "p%d", port_priv->idx); 722 if (err >= len) 723 return -EINVAL; 724 725 return 0; 726 } 727 728 struct ethsw_dump_ctx { 729 struct net_device *dev; 730 struct sk_buff *skb; 731 struct netlink_callback *cb; 732 int idx; 733 }; 734 735 static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, 736 struct ethsw_dump_ctx *dump) 737 { 738 int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; 739 u32 portid = NETLINK_CB(dump->cb->skb).portid; 740 u32 seq = dump->cb->nlh->nlmsg_seq; 741 struct nlmsghdr *nlh; 742 struct ndmsg *ndm; 743 744 if (dump->idx < dump->cb->args[2]) 745 goto skip; 746 747 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, 748 sizeof(*ndm), NLM_F_MULTI); 749 if (!nlh) 750 return -EMSGSIZE; 751 752 ndm = nlmsg_data(nlh); 753 ndm->ndm_family = AF_BRIDGE; 754 ndm->ndm_pad1 = 0; 755 ndm->ndm_pad2 = 0; 756 ndm->ndm_flags = NTF_SELF; 757 ndm->ndm_type = 0; 758 ndm->ndm_ifindex = dump->dev->ifindex; 759 ndm->ndm_state = is_dynamic ? 
NUD_REACHABLE : NUD_NOARP; 760 761 if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr)) 762 goto nla_put_failure; 763 764 nlmsg_end(dump->skb, nlh); 765 766 skip: 767 dump->idx++; 768 return 0; 769 770 nla_put_failure: 771 nlmsg_cancel(dump->skb, nlh); 772 return -EMSGSIZE; 773 } 774 775 static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry, 776 struct ethsw_port_priv *port_priv) 777 { 778 int idx = port_priv->idx; 779 int valid; 780 781 if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) 782 valid = entry->if_info == port_priv->idx; 783 else 784 valid = entry->if_mask[idx / 8] & BIT(idx % 8); 785 786 return valid; 787 } 788 789 static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv, 790 dpaa2_switch_fdb_cb_t cb, void *data) 791 { 792 struct net_device *net_dev = port_priv->netdev; 793 struct ethsw_core *ethsw = port_priv->ethsw_data; 794 struct device *dev = net_dev->dev.parent; 795 struct fdb_dump_entry *fdb_entries; 796 struct fdb_dump_entry fdb_entry; 797 dma_addr_t fdb_dump_iova; 798 u16 num_fdb_entries; 799 u32 fdb_dump_size; 800 int err = 0, i; 801 u8 *dma_mem; 802 u16 fdb_id; 803 804 fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry); 805 dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL); 806 if (!dma_mem) 807 return -ENOMEM; 808 809 fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size, 810 DMA_FROM_DEVICE); 811 if (dma_mapping_error(dev, fdb_dump_iova)) { 812 netdev_err(net_dev, "dma_map_single() failed\n"); 813 err = -ENOMEM; 814 goto err_map; 815 } 816 817 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 818 err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id, 819 fdb_dump_iova, fdb_dump_size, &num_fdb_entries); 820 if (err) { 821 netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err); 822 goto err_dump; 823 } 824 825 dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE); 826 827 fdb_entries = (struct fdb_dump_entry *)dma_mem; 828 for (i = 0; i < num_fdb_entries; i++) { 829 fdb_entry = fdb_entries[i]; 830 831 err = cb(port_priv, &fdb_entry, data); 832 if (err) 833 goto end; 834 } 835 836 end: 837 kfree(dma_mem); 838 839 return 0; 840 841 err_dump: 842 dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE); 843 err_map: 844 kfree(dma_mem); 845 return err; 846 } 847 848 static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv, 849 struct fdb_dump_entry *fdb_entry, 850 void *data) 851 { 852 if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) 853 return 0; 854 855 return dpaa2_switch_fdb_dump_nl(fdb_entry, data); 856 } 857 858 static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 859 struct net_device *net_dev, 860 struct net_device *filter_dev, int *idx) 861 { 862 struct ethsw_port_priv *port_priv = netdev_priv(net_dev); 863 struct ethsw_dump_ctx dump = { 864 .dev = net_dev, 865 .skb = skb, 866 .cb = cb, 867 .idx = *idx, 868 }; 869 int err; 870 871 err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump); 872 *idx = dump.idx; 873 874 return err; 875 } 876 877 static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv, 878 struct fdb_dump_entry *fdb_entry, 879 void *data __always_unused) 880 { 881 if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) 882 return 0; 883 884 if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC)) 885 return 0; 886 887 if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) 888 dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr); 889 else 890 
dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr); 891 892 return 0; 893 } 894 895 static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv) 896 { 897 dpaa2_switch_fdb_iterate(port_priv, 898 dpaa2_switch_fdb_entry_fast_age, NULL); 899 } 900 901 static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto, 902 u16 vid) 903 { 904 struct switchdev_obj_port_vlan vlan = { 905 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 906 .vid = vid, 907 .obj.orig_dev = netdev, 908 /* This API only allows programming tagged, non-PVID VIDs */ 909 .flags = 0, 910 }; 911 912 return dpaa2_switch_port_vlans_add(netdev, &vlan); 913 } 914 915 static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto, 916 u16 vid) 917 { 918 struct switchdev_obj_port_vlan vlan = { 919 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 920 .vid = vid, 921 .obj.orig_dev = netdev, 922 /* This API only allows programming tagged, non-PVID VIDs */ 923 .flags = 0, 924 }; 925 926 return dpaa2_switch_port_vlans_del(netdev, &vlan); 927 } 928 929 static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) 930 { 931 struct ethsw_core *ethsw = port_priv->ethsw_data; 932 struct net_device *net_dev = port_priv->netdev; 933 struct device *dev = net_dev->dev.parent; 934 u8 mac_addr[ETH_ALEN]; 935 int err; 936 937 if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR)) 938 return 0; 939 940 /* Get firmware address, if any */ 941 err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle, 942 port_priv->idx, mac_addr); 943 if (err) { 944 dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n"); 945 return err; 946 } 947 948 /* First check if firmware has any address configured by bootloader */ 949 if (!is_zero_ether_addr(mac_addr)) { 950 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); 951 } else { 952 /* No MAC address configured, fill in net_dev->dev_addr 953 * with a random one 954 */ 955 eth_hw_addr_random(net_dev); 956 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); 957 958 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all 959 * practical purposes, this will be our "permanent" mac address, 960 * at least until the next reboot. This move will also permit 961 * register_netdevice() to properly fill up net_dev->perm_addr. 962 */ 963 net_dev->addr_assign_type = NET_ADDR_PERM; 964 } 965 966 return 0; 967 } 968 969 static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw, 970 const struct dpaa2_fd *fd) 971 { 972 struct device *dev = ethsw->dev; 973 unsigned char *buffer_start; 974 struct sk_buff **skbh, *skb; 975 dma_addr_t fd_addr; 976 977 fd_addr = dpaa2_fd_get_addr(fd); 978 skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr); 979 980 skb = *skbh; 981 buffer_start = (unsigned char *)skbh; 982 983 dma_unmap_single(dev, fd_addr, 984 skb_tail_pointer(skb) - buffer_start, 985 DMA_TO_DEVICE); 986 987 /* Move on with skb release */ 988 dev_kfree_skb(skb); 989 } 990 991 static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw, 992 struct sk_buff *skb, 993 struct dpaa2_fd *fd) 994 { 995 struct device *dev = ethsw->dev; 996 struct sk_buff **skbh; 997 dma_addr_t addr; 998 u8 *buff_start; 999 void *hwa; 1000 1001 buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET - 1002 DPAA2_SWITCH_TX_BUF_ALIGN, 1003 DPAA2_SWITCH_TX_BUF_ALIGN); 1004 1005 /* Clear FAS to have consistent values for TX confirmation. 
It is 1006 * located in the first 8 bytes of the buffer's hardware annotation 1007 * area 1008 */ 1009 hwa = buff_start + DPAA2_SWITCH_SWA_SIZE; 1010 memset(hwa, 0, 8); 1011 1012 /* Store a backpointer to the skb at the beginning of the buffer 1013 * (in the private data area) such that we can release it 1014 * on Tx confirm 1015 */ 1016 skbh = (struct sk_buff **)buff_start; 1017 *skbh = skb; 1018 1019 addr = dma_map_single(dev, buff_start, 1020 skb_tail_pointer(skb) - buff_start, 1021 DMA_TO_DEVICE); 1022 if (unlikely(dma_mapping_error(dev, addr))) 1023 return -ENOMEM; 1024 1025 /* Setup the FD fields */ 1026 memset(fd, 0, sizeof(*fd)); 1027 1028 dpaa2_fd_set_addr(fd, addr); 1029 dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start)); 1030 dpaa2_fd_set_len(fd, skb->len); 1031 dpaa2_fd_set_format(fd, dpaa2_fd_single); 1032 1033 return 0; 1034 } 1035 1036 static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb, 1037 struct net_device *net_dev) 1038 { 1039 struct ethsw_port_priv *port_priv = netdev_priv(net_dev); 1040 struct ethsw_core *ethsw = port_priv->ethsw_data; 1041 int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES; 1042 struct dpaa2_fd fd; 1043 int err; 1044 1045 if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) { 1046 struct sk_buff *ns; 1047 1048 ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM); 1049 if (unlikely(!ns)) { 1050 net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name); 1051 goto err_free_skb; 1052 } 1053 dev_consume_skb_any(skb); 1054 skb = ns; 1055 } 1056 1057 /* We'll be holding a back-reference to the skb until Tx confirmation */ 1058 skb = skb_unshare(skb, GFP_ATOMIC); 1059 if (unlikely(!skb)) { 1060 /* skb_unshare() has already freed the skb */ 1061 net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name); 1062 goto err_exit; 1063 } 1064 1065 /* At this stage, we do not support non-linear skbs so just try to 1066 * linearize the skb and if that's not working, just drop the packet. 
1067 */ 1068 err = skb_linearize(skb); 1069 if (err) { 1070 net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err); 1071 goto err_free_skb; 1072 } 1073 1074 err = dpaa2_switch_build_single_fd(ethsw, skb, &fd); 1075 if (unlikely(err)) { 1076 net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err); 1077 goto err_free_skb; 1078 } 1079 1080 do { 1081 err = dpaa2_io_service_enqueue_qd(NULL, 1082 port_priv->tx_qdid, 1083 8, 0, &fd); 1084 retries--; 1085 } while (err == -EBUSY && retries); 1086 1087 if (unlikely(err < 0)) { 1088 dpaa2_switch_free_fd(ethsw, &fd); 1089 goto err_exit; 1090 } 1091 1092 return NETDEV_TX_OK; 1093 1094 err_free_skb: 1095 dev_kfree_skb(skb); 1096 err_exit: 1097 return NETDEV_TX_OK; 1098 } 1099 1100 static const struct net_device_ops dpaa2_switch_port_ops = { 1101 .ndo_open = dpaa2_switch_port_open, 1102 .ndo_stop = dpaa2_switch_port_stop, 1103 1104 .ndo_set_mac_address = eth_mac_addr, 1105 .ndo_get_stats64 = dpaa2_switch_port_get_stats, 1106 .ndo_change_mtu = dpaa2_switch_port_change_mtu, 1107 .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats, 1108 .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats, 1109 .ndo_fdb_dump = dpaa2_switch_port_fdb_dump, 1110 .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add, 1111 .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill, 1112 1113 .ndo_start_xmit = dpaa2_switch_port_tx, 1114 .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, 1115 .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, 1116 }; 1117 1118 bool dpaa2_switch_port_dev_check(const struct net_device *netdev) 1119 { 1120 return netdev->netdev_ops == &dpaa2_switch_port_ops; 1121 } 1122 1123 static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw) 1124 { 1125 int i; 1126 1127 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 1128 dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev); 1129 dpaa2_switch_port_set_mac_addr(ethsw->ports[i]); 1130 } 1131 } 1132 1133 static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) 1134 { 1135 struct device *dev = (struct device *)arg; 1136 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1137 1138 /* Mask the events and the if_id reserved bits to be cleared on read */ 1139 u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; 1140 int err; 1141 1142 err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, 1143 DPSW_IRQ_INDEX_IF, &status); 1144 if (err) { 1145 dev_err(dev, "Can't get irq status (err %d)\n", err); 1146 1147 err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, 1148 DPSW_IRQ_INDEX_IF, 0xFFFFFFFF); 1149 if (err) 1150 dev_err(dev, "Can't clear irq status (err %d)\n", err); 1151 goto out; 1152 } 1153 1154 if (status & DPSW_IRQ_EVENT_LINK_CHANGED) 1155 dpaa2_switch_links_state_update(ethsw); 1156 1157 out: 1158 return IRQ_HANDLED; 1159 } 1160 1161 static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev) 1162 { 1163 struct device *dev = &sw_dev->dev; 1164 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1165 u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED; 1166 struct fsl_mc_device_irq *irq; 1167 int err; 1168 1169 err = fsl_mc_allocate_irqs(sw_dev); 1170 if (err) { 1171 dev_err(dev, "MC irqs allocation failed\n"); 1172 return err; 1173 } 1174 1175 if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) { 1176 err = -EINVAL; 1177 goto free_irq; 1178 } 1179 1180 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1181 DPSW_IRQ_INDEX_IF, 0); 1182 if (err) { 1183 dev_err(dev, "dpsw_set_irq_enable err %d\n", 
err); 1184 goto free_irq; 1185 } 1186 1187 irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF]; 1188 1189 err = devm_request_threaded_irq(dev, irq->msi_desc->irq, 1190 NULL, 1191 dpaa2_switch_irq0_handler_thread, 1192 IRQF_NO_SUSPEND | IRQF_ONESHOT, 1193 dev_name(dev), dev); 1194 if (err) { 1195 dev_err(dev, "devm_request_threaded_irq(): %d\n", err); 1196 goto free_irq; 1197 } 1198 1199 err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle, 1200 DPSW_IRQ_INDEX_IF, mask); 1201 if (err) { 1202 dev_err(dev, "dpsw_set_irq_mask(): %d\n", err); 1203 goto free_devm_irq; 1204 } 1205 1206 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1207 DPSW_IRQ_INDEX_IF, 1); 1208 if (err) { 1209 dev_err(dev, "dpsw_set_irq_enable(): %d\n", err); 1210 goto free_devm_irq; 1211 } 1212 1213 return 0; 1214 1215 free_devm_irq: 1216 devm_free_irq(dev, irq->msi_desc->irq, dev); 1217 free_irq: 1218 fsl_mc_free_irqs(sw_dev); 1219 return err; 1220 } 1221 1222 static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev) 1223 { 1224 struct device *dev = &sw_dev->dev; 1225 struct ethsw_core *ethsw = dev_get_drvdata(dev); 1226 int err; 1227 1228 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle, 1229 DPSW_IRQ_INDEX_IF, 0); 1230 if (err) 1231 dev_err(dev, "dpsw_set_irq_enable err %d\n", err); 1232 1233 fsl_mc_free_irqs(sw_dev); 1234 } 1235 1236 static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev, 1237 u8 state) 1238 { 1239 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1240 1241 return dpaa2_switch_port_set_stp_state(port_priv, state); 1242 } 1243 1244 static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable) 1245 { 1246 struct ethsw_core *ethsw = port_priv->ethsw_data; 1247 enum dpsw_learning_mode learn_mode; 1248 int err; 1249 1250 if (enable) 1251 learn_mode = DPSW_LEARNING_MODE_HW; 1252 else 1253 learn_mode = DPSW_LEARNING_MODE_DIS; 1254 1255 err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 1256 port_priv->idx, learn_mode); 1257 if (err) 1258 netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err); 1259 1260 if (!enable) 1261 dpaa2_switch_port_fast_age(port_priv); 1262 1263 return err; 1264 } 1265 1266 static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv, 1267 struct switchdev_brport_flags flags) 1268 { 1269 struct ethsw_core *ethsw = port_priv->ethsw_data; 1270 1271 if (flags.mask & BR_BCAST_FLOOD) 1272 port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD); 1273 1274 if (flags.mask & BR_FLOOD) 1275 port_priv->ucast_flood = !!(flags.val & BR_FLOOD); 1276 1277 return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 1278 } 1279 1280 static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev, 1281 struct switchdev_brport_flags flags, 1282 struct netlink_ext_ack *extack) 1283 { 1284 if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD | 1285 BR_MCAST_FLOOD)) 1286 return -EINVAL; 1287 1288 if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) { 1289 bool multicast = !!(flags.val & BR_MCAST_FLOOD); 1290 bool unicast = !!(flags.val & BR_FLOOD); 1291 1292 if (unicast != multicast) { 1293 NL_SET_ERR_MSG_MOD(extack, 1294 "Cannot configure multicast flooding independently of unicast"); 1295 return -EINVAL; 1296 } 1297 } 1298 1299 return 0; 1300 } 1301 1302 static int dpaa2_switch_port_bridge_flags(struct net_device *netdev, 1303 struct switchdev_brport_flags flags, 1304 struct netlink_ext_ack *extack) 1305 { 1306 struct ethsw_port_priv *port_priv = 
netdev_priv(netdev); 1307 int err; 1308 1309 if (flags.mask & BR_LEARNING) { 1310 bool learn_ena = !!(flags.val & BR_LEARNING); 1311 1312 err = dpaa2_switch_port_set_learning(port_priv, learn_ena); 1313 if (err) 1314 return err; 1315 } 1316 1317 if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) { 1318 err = dpaa2_switch_port_flood(port_priv, flags); 1319 if (err) 1320 return err; 1321 } 1322 1323 return 0; 1324 } 1325 1326 static int dpaa2_switch_port_attr_set(struct net_device *netdev, 1327 const struct switchdev_attr *attr, 1328 struct netlink_ext_ack *extack) 1329 { 1330 int err = 0; 1331 1332 switch (attr->id) { 1333 case SWITCHDEV_ATTR_ID_PORT_STP_STATE: 1334 err = dpaa2_switch_port_attr_stp_state_set(netdev, 1335 attr->u.stp_state); 1336 break; 1337 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: 1338 if (!attr->u.vlan_filtering) { 1339 NL_SET_ERR_MSG_MOD(extack, 1340 "The DPAA2 switch does not support VLAN-unaware operation"); 1341 return -EOPNOTSUPP; 1342 } 1343 break; 1344 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: 1345 err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack); 1346 break; 1347 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 1348 err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack); 1349 break; 1350 default: 1351 err = -EOPNOTSUPP; 1352 break; 1353 } 1354 1355 return err; 1356 } 1357 1358 int dpaa2_switch_port_vlans_add(struct net_device *netdev, 1359 const struct switchdev_obj_port_vlan *vlan) 1360 { 1361 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1362 struct ethsw_core *ethsw = port_priv->ethsw_data; 1363 struct dpsw_attr *attr = ðsw->sw_attr; 1364 int err = 0; 1365 1366 /* Make sure that the VLAN is not already configured 1367 * on the switch port 1368 */ 1369 if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) 1370 return -EEXIST; 1371 1372 /* Check if there is space for a new VLAN */ 1373 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 1374 ðsw->sw_attr); 1375 if (err) { 1376 netdev_err(netdev, "dpsw_get_attributes err %d\n", err); 1377 return err; 1378 } 1379 if (attr->max_vlans - attr->num_vlans < 1) 1380 return -ENOSPC; 1381 1382 /* Check if there is space for a new VLAN */ 1383 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 1384 ðsw->sw_attr); 1385 if (err) { 1386 netdev_err(netdev, "dpsw_get_attributes err %d\n", err); 1387 return err; 1388 } 1389 if (attr->max_vlans - attr->num_vlans < 1) 1390 return -ENOSPC; 1391 1392 if (!port_priv->ethsw_data->vlans[vlan->vid]) { 1393 /* this is a new VLAN */ 1394 err = dpaa2_switch_add_vlan(port_priv, vlan->vid); 1395 if (err) 1396 return err; 1397 1398 port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL; 1399 } 1400 1401 return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags); 1402 } 1403 1404 static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc, 1405 const unsigned char *addr) 1406 { 1407 struct netdev_hw_addr_list *list = (is_uc) ? 
&netdev->uc : &netdev->mc; 1408 struct netdev_hw_addr *ha; 1409 1410 netif_addr_lock_bh(netdev); 1411 list_for_each_entry(ha, &list->list, list) { 1412 if (ether_addr_equal(ha->addr, addr)) { 1413 netif_addr_unlock_bh(netdev); 1414 return 1; 1415 } 1416 } 1417 netif_addr_unlock_bh(netdev); 1418 return 0; 1419 } 1420 1421 static int dpaa2_switch_port_mdb_add(struct net_device *netdev, 1422 const struct switchdev_obj_port_mdb *mdb) 1423 { 1424 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1425 int err; 1426 1427 /* Check if address is already set on this port */ 1428 if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) 1429 return -EEXIST; 1430 1431 err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr); 1432 if (err) 1433 return err; 1434 1435 err = dev_mc_add(netdev, mdb->addr); 1436 if (err) { 1437 netdev_err(netdev, "dev_mc_add err %d\n", err); 1438 dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); 1439 } 1440 1441 return err; 1442 } 1443 1444 static int dpaa2_switch_port_obj_add(struct net_device *netdev, 1445 const struct switchdev_obj *obj) 1446 { 1447 int err; 1448 1449 switch (obj->id) { 1450 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1451 err = dpaa2_switch_port_vlans_add(netdev, 1452 SWITCHDEV_OBJ_PORT_VLAN(obj)); 1453 break; 1454 case SWITCHDEV_OBJ_ID_PORT_MDB: 1455 err = dpaa2_switch_port_mdb_add(netdev, 1456 SWITCHDEV_OBJ_PORT_MDB(obj)); 1457 break; 1458 default: 1459 err = -EOPNOTSUPP; 1460 break; 1461 } 1462 1463 return err; 1464 } 1465 1466 static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid) 1467 { 1468 struct ethsw_core *ethsw = port_priv->ethsw_data; 1469 struct net_device *netdev = port_priv->netdev; 1470 struct dpsw_vlan_if_cfg vcfg; 1471 int i, err; 1472 1473 if (!port_priv->vlans[vid]) 1474 return -ENOENT; 1475 1476 if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) { 1477 /* If we are deleting the PVID of a port, use VLAN 4095 instead 1478 * as we are sure that neither the bridge nor the 8021q module 1479 * will use it 1480 */ 1481 err = dpaa2_switch_port_set_pvid(port_priv, 4095); 1482 if (err) 1483 return err; 1484 } 1485 1486 vcfg.num_ifs = 1; 1487 vcfg.if_id[0] = port_priv->idx; 1488 if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) { 1489 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, 1490 ethsw->dpsw_handle, 1491 vid, &vcfg); 1492 if (err) { 1493 netdev_err(netdev, 1494 "dpsw_vlan_remove_if_untagged err %d\n", 1495 err); 1496 } 1497 port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED; 1498 } 1499 1500 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { 1501 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, 1502 vid, &vcfg); 1503 if (err) { 1504 netdev_err(netdev, 1505 "dpsw_vlan_remove_if err %d\n", err); 1506 return err; 1507 } 1508 port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER; 1509 1510 /* Delete VLAN from switch if it is no longer configured on 1511 * any port 1512 */ 1513 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) 1514 if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) 1515 return 0; /* Found a port member in VID */ 1516 1517 ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL; 1518 1519 err = dpaa2_switch_dellink(ethsw, vid); 1520 if (err) 1521 return err; 1522 } 1523 1524 return 0; 1525 } 1526 1527 int dpaa2_switch_port_vlans_del(struct net_device *netdev, 1528 const struct switchdev_obj_port_vlan *vlan) 1529 { 1530 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1531 1532 if (netif_is_bridge_master(vlan->obj.orig_dev)) 1533 return -EOPNOTSUPP; 1534 1535 return dpaa2_switch_port_del_vlan(port_priv, vlan->vid); 
1536 } 1537 1538 static int dpaa2_switch_port_mdb_del(struct net_device *netdev, 1539 const struct switchdev_obj_port_mdb *mdb) 1540 { 1541 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1542 int err; 1543 1544 if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr)) 1545 return -ENOENT; 1546 1547 err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr); 1548 if (err) 1549 return err; 1550 1551 err = dev_mc_del(netdev, mdb->addr); 1552 if (err) { 1553 netdev_err(netdev, "dev_mc_del err %d\n", err); 1554 return err; 1555 } 1556 1557 return err; 1558 } 1559 1560 static int dpaa2_switch_port_obj_del(struct net_device *netdev, 1561 const struct switchdev_obj *obj) 1562 { 1563 int err; 1564 1565 switch (obj->id) { 1566 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1567 err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj)); 1568 break; 1569 case SWITCHDEV_OBJ_ID_PORT_MDB: 1570 err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj)); 1571 break; 1572 default: 1573 err = -EOPNOTSUPP; 1574 break; 1575 } 1576 return err; 1577 } 1578 1579 static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, 1580 struct switchdev_notifier_port_attr_info *ptr) 1581 { 1582 int err; 1583 1584 err = switchdev_handle_port_attr_set(netdev, ptr, 1585 dpaa2_switch_port_dev_check, 1586 dpaa2_switch_port_attr_set); 1587 return notifier_from_errno(err); 1588 } 1589 1590 static int dpaa2_switch_port_bridge_join(struct net_device *netdev, 1591 struct net_device *upper_dev) 1592 { 1593 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1594 struct ethsw_core *ethsw = port_priv->ethsw_data; 1595 struct ethsw_port_priv *other_port_priv; 1596 struct net_device *other_dev; 1597 struct list_head *iter; 1598 bool learn_ena; 1599 int err; 1600 1601 netdev_for_each_lower_dev(upper_dev, other_dev, iter) { 1602 if (!dpaa2_switch_port_dev_check(other_dev)) 1603 continue; 1604 1605 other_port_priv = netdev_priv(other_dev); 1606 if (other_port_priv->ethsw_data != port_priv->ethsw_data) { 1607 netdev_err(netdev, 1608 "Interface from a different DPSW is in the bridge already!\n"); 1609 return -EINVAL; 1610 } 1611 } 1612 1613 /* Delete the previously manually installed VLAN 1 */ 1614 err = dpaa2_switch_port_del_vlan(port_priv, 1); 1615 if (err) 1616 return err; 1617 1618 dpaa2_switch_port_set_fdb(port_priv, upper_dev); 1619 1620 /* Inherit the initial bridge port learning state */ 1621 learn_ena = br_port_flag_is_set(netdev, BR_LEARNING); 1622 err = dpaa2_switch_port_set_learning(port_priv, learn_ena); 1623 1624 /* Setup the egress flood policy (broadcast, unknown unicast) */ 1625 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 1626 if (err) 1627 goto err_egress_flood; 1628 1629 return 0; 1630 1631 err_egress_flood: 1632 dpaa2_switch_port_set_fdb(port_priv, NULL); 1633 return err; 1634 } 1635 1636 static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg) 1637 { 1638 __be16 vlan_proto = htons(ETH_P_8021Q); 1639 1640 if (vdev) 1641 vlan_proto = vlan_dev_vlan_proto(vdev); 1642 1643 return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid); 1644 } 1645 1646 static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg) 1647 { 1648 __be16 vlan_proto = htons(ETH_P_8021Q); 1649 1650 if (vdev) 1651 vlan_proto = vlan_dev_vlan_proto(vdev); 1652 1653 return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid); 1654 } 1655 1656 static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) 1657 { 1658 struct ethsw_port_priv 
*port_priv = netdev_priv(netdev); 1659 struct dpaa2_switch_fdb *old_fdb = port_priv->fdb; 1660 struct ethsw_core *ethsw = port_priv->ethsw_data; 1661 int err; 1662 1663 /* First of all, fast age any learn FDB addresses on this switch port */ 1664 dpaa2_switch_port_fast_age(port_priv); 1665 1666 /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN 1667 * upper devices or otherwise from the FDB table that we are about to 1668 * leave 1669 */ 1670 err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev); 1671 if (err) 1672 netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err); 1673 1674 dpaa2_switch_port_set_fdb(port_priv, NULL); 1675 1676 /* Restore all RX VLANs into the new FDB table that we just joined */ 1677 err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev); 1678 if (err) 1679 netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err); 1680 1681 /* Reset the flooding state to denote that this port can send any 1682 * packet in standalone mode. With this, we are also ensuring that any 1683 * later bridge join will have the flooding flag on. 1684 */ 1685 port_priv->bcast_flood = true; 1686 port_priv->ucast_flood = true; 1687 1688 /* Setup the egress flood policy (broadcast, unknown unicast). 1689 * When the port is not under a bridge, only the CTRL interface is part 1690 * of the flooding domain besides the actual port 1691 */ 1692 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); 1693 if (err) 1694 return err; 1695 1696 /* Recreate the egress flood domain of the FDB that we just left */ 1697 err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id); 1698 if (err) 1699 return err; 1700 1701 /* No HW learning when not under a bridge */ 1702 err = dpaa2_switch_port_set_learning(port_priv, false); 1703 if (err) 1704 return err; 1705 1706 /* Add the VLAN 1 as PVID when not under a bridge. We need this since 1707 * the dpaa2 switch interfaces are not capable to be VLAN unaware 1708 */ 1709 return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID, 1710 BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID); 1711 } 1712 1713 static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev) 1714 { 1715 struct net_device *upper_dev; 1716 struct list_head *iter; 1717 1718 /* RCU read lock not necessary because we have write-side protection 1719 * (rtnl_mutex), however a non-rcu iterator does not exist. 
1720 */ 1721 netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter) 1722 if (is_vlan_dev(upper_dev)) 1723 return -EOPNOTSUPP; 1724 1725 return 0; 1726 } 1727 1728 static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, 1729 unsigned long event, void *ptr) 1730 { 1731 struct net_device *netdev = netdev_notifier_info_to_dev(ptr); 1732 struct netdev_notifier_changeupper_info *info = ptr; 1733 struct netlink_ext_ack *extack; 1734 struct net_device *upper_dev; 1735 int err = 0; 1736 1737 if (!dpaa2_switch_port_dev_check(netdev)) 1738 return NOTIFY_DONE; 1739 1740 extack = netdev_notifier_info_to_extack(&info->info); 1741 1742 switch (event) { 1743 case NETDEV_PRECHANGEUPPER: 1744 upper_dev = info->upper_dev; 1745 if (!netif_is_bridge_master(upper_dev)) 1746 break; 1747 1748 if (!br_vlan_enabled(upper_dev)) { 1749 NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); 1750 err = -EOPNOTSUPP; 1751 goto out; 1752 } 1753 1754 err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); 1755 if (err) { 1756 NL_SET_ERR_MSG_MOD(extack, 1757 "Cannot join a bridge while VLAN uppers are present"); 1758 goto out; 1759 } 1760 1761 break; 1762 case NETDEV_CHANGEUPPER: 1763 upper_dev = info->upper_dev; 1764 if (netif_is_bridge_master(upper_dev)) { 1765 if (info->linking) 1766 err = dpaa2_switch_port_bridge_join(netdev, upper_dev); 1767 else 1768 err = dpaa2_switch_port_bridge_leave(netdev); 1769 } 1770 break; 1771 } 1772 1773 out: 1774 return notifier_from_errno(err); 1775 } 1776 1777 struct ethsw_switchdev_event_work { 1778 struct work_struct work; 1779 struct switchdev_notifier_fdb_info fdb_info; 1780 struct net_device *dev; 1781 unsigned long event; 1782 }; 1783 1784 static void dpaa2_switch_event_work(struct work_struct *work) 1785 { 1786 struct ethsw_switchdev_event_work *switchdev_work = 1787 container_of(work, struct ethsw_switchdev_event_work, work); 1788 struct net_device *dev = switchdev_work->dev; 1789 struct switchdev_notifier_fdb_info *fdb_info; 1790 int err; 1791 1792 rtnl_lock(); 1793 fdb_info = &switchdev_work->fdb_info; 1794 1795 switch (switchdev_work->event) { 1796 case SWITCHDEV_FDB_ADD_TO_DEVICE: 1797 if (!fdb_info->added_by_user) 1798 break; 1799 if (is_unicast_ether_addr(fdb_info->addr)) 1800 err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev), 1801 fdb_info->addr); 1802 else 1803 err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev), 1804 fdb_info->addr); 1805 if (err) 1806 break; 1807 fdb_info->offloaded = true; 1808 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, 1809 &fdb_info->info, NULL); 1810 break; 1811 case SWITCHDEV_FDB_DEL_TO_DEVICE: 1812 if (!fdb_info->added_by_user) 1813 break; 1814 if (is_unicast_ether_addr(fdb_info->addr)) 1815 dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr); 1816 else 1817 dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr); 1818 break; 1819 } 1820 1821 rtnl_unlock(); 1822 kfree(switchdev_work->fdb_info.addr); 1823 kfree(switchdev_work); 1824 dev_put(dev); 1825 } 1826 1827 /* Called under rcu_read_lock() */ 1828 static int dpaa2_switch_port_event(struct notifier_block *nb, 1829 unsigned long event, void *ptr) 1830 { 1831 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 1832 struct ethsw_port_priv *port_priv = netdev_priv(dev); 1833 struct ethsw_switchdev_event_work *switchdev_work; 1834 struct switchdev_notifier_fdb_info *fdb_info = ptr; 1835 struct ethsw_core *ethsw = port_priv->ethsw_data; 1836 1837 if (event == SWITCHDEV_PORT_ATTR_SET) 1838 return 
dpaa2_switch_port_attr_set_event(dev, ptr); 1839 1840 if (!dpaa2_switch_port_dev_check(dev)) 1841 return NOTIFY_DONE; 1842 1843 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); 1844 if (!switchdev_work) 1845 return NOTIFY_BAD; 1846 1847 INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work); 1848 switchdev_work->dev = dev; 1849 switchdev_work->event = event; 1850 1851 switch (event) { 1852 case SWITCHDEV_FDB_ADD_TO_DEVICE: 1853 case SWITCHDEV_FDB_DEL_TO_DEVICE: 1854 memcpy(&switchdev_work->fdb_info, ptr, 1855 sizeof(switchdev_work->fdb_info)); 1856 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 1857 if (!switchdev_work->fdb_info.addr) 1858 goto err_addr_alloc; 1859 1860 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 1861 fdb_info->addr); 1862 1863 /* Take a reference on the device to avoid being freed. */ 1864 dev_hold(dev); 1865 break; 1866 default: 1867 kfree(switchdev_work); 1868 return NOTIFY_DONE; 1869 } 1870 1871 queue_work(ethsw->workqueue, &switchdev_work->work); 1872 1873 return NOTIFY_DONE; 1874 1875 err_addr_alloc: 1876 kfree(switchdev_work); 1877 return NOTIFY_BAD; 1878 } 1879 1880 static int dpaa2_switch_port_obj_event(unsigned long event, 1881 struct net_device *netdev, 1882 struct switchdev_notifier_port_obj_info *port_obj_info) 1883 { 1884 int err = -EOPNOTSUPP; 1885 1886 if (!dpaa2_switch_port_dev_check(netdev)) 1887 return NOTIFY_DONE; 1888 1889 switch (event) { 1890 case SWITCHDEV_PORT_OBJ_ADD: 1891 err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj); 1892 break; 1893 case SWITCHDEV_PORT_OBJ_DEL: 1894 err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj); 1895 break; 1896 } 1897 1898 port_obj_info->handled = true; 1899 return notifier_from_errno(err); 1900 } 1901 1902 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb, 1903 unsigned long event, void *ptr) 1904 { 1905 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 1906 1907 switch (event) { 1908 case SWITCHDEV_PORT_OBJ_ADD: 1909 case SWITCHDEV_PORT_OBJ_DEL: 1910 return dpaa2_switch_port_obj_event(event, dev, ptr); 1911 case SWITCHDEV_PORT_ATTR_SET: 1912 return dpaa2_switch_port_attr_set_event(dev, ptr); 1913 } 1914 1915 return NOTIFY_DONE; 1916 } 1917 1918 /* Build a linear skb based on a single-buffer frame descriptor */ 1919 static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw, 1920 const struct dpaa2_fd *fd) 1921 { 1922 u16 fd_offset = dpaa2_fd_get_offset(fd); 1923 dma_addr_t addr = dpaa2_fd_get_addr(fd); 1924 u32 fd_length = dpaa2_fd_get_len(fd); 1925 struct device *dev = ethsw->dev; 1926 struct sk_buff *skb = NULL; 1927 void *fd_vaddr; 1928 1929 fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr); 1930 dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE, 1931 DMA_FROM_DEVICE); 1932 1933 skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE + 1934 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 1935 if (unlikely(!skb)) { 1936 dev_err(dev, "build_skb() failed\n"); 1937 return NULL; 1938 } 1939 1940 skb_reserve(skb, fd_offset); 1941 skb_put(skb, fd_length); 1942 1943 ethsw->buf_count--; 1944 1945 return skb; 1946 } 1947 1948 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, 1949 const struct dpaa2_fd *fd) 1950 { 1951 dpaa2_switch_free_fd(fq->ethsw, fd); 1952 } 1953 1954 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, 1955 const struct dpaa2_fd *fd) 1956 { 1957 struct ethsw_core *ethsw = fq->ethsw; 1958 struct ethsw_port_priv *port_priv; 1959 struct net_device *netdev; 1960 struct 
vlan_ethhdr *hdr; 1961 struct sk_buff *skb; 1962 u16 vlan_tci, vid; 1963 int if_id, err; 1964 1965 /* get switch ingress interface ID */ 1966 if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF; 1967 1968 if (if_id >= ethsw->sw_attr.num_ifs) { 1969 dev_err(ethsw->dev, "Frame received from unknown interface!\n"); 1970 goto err_free_fd; 1971 } 1972 port_priv = ethsw->ports[if_id]; 1973 netdev = port_priv->netdev; 1974 1975 /* build the SKB based on the FD received */ 1976 if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) { 1977 if (net_ratelimit()) { 1978 netdev_err(netdev, "Received invalid frame format\n"); 1979 goto err_free_fd; 1980 } 1981 } 1982 1983 skb = dpaa2_switch_build_linear_skb(ethsw, fd); 1984 if (unlikely(!skb)) 1985 goto err_free_fd; 1986 1987 skb_reset_mac_header(skb); 1988 1989 /* Remove the VLAN header if the packet that we just received has a vid 1990 * equal to the port PVIDs. Since the dpaa2-switch can operate only in 1991 * VLAN-aware mode and no alterations are made on the packet when it's 1992 * redirected/mirrored to the control interface, we are sure that there 1993 * will always be a VLAN header present. 1994 */ 1995 hdr = vlan_eth_hdr(skb); 1996 vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK; 1997 if (vid == port_priv->pvid) { 1998 err = __skb_vlan_pop(skb, &vlan_tci); 1999 if (err) { 2000 dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err); 2001 goto err_free_fd; 2002 } 2003 } 2004 2005 skb->dev = netdev; 2006 skb->protocol = eth_type_trans(skb, skb->dev); 2007 2008 /* Setup the offload_fwd_mark only if the port is under a bridge */ 2009 skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev); 2010 2011 netif_receive_skb(skb); 2012 2013 return; 2014 2015 err_free_fd: 2016 dpaa2_switch_free_fd(ethsw, fd); 2017 } 2018 2019 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) 2020 { 2021 ethsw->features = 0; 2022 2023 if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) 2024 ethsw->features |= ETHSW_FEATURE_MAC_ADDR; 2025 } 2026 2027 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw) 2028 { 2029 struct dpsw_ctrl_if_attr ctrl_if_attr; 2030 struct device *dev = ethsw->dev; 2031 int i = 0; 2032 int err; 2033 2034 err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2035 &ctrl_if_attr); 2036 if (err) { 2037 dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err); 2038 return err; 2039 } 2040 2041 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; 2042 ethsw->fq[i].ethsw = ethsw; 2043 ethsw->fq[i++].type = DPSW_QUEUE_RX; 2044 2045 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; 2046 ethsw->fq[i].ethsw = ethsw; 2047 ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF; 2048 2049 return 0; 2050 } 2051 2052 /* Free buffers acquired from the buffer pool or which were meant to 2053 * be released in the pool 2054 */ 2055 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count) 2056 { 2057 struct device *dev = ethsw->dev; 2058 void *vaddr; 2059 int i; 2060 2061 for (i = 0; i < count; i++) { 2062 vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]); 2063 dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE, 2064 DMA_FROM_DEVICE); 2065 free_pages((unsigned long)vaddr, 0); 2066 } 2067 } 2068 2069 /* Perform a single release command to add buffers 2070 * to the specified buffer pool 2071 */ 2072 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid) 2073 { 2074 struct device *dev = ethsw->dev; 2075 u64 buf_array[BUFS_PER_CMD]; 2076 struct page *page; 2077 int retries = 0; 
static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
{
	int *count = &ethsw->buf_count;
	int new_count;
	int err = 0;

	if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
		do {
			new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
			if (unlikely(!new_count)) {
				/* Out of memory; abort for now, we'll
				 * try later on
				 */
				break;
			}
			*count += new_count;
		} while (*count < DPAA2_ETHSW_NUM_BUFS);

		if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
			err = -ENOMEM;
	}

	return err;
}

static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
{
	int *count, i;

	for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
		count = &ethsw->buf_count;
		*count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);

		if (unlikely(*count < BUFS_PER_CMD))
			return -ENOMEM;
	}

	return 0;
}

static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
{
	u64 buf_array[BUFS_PER_CMD];
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
					       buf_array, BUFS_PER_CMD);
		if (ret < 0) {
			dev_err(ethsw->dev,
				"dpaa2_io_service_acquire() = %d\n", ret);
			return;
		}
		dpaa2_switch_free_bufs(ethsw, buf_array, ret);

	} while (ret);
}

static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
	struct device *dev = ethsw->dev;
	struct fsl_mc_device *dpbp_dev;
	struct dpbp_attr dpbp_attrs;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}
	ethsw->dpbp_dev = dpbp_dev;

	err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
	dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
	dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
	dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;

	err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     &dpsw_ctrl_if_pools_cfg);
	if (err) {
		dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
		goto err_get_attr;
	}
	ethsw->bpid = dpbp_attrs.id;

	return 0;

err_get_attr:
	dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);
	return err;
}

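/* Undo dpaa2_switch_setup_dpbp(): disable and close the DPBP object, then
 * return it to the fsl-mc bus allocator.
 */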
static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
{
	dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
	dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
	fsl_mc_object_free(ethsw->dpbp_dev);
}

static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		ethsw->fq[i].store =
			dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
					      ethsw->dev);
		if (!ethsw->fq[i].store) {
			dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
			while (--i >= 0)
				dpaa2_io_store_destroy(ethsw->fq[i].store);
			return -ENOMEM;
		}
	}

	return 0;
}

static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_store_destroy(ethsw->fq[i].store);
}

static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
{
	int err, retries = 0;

	/* Try to pull from the FQ while the portal is busy and we didn't hit
	 * the maximum number of retries
	 */
	do {
		err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

	if (unlikely(err))
		dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}

/* Consume all frames pull-dequeued into the store */
static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
{
	struct ethsw_core *ethsw = fq->ethsw;
	int cleaned = 0, is_last;
	struct dpaa2_dq *dq;
	int retries = 0;

	do {
		/* Get the next available FD from the store */
		dq = dpaa2_io_store_next(fq->store, &is_last);
		if (unlikely(!dq)) {
			if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
				dev_err_once(ethsw->dev,
					     "No valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		if (fq->type == DPSW_QUEUE_RX)
			dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
		else
			dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
		cleaned++;

	} while (!is_last);

	return cleaned;
}

/* NAPI poll routine */
static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
{
	int err, cleaned = 0, store_cleaned, work_done;
	struct dpaa2_switch_fq *fq;
	int retries = 0;

	fq = container_of(napi, struct dpaa2_switch_fq, napi);

	do {
		err = dpaa2_switch_pull_fq(fq);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_switch_refill_bp(fq->ethsw);

		store_cleaned = dpaa2_switch_store_consume(fq);
		cleaned += store_cleaned;

		if (cleaned >= budget) {
			work_done = budget;
			goto out;
		}

	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and re-enable
	 * data availability notifications
	 */
	napi_complete_done(napi, cleaned);
	do {
		err = dpaa2_io_service_rearm(NULL, &fq->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

	work_done = max(cleaned, 1);
out:
	return work_done;
}

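/* FQDAN (frame queue data available notification) callback: simply schedule
 * the NAPI instance associated with the queue that raised the notification.
 */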
static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_switch_fq *fq;

	fq = container_of(nctx, struct dpaa2_switch_fq, nctx);

	napi_schedule(&fq->napi);
}

static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_queue_cfg queue_cfg;
	struct dpaa2_io_notification_ctx *nctx;
	int err, i, j;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		nctx = &ethsw->fq[i].nctx;

		/* Register a new software context for the FQID.
		 * By using NULL as the first parameter, we specify that we do
		 * not care on which cpu the interrupts for this queue are
		 * received.
		 */
		nctx->is_cdan = 0;
		nctx->id = ethsw->fq[i].fqid;
		nctx->desired_cpu = DPAA2_IO_ANY_CPU;
		nctx->cb = dpaa2_switch_fqdan_cb;
		err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
		if (err) {
			err = -EPROBE_DEFER;
			goto err_register;
		}

		queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
				    DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
		queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
		queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
		queue_cfg.dest_cfg.priority = 0;
		queue_cfg.user_ctx = nctx->qman64;

		err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     ethsw->fq[i].type,
					     &queue_cfg);
		if (err)
			goto err_set_queue;
	}

	return 0;

err_set_queue:
	dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
err_register:
	for (j = 0; j < i; j++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
					    ethsw->dev);

	return err;
}

static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
					    ethsw->dev);
}

static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
{
	int err;

	/* setup FQs for Rx and Tx Conf */
	err = dpaa2_switch_setup_fqs(ethsw);
	if (err)
		return err;

	/* setup the buffer pool needed on the Rx path */
	err = dpaa2_switch_setup_dpbp(ethsw);
	if (err)
		return err;

	err = dpaa2_switch_seed_bp(ethsw);
	if (err)
		goto err_free_dpbp;

	err = dpaa2_switch_alloc_rings(ethsw);
	if (err)
		goto err_drain_dpbp;

	err = dpaa2_switch_setup_dpio(ethsw);
	if (err)
		goto err_destroy_rings;

	err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
		goto err_deregister_dpio;
	}

	return 0;

err_deregister_dpio:
	dpaa2_switch_free_dpio(ethsw);
err_destroy_rings:
	dpaa2_switch_destroy_rings(ethsw);
err_drain_dpbp:
	dpaa2_switch_drain_bp(ethsw);
err_free_dpbp:
	dpaa2_switch_free_dpbp(ethsw);

	return err;
}

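/* One-time DPSW setup at probe time: open the object, check the firmware API
 * version, reset it, disable every interface and place it in STP forwarding
 * state, drop the default VLAN 1 and FDB configuration, create the ordered
 * workqueue used for switchdev events and bring up the control interface.
 */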
static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	struct dpsw_vlan_if_cfg vcfg = {0};
	struct dpsw_tci_cfg tci_cfg = {0};
	struct dpsw_stp_cfg stp_cfg;
	int err;
	u16 i;

	ethsw->dev_id = sw_dev->obj_desc.id;

	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_open err %d\n", err);
		return err;
	}

	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		dev_err(dev, "dpsw_get_attributes err %d\n", err);
		goto err_close;
	}

	err = dpsw_get_api_version(ethsw->mc_io, 0,
				   &ethsw->major,
				   &ethsw->minor);
	if (err) {
		dev_err(dev, "dpsw_get_api_version err %d\n", err);
		goto err_close;
	}

	/* Minimum supported DPSW version check */
	if (ethsw->major < DPSW_MIN_VER_MAJOR ||
	    (ethsw->major == DPSW_MIN_VER_MAJOR &&
	     ethsw->minor < DPSW_MIN_VER_MINOR)) {
		dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
			ethsw->major, ethsw->minor);
		err = -EOPNOTSUPP;
		goto err_close;
	}

	if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
		err = -EOPNOTSUPP;
		goto err_close;
	}

	dpaa2_switch_detect_features(ethsw);

	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_reset err %d\n", err);
		goto err_close;
	}

	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
	stp_cfg.state = DPSW_STP_STATE_FORWARDING;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
		if (err) {
			dev_err(dev, "dpsw_if_disable err %d\n", err);
			goto err_close;
		}

		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
				      &stp_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
				err, i);
			goto err_close;
		}

		/* Switch starts with all ports configured to VLAN 1. Need to
		 * remove this setting to allow configuration at bridge join
		 */
		vcfg.num_ifs = 1;
		vcfg.if_id[0] = i;
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
						   DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
				err);
			goto err_close;
		}

		tci_cfg.vlan_id = 4095;
		err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_tci err %d\n", err);
			goto err_close;
		}

		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
			goto err_close;
		}
	}

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
	if (err) {
		dev_err(dev, "dpsw_vlan_remove err %d\n", err);
		goto err_close;
	}

	ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
						   WQ_MEM_RECLAIM, "ethsw",
						   ethsw->sw_attr.id);
	if (!ethsw->workqueue) {
		err = -ENOMEM;
		goto err_close;
	}

	err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
	if (err)
		goto err_destroy_ordered_workqueue;

	err = dpaa2_switch_ctrl_if_setup(ethsw);
	if (err)
		goto err_destroy_ordered_workqueue;

	return 0;

err_destroy_ordered_workqueue:
	destroy_workqueue(ethsw->workqueue);

err_close:
	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	return err;
}

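/* Per-port init: retrieve the port Tx queue id, create a private FDB table for
 * standalone operation, install VLAN 1 as the untagged PVID and configure the
 * broadcast/unknown unicast egress flooding domains for the new FDB.
 */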
static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = DEFAULT_VLAN_ID,
		.flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
	};
	struct net_device *netdev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_fdb_cfg fdb_cfg = {0};
	struct dpaa2_switch_fdb *fdb;
	struct dpsw_if_attr dpsw_if_attr;
	u16 fdb_id;
	int err;

	/* Get the Tx queue for this specific port */
	err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     port_priv->idx, &dpsw_if_attr);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
		return err;
	}
	port_priv->tx_qdid = dpsw_if_attr.qdid;

	/* Create an FDB table for this particular switch port */
	fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
	err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
			   &fdb_id, &fdb_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
		return err;
	}

	/* Find an unused dpaa2_switch_fdb structure and use it */
	fdb = dpaa2_switch_fdb_get_unused(ethsw);
	fdb->fdb_id = fdb_id;
	fdb->in_use = true;
	fdb->bridge_dev = NULL;
	port_priv->fdb = fdb;

	/* We need to add VLAN 1 as the PVID on this port until it is under a
	 * bridge since the DPAA2 switch is not able to handle the traffic in a
	 * VLAN unaware fashion
	 */
	err = dpaa2_switch_port_vlans_add(netdev, &vlan);
	if (err)
		return err;

	/* Setup the egress flooding domains (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	return err;
}

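/* Release the DPSW object handle. The control interface and the port
 * netdevices are torn down separately in dpaa2_switch_remove().
 */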
static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err)
		dev_warn(dev, "dpsw_close err %d\n", err);
}

static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
{
	dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	dpaa2_switch_free_dpio(ethsw);
	dpaa2_switch_destroy_rings(ethsw);
	dpaa2_switch_drain_bp(ethsw);
	dpaa2_switch_free_dpbp(ethsw);
}

static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
	struct ethsw_port_priv *port_priv;
	struct ethsw_core *ethsw;
	struct device *dev;
	int i;

	dev = &sw_dev->dev;
	ethsw = dev_get_drvdata(dev);

	dpaa2_switch_ctrl_if_teardown(ethsw);

	dpaa2_switch_teardown_irqs(sw_dev);

	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		port_priv = ethsw->ports[i];
		unregister_netdev(port_priv->netdev);
		free_netdev(port_priv->netdev);
	}

	kfree(ethsw->fdbs);
	kfree(ethsw->ports);

	dpaa2_switch_takedown(sw_dev);

	destroy_workqueue(ethsw->workqueue);

	fsl_mc_portal_free(ethsw->mc_io);

	kfree(ethsw);

	dev_set_drvdata(dev, NULL);

	return 0;
}

static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
				   u16 port_idx)
{
	struct ethsw_port_priv *port_priv;
	struct device *dev = ethsw->dev;
	struct net_device *port_netdev;
	int err;

	port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
	if (!port_netdev) {
		dev_err(dev, "alloc_etherdev error\n");
		return -ENOMEM;
	}

	port_priv = netdev_priv(port_netdev);
	port_priv->netdev = port_netdev;
	port_priv->ethsw_data = ethsw;

	port_priv->idx = port_idx;
	port_priv->stp_state = BR_STATE_FORWARDING;

	SET_NETDEV_DEV(port_netdev, dev);
	port_netdev->netdev_ops = &dpaa2_switch_port_ops;
	port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;

	port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;

	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Set MTU limits */
	port_netdev->min_mtu = ETH_MIN_MTU;
	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;

	/* Populate the private port structure so that later calls to
	 * dpaa2_switch_port_init() can use it.
	 */
	ethsw->ports[port_idx] = port_priv;

	/* The DPAA2 switch's ingress path depends on the VLAN table,
	 * thus we are not able to disable VLAN filtering.
	 */
	port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER;

	err = dpaa2_switch_port_init(port_priv, port_idx);
	if (err)
		goto err_port_probe;

	err = dpaa2_switch_port_set_mac_addr(port_priv);
	if (err)
		goto err_port_probe;

	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		goto err_port_probe;

	return 0;

err_port_probe:
	free_netdev(port_netdev);
	ethsw->ports[port_idx] = NULL;

	return err;
}

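/* Main probe routine: allocate the switch core and an MC command portal, run
 * the one-time DPSW setup, probe each switch port, attach NAPI to the control
 * interface queues, enable the DPSW and only then register the port netdevs so
 * they are ready to pass traffic as soon as they become visible.
 */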
static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw;
	int i, err;

	/* Allocate switch core */
	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);

	if (!ethsw)
		return -ENOMEM;

	ethsw->dev = dev;
	ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
	dev_set_drvdata(dev, ethsw);

	err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &ethsw->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
		goto err_free_drvdata;
	}

	err = dpaa2_switch_init(sw_dev);
	if (err)
		goto err_free_cmdport;

	ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
			       GFP_KERNEL);
	if (!(ethsw->ports)) {
		err = -ENOMEM;
		goto err_takedown;
	}

	ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
			      GFP_KERNEL);
	if (!ethsw->fdbs) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = dpaa2_switch_probe_port(ethsw, i);
		if (err)
			goto err_free_netdev;
	}

	/* Add a NAPI instance for each of the Rx queues. The first port's
	 * net_device will be associated with the instances since we do not
	 * have different queues for each switch port.
	 */
	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		netif_napi_add(ethsw->ports[0]->netdev,
			       &ethsw->fq[i].napi, dpaa2_switch_poll,
			       NAPI_POLL_WEIGHT);

	err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
		goto err_free_netdev;
	}

	/* Setup IRQs */
	err = dpaa2_switch_setup_irqs(sw_dev);
	if (err)
		goto err_stop;

	/* Register the netdev only when the entire setup is done and the
	 * switch port interfaces are ready to receive traffic
	 */
	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = register_netdev(ethsw->ports[i]->netdev);
		if (err < 0) {
			dev_err(dev, "register_netdev error %d\n", err);
			goto err_unregister_ports;
		}
	}

	return 0;

err_unregister_ports:
	for (i--; i >= 0; i--)
		unregister_netdev(ethsw->ports[i]->netdev);
	dpaa2_switch_teardown_irqs(sw_dev);
err_stop:
	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
err_free_netdev:
	for (i--; i >= 0; i--)
		free_netdev(ethsw->ports[i]->netdev);
	kfree(ethsw->fdbs);
err_free_ports:
	kfree(ethsw->ports);

err_takedown:
	dpaa2_switch_takedown(sw_dev);

err_free_cmdport:
	fsl_mc_portal_free(ethsw->mc_io);

err_free_drvdata:
	kfree(ethsw);
	dev_set_drvdata(dev, NULL);

	return err;
}

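/* Bind to every "dpsw" object exported by the fsl-mc bus regardless of its
 * version; the minimum supported API version is enforced at probe time.
 */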
static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpsw",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);

static struct fsl_mc_driver dpaa2_switch_drv = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_switch_probe,
	.remove = dpaa2_switch_remove,
	.match_id_table = dpaa2_switch_match_id_table
};

static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
	.notifier_call = dpaa2_switch_port_netdevice_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_nb = {
	.notifier_call = dpaa2_switch_port_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
	.notifier_call = dpaa2_switch_port_blocking_event,
};

static int dpaa2_switch_register_notifiers(void)
{
	int err;

	err = register_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
		return err;
	}

	err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
		goto err_switchdev_nb;
	}

	err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&dpaa2_switch_port_nb);

	return err;
}

static void dpaa2_switch_unregister_notifiers(void)
{
	int err;

	err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
		       err);

	err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
}

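/* Module init registers the fsl-mc driver first and the global notifiers
 * afterwards, unwinding the driver registration if the notifiers fail; module
 * exit tears them down in the reverse order.
 */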
static int __init dpaa2_switch_driver_init(void)
{
	int err;

	err = fsl_mc_driver_register(&dpaa2_switch_drv);
	if (err)
		return err;

	err = dpaa2_switch_register_notifiers();
	if (err) {
		fsl_mc_driver_unregister(&dpaa2_switch_drv);
		return err;
	}

	return 0;
}

static void __exit dpaa2_switch_driver_exit(void)
{
	dpaa2_switch_unregister_notifiers();
	fsl_mc_driver_unregister(&dpaa2_switch_drv);
}

module_init(dpaa2_switch_driver_init);
module_exit(dpaa2_switch_driver_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");