1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * DPAA2 Ethernet Switch driver 4 * 5 * Copyright 2014-2016 Freescale Semiconductor Inc. 6 * Copyright 2017-2021 NXP 7 * 8 */ 9 10 #include <linux/module.h> 11 12 #include <linux/interrupt.h> 13 #include <linux/msi.h> 14 #include <linux/kthread.h> 15 #include <linux/workqueue.h> 16 #include <linux/iommu.h> 17 18 #include <linux/fsl/mc.h> 19 20 #include "dpaa2-switch.h" 21 22 /* Minimal supported DPSW version */ 23 #define DPSW_MIN_VER_MAJOR 8 24 #define DPSW_MIN_VER_MINOR 9 25 26 #define DEFAULT_VLAN_ID 1 27 28 static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv) 29 { 30 return port_priv->fdb->fdb_id; 31 } 32 33 static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw) 34 { 35 int i; 36 37 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) 38 if (!ethsw->fdbs[i].in_use) 39 return ðsw->fdbs[i]; 40 return NULL; 41 } 42 43 static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv, 44 struct net_device *bridge_dev) 45 { 46 struct ethsw_port_priv *other_port_priv = NULL; 47 struct dpaa2_switch_fdb *fdb; 48 struct net_device *other_dev; 49 struct list_head *iter; 50 51 /* If we leave a bridge (bridge_dev is NULL), find an unused 52 * FDB and use that. 53 */ 54 if (!bridge_dev) { 55 fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data); 56 57 /* If there is no unused FDB, we must be the last port that 58 * leaves the last bridge, all the others are standalone. We 59 * can just keep the FDB that we already have. 60 */ 61 62 if (!fdb) { 63 port_priv->fdb->bridge_dev = NULL; 64 return 0; 65 } 66 67 port_priv->fdb = fdb; 68 port_priv->fdb->in_use = true; 69 port_priv->fdb->bridge_dev = NULL; 70 return 0; 71 } 72 73 /* The below call to netdev_for_each_lower_dev() demands the RTNL lock 74 * being held. Assert on it so that it's easier to catch new code 75 * paths that reach this point without the RTNL lock. 
76 */ 77 ASSERT_RTNL(); 78 79 /* If part of a bridge, use the FDB of the first dpaa2 switch interface 80 * to be present in that bridge 81 */ 82 netdev_for_each_lower_dev(bridge_dev, other_dev, iter) { 83 if (!dpaa2_switch_port_dev_check(other_dev)) 84 continue; 85 86 if (other_dev == port_priv->netdev) 87 continue; 88 89 other_port_priv = netdev_priv(other_dev); 90 break; 91 } 92 93 /* The current port is about to change its FDB to the one used by the 94 * first port that joined the bridge. 95 */ 96 if (other_port_priv) { 97 /* The previous FDB is about to become unused, since the 98 * interface is no longer standalone. 99 */ 100 port_priv->fdb->in_use = false; 101 port_priv->fdb->bridge_dev = NULL; 102 103 /* Get a reference to the new FDB */ 104 port_priv->fdb = other_port_priv->fdb; 105 } 106 107 /* Keep track of the new upper bridge device */ 108 port_priv->fdb->bridge_dev = bridge_dev; 109 110 return 0; 111 } 112 113 static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id, 114 enum dpsw_flood_type type, 115 struct dpsw_egress_flood_cfg *cfg) 116 { 117 int i = 0, j; 118 119 memset(cfg, 0, sizeof(*cfg)); 120 121 /* Add all the DPAA2 switch ports found in the same bridging domain to 122 * the egress flooding domain 123 */ 124 for (j = 0; j < ethsw->sw_attr.num_ifs; j++) { 125 if (!ethsw->ports[j]) 126 continue; 127 if (ethsw->ports[j]->fdb->fdb_id != fdb_id) 128 continue; 129 130 if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood) 131 cfg->if_id[i++] = ethsw->ports[j]->idx; 132 else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood) 133 cfg->if_id[i++] = ethsw->ports[j]->idx; 134 } 135 136 /* Add the CTRL interface to the egress flooding domain */ 137 cfg->if_id[i++] = ethsw->sw_attr.num_ifs; 138 139 cfg->fdb_id = fdb_id; 140 cfg->flood_type = type; 141 cfg->num_ifs = i; 142 } 143 144 static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id) 145 { 146 struct dpsw_egress_flood_cfg flood_cfg; 147 int 
err; 148 149 /* Setup broadcast flooding domain */ 150 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg); 151 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 152 &flood_cfg); 153 if (err) { 154 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 155 return err; 156 } 157 158 /* Setup unknown flooding domain */ 159 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg); 160 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle, 161 &flood_cfg); 162 if (err) { 163 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err); 164 return err; 165 } 166 167 return 0; 168 } 169 170 static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 171 dma_addr_t iova_addr) 172 { 173 phys_addr_t phys_addr; 174 175 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; 176 177 return phys_to_virt(phys_addr); 178 } 179 180 static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid) 181 { 182 struct ethsw_core *ethsw = port_priv->ethsw_data; 183 struct dpsw_vlan_cfg vcfg = {0}; 184 int err; 185 186 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 187 err = dpsw_vlan_add(ethsw->mc_io, 0, 188 ethsw->dpsw_handle, vid, &vcfg); 189 if (err) { 190 dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err); 191 return err; 192 } 193 ethsw->vlans[vid] = ETHSW_VLAN_MEMBER; 194 195 return 0; 196 } 197 198 static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv) 199 { 200 struct net_device *netdev = port_priv->netdev; 201 struct dpsw_link_state state; 202 int err; 203 204 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, 205 port_priv->ethsw_data->dpsw_handle, 206 port_priv->idx, &state); 207 if (err) { 208 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); 209 return true; 210 } 211 212 WARN_ONCE(state.up > 1, "Garbage read into link_state"); 213 214 return state.up ? 
true : false; 215 } 216 217 static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid) 218 { 219 struct ethsw_core *ethsw = port_priv->ethsw_data; 220 struct net_device *netdev = port_priv->netdev; 221 struct dpsw_tci_cfg tci_cfg = { 0 }; 222 bool up; 223 int err, ret; 224 225 err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 226 port_priv->idx, &tci_cfg); 227 if (err) { 228 netdev_err(netdev, "dpsw_if_get_tci err %d\n", err); 229 return err; 230 } 231 232 tci_cfg.vlan_id = pvid; 233 234 /* Interface needs to be down to change PVID */ 235 up = dpaa2_switch_port_is_up(port_priv); 236 if (up) { 237 err = dpsw_if_disable(ethsw->mc_io, 0, 238 ethsw->dpsw_handle, 239 port_priv->idx); 240 if (err) { 241 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 242 return err; 243 } 244 } 245 246 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, 247 port_priv->idx, &tci_cfg); 248 if (err) { 249 netdev_err(netdev, "dpsw_if_set_tci err %d\n", err); 250 goto set_tci_error; 251 } 252 253 /* Delete previous PVID info and mark the new one */ 254 port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID; 255 port_priv->vlans[pvid] |= ETHSW_VLAN_PVID; 256 port_priv->pvid = pvid; 257 258 set_tci_error: 259 if (up) { 260 ret = dpsw_if_enable(ethsw->mc_io, 0, 261 ethsw->dpsw_handle, 262 port_priv->idx); 263 if (ret) { 264 netdev_err(netdev, "dpsw_if_enable err %d\n", ret); 265 return ret; 266 } 267 } 268 269 return err; 270 } 271 272 static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv, 273 u16 vid, u16 flags) 274 { 275 struct ethsw_core *ethsw = port_priv->ethsw_data; 276 struct net_device *netdev = port_priv->netdev; 277 struct dpsw_vlan_if_cfg vcfg = {0}; 278 int err; 279 280 if (port_priv->vlans[vid]) { 281 netdev_warn(netdev, "VLAN %d already configured\n", vid); 282 return -EEXIST; 283 } 284 285 /* If hit, this VLAN rule will lead the packet into the FDB table 286 * specified in the vlan configuration below 287 */ 288 
vcfg.num_ifs = 1; 289 vcfg.if_id[0] = port_priv->idx; 290 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 291 vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID; 292 err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg); 293 if (err) { 294 netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err); 295 return err; 296 } 297 298 port_priv->vlans[vid] = ETHSW_VLAN_MEMBER; 299 300 if (flags & BRIDGE_VLAN_INFO_UNTAGGED) { 301 err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0, 302 ethsw->dpsw_handle, 303 vid, &vcfg); 304 if (err) { 305 netdev_err(netdev, 306 "dpsw_vlan_add_if_untagged err %d\n", err); 307 return err; 308 } 309 port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED; 310 } 311 312 if (flags & BRIDGE_VLAN_INFO_PVID) { 313 err = dpaa2_switch_port_set_pvid(port_priv, vid); 314 if (err) 315 return err; 316 } 317 318 return 0; 319 } 320 321 static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state) 322 { 323 switch (state) { 324 case BR_STATE_DISABLED: 325 return DPSW_STP_STATE_DISABLED; 326 case BR_STATE_LISTENING: 327 return DPSW_STP_STATE_LISTENING; 328 case BR_STATE_LEARNING: 329 return DPSW_STP_STATE_LEARNING; 330 case BR_STATE_FORWARDING: 331 return DPSW_STP_STATE_FORWARDING; 332 case BR_STATE_BLOCKING: 333 return DPSW_STP_STATE_BLOCKING; 334 default: 335 return DPSW_STP_STATE_DISABLED; 336 } 337 } 338 339 static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state) 340 { 341 struct dpsw_stp_cfg stp_cfg = {0}; 342 int err; 343 u16 vid; 344 345 if (!netif_running(port_priv->netdev) || state == port_priv->stp_state) 346 return 0; /* Nothing to do */ 347 348 stp_cfg.state = br_stp_state_to_dpsw(state); 349 for (vid = 0; vid <= VLAN_VID_MASK; vid++) { 350 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) { 351 stp_cfg.vlan_id = vid; 352 err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0, 353 port_priv->ethsw_data->dpsw_handle, 354 port_priv->idx, &stp_cfg); 355 if (err) { 356 netdev_err(port_priv->netdev, 357 "dpsw_if_set_stp err 
%d\n", err); 358 return err; 359 } 360 } 361 } 362 363 port_priv->stp_state = state; 364 365 return 0; 366 } 367 368 static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid) 369 { 370 struct ethsw_port_priv *ppriv_local = NULL; 371 int i, err; 372 373 if (!ethsw->vlans[vid]) 374 return -ENOENT; 375 376 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid); 377 if (err) { 378 dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err); 379 return err; 380 } 381 ethsw->vlans[vid] = 0; 382 383 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 384 ppriv_local = ethsw->ports[i]; 385 ppriv_local->vlans[vid] = 0; 386 } 387 388 return 0; 389 } 390 391 static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv, 392 const unsigned char *addr) 393 { 394 struct dpsw_fdb_unicast_cfg entry = {0}; 395 u16 fdb_id; 396 int err; 397 398 entry.if_egress = port_priv->idx; 399 entry.type = DPSW_FDB_ENTRY_STATIC; 400 ether_addr_copy(entry.mac_addr, addr); 401 402 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 403 err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0, 404 port_priv->ethsw_data->dpsw_handle, 405 fdb_id, &entry); 406 if (err) 407 netdev_err(port_priv->netdev, 408 "dpsw_fdb_add_unicast err %d\n", err); 409 return err; 410 } 411 412 static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv, 413 const unsigned char *addr) 414 { 415 struct dpsw_fdb_unicast_cfg entry = {0}; 416 u16 fdb_id; 417 int err; 418 419 entry.if_egress = port_priv->idx; 420 entry.type = DPSW_FDB_ENTRY_STATIC; 421 ether_addr_copy(entry.mac_addr, addr); 422 423 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 424 err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0, 425 port_priv->ethsw_data->dpsw_handle, 426 fdb_id, &entry); 427 /* Silently discard error for calling multiple times the del command */ 428 if (err && err != -ENXIO) 429 netdev_err(port_priv->netdev, 430 "dpsw_fdb_remove_unicast err %d\n", err); 431 return err; 432 } 433 434 static 
int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv, 435 const unsigned char *addr) 436 { 437 struct dpsw_fdb_multicast_cfg entry = {0}; 438 u16 fdb_id; 439 int err; 440 441 ether_addr_copy(entry.mac_addr, addr); 442 entry.type = DPSW_FDB_ENTRY_STATIC; 443 entry.num_ifs = 1; 444 entry.if_id[0] = port_priv->idx; 445 446 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 447 err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0, 448 port_priv->ethsw_data->dpsw_handle, 449 fdb_id, &entry); 450 /* Silently discard error for calling multiple times the add command */ 451 if (err && err != -ENXIO) 452 netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n", 453 err); 454 return err; 455 } 456 457 static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv, 458 const unsigned char *addr) 459 { 460 struct dpsw_fdb_multicast_cfg entry = {0}; 461 u16 fdb_id; 462 int err; 463 464 ether_addr_copy(entry.mac_addr, addr); 465 entry.type = DPSW_FDB_ENTRY_STATIC; 466 entry.num_ifs = 1; 467 entry.if_id[0] = port_priv->idx; 468 469 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 470 err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0, 471 port_priv->ethsw_data->dpsw_handle, 472 fdb_id, &entry); 473 /* Silently discard error for calling multiple times the del command */ 474 if (err && err != -ENAVAIL) 475 netdev_err(port_priv->netdev, 476 "dpsw_fdb_remove_multicast err %d\n", err); 477 return err; 478 } 479 480 static void dpaa2_switch_port_get_stats(struct net_device *netdev, 481 struct rtnl_link_stats64 *stats) 482 { 483 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 484 u64 tmp; 485 int err; 486 487 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 488 port_priv->ethsw_data->dpsw_handle, 489 port_priv->idx, 490 DPSW_CNT_ING_FRAME, &stats->rx_packets); 491 if (err) 492 goto error; 493 494 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 495 port_priv->ethsw_data->dpsw_handle, 496 port_priv->idx, 497 
DPSW_CNT_EGR_FRAME, &stats->tx_packets); 498 if (err) 499 goto error; 500 501 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 502 port_priv->ethsw_data->dpsw_handle, 503 port_priv->idx, 504 DPSW_CNT_ING_BYTE, &stats->rx_bytes); 505 if (err) 506 goto error; 507 508 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 509 port_priv->ethsw_data->dpsw_handle, 510 port_priv->idx, 511 DPSW_CNT_EGR_BYTE, &stats->tx_bytes); 512 if (err) 513 goto error; 514 515 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 516 port_priv->ethsw_data->dpsw_handle, 517 port_priv->idx, 518 DPSW_CNT_ING_FRAME_DISCARD, 519 &stats->rx_dropped); 520 if (err) 521 goto error; 522 523 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 524 port_priv->ethsw_data->dpsw_handle, 525 port_priv->idx, 526 DPSW_CNT_ING_FLTR_FRAME, 527 &tmp); 528 if (err) 529 goto error; 530 stats->rx_dropped += tmp; 531 532 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0, 533 port_priv->ethsw_data->dpsw_handle, 534 port_priv->idx, 535 DPSW_CNT_EGR_FRAME_DISCARD, 536 &stats->tx_dropped); 537 if (err) 538 goto error; 539 540 return; 541 542 error: 543 netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); 544 } 545 546 static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev, 547 int attr_id) 548 { 549 return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT); 550 } 551 552 static int dpaa2_switch_port_get_offload_stats(int attr_id, 553 const struct net_device *netdev, 554 void *sp) 555 { 556 switch (attr_id) { 557 case IFLA_OFFLOAD_XSTATS_CPU_HIT: 558 dpaa2_switch_port_get_stats((struct net_device *)netdev, sp); 559 return 0; 560 } 561 562 return -EINVAL; 563 } 564 565 static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) 566 { 567 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 568 int err; 569 570 err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io, 571 0, 572 port_priv->ethsw_data->dpsw_handle, 573 port_priv->idx, 574 
(u16)ETHSW_L2_MAX_FRM(mtu)); 575 if (err) { 576 netdev_err(netdev, 577 "dpsw_if_set_max_frame_length() err %d\n", err); 578 return err; 579 } 580 581 netdev->mtu = mtu; 582 return 0; 583 } 584 585 static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev) 586 { 587 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 588 struct dpsw_link_state state; 589 int err; 590 591 /* Interrupts are received even though no one issued an 'ifconfig up' 592 * on the switch interface. Ignore these link state update interrupts 593 */ 594 if (!netif_running(netdev)) 595 return 0; 596 597 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, 598 port_priv->ethsw_data->dpsw_handle, 599 port_priv->idx, &state); 600 if (err) { 601 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err); 602 return err; 603 } 604 605 WARN_ONCE(state.up > 1, "Garbage read into link_state"); 606 607 if (state.up != port_priv->link_state) { 608 if (state.up) { 609 netif_carrier_on(netdev); 610 netif_tx_start_all_queues(netdev); 611 } else { 612 netif_carrier_off(netdev); 613 netif_tx_stop_all_queues(netdev); 614 } 615 port_priv->link_state = state.up; 616 } 617 618 return 0; 619 } 620 621 /* Manage all NAPI instances for the control interface. 622 * 623 * We only have one RX queue and one Tx Conf queue for all 624 * switch ports. Therefore, we only need to enable the NAPI instance once, the 625 * first time one of the switch ports runs .dev_open(). 
626 */ 627 628 static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw) 629 { 630 int i; 631 632 /* Access to the ethsw->napi_users relies on the RTNL lock */ 633 ASSERT_RTNL(); 634 635 /* a new interface is using the NAPI instance */ 636 ethsw->napi_users++; 637 638 /* if there is already a user of the instance, return */ 639 if (ethsw->napi_users > 1) 640 return; 641 642 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 643 napi_enable(ðsw->fq[i].napi); 644 } 645 646 static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw) 647 { 648 int i; 649 650 /* Access to the ethsw->napi_users relies on the RTNL lock */ 651 ASSERT_RTNL(); 652 653 /* If we are not the last interface using the NAPI, return */ 654 ethsw->napi_users--; 655 if (ethsw->napi_users) 656 return; 657 658 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 659 napi_disable(ðsw->fq[i].napi); 660 } 661 662 static int dpaa2_switch_port_open(struct net_device *netdev) 663 { 664 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 665 struct ethsw_core *ethsw = port_priv->ethsw_data; 666 int err; 667 668 /* Explicitly set carrier off, otherwise 669 * netif_carrier_ok() will return true and cause 'ip link show' 670 * to report the LOWER_UP flag, even though the link 671 * notification wasn't even received. 
672 */ 673 netif_carrier_off(netdev); 674 675 err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, 676 port_priv->ethsw_data->dpsw_handle, 677 port_priv->idx); 678 if (err) { 679 netdev_err(netdev, "dpsw_if_enable err %d\n", err); 680 return err; 681 } 682 683 /* sync carrier state */ 684 err = dpaa2_switch_port_carrier_state_sync(netdev); 685 if (err) { 686 netdev_err(netdev, 687 "dpaa2_switch_port_carrier_state_sync err %d\n", err); 688 goto err_carrier_sync; 689 } 690 691 dpaa2_switch_enable_ctrl_if_napi(ethsw); 692 693 return 0; 694 695 err_carrier_sync: 696 dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, 697 port_priv->ethsw_data->dpsw_handle, 698 port_priv->idx); 699 return err; 700 } 701 702 static int dpaa2_switch_port_stop(struct net_device *netdev) 703 { 704 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 705 struct ethsw_core *ethsw = port_priv->ethsw_data; 706 int err; 707 708 err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, 709 port_priv->ethsw_data->dpsw_handle, 710 port_priv->idx); 711 if (err) { 712 netdev_err(netdev, "dpsw_if_disable err %d\n", err); 713 return err; 714 } 715 716 dpaa2_switch_disable_ctrl_if_napi(ethsw); 717 718 return 0; 719 } 720 721 static int dpaa2_switch_port_parent_id(struct net_device *dev, 722 struct netdev_phys_item_id *ppid) 723 { 724 struct ethsw_port_priv *port_priv = netdev_priv(dev); 725 726 ppid->id_len = 1; 727 ppid->id[0] = port_priv->ethsw_data->dev_id; 728 729 return 0; 730 } 731 732 static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name, 733 size_t len) 734 { 735 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 736 int err; 737 738 err = snprintf(name, len, "p%d", port_priv->idx); 739 if (err >= len) 740 return -EINVAL; 741 742 return 0; 743 } 744 745 struct ethsw_dump_ctx { 746 struct net_device *dev; 747 struct sk_buff *skb; 748 struct netlink_callback *cb; 749 int idx; 750 }; 751 752 static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry, 753 struct 
ethsw_dump_ctx *dump) 754 { 755 int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC; 756 u32 portid = NETLINK_CB(dump->cb->skb).portid; 757 u32 seq = dump->cb->nlh->nlmsg_seq; 758 struct nlmsghdr *nlh; 759 struct ndmsg *ndm; 760 761 if (dump->idx < dump->cb->args[2]) 762 goto skip; 763 764 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, 765 sizeof(*ndm), NLM_F_MULTI); 766 if (!nlh) 767 return -EMSGSIZE; 768 769 ndm = nlmsg_data(nlh); 770 ndm->ndm_family = AF_BRIDGE; 771 ndm->ndm_pad1 = 0; 772 ndm->ndm_pad2 = 0; 773 ndm->ndm_flags = NTF_SELF; 774 ndm->ndm_type = 0; 775 ndm->ndm_ifindex = dump->dev->ifindex; 776 ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP; 777 778 if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr)) 779 goto nla_put_failure; 780 781 nlmsg_end(dump->skb, nlh); 782 783 skip: 784 dump->idx++; 785 return 0; 786 787 nla_put_failure: 788 nlmsg_cancel(dump->skb, nlh); 789 return -EMSGSIZE; 790 } 791 792 static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry, 793 struct ethsw_port_priv *port_priv) 794 { 795 int idx = port_priv->idx; 796 int valid; 797 798 if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) 799 valid = entry->if_info == port_priv->idx; 800 else 801 valid = entry->if_mask[idx / 8] & BIT(idx % 8); 802 803 return valid; 804 } 805 806 static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv, 807 dpaa2_switch_fdb_cb_t cb, void *data) 808 { 809 struct net_device *net_dev = port_priv->netdev; 810 struct ethsw_core *ethsw = port_priv->ethsw_data; 811 struct device *dev = net_dev->dev.parent; 812 struct fdb_dump_entry *fdb_entries; 813 struct fdb_dump_entry fdb_entry; 814 dma_addr_t fdb_dump_iova; 815 u16 num_fdb_entries; 816 u32 fdb_dump_size; 817 int err = 0, i; 818 u8 *dma_mem; 819 u16 fdb_id; 820 821 fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry); 822 dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL); 823 if (!dma_mem) 824 return -ENOMEM; 825 826 fdb_dump_iova = 
dma_map_single(dev, dma_mem, fdb_dump_size, 827 DMA_FROM_DEVICE); 828 if (dma_mapping_error(dev, fdb_dump_iova)) { 829 netdev_err(net_dev, "dma_map_single() failed\n"); 830 err = -ENOMEM; 831 goto err_map; 832 } 833 834 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv); 835 err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id, 836 fdb_dump_iova, fdb_dump_size, &num_fdb_entries); 837 if (err) { 838 netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err); 839 goto err_dump; 840 } 841 842 dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE); 843 844 fdb_entries = (struct fdb_dump_entry *)dma_mem; 845 for (i = 0; i < num_fdb_entries; i++) { 846 fdb_entry = fdb_entries[i]; 847 848 err = cb(port_priv, &fdb_entry, data); 849 if (err) 850 goto end; 851 } 852 853 end: 854 kfree(dma_mem); 855 856 return 0; 857 858 err_dump: 859 dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE); 860 err_map: 861 kfree(dma_mem); 862 return err; 863 } 864 865 static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv, 866 struct fdb_dump_entry *fdb_entry, 867 void *data) 868 { 869 if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) 870 return 0; 871 872 return dpaa2_switch_fdb_dump_nl(fdb_entry, data); 873 } 874 875 static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 876 struct net_device *net_dev, 877 struct net_device *filter_dev, int *idx) 878 { 879 struct ethsw_port_priv *port_priv = netdev_priv(net_dev); 880 struct ethsw_dump_ctx dump = { 881 .dev = net_dev, 882 .skb = skb, 883 .cb = cb, 884 .idx = *idx, 885 }; 886 int err; 887 888 err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump); 889 *idx = dump.idx; 890 891 return err; 892 } 893 894 static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv, 895 struct fdb_dump_entry *fdb_entry, 896 void *data __always_unused) 897 { 898 if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv)) 899 return 0; 
900 901 if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC)) 902 return 0; 903 904 if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST) 905 dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr); 906 else 907 dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr); 908 909 return 0; 910 } 911 912 static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv) 913 { 914 dpaa2_switch_fdb_iterate(port_priv, 915 dpaa2_switch_fdb_entry_fast_age, NULL); 916 } 917 918 static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto, 919 u16 vid) 920 { 921 struct switchdev_obj_port_vlan vlan = { 922 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 923 .vid = vid, 924 .obj.orig_dev = netdev, 925 /* This API only allows programming tagged, non-PVID VIDs */ 926 .flags = 0, 927 }; 928 929 return dpaa2_switch_port_vlans_add(netdev, &vlan); 930 } 931 932 static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto, 933 u16 vid) 934 { 935 struct switchdev_obj_port_vlan vlan = { 936 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, 937 .vid = vid, 938 .obj.orig_dev = netdev, 939 /* This API only allows programming tagged, non-PVID VIDs */ 940 .flags = 0, 941 }; 942 943 return dpaa2_switch_port_vlans_del(netdev, &vlan); 944 } 945 946 static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv) 947 { 948 struct ethsw_core *ethsw = port_priv->ethsw_data; 949 struct net_device *net_dev = port_priv->netdev; 950 struct device *dev = net_dev->dev.parent; 951 u8 mac_addr[ETH_ALEN]; 952 int err; 953 954 if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR)) 955 return 0; 956 957 /* Get firmware address, if any */ 958 err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle, 959 port_priv->idx, mac_addr); 960 if (err) { 961 dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n"); 962 return err; 963 } 964 965 /* First check if firmware has any address configured by bootloader */ 966 if (!is_zero_ether_addr(mac_addr)) { 967 
memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); 968 } else { 969 /* No MAC address configured, fill in net_dev->dev_addr 970 * with a random one 971 */ 972 eth_hw_addr_random(net_dev); 973 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); 974 975 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all 976 * practical purposes, this will be our "permanent" mac address, 977 * at least until the next reboot. This move will also permit 978 * register_netdevice() to properly fill up net_dev->perm_addr. 979 */ 980 net_dev->addr_assign_type = NET_ADDR_PERM; 981 } 982 983 return 0; 984 } 985 986 static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw, 987 const struct dpaa2_fd *fd) 988 { 989 struct device *dev = ethsw->dev; 990 unsigned char *buffer_start; 991 struct sk_buff **skbh, *skb; 992 dma_addr_t fd_addr; 993 994 fd_addr = dpaa2_fd_get_addr(fd); 995 skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr); 996 997 skb = *skbh; 998 buffer_start = (unsigned char *)skbh; 999 1000 dma_unmap_single(dev, fd_addr, 1001 skb_tail_pointer(skb) - buffer_start, 1002 DMA_TO_DEVICE); 1003 1004 /* Move on with skb release */ 1005 dev_kfree_skb(skb); 1006 } 1007 1008 static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw, 1009 struct sk_buff *skb, 1010 struct dpaa2_fd *fd) 1011 { 1012 struct device *dev = ethsw->dev; 1013 struct sk_buff **skbh; 1014 dma_addr_t addr; 1015 u8 *buff_start; 1016 void *hwa; 1017 1018 buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET - 1019 DPAA2_SWITCH_TX_BUF_ALIGN, 1020 DPAA2_SWITCH_TX_BUF_ALIGN); 1021 1022 /* Clear FAS to have consistent values for TX confirmation. 
It is 1023 * located in the first 8 bytes of the buffer's hardware annotation 1024 * area 1025 */ 1026 hwa = buff_start + DPAA2_SWITCH_SWA_SIZE; 1027 memset(hwa, 0, 8); 1028 1029 /* Store a backpointer to the skb at the beginning of the buffer 1030 * (in the private data area) such that we can release it 1031 * on Tx confirm 1032 */ 1033 skbh = (struct sk_buff **)buff_start; 1034 *skbh = skb; 1035 1036 addr = dma_map_single(dev, buff_start, 1037 skb_tail_pointer(skb) - buff_start, 1038 DMA_TO_DEVICE); 1039 if (unlikely(dma_mapping_error(dev, addr))) 1040 return -ENOMEM; 1041 1042 /* Setup the FD fields */ 1043 memset(fd, 0, sizeof(*fd)); 1044 1045 dpaa2_fd_set_addr(fd, addr); 1046 dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start)); 1047 dpaa2_fd_set_len(fd, skb->len); 1048 dpaa2_fd_set_format(fd, dpaa2_fd_single); 1049 1050 return 0; 1051 } 1052 1053 static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb, 1054 struct net_device *net_dev) 1055 { 1056 struct ethsw_port_priv *port_priv = netdev_priv(net_dev); 1057 struct ethsw_core *ethsw = port_priv->ethsw_data; 1058 int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES; 1059 struct dpaa2_fd fd; 1060 int err; 1061 1062 if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) { 1063 struct sk_buff *ns; 1064 1065 ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM); 1066 if (unlikely(!ns)) { 1067 net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name); 1068 goto err_free_skb; 1069 } 1070 dev_consume_skb_any(skb); 1071 skb = ns; 1072 } 1073 1074 /* We'll be holding a back-reference to the skb until Tx confirmation */ 1075 skb = skb_unshare(skb, GFP_ATOMIC); 1076 if (unlikely(!skb)) { 1077 /* skb_unshare() has already freed the skb */ 1078 net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name); 1079 goto err_exit; 1080 } 1081 1082 /* At this stage, we do not support non-linear skbs so just try to 1083 * linearize the skb and if that's not working, just drop the 
packet. 1084 */ 1085 err = skb_linearize(skb); 1086 if (err) { 1087 net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err); 1088 goto err_free_skb; 1089 } 1090 1091 err = dpaa2_switch_build_single_fd(ethsw, skb, &fd); 1092 if (unlikely(err)) { 1093 net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err); 1094 goto err_free_skb; 1095 } 1096 1097 do { 1098 err = dpaa2_io_service_enqueue_qd(NULL, 1099 port_priv->tx_qdid, 1100 8, 0, &fd); 1101 retries--; 1102 } while (err == -EBUSY && retries); 1103 1104 if (unlikely(err < 0)) { 1105 dpaa2_switch_free_fd(ethsw, &fd); 1106 goto err_exit; 1107 } 1108 1109 return NETDEV_TX_OK; 1110 1111 err_free_skb: 1112 dev_kfree_skb(skb); 1113 err_exit: 1114 return NETDEV_TX_OK; 1115 } 1116 1117 static const struct net_device_ops dpaa2_switch_port_ops = { 1118 .ndo_open = dpaa2_switch_port_open, 1119 .ndo_stop = dpaa2_switch_port_stop, 1120 1121 .ndo_set_mac_address = eth_mac_addr, 1122 .ndo_get_stats64 = dpaa2_switch_port_get_stats, 1123 .ndo_change_mtu = dpaa2_switch_port_change_mtu, 1124 .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats, 1125 .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats, 1126 .ndo_fdb_dump = dpaa2_switch_port_fdb_dump, 1127 .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add, 1128 .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill, 1129 1130 .ndo_start_xmit = dpaa2_switch_port_tx, 1131 .ndo_get_port_parent_id = dpaa2_switch_port_parent_id, 1132 .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name, 1133 }; 1134 1135 bool dpaa2_switch_port_dev_check(const struct net_device *netdev) 1136 { 1137 return netdev->netdev_ops == &dpaa2_switch_port_ops; 1138 } 1139 1140 static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw) 1141 { 1142 int i; 1143 1144 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 1145 dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev); 1146 dpaa2_switch_port_set_mac_addr(ethsw->ports[i]); 1147 } 1148 } 1149 1150 
/* Threaded handler for the DPSW interrupt: reads (and thereby clears) the
 * IRQ status and, on a link change event, re-syncs all port states
 */
static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);

	/* Mask the events and the if_id reserved bits to be cleared on read */
	u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
	int err;

	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, &status);
	if (err) {
		dev_err(dev, "Can't get irq status (err %d)\n", err);

		/* Best-effort clear so the IRQ does not stay asserted */
		err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
					    DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
		if (err)
			dev_err(dev, "Can't clear irq status (err %d)\n", err);
		goto out;
	}

	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
		dpaa2_switch_links_state_update(ethsw);

out:
	return IRQ_HANDLED;
}

/* Allocate the MC IRQs of the DPSW object and install the threaded handler.
 * The interrupt stays disabled in hardware until the mask is programmed.
 */
static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(sw_dev);
	if (err) {
		dev_err(dev, "MC irqs allocation failed\n");
		return err;
	}

	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
		err = -EINVAL;
		goto free_irq;
	}

	/* Keep the IRQ disabled in hardware while we set everything up */
	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
		goto free_irq;
	}

	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];

	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
					NULL,
					dpaa2_switch_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(dev), dev);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_irq;
	}

	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
				DPSW_IRQ_INDEX_IF, mask);
	if (err) {
		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
		goto free_devm_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 1);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
		goto free_devm_irq;
	}

	return 0;

free_devm_irq:
	devm_free_irq(dev, irq->msi_desc->irq, dev);
free_irq:
	fsl_mc_free_irqs(sw_dev);
	return err;
}

/* Disable the DPSW interrupt in hardware and free the MC IRQs */
static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);

	fsl_mc_free_irqs(sw_dev);
}

/* switchdev SWITCHDEV_ATTR_ID_PORT_STP_STATE handler */
static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
						u8 state)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);

	return dpaa2_switch_port_set_stp_state(port_priv, state);
}

/* Enable/disable hardware address learning on a switch port. When learning
 * is disabled, also fast-age the addresses already learned on the port.
 */
static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	enum dpsw_learning_mode learn_mode;
	int err;

	if (enable)
		learn_mode = DPSW_LEARNING_MODE_HW;
	else
		learn_mode = DPSW_LEARNING_MODE_DIS;

	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, learn_mode);
	if (err)
		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);

	if (!enable)
		dpaa2_switch_port_fast_age(port_priv);

	return err;
}

/* Record the new broadcast/unknown-unicast flood settings of the port and
 * reprogram the egress flood domain of its FDB accordingly
 */
static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
				   struct switchdev_brport_flags flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (flags.mask & BR_BCAST_FLOOD)
		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);

	if (flags.mask & BR_FLOOD)
		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);

	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
}

/* Validate the bridge port flags before they are applied: only learning and
 * flooding flags are supported, and unicast/multicast flooding cannot be
 * configured independently of each other on this hardware.
 */
static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
					      struct switchdev_brport_flags flags,
					      struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
			   BR_MCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot configure multicast flooding independently of unicast");
			return -EINVAL;
		}
	}

	return 0;
}

/* Apply validated bridge port flags (learning and flooding) to hardware */
static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
					  struct switchdev_brport_flags flags,
					  struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
		if (err)
			return err;
	}

	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
		err = dpaa2_switch_port_flood(port_priv, flags);
		if (err)
			return err;
	}

	return 0;
}

/* switchdev port attribute dispatcher */
static int dpaa2_switch_port_attr_set(struct net_device *netdev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = dpaa2_switch_port_attr_stp_state_set(netdev,
							   attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!attr->u.vlan_filtering) {
NL_SET_ERR_MSG_MOD(extack, 1357 "The DPAA2 switch does not support VLAN-unaware operation"); 1358 return -EOPNOTSUPP; 1359 } 1360 break; 1361 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: 1362 err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack); 1363 break; 1364 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: 1365 err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack); 1366 break; 1367 default: 1368 err = -EOPNOTSUPP; 1369 break; 1370 } 1371 1372 return err; 1373 } 1374 1375 int dpaa2_switch_port_vlans_add(struct net_device *netdev, 1376 const struct switchdev_obj_port_vlan *vlan) 1377 { 1378 struct ethsw_port_priv *port_priv = netdev_priv(netdev); 1379 struct ethsw_core *ethsw = port_priv->ethsw_data; 1380 struct dpsw_attr *attr = ðsw->sw_attr; 1381 int err = 0; 1382 1383 /* Make sure that the VLAN is not already configured 1384 * on the switch port 1385 */ 1386 if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) 1387 return -EEXIST; 1388 1389 /* Check if there is space for a new VLAN */ 1390 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 1391 ðsw->sw_attr); 1392 if (err) { 1393 netdev_err(netdev, "dpsw_get_attributes err %d\n", err); 1394 return err; 1395 } 1396 if (attr->max_vlans - attr->num_vlans < 1) 1397 return -ENOSPC; 1398 1399 /* Check if there is space for a new VLAN */ 1400 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 1401 ðsw->sw_attr); 1402 if (err) { 1403 netdev_err(netdev, "dpsw_get_attributes err %d\n", err); 1404 return err; 1405 } 1406 if (attr->max_vlans - attr->num_vlans < 1) 1407 return -ENOSPC; 1408 1409 if (!port_priv->ethsw_data->vlans[vlan->vid]) { 1410 /* this is a new VLAN */ 1411 err = dpaa2_switch_add_vlan(port_priv, vlan->vid); 1412 if (err) 1413 return err; 1414 1415 port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL; 1416 } 1417 1418 return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags); 1419 } 1420 1421 static int 
dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
				 const unsigned char *addr)
{
	/* Returns 1 if @addr is found in the netdev's unicast (is_uc) or
	 * multicast address list, 0 otherwise
	 */
	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	list_for_each_entry(ha, &list->list, list) {
		if (ether_addr_equal(ha->addr, addr)) {
			netif_addr_unlock_bh(netdev);
			return 1;
		}
	}
	netif_addr_unlock_bh(netdev);
	return 0;
}

/* switchdev MDB add: program the multicast address into the FDB and mirror
 * it into the netdev's mc list; rolls back the FDB entry on dev_mc_add
 * failure
 */
static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* Check if address is already set on this port */
	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}

/* switchdev object-add dispatcher (VLANs and MDB entries) */
static int dpaa2_switch_port_obj_add(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_add(netdev,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_add(netdev,
						SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* Remove a VLAN from a switch port, and from the switch entirely when no
 * port remains a member of it
 */
static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		/* If we are deleting the PVID of a port, use VLAN 4095 instead
		 * as we are sure that neither the bridge nor the 8021q module
		 * will use it
		 */
		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
	}

	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;

		/* Delete VLAN from switch if it is no longer configured on
		 * any port
		 */
		for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
				return 0; /* Found a port member in VID */

		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;

		err = dpaa2_switch_dellink(ethsw, vid);
		if (err)
			return err;
	}

	return 0;
}

/* switchdev VLAN delete entry point; VLANs on the bridge device itself are
 * not offloaded
 */
int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}

/* switchdev MDB delete: remove the multicast address from the FDB and from
 * the netdev's mc list
 */
static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -ENOENT;

	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_del(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_del err %d\n", err);
		return err;
	}

	return err;
}

/* switchdev object-delete dispatcher (VLANs and MDB entries) */
static int dpaa2_switch_port_obj_del(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

/* Helper that routes a SWITCHDEV_PORT_ATTR_SET notification to
 * dpaa2_switch_port_attr_set() for our ports only
 */
static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
					    struct switchdev_notifier_port_attr_info *ptr)
{
	int err;

	err = switchdev_handle_port_attr_set(netdev, ptr,
					     dpaa2_switch_port_dev_check,
					     dpaa2_switch_port_attr_set);
	return notifier_from_errno(err);
}

/* Offload a port joining a bridge: share the FDB of the first DPAA2 port
 * already in the bridge, inherit the bridge learning state and rebuild the
 * egress flood domain. Refuses bridges that mix ports of different DPSW
 * instances.
 */
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
					 struct net_device *upper_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct ethsw_port_priv *other_port_priv;
	struct net_device *other_dev;
	struct list_head *iter;
	bool learn_ena;
	int err;

	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		other_port_priv = netdev_priv(other_dev);
		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
			netdev_err(netdev,
				   "Interface from a different DPSW is in the bridge already!\n");
			return -EINVAL;
		}
	}

	/* Delete the previously manually installed VLAN 1 */
	err = dpaa2_switch_port_del_vlan(port_priv, 1);
	if (err)
		return err;

	dpaa2_switch_port_set_fdb(port_priv, upper_dev);

	/* Inherit the initial bridge port learning state */
	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
	/* NOTE(review): the error from set_learning above is overwritten by
	 * the next call without being checked - confirm whether a failed
	 * learning setup should abort the join
	 */

	/* Setup the egress flood policy (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		goto err_egress_flood;

	return 0;

err_egress_flood:
	dpaa2_switch_port_set_fdb(port_priv, NULL);
	return err;
}

/* vlan_for_each() callback: kill the RX VLAN filter for @vid, using the
 * VLAN protocol of the vlan upper device when one exists
 */
static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
}

/* vlan_for_each() callback: re-install the RX VLAN filter for @vid */
static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
}

/* Offload a port leaving its bridge: move it back to a private FDB, restore
 * RX VLANs there, re-enable standalone flooding, disable learning and
 * re-install the default PVID
 */
static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* First of all, fast age any learn FDB addresses on this switch port */
	dpaa2_switch_port_fast_age(port_priv);

	/* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
	 * upper devices or otherwise from the FDB table that we are about to
	 * leave
	 */
	err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);

	dpaa2_switch_port_set_fdb(port_priv, NULL);

	/* Restore all RX VLANs into the new FDB table that we just joined */
	err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);

	/* Reset the flooding state to denote that this port can send any
	 * packet in standalone mode. With this, we are also ensuring that any
	 * later bridge join will have the flooding flag on.
	 */
	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Setup the egress flood policy (broadcast, unknown unicast).
	 * When the port is not under a bridge, only the CTRL interface is part
	 * of the flooding domain besides the actual port
	 */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Recreate the egress flood domain of the FDB that we just left */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
	if (err)
		return err;

	/* No HW learning when not under a bridge */
	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		return err;

	/* Add the VLAN 1 as PVID when not under a bridge. We need this since
	 * the dpaa2 switch interfaces are not capable to be VLAN unaware
	 */
	return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
					  BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
}

/* Refuse bridging while 8021q (VLAN) upper devices exist on the port */
static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	/* RCU read lock not necessary because we have write-side protection
	 * (rtnl_mutex), however a non-rcu iterator does not exist.
	 */
	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
		if (is_vlan_dev(upper_dev))
			return -EOPNOTSUPP;

	return 0;
}

/* netdevice notifier: validate bridge joins in PRECHANGEUPPER (VLAN-aware
 * bridges only, no 8021q uppers) and offload the join/leave in CHANGEUPPER
 */
static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			break;

		if (!br_vlan_enabled(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
			err = -EOPNOTSUPP;
			goto out;
		}

		err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot join a bridge while VLAN uppers are present");
			goto out;
		}

		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
			else
				err = dpaa2_switch_port_bridge_leave(netdev);
		}
		break;
	}

out:
	return notifier_from_errno(err);
}

/* Deferred-work context for FDB add/del notifications received in atomic
 * context; owns a copy of the fdb_info (including its addr) and a reference
 * on dev until the work runs
 */
struct ethsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

/* Workqueue handler: program/remove the user-added FDB entry in hardware
 * under RTNL, then release the resources taken in dpaa2_switch_port_event()
 */
static void dpaa2_switch_event_work(struct work_struct *work)
{
	struct ethsw_switchdev_event_work *switchdev_work =
		container_of(work, struct ethsw_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	int err;

	rtnl_lock();
	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
							   fdb_info->addr);
		else
			err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
							   fdb_info->addr);
		if (err)
			break;
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info, NULL);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
		else
			dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

/* Called under rcu_read_lock() */
static int dpaa2_switch_port_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct ethsw_port_priv *port_priv = netdev_priv(dev);
	struct ethsw_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (event == SWITCHDEV_PORT_ATTR_SET)
		return dpaa2_switch_port_attr_set_event(dev, ptr);

	if (!dpaa2_switch_port_dev_check(dev))
		return NOTIFY_DONE;

	/* FDB programming needs sleeping MC calls, so defer it to a work item */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);

		/* Take a reference on the device to avoid being freed. */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(ethsw->workqueue, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

/* Blocking-notifier dispatcher for switchdev object add/delete events */
static int dpaa2_switch_port_obj_event(unsigned long event,
				       struct net_device *netdev,
				       struct switchdev_notifier_port_obj_info *port_obj_info)
{
	int err = -EOPNOTSUPP;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
		break;
	}

	port_obj_info->handled = true;
	return notifier_from_errno(err);
}

/* Blocking switchdev notifier entry point */
static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
					    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
		return dpaa2_switch_port_obj_event(event, dev, ptr);
	case SWITCHDEV_PORT_ATTR_SET:
		return dpaa2_switch_port_attr_set_event(dev, ptr);
	}

	return NOTIFY_DONE;
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
						     const struct dpaa2_fd *fd)
{
	u16
fd_offset = dpaa2_fd_get_offset(fd); 1940 dma_addr_t addr = dpaa2_fd_get_addr(fd); 1941 u32 fd_length = dpaa2_fd_get_len(fd); 1942 struct device *dev = ethsw->dev; 1943 struct sk_buff *skb = NULL; 1944 void *fd_vaddr; 1945 1946 fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr); 1947 dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE, 1948 DMA_FROM_DEVICE); 1949 1950 skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE + 1951 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 1952 if (unlikely(!skb)) { 1953 dev_err(dev, "build_skb() failed\n"); 1954 return NULL; 1955 } 1956 1957 skb_reserve(skb, fd_offset); 1958 skb_put(skb, fd_length); 1959 1960 ethsw->buf_count--; 1961 1962 return skb; 1963 } 1964 1965 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, 1966 const struct dpaa2_fd *fd) 1967 { 1968 dpaa2_switch_free_fd(fq->ethsw, fd); 1969 } 1970 1971 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, 1972 const struct dpaa2_fd *fd) 1973 { 1974 struct ethsw_core *ethsw = fq->ethsw; 1975 struct ethsw_port_priv *port_priv; 1976 struct net_device *netdev; 1977 struct vlan_ethhdr *hdr; 1978 struct sk_buff *skb; 1979 u16 vlan_tci, vid; 1980 int if_id, err; 1981 1982 /* get switch ingress interface ID */ 1983 if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF; 1984 1985 if (if_id >= ethsw->sw_attr.num_ifs) { 1986 dev_err(ethsw->dev, "Frame received from unknown interface!\n"); 1987 goto err_free_fd; 1988 } 1989 port_priv = ethsw->ports[if_id]; 1990 netdev = port_priv->netdev; 1991 1992 /* build the SKB based on the FD received */ 1993 if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) { 1994 if (net_ratelimit()) { 1995 netdev_err(netdev, "Received invalid frame format\n"); 1996 goto err_free_fd; 1997 } 1998 } 1999 2000 skb = dpaa2_switch_build_linear_skb(ethsw, fd); 2001 if (unlikely(!skb)) 2002 goto err_free_fd; 2003 2004 skb_reset_mac_header(skb); 2005 2006 /* Remove the VLAN header if the packet that we just received has a vid 2007 * equal to the 
port PVIDs. Since the dpaa2-switch can operate only in 2008 * VLAN-aware mode and no alterations are made on the packet when it's 2009 * redirected/mirrored to the control interface, we are sure that there 2010 * will always be a VLAN header present. 2011 */ 2012 hdr = vlan_eth_hdr(skb); 2013 vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK; 2014 if (vid == port_priv->pvid) { 2015 err = __skb_vlan_pop(skb, &vlan_tci); 2016 if (err) { 2017 dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err); 2018 goto err_free_fd; 2019 } 2020 } 2021 2022 skb->dev = netdev; 2023 skb->protocol = eth_type_trans(skb, skb->dev); 2024 2025 /* Setup the offload_fwd_mark only if the port is under a bridge */ 2026 skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev); 2027 2028 netif_receive_skb(skb); 2029 2030 return; 2031 2032 err_free_fd: 2033 dpaa2_switch_free_fd(ethsw, fd); 2034 } 2035 2036 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw) 2037 { 2038 ethsw->features = 0; 2039 2040 if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6)) 2041 ethsw->features |= ETHSW_FEATURE_MAC_ADDR; 2042 } 2043 2044 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw) 2045 { 2046 struct dpsw_ctrl_if_attr ctrl_if_attr; 2047 struct device *dev = ethsw->dev; 2048 int i = 0; 2049 int err; 2050 2051 err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle, 2052 &ctrl_if_attr); 2053 if (err) { 2054 dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err); 2055 return err; 2056 } 2057 2058 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; 2059 ethsw->fq[i].ethsw = ethsw; 2060 ethsw->fq[i++].type = DPSW_QUEUE_RX; 2061 2062 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; 2063 ethsw->fq[i].ethsw = ethsw; 2064 ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF; 2065 2066 return 0; 2067 } 2068 2069 /* Free buffers acquired from the buffer pool or which were meant to 2070 * be released in the pool 2071 */ 2072 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 
*buf_array, int count) 2073 { 2074 struct device *dev = ethsw->dev; 2075 void *vaddr; 2076 int i; 2077 2078 for (i = 0; i < count; i++) { 2079 vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]); 2080 dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE, 2081 DMA_FROM_DEVICE); 2082 free_pages((unsigned long)vaddr, 0); 2083 } 2084 } 2085 2086 /* Perform a single release command to add buffers 2087 * to the specified buffer pool 2088 */ 2089 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid) 2090 { 2091 struct device *dev = ethsw->dev; 2092 u64 buf_array[BUFS_PER_CMD]; 2093 struct page *page; 2094 int retries = 0; 2095 dma_addr_t addr; 2096 int err; 2097 int i; 2098 2099 for (i = 0; i < BUFS_PER_CMD; i++) { 2100 /* Allocate one page for each Rx buffer. WRIOP sees 2101 * the entire page except for a tailroom reserved for 2102 * skb shared info 2103 */ 2104 page = dev_alloc_pages(0); 2105 if (!page) { 2106 dev_err(dev, "buffer allocation failed\n"); 2107 goto err_alloc; 2108 } 2109 2110 addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE, 2111 DMA_FROM_DEVICE); 2112 if (dma_mapping_error(dev, addr)) { 2113 dev_err(dev, "dma_map_single() failed\n"); 2114 goto err_map; 2115 } 2116 buf_array[i] = addr; 2117 } 2118 2119 release_bufs: 2120 /* In case the portal is busy, retry until successful or 2121 * max retries hit. 2122 */ 2123 while ((err = dpaa2_io_service_release(NULL, bpid, 2124 buf_array, i)) == -EBUSY) { 2125 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) 2126 break; 2127 2128 cpu_relax(); 2129 } 2130 2131 /* If release command failed, clean up and bail out. 
*/ 2132 if (err) { 2133 dpaa2_switch_free_bufs(ethsw, buf_array, i); 2134 return 0; 2135 } 2136 2137 return i; 2138 2139 err_map: 2140 __free_pages(page, 0); 2141 err_alloc: 2142 /* If we managed to allocate at least some buffers, 2143 * release them to hardware 2144 */ 2145 if (i) 2146 goto release_bufs; 2147 2148 return 0; 2149 } 2150 2151 static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw) 2152 { 2153 int *count = ðsw->buf_count; 2154 int new_count; 2155 int err = 0; 2156 2157 if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) { 2158 do { 2159 new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2160 if (unlikely(!new_count)) { 2161 /* Out of memory; abort for now, we'll 2162 * try later on 2163 */ 2164 break; 2165 } 2166 *count += new_count; 2167 } while (*count < DPAA2_ETHSW_NUM_BUFS); 2168 2169 if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS)) 2170 err = -ENOMEM; 2171 } 2172 2173 return err; 2174 } 2175 2176 static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw) 2177 { 2178 int *count, i; 2179 2180 for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) { 2181 count = ðsw->buf_count; 2182 *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid); 2183 2184 if (unlikely(*count < BUFS_PER_CMD)) 2185 return -ENOMEM; 2186 } 2187 2188 return 0; 2189 } 2190 2191 static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw) 2192 { 2193 u64 buf_array[BUFS_PER_CMD]; 2194 int ret; 2195 2196 do { 2197 ret = dpaa2_io_service_acquire(NULL, ethsw->bpid, 2198 buf_array, BUFS_PER_CMD); 2199 if (ret < 0) { 2200 dev_err(ethsw->dev, 2201 "dpaa2_io_service_acquire() = %d\n", ret); 2202 return; 2203 } 2204 dpaa2_switch_free_bufs(ethsw, buf_array, ret); 2205 2206 } while (ret); 2207 } 2208 2209 static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw) 2210 { 2211 struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 }; 2212 struct device *dev = ethsw->dev; 2213 struct fsl_mc_device *dpbp_dev; 2214 struct dpbp_attr dpbp_attrs; 2215 int err; 2216 2217 err = 
fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 2218 &dpbp_dev); 2219 if (err) { 2220 if (err == -ENXIO) 2221 err = -EPROBE_DEFER; 2222 else 2223 dev_err(dev, "DPBP device allocation failed\n"); 2224 return err; 2225 } 2226 ethsw->dpbp_dev = dpbp_dev; 2227 2228 err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id, 2229 &dpbp_dev->mc_handle); 2230 if (err) { 2231 dev_err(dev, "dpbp_open() failed\n"); 2232 goto err_open; 2233 } 2234 2235 err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2236 if (err) { 2237 dev_err(dev, "dpbp_reset() failed\n"); 2238 goto err_reset; 2239 } 2240 2241 err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2242 if (err) { 2243 dev_err(dev, "dpbp_enable() failed\n"); 2244 goto err_enable; 2245 } 2246 2247 err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle, 2248 &dpbp_attrs); 2249 if (err) { 2250 dev_err(dev, "dpbp_get_attributes() failed\n"); 2251 goto err_get_attr; 2252 } 2253 2254 dpsw_ctrl_if_pools_cfg.num_dpbp = 1; 2255 dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id; 2256 dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE; 2257 dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0; 2258 2259 err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle, 2260 &dpsw_ctrl_if_pools_cfg); 2261 if (err) { 2262 dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n"); 2263 goto err_get_attr; 2264 } 2265 ethsw->bpid = dpbp_attrs.id; 2266 2267 return 0; 2268 2269 err_get_attr: 2270 dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2271 err_enable: 2272 err_reset: 2273 dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle); 2274 err_open: 2275 fsl_mc_object_free(dpbp_dev); 2276 return err; 2277 } 2278 2279 static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw) 2280 { 2281 dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2282 dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle); 2283 fsl_mc_object_free(ethsw->dpbp_dev); 2284 } 2285 2286 static int 
dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		ethsw->fq[i].store =
			dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
					      ethsw->dev);
		if (!ethsw->fq[i].store) {
			dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
			/* Unwind the stores created so far */
			while (--i >= 0)
				dpaa2_io_store_destroy(ethsw->fq[i].store);
			return -ENOMEM;
		}
	}

	return 0;
}

/* Free all per-queue dequeue stores (reverse of dpaa2_switch_alloc_rings) */
static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_store_destroy(ethsw->fq[i].store);
}

/* Issue a pull-dequeue command for the FQ into its software store */
static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
{
	int err, retries = 0;

	/* Try to pull from the FQ while the portal is busy and we didn't hit
	 * the maximum number of retries
	 */
	do {
		err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

	if (unlikely(err))
		dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}

/* Consume all frames pull-dequeued into the store.
 * Returns the number of frames processed, or -ETIMEDOUT if the hardware
 * never produced a valid dequeue entry.
 */
static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
{
	struct ethsw_core *ethsw = fq->ethsw;
	int cleaned = 0, is_last;
	struct dpaa2_dq *dq;
	int retries = 0;

	do {
		/* Get the next available FD from the store */
		dq = dpaa2_io_store_next(fq->store, &is_last);
		if (unlikely(!dq)) {
			if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
				dev_err_once(ethsw->dev,
					     "No valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		/* Dispatch by queue type: Rx frames vs Tx confirmations */
		if (fq->type == DPSW_QUEUE_RX)
			dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
		else
			dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
		cleaned++;

	} while (!is_last);

	return cleaned;
}
/* NAPI poll routine */
static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
{
	int err, cleaned = 0, store_cleaned, work_done;
	struct dpaa2_switch_fq *fq;
	int retries = 0;

	fq = container_of(napi, struct dpaa2_switch_fq, napi);

	do {
		err = dpaa2_switch_pull_fq(fq);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_switch_refill_bp(fq->ethsw);

		store_cleaned = dpaa2_switch_store_consume(fq);
		cleaned += store_cleaned;

		/* Budget exhausted: stay in polling mode */
		if (cleaned >= budget) {
			work_done = budget;
			goto out;
		}

	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and re-enable
	 * data availability notifications
	 */
	napi_complete_done(napi, cleaned);
	do {
		err = dpaa2_io_service_rearm(NULL, &fq->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

	work_done = max(cleaned, 1);
out:

	return work_done;
}

/* FQ data availability notification callback: schedule NAPI on this queue */
static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_switch_fq *fq;

	fq = container_of(nctx, struct dpaa2_switch_fq, nctx);

	napi_schedule(&fq->napi);
}

/* Register a DPIO notification context for each control interface queue and
 * point the DPSW queue at that DPIO destination.
 */
static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_queue_cfg queue_cfg;
	struct dpaa2_io_notification_ctx *nctx;
	int err, i, j;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		nctx = &ethsw->fq[i].nctx;

		/* Register a new software context for the FQID.
		 * By using NULL as the first parameter, we specify that we do
		 * not care on which cpu are interrupts received for this queue
		 */
		nctx->is_cdan = 0;
		nctx->id = ethsw->fq[i].fqid;
		nctx->desired_cpu = DPAA2_IO_ANY_CPU;
		nctx->cb = dpaa2_switch_fqdan_cb;
		err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
		if (err) {
			/* NOTE(review): every registration failure is mapped
			 * to -EPROBE_DEFER, discarding the original error —
			 * presumably because the DPIO may simply not be
			 * probed yet; confirm this is intentional.
			 */
			err = -EPROBE_DEFER;
			goto err_register;
		}

		queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
				    DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
		queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
		queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
		queue_cfg.dest_cfg.priority = 0;
		queue_cfg.user_ctx = nctx->qman64;

		err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     ethsw->fq[i].type,
					     &queue_cfg);
		if (err)
			goto err_set_queue;
	}

	return 0;

err_set_queue:
	/* Deregister the context registered in the failing iteration */
	dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
err_register:
	for (j = 0; j < i; j++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
					    ethsw->dev);

	return err;
}

/* Deregister all DPIO notification contexts */
static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
					    ethsw->dev);
}

/* Bring up the whole control interface datapath: FQs, buffer pool, dequeue
 * stores and DPIO bindings, then enable the interface.
 */
static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
{
	int err;

	/* setup FQs for Rx and Tx Conf */
	err = dpaa2_switch_setup_fqs(ethsw);
	if (err)
		return err;

	/* setup the buffer pool needed on the Rx path */
	err = dpaa2_switch_setup_dpbp(ethsw);
	if (err)
		return err;

	err = dpaa2_switch_seed_bp(ethsw);
	if (err)
		goto err_free_dpbp;

	err = dpaa2_switch_alloc_rings(ethsw);
	if (err)
		goto err_drain_dpbp;

	err = dpaa2_switch_setup_dpio(ethsw);
	if (err)
		goto err_destroy_rings;

	err =
dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
		goto err_deregister_dpio;
	}

	return 0;

err_deregister_dpio:
	dpaa2_switch_free_dpio(ethsw);
err_destroy_rings:
	dpaa2_switch_destroy_rings(ethsw);
err_drain_dpbp:
	dpaa2_switch_drain_bp(ethsw);
err_free_dpbp:
	dpaa2_switch_free_dpbp(ethsw);

	return err;
}

/* Open and sanity-check the DPSW object (API version, CPU traffic support),
 * then bring every switch interface to a known state: disabled, STP
 * forwarding, and removed from the default VLAN.
 */
static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	struct dpsw_vlan_if_cfg vcfg = {0};
	struct dpsw_tci_cfg tci_cfg = {0};
	struct dpsw_stp_cfg stp_cfg;
	int err;
	u16 i;

	ethsw->dev_id = sw_dev->obj_desc.id;

	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_open err %d\n", err);
		return err;
	}

	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		dev_err(dev, "dpsw_get_attributes err %d\n", err);
		goto err_close;
	}

	err = dpsw_get_api_version(ethsw->mc_io, 0,
				   &ethsw->major,
				   &ethsw->minor);
	if (err) {
		dev_err(dev, "dpsw_get_api_version err %d\n", err);
		goto err_close;
	}

	/* Minimum supported DPSW version check */
	if (ethsw->major < DPSW_MIN_VER_MAJOR ||
	    (ethsw->major == DPSW_MIN_VER_MAJOR &&
	     ethsw->minor < DPSW_MIN_VER_MINOR)) {
		dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
			ethsw->major, ethsw->minor);
		err = -EOPNOTSUPP;
		goto err_close;
	}

	if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
		err = -EOPNOTSUPP;
		goto err_close;
	}

	dpaa2_switch_detect_features(ethsw);

	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_reset err %d\n", err);
		goto err_close;
	}

	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
	stp_cfg.state = DPSW_STP_STATE_FORWARDING;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
		if (err) {
			dev_err(dev, "dpsw_if_disable err %d\n", err);
			goto err_close;
		}

		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
				      &stp_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
				err, i);
			goto err_close;
		}

		/* Switch starts with all ports configured to VLAN 1.
		 * Need to remove this setting to allow configuration at
		 * bridge join
		 */
		vcfg.num_ifs = 1;
		vcfg.if_id[0] = i;
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
						   DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
				err);
			goto err_close;
		}

		/* Use VID 4095 (the 802.1Q reserved VID) as the port TCI */
		tci_cfg.vlan_id = 4095;
		err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_tci err %d\n", err);
			goto err_close;
		}

		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
			goto err_close;
		}
	}

	/* All interfaces are out of VLAN 1 now; drop the VLAN itself */
	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
	if (err) {
		dev_err(dev, "dpsw_vlan_remove err %d\n", err);
		goto err_close;
	}

	ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
						   WQ_MEM_RECLAIM, "ethsw",
						   ethsw->sw_attr.id);
	if (!ethsw->workqueue) {
		err = -ENOMEM;
		goto err_close;
	}

	/* Remove the FDB table created by default (id 0) */
	err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
	if (err)
		goto err_destroy_ordered_workqueue;

	err = dpaa2_switch_ctrl_if_setup(ethsw);
	if (err)
		goto err_destroy_ordered_workqueue;

	return 0;

err_destroy_ordered_workqueue:
	destroy_workqueue(ethsw->workqueue);

err_close:
	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	return err;
}

/* Per-port initialization: Tx qdid, a private FDB table, the default VLAN,
 * the egress flooding domains and an ACL table bound to the port.
 */
static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = DEFAULT_VLAN_ID,
		.flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
	};
	struct net_device *netdev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct
dpsw_fdb_cfg fdb_cfg = {0};
	struct dpsw_acl_if_cfg acl_if_cfg;
	struct dpsw_if_attr dpsw_if_attr;
	struct dpaa2_switch_fdb *fdb;
	struct dpsw_acl_cfg acl_cfg;
	u16 fdb_id;
	int err;

	/* Get the Tx queue for this specific port */
	err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     port_priv->idx, &dpsw_if_attr);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
		return err;
	}
	port_priv->tx_qdid = dpsw_if_attr.qdid;

	/* Create a FDB table for this particular switch port, splitting the
	 * total FDB capacity evenly between the ports
	 */
	fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
	err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
			   &fdb_id, &fdb_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
		return err;
	}

	/* Find an unused dpaa2_switch_fdb structure and use it.
	 * NOTE(review): the NULL return of dpaa2_switch_fdb_get_unused() is
	 * not checked here — presumably one entry per port is guaranteed by
	 * the allocation in probe; confirm.
	 */
	fdb = dpaa2_switch_fdb_get_unused(ethsw);
	fdb->fdb_id = fdb_id;
	fdb->in_use = true;
	fdb->bridge_dev = NULL;
	port_priv->fdb = fdb;

	/* We need to add VLAN 1 as the PVID on this port until it is under a
	 * bridge since the DPAA2 switch is not able to handle the traffic in a
	 * VLAN unaware fashion
	 */
	err = dpaa2_switch_port_vlans_add(netdev, &vlan);
	if (err)
		return err;

	/* Setup the egress flooding domains (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Create an ACL table to be used by this switch port */
	acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
	err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
			   &port_priv->acl_tbl, &acl_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add err %d\n", err);
		return err;
	}

	/* Bind the new ACL table to this port only */
	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0,
ethsw->dpsw_handle,
			      port_priv->acl_tbl, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		/* Roll back the ACL table created above */
		dpsw_acl_remove(ethsw->mc_io, 0, ethsw->dpsw_handle,
				port_priv->acl_tbl);
	}

	return err;
}

/* Close the DPSW object. */
static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err)
		dev_warn(dev, "dpsw_close err %d\n", err);
}

/* Tear down the control interface datapath; reverse of
 * dpaa2_switch_ctrl_if_setup().
 */
static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
{
	dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	dpaa2_switch_free_dpio(ethsw);
	dpaa2_switch_destroy_rings(ethsw);
	dpaa2_switch_drain_bp(ethsw);
	dpaa2_switch_free_dpbp(ethsw);
}

/* Device remove: undo everything done in dpaa2_switch_probe(). */
static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
	struct ethsw_port_priv *port_priv;
	struct ethsw_core *ethsw;
	struct device *dev;
	int i;

	dev = &sw_dev->dev;
	ethsw = dev_get_drvdata(dev);

	dpaa2_switch_ctrl_if_teardown(ethsw);

	dpaa2_switch_teardown_irqs(sw_dev);

	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		port_priv = ethsw->ports[i];
		unregister_netdev(port_priv->netdev);
		free_netdev(port_priv->netdev);
	}

	kfree(ethsw->fdbs);
	kfree(ethsw->ports);

	dpaa2_switch_takedown(sw_dev);

	destroy_workqueue(ethsw->workqueue);

	fsl_mc_portal_free(ethsw->mc_io);

	kfree(ethsw);

	dev_set_drvdata(dev, NULL);

	return 0;
}

/* Allocate and initialize one switch port net_device (not yet registered;
 * registration happens at the end of probe).
 */
static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
				   u16 port_idx)
{
	struct ethsw_port_priv *port_priv;
	struct device *dev = ethsw->dev;
	struct net_device *port_netdev;
	int err;

	port_netdev =
alloc_etherdev(sizeof(struct ethsw_port_priv));
	if (!port_netdev) {
		dev_err(dev, "alloc_etherdev error\n");
		return -ENOMEM;
	}

	port_priv = netdev_priv(port_netdev);
	port_priv->netdev = port_netdev;
	port_priv->ethsw_data = ethsw;

	port_priv->idx = port_idx;
	port_priv->stp_state = BR_STATE_FORWARDING;

	SET_NETDEV_DEV(port_netdev, dev);
	port_netdev->netdev_ops = &dpaa2_switch_port_ops;
	port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;

	port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;

	/* Flood broadcast and unknown unicast by default */
	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Set MTU limits */
	port_netdev->min_mtu = ETH_MIN_MTU;
	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;

	/* Populate the private port structure so that later calls to
	 * dpaa2_switch_port_init() can use it.
	 */
	ethsw->ports[port_idx] = port_priv;

	/* The DPAA2 switch's ingress path depends on the VLAN table,
	 * thus we are not able to disable VLAN filtering.
2828 */ 2829 port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER; 2830 2831 err = dpaa2_switch_port_init(port_priv, port_idx); 2832 if (err) 2833 goto err_port_probe; 2834 2835 err = dpaa2_switch_port_set_mac_addr(port_priv); 2836 if (err) 2837 goto err_port_probe; 2838 2839 err = dpaa2_switch_port_set_learning(port_priv, false); 2840 if (err) 2841 goto err_port_probe; 2842 2843 return 0; 2844 2845 err_port_probe: 2846 free_netdev(port_netdev); 2847 ethsw->ports[port_idx] = NULL; 2848 2849 return err; 2850 } 2851 2852 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) 2853 { 2854 struct device *dev = &sw_dev->dev; 2855 struct ethsw_core *ethsw; 2856 int i, err; 2857 2858 /* Allocate switch core*/ 2859 ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL); 2860 2861 if (!ethsw) 2862 return -ENOMEM; 2863 2864 ethsw->dev = dev; 2865 ethsw->iommu_domain = iommu_get_domain_for_dev(dev); 2866 dev_set_drvdata(dev, ethsw); 2867 2868 err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 2869 ðsw->mc_io); 2870 if (err) { 2871 if (err == -ENXIO) 2872 err = -EPROBE_DEFER; 2873 else 2874 dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); 2875 goto err_free_drvdata; 2876 } 2877 2878 err = dpaa2_switch_init(sw_dev); 2879 if (err) 2880 goto err_free_cmdport; 2881 2882 ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports), 2883 GFP_KERNEL); 2884 if (!(ethsw->ports)) { 2885 err = -ENOMEM; 2886 goto err_takedown; 2887 } 2888 2889 ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs), 2890 GFP_KERNEL); 2891 if (!ethsw->fdbs) { 2892 err = -ENOMEM; 2893 goto err_free_ports; 2894 } 2895 2896 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 2897 err = dpaa2_switch_probe_port(ethsw, i); 2898 if (err) 2899 goto err_free_netdev; 2900 } 2901 2902 /* Add a NAPI instance for each of the Rx queues. 
The first port's 2903 * net_device will be associated with the instances since we do not have 2904 * different queues for each switch ports. 2905 */ 2906 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) 2907 netif_napi_add(ethsw->ports[0]->netdev, 2908 ðsw->fq[i].napi, dpaa2_switch_poll, 2909 NAPI_POLL_WEIGHT); 2910 2911 err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); 2912 if (err) { 2913 dev_err(ethsw->dev, "dpsw_enable err %d\n", err); 2914 goto err_free_netdev; 2915 } 2916 2917 /* Setup IRQs */ 2918 err = dpaa2_switch_setup_irqs(sw_dev); 2919 if (err) 2920 goto err_stop; 2921 2922 /* Register the netdev only when the entire setup is done and the 2923 * switch port interfaces are ready to receive traffic 2924 */ 2925 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { 2926 err = register_netdev(ethsw->ports[i]->netdev); 2927 if (err < 0) { 2928 dev_err(dev, "register_netdev error %d\n", err); 2929 goto err_unregister_ports; 2930 } 2931 } 2932 2933 return 0; 2934 2935 err_unregister_ports: 2936 for (i--; i >= 0; i--) 2937 unregister_netdev(ethsw->ports[i]->netdev); 2938 dpaa2_switch_teardown_irqs(sw_dev); 2939 err_stop: 2940 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); 2941 err_free_netdev: 2942 for (i--; i >= 0; i--) 2943 free_netdev(ethsw->ports[i]->netdev); 2944 kfree(ethsw->fdbs); 2945 err_free_ports: 2946 kfree(ethsw->ports); 2947 2948 err_takedown: 2949 dpaa2_switch_takedown(sw_dev); 2950 2951 err_free_cmdport: 2952 fsl_mc_portal_free(ethsw->mc_io); 2953 2954 err_free_drvdata: 2955 kfree(ethsw); 2956 dev_set_drvdata(dev, NULL); 2957 2958 return err; 2959 } 2960 2961 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = { 2962 { 2963 .vendor = FSL_MC_VENDOR_FREESCALE, 2964 .obj_type = "dpsw", 2965 }, 2966 { .vendor = 0x0 } 2967 }; 2968 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); 2969 2970 static struct fsl_mc_driver dpaa2_switch_drv = { 2971 .driver = { 2972 .name = KBUILD_MODNAME, 2973 .owner = THIS_MODULE, 2974 }, 2975 
	.probe = dpaa2_switch_probe,
	.remove = dpaa2_switch_remove,
	.match_id_table = dpaa2_switch_match_id_table
};

static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
	.notifier_call = dpaa2_switch_port_netdevice_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_nb = {
	.notifier_call = dpaa2_switch_port_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
	.notifier_call = dpaa2_switch_port_blocking_event,
};

/* Register the netdevice and switchdev notifiers, unwinding on failure. */
static int dpaa2_switch_register_notifiers(void)
{
	int err;

	err = register_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
		return err;
	}

	err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
		goto err_switchdev_nb;
	}

	err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&dpaa2_switch_port_nb);

	return err;
}

/* Unregister the notifiers in reverse order of registration. */
static void dpaa2_switch_unregister_notifiers(void)
{
	int err;

	err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
		       err);

	err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);

	err =
unregister_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
}

/* Module entry point: register the fsl-mc driver, then the notifiers. */
static int __init dpaa2_switch_driver_init(void)
{
	int err;

	err = fsl_mc_driver_register(&dpaa2_switch_drv);
	if (err)
		return err;

	err = dpaa2_switch_register_notifiers();
	if (err) {
		fsl_mc_driver_unregister(&dpaa2_switch_drv);
		return err;
	}

	return 0;
}

/* Module exit: tear down in reverse order of dpaa2_switch_driver_init(). */
static void __exit dpaa2_switch_driver_exit(void)
{
	dpaa2_switch_unregister_notifiers();
	fsl_mc_driver_unregister(&dpaa2_switch_drv);
}

module_init(dpaa2_switch_driver_init);
module_exit(dpaa2_switch_driver_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");