/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"

static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, offset);
}

static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}

static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}

static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	b53_disable_port(ds, port, phy);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}

static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}

static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from Broadcom pseudo-PHY address, else, send
	 * them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}

static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address, else,
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);

	return 0;
}

static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}

static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	int mode;
	unsigned int port_num;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0)
			continue;

		if (mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;
	}
}
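
/* Illustrative device tree fragment (not taken from any specific board, and
 * showing only the properties this parser actually reads: "reg", "phy-mode"
 * and "brcm,use-bcm-hdr"); the surrounding node layout follows the usual
 * DSA-style port container and is assumed here for the example:
 *
 *	ports {
 *		port@0 {
 *			reg = <0>;
 *			phy-mode = "internal";
 *			brcm,use-bcm-hdr;
 *		};
 *		port@1 {
 *			reg = <1>;
 *			phy-mode = "moca";
 *		};
 *	};
 */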

static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo-PHY as needing special treatment; all other
	 * PHY reads/writes go to the master MDIO bus controller that comes
	 * with this switch, backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	if (dn)
		err = of_mdiobus_register(priv->slave_mii_bus, dn);
	else
		err = mdiobus_register(priv->slave_mii_bus);

	if (err)
		of_node_put(dn);

	return err;
}

static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	if (priv->master_mii_dn)
		of_node_put(priv->master_mii_dn);
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */

	return priv->hw_params.gphy_rev;
}

static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);

	if (!phydev->is_pseudo_fixed_link)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 duplex, pause, offset;
	u32 reg;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We use the link state that the
	 * interrupt handler determined earlier.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some versions of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and makes it go into the PHY_FORCING state
		 * instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].slave);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	reg = core_readl(priv, offset);
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, offset);

	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}

static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all ports physically present, including the IMP port; the
	 * other ones have already been disabled during bcm_sf2_sw_setup().
	 */
	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}

static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
	}

	bcm_sf2_enable_acb(ds);

	return 0;
}

static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Get the parent device WoL settings */
	p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}

static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	struct ethtool_wolinfo pwol;

	p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}

static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	b53_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return 0;
}

/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */
#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)
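
/* Worked example (illustrative only, the values are made up): page 0x28,
 * register 0x10 translates to (0x28 << 10) | (0x10 << 2) = 0xa040; the
 * core register accessors may then further adjust this offset by the
 * per-chip core_reg_align factor (see bcm_sf2.h).
 */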

static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8	= bcm_sf2_core_read8,
	.read16	= bcm_sf2_core_read16,
	.read32	= bcm_sf2_core_read32,
	.read48	= bcm_sf2_core_read64,
	.read64	= bcm_sf2_core_read64,
	.write8	= bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};

static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol	= b53_get_tag_protocol,
	.setup			= bcm_sf2_sw_setup,
	.get_strings		= b53_get_strings,
	.get_ethtool_stats	= b53_get_ethtool_stats,
	.get_sset_count		= b53_get_sset_count,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_prepare	= b53_vlan_prepare,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};

struct bcm_sf2_of_data {
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
	unsigned int num_cfp_rules;
};

/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};

static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};

static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};

static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};

static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.8",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);

static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;
	priv->num_cfp_rules = data->num_cfp_rules;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);
	mutex_init(&priv->cfp.lock);

	/* CFP rule #0 cannot be used for specific classifications, flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);
	set_bit(0, priv->cfp.unique);

	bcm_sf2_identify_ports(priv, dn->child);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
				  SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}

static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* Disable all ports and interrupts */
	priv->wol_ports_mask = 0;
	bcm_sf2_sw_suspend(priv->dev->ds);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}

static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);

static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");