/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <net/switchdev.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"

static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds)
{
	return DSA_TAG_PROTO_BRCM;
}

static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Enable the IMP Port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (!((1 << i) & ds->enabled_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= (1 << cpu_port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
	}
}

static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception of Broadcom tags for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}
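
/* Background note: the IMP port is the switch's In-band Management Port,
 * i.e. the port wired to the host Ethernet MAC. With the Broadcom tag
 * enabled above, frames exchanged with the CPU carry the per-port tag that
 * the DSA_TAG_PROTO_BRCM tagger (net/dsa/tag_brcm.c) inserts and parses.
 */
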
static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = core_readl(priv, CORE_EEE_EN_CTRL);
	if (enable)
		reg |= 1 << port;
	else
		reg &= ~(1 << port);
	core_writel(priv, reg, CORE_EEE_EN_CTRL);
}

static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}

static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}

static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY will
			 * be in the halted state, and phy_start() will call
			 * resume.
			 *
			 * The resume path does not reconfigure autoneg
			 * settings, and since we hard-reset the PHY manually
			 * here, we need to reset its state machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one, to be in the default VLAN;
	 * if the port is a bridge member, restore the membership it had
	 * prior to being brought down.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->dev->ports[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}

/* Returns 0 if EEE was not enabled, or 1 otherwise
 */
static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
			    struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	int ret;

	p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);

	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	bcm_sf2_eee_enable_set(ds, port, true);

	return 1;
}

static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
			      struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	u32 reg;

	reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(reg & (1 << port));

	return 0;
}

static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
			      struct phy_device *phydev,
			      struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcm_sf2_eee_enable_set(ds, port, false);
	} else {
		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
		if (!p->eee_enabled)
			return -EOPNOTSUPP;
	}

	return 0;
}

static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
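
/* A worked example (illustrative values) of the indirect access encoding
 * used above: reading MII register 2 (MII_PHYSID1) of the pseudo-PHY first
 * writes the PHY address to core offset 0x70 << 2 = 0x1c0, then accesses
 * page 0x80 at offset regnum << 1, i.e. ((0x80 << 8) | (2 << 1)) << 2 =
 * 0x20010.
 */
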
static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from the Broadcom pseudo-PHY address, else send
	 * them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}

static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address, else
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);

	return 0;
}

static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}

static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	const char *phy_mode_str;
	int mode;
	unsigned int port_num;
	int ret;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0) {
			ret = of_property_read_string(port, "phy-mode",
						      &phy_mode_str);
			if (ret < 0)
				continue;

			if (!strcasecmp(phy_mode_str, "internal"))
				priv->int_phy_mask |= 1 << port_num;
		}

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;
	}
}
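
/* An illustrative port node (not taken from a real DTS) that the parser
 * above would match; only the "reg" and "phy-mode" properties are consumed
 * here:
 *
 *	port@0 {
 *		reg = <0>;
 *		phy-mode = "internal";
 *	};
 */
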
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo-PHY as needing special treatment; all
	 * other PHY reads/writes go to the master MDIO bus controller that
	 * comes with this switch, backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	if (dn)
		err = of_mdiobus_register(priv->slave_mii_bus, dn);
	else
		err = mdiobus_register(priv->slave_mii_bus);

	if (err)
		of_node_put(dn);

	return err;
}

static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	if (priv->master_mii_dn)
		of_node_put(priv->master_mii_dn);
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */

	return priv->hw_params.gphy_rev;
}
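
/* For example, a (hypothetical) gphy_rev of 0xa0b2 would be handed to the
 * PHY driver as integrated PHY revision 0xa0 with patch level 0xb2, per the
 * REG_PHY_REVISION layout described above.
 */
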
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fall through */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	if (!phydev->is_pseudo_fixed_link)
		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
}

static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* The MoCA port is special as we do not get link status from
	 * CORE_LNKSTS, which means that we need to force the link at the
	 * port override level to get the data to flow. We use what the
	 * interrupt handler determined earlier.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some versions of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and makes it go into the PHY_FORCING state
		 * instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].netdev);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all physically present ports, including the IMP port;
	 * the other ones have already been disabled during
	 * bcm_sf2_sw_setup()
	 */
	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->enabled_port_mask ||
		    dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}

static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->enabled_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
	}

	return 0;
}

static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Get the parent device WoL settings */
	p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}

static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	struct ethtool_wolinfo pwol;

	p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}

static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 10;
	u32 reg;

	do {
		reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
		if (!(reg & ARLA_VTBL_STDN))
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}

static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
{
	core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);

	return bcm_sf2_vlan_op_wait(priv);
}

static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Clear all VLANs */
	bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);

	for (port = 0; port < priv->hw_params.num_ports; port++) {
		if (!((1 << port) & ds->enabled_port_mask))
			continue;

		core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
	}
}

static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if ((1 << port) & ds->enabled_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	bcm_sf2_sw_configure_vlan(ds);

	return 0;
}

/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */
#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)
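
/* For instance (illustrative values), page 0x10, register 0x02 maps to core
 * offset (0x10 << 10) | (0x02 << 2) = 0x4008.
 */
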
static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static struct b53_io_ops bcm_sf2_io_ops = {
	.read8 = bcm_sf2_core_read8,
	.read16 = bcm_sf2_core_read16,
	.read32 = bcm_sf2_core_read32,
	.read48 = bcm_sf2_core_read64,
	.read64 = bcm_sf2_core_read64,
	.write8 = bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};

static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = BCM7445_DEVICE_ID;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;

	/* Override the parts that are non-standard wrt. normal b53 devices */
	memcpy(ops, ds->ops, sizeof(*ops));
	ds->ops = ops;
	ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
	ds->ops->setup = bcm_sf2_sw_setup;
	ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
	ds->ops->adjust_link = bcm_sf2_sw_adjust_link;
	ds->ops->fixed_link_update = bcm_sf2_sw_fixed_link_update;
	ds->ops->suspend = bcm_sf2_sw_suspend;
	ds->ops->resume = bcm_sf2_sw_resume;
	ds->ops->get_wol = bcm_sf2_sw_get_wol;
	ds->ops->set_wol = bcm_sf2_sw_set_wol;
	ds->ops->port_enable = bcm_sf2_port_setup;
	ds->ops->port_disable = bcm_sf2_port_disable;
	ds->ops->get_eee = bcm_sf2_sw_get_eee;
	ds->ops->set_eee = bcm_sf2_sw_set_eee;

	/* Avoid having DSA free our slave MDIO bus (checking for
	 * ds->slave_mii_bus and ds->ops->phy_read being non-NULL)
	 */
	ds->ops->phy_read = NULL;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	bcm_sf2_identify_ports(priv, dn->child);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}

static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* Disable all ports and interrupts */
	priv->wol_ports_mask = 0;
	bcm_sf2_sw_suspend(priv->dev->ds);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}

static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);

static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);

static struct platform_driver bcm_sf2_driver = {
	.probe = bcm_sf2_sw_probe,
	.remove = bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");