/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <net/switchdev.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"

static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds)
{
	return DSA_TAG_PROTO_BRCM;
}

static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Enable the IMP Port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (!((1 << i) & ds->enabled_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= (1 << cpu_port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
	}
}

static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port)
{
	u32 reg, val;

	/* Resolve which bit controls the Broadcom tag */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception of Broadcom tags for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
}

static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	bcm_sf2_brcm_hdr_setup(priv, port);

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, offset);
}

static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = core_readl(priv, CORE_EEE_EN_CTRL);
	if (enable)
		reg |= 1 << port;
	else
		reg &= ~(1 << port);
	core_writel(priv, reg, CORE_EEE_EN_CTRL);
}

static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}

static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}

static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		bcm_sf2_brcm_hdr_setup(priv, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < 8; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one, to be in the default VLAN;
	 * if it is a member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->dev->ports[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}

/* Returns 0 if EEE was not enabled, or 1 otherwise
 */
static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
			    struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	int ret;

	p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);

	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	bcm_sf2_eee_enable_set(ds, port, true);

	return 1;
}

static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
			      struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	u32 reg;

	reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(reg & (1 << port));

	return 0;
}

static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
			      struct phy_device *phydev,
			      struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcm_sf2_eee_enable_set(ds, port, false);
	} else {
		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
		if (!p->eee_enabled)
			return -EOPNOTSUPP;
	}

	return 0;
}

static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}

static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from Broadcom pseudo-PHY address, else, send
	 * them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}

static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address, else,
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);

	return 0;
}

static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}

static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	const char *phy_mode_str;
	int mode;
	unsigned int port_num;
	int ret;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0) {
			ret = of_property_read_string(port, "phy-mode",
						      &phy_mode_str);
			if (ret < 0)
				continue;

			if (!strcasecmp(phy_mode_str, "internal"))
				priv->int_phy_mask |= 1 << port_num;
		}

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;
	}
}

static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo PHY as needing special treatment and would
	 * otherwise make all other PHY read/writes go to the master MDIO bus
	 * controller that comes with this switch backed by the "mdio-unimac"
	 * driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	if (dn)
		err = of_mdiobus_register(priv->slave_mii_bus, dn);
	else
		err = mdiobus_register(priv->slave_mii_bus);

	if (err)
		of_node_put(dn);

	return err;
}

static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	if (priv->master_mii_dn)
		of_node_put(priv->master_mii_dn);
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */

	return priv->hw_params.gphy_rev;
}

static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fall through */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);

	if (!phydev->is_pseudo_fixed_link)
		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
}

static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 duplex, pause, offset;
	u32 reg;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We use what the interrupt handler
	 * determined before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some versions of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and makes it go into the PHY_FORCING state
		 * instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].netdev);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	reg = core_readl(priv, offset);
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, offset);

	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all ports physically present including the IMP
	 * port, the other ones have already been disabled during
	 * bcm_sf2_sw_setup
	 */
	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->enabled_port_mask ||
		    dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}

static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->enabled_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
	}

	return 0;
}

static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Get the parent device WoL settings */
	p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}

static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	struct ethtool_wolinfo pwol;

	p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}

static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 10;
	u32 reg;

	do {
		reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
		if (!(reg & ARLA_VTBL_STDN))
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}

static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
{
	core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);

	return bcm_sf2_vlan_op_wait(priv);
}

static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Clear all VLANs */
	bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);

	for (port = 0; port < priv->hw_params.num_ports; port++) {
		if (!((1 << port) & ds->enabled_port_mask))
			continue;

		core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
	}
}

static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if ((1 << port) & ds->enabled_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	bcm_sf2_sw_configure_vlan(ds);

	return 0;
}

/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */
#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)

static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static struct b53_io_ops bcm_sf2_io_ops = {
	.read8 = bcm_sf2_core_read8,
	.read16 = bcm_sf2_core_read16,
	.read32 = bcm_sf2_core_read32,
	.read48 = bcm_sf2_core_read64,
	.read64 = bcm_sf2_core_read64,
	.write8 = bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};

static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol = bcm_sf2_sw_get_tag_protocol,
	.setup = bcm_sf2_sw_setup,
	.get_strings = b53_get_strings,
	.get_ethtool_stats = b53_get_ethtool_stats,
	.get_sset_count = b53_get_sset_count,
	.get_phy_flags = bcm_sf2_sw_get_phy_flags,
	.adjust_link = bcm_sf2_sw_adjust_link,
	.fixed_link_update = bcm_sf2_sw_fixed_link_update,
	.suspend = bcm_sf2_sw_suspend,
	.resume = bcm_sf2_sw_resume,
	.get_wol = bcm_sf2_sw_get_wol,
	.set_wol = bcm_sf2_sw_set_wol,
	.port_enable = bcm_sf2_port_setup,
	.port_disable = bcm_sf2_port_disable,
	.get_eee = bcm_sf2_sw_get_eee,
	.set_eee = bcm_sf2_sw_set_eee,
	.port_bridge_join = b53_br_join,
	.port_bridge_leave = b53_br_leave,
	.port_stp_state_set = b53_br_set_stp_state,
	.port_fast_age = b53_br_fast_age,
	.port_vlan_filtering = b53_vlan_filtering,
	.port_vlan_prepare = b53_vlan_prepare,
	.port_vlan_add = b53_vlan_add,
	.port_vlan_del = b53_vlan_del,
	.port_vlan_dump = b53_vlan_dump,
	.port_fdb_prepare = b53_fdb_prepare,
	.port_fdb_dump = b53_fdb_dump,
	.port_fdb_add = b53_fdb_add,
	.port_fdb_del = b53_fdb_del,
	.get_rxnfc = bcm_sf2_get_rxnfc,
	.set_rxnfc = bcm_sf2_set_rxnfc,
	.port_mirror_add = b53_mirror_add,
	.port_mirror_del = b53_mirror_del,
};

struct bcm_sf2_of_data {
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
};

/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL] = 0x00,
	[REG_SWITCH_STATUS] = 0x04,
	[REG_DIR_DATA_WRITE] = 0x08,
	[REG_DIR_DATA_READ] = 0x0C,
	[REG_SWITCH_REVISION] = 0x18,
	[REG_PHY_REVISION] = 0x1C,
	[REG_SPHY_CNTRL] = 0x2C,
	[REG_RGMII_0_CNTRL] = 0x34,
	[REG_RGMII_1_CNTRL] = 0x40,
	[REG_RGMII_2_CNTRL] = 0x4c,
	[REG_LED_0_CNTRL] = 0x90,
	[REG_LED_1_CNTRL] = 0x94,
	[REG_LED_2_CNTRL] = 0x98,
};

static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type = BCM7445_DEVICE_ID,
	.core_reg_align = 0,
	.reg_offsets = bcm_sf2_7445_reg_offsets,
};

static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL] = 0x00,
	[REG_SWITCH_STATUS] = 0x04,
	[REG_DIR_DATA_WRITE] = 0x08,
	[REG_DIR_DATA_READ] = 0x0c,
	[REG_SWITCH_REVISION] = 0x10,
	[REG_PHY_REVISION] = 0x14,
	[REG_SPHY_CNTRL] = 0x24,
	[REG_RGMII_0_CNTRL] = 0xe0,
	[REG_RGMII_1_CNTRL] = 0xec,
	[REG_RGMII_2_CNTRL] = 0xf8,
	[REG_LED_0_CNTRL] = 0x40,
	[REG_LED_1_CNTRL] = 0x4c,
	[REG_LED_2_CNTRL] = 0x58,
};

static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type = BCM7278_DEVICE_ID,
	.core_reg_align = 1,
	.reg_offsets = bcm_sf2_7278_reg_offsets,
};

static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);

static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);
	mutex_init(&priv->cfp.lock);

	/* CFP rule #0 cannot be used for specific classifications, flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);

	bcm_sf2_identify_ports(priv, dn->child);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}

static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* Disable all ports and interrupts */
	priv->wol_ports_mask = 0;
	bcm_sf2_sw_suspend(priv->dev->ds);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}

static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);

static struct platform_driver bcm_sf2_driver = {
	.probe = bcm_sf2_sw_probe,
	.remove = bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");