/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"

static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds)
{
	return DSA_TAG_PROTO_BRCM;
}

static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Enable the IMP port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (!((1 << i) & ds->enabled_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= (1 << cpu_port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
	}
}

static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port)
{
	u32 reg, val;

	/* Resolve which bit controls the Broadcom tag */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for the IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception of Broadcom tags for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
}

static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast and Unicast forwarding to the IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	bcm_sf2_brcm_hdr_setup(priv, port);

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, offset);
}

static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = core_readl(priv, CORE_EEE_EN_CTRL);
	if (enable)
		reg |= 1 << port;
	else
		reg &= ~(1 << port);
	core_writel(priv, reg, CORE_EEE_EN_CTRL);
}

static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}

static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}

static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->dst->cpu_dp->index;
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		bcm_sf2_brcm_hdr_setup(priv, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY will
			 * be in the halted state and phy_start() will call
			 * resume.
			 *
			 * The resume path does not restore the autoneg
			 * settings, and since we hard reset the PHY manually
			 * here, we need to reset its state machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one, to be in the default VLAN; if it
	 * was a member of a bridge, restore the membership it had prior to
	 * this port being brought down.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->dev->ports[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}

/* Returns 0 if EEE was not enabled, or 1 otherwise */
static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
			    struct phy_device *phy)
{
	int ret;

	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	bcm_sf2_eee_enable_set(ds, port, true);

	return 1;
}

static int bcm_sf2_sw_get_mac_eee(struct dsa_switch *ds, int port,
				  struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	u32 reg;

	reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(reg & (1 << port));

	return 0;
}

static int bcm_sf2_sw_set_mac_eee(struct dsa_switch *ds, int port,
				  struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;

	p->eee_enabled = e->eee_enabled;
	bcm_sf2_eee_enable_set(ds, port, e->eee_enabled);

	return 0;
}

static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}

static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from the Broadcom pseudo-PHY address; otherwise,
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}

static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address; otherwise,
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);

	return 0;
}

static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}

static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	/* Wait for the software reset bit to self-clear; report -ETIMEDOUT
	 * once the polling budget is exhausted.
	 */
	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	return -ETIMEDOUT;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	int mode;
	unsigned int port_num;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0)
			continue;

		if (mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;
	}
}

static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo-PHY as needing special treatment; all other
	 * PHY reads/writes go to the master MDIO bus controller that comes
	 * with this switch, backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	if (dn)
		err = of_mdiobus_register(priv->slave_mii_bus, dn);
	else
		err = mdiobus_register(priv->slave_mii_bus);

	if (err)
		of_node_put(dn);

	return err;
}

static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	if (priv->master_mii_dn)
		of_node_put(priv->master_mii_dn);
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0, which is exactly how
	 * the REG_PHY_REVISION register is laid out.
600 */ 601 602 return priv->hw_params.gphy_rev; 603 } 604 605 static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, 606 struct phy_device *phydev) 607 { 608 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 609 struct ethtool_eee *p = &priv->port_sts[port].eee; 610 u32 id_mode_dis = 0, port_mode; 611 const char *str = NULL; 612 u32 reg, offset; 613 614 if (priv->type == BCM7445_DEVICE_ID) 615 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); 616 else 617 offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); 618 619 switch (phydev->interface) { 620 case PHY_INTERFACE_MODE_RGMII: 621 str = "RGMII (no delay)"; 622 id_mode_dis = 1; 623 case PHY_INTERFACE_MODE_RGMII_TXID: 624 if (!str) 625 str = "RGMII (TX delay)"; 626 port_mode = EXT_GPHY; 627 break; 628 case PHY_INTERFACE_MODE_MII: 629 str = "MII"; 630 port_mode = EXT_EPHY; 631 break; 632 case PHY_INTERFACE_MODE_REVMII: 633 str = "Reverse MII"; 634 port_mode = EXT_REVMII; 635 break; 636 default: 637 /* All other PHYs: internal and MoCA */ 638 goto force_link; 639 } 640 641 /* If the link is down, just disable the interface to conserve power */ 642 if (!phydev->link) { 643 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); 644 reg &= ~RGMII_MODE_EN; 645 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); 646 goto force_link; 647 } 648 649 /* Clear id_mode_dis bit, and the existing port mode, but 650 * make sure we enable the RGMII block for data to pass 651 */ 652 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); 653 reg &= ~ID_MODE_DIS; 654 reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); 655 reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); 656 657 reg |= port_mode | RGMII_MODE_EN; 658 if (id_mode_dis) 659 reg |= ID_MODE_DIS; 660 661 if (phydev->pause) { 662 if (phydev->asym_pause) 663 reg |= TX_PAUSE_EN; 664 reg |= RX_PAUSE_EN; 665 } 666 667 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); 668 669 pr_info("Port %d configured for %s\n", port, str); 670 671 force_link: 672 /* Force link settings detected from the PHY */ 673 reg = SW_OVERRIDE; 674 switch (phydev->speed) { 675 case SPEED_1000: 676 reg |= SPDSTS_1000 << SPEED_SHIFT; 677 break; 678 case SPEED_100: 679 reg |= SPDSTS_100 << SPEED_SHIFT; 680 break; 681 } 682 683 if (phydev->link) 684 reg |= LINK_STS; 685 if (phydev->duplex == DUPLEX_FULL) 686 reg |= DUPLX_MODE; 687 688 core_writel(priv, reg, offset); 689 690 if (!phydev->is_pseudo_fixed_link) 691 p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); 692 } 693 694 static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, 695 struct fixed_phy_status *status) 696 { 697 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 698 u32 duplex, pause, offset; 699 u32 reg; 700 701 if (priv->type == BCM7445_DEVICE_ID) 702 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); 703 else 704 offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); 705 706 duplex = core_readl(priv, CORE_DUPSTS); 707 pause = core_readl(priv, CORE_PAUSESTS); 708 709 status->link = 0; 710 711 /* MoCA port is special as we do not get link status from CORE_LNKSTS, 712 * which means that we need to force the link at the port override 713 * level to get the data to flow. We do use what the interrupt handler 714 * did determine before. 715 * 716 * For the other ports, we just force the link status, since this is 717 * a fixed PHY device. 
718 */ 719 if (port == priv->moca_port) { 720 status->link = priv->port_sts[port].link; 721 /* For MoCA interfaces, also force a link down notification 722 * since some version of the user-space daemon (mocad) use 723 * cmd->autoneg to force the link, which messes up the PHY 724 * state machine and make it go in PHY_FORCING state instead. 725 */ 726 if (!status->link) 727 netif_carrier_off(ds->ports[port].netdev); 728 status->duplex = 1; 729 } else { 730 status->link = 1; 731 status->duplex = !!(duplex & (1 << port)); 732 } 733 734 reg = core_readl(priv, offset); 735 reg |= SW_OVERRIDE; 736 if (status->link) 737 reg |= LINK_STS; 738 else 739 reg &= ~LINK_STS; 740 core_writel(priv, reg, offset); 741 742 if ((pause & (1 << port)) && 743 (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { 744 status->asym_pause = 1; 745 status->pause = 1; 746 } 747 748 if (pause & (1 << port)) 749 status->pause = 1; 750 } 751 752 static int bcm_sf2_sw_suspend(struct dsa_switch *ds) 753 { 754 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 755 unsigned int port; 756 757 bcm_sf2_intr_disable(priv); 758 759 /* Disable all ports physically present including the IMP 760 * port, the other ones have already been disabled during 761 * bcm_sf2_sw_setup 762 */ 763 for (port = 0; port < DSA_MAX_PORTS; port++) { 764 if ((1 << port) & ds->enabled_port_mask || 765 dsa_is_cpu_port(ds, port)) 766 bcm_sf2_port_disable(ds, port, NULL); 767 } 768 769 return 0; 770 } 771 772 static int bcm_sf2_sw_resume(struct dsa_switch *ds) 773 { 774 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 775 unsigned int port; 776 int ret; 777 778 ret = bcm_sf2_sw_rst(priv); 779 if (ret) { 780 pr_err("%s: failed to software reset switch\n", __func__); 781 return ret; 782 } 783 784 if (priv->hw_params.num_gphy == 1) 785 bcm_sf2_gphy_enable_set(ds, true); 786 787 for (port = 0; port < DSA_MAX_PORTS; port++) { 788 if ((1 << port) & ds->enabled_port_mask) 789 bcm_sf2_port_setup(ds, port, NULL); 790 else if (dsa_is_cpu_port(ds, port)) 791 bcm_sf2_imp_setup(ds, port); 792 } 793 794 return 0; 795 } 796 797 static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, 798 struct ethtool_wolinfo *wol) 799 { 800 struct net_device *p = ds->dst->cpu_dp->netdev; 801 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 802 struct ethtool_wolinfo pwol; 803 804 /* Get the parent device WoL settings */ 805 p->ethtool_ops->get_wol(p, &pwol); 806 807 /* Advertise the parent device supported settings */ 808 wol->supported = pwol.supported; 809 memset(&wol->sopass, 0, sizeof(wol->sopass)); 810 811 if (pwol.wolopts & WAKE_MAGICSECURE) 812 memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass)); 813 814 if (priv->wol_ports_mask & (1 << port)) 815 wol->wolopts = pwol.wolopts; 816 else 817 wol->wolopts = 0; 818 } 819 820 static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, 821 struct ethtool_wolinfo *wol) 822 { 823 struct net_device *p = ds->dst->cpu_dp->netdev; 824 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 825 s8 cpu_port = ds->dst->cpu_dp->index; 826 struct ethtool_wolinfo pwol; 827 828 p->ethtool_ops->get_wol(p, &pwol); 829 if (wol->wolopts & ~pwol.supported) 830 return -EINVAL; 831 832 if (wol->wolopts) 833 priv->wol_ports_mask |= (1 << port); 834 else 835 priv->wol_ports_mask &= ~(1 << port); 836 837 /* If we have at least one port enabled, make sure the CPU port 838 * is also enabled. If the CPU port is the last one enabled, we disable 839 * it since this configuration does not make sense. 
840 */ 841 if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port)) 842 priv->wol_ports_mask |= (1 << cpu_port); 843 else 844 priv->wol_ports_mask &= ~(1 << cpu_port); 845 846 return p->ethtool_ops->set_wol(p, wol); 847 } 848 849 static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv) 850 { 851 unsigned int timeout = 10; 852 u32 reg; 853 854 do { 855 reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL); 856 if (!(reg & ARLA_VTBL_STDN)) 857 return 0; 858 859 usleep_range(1000, 2000); 860 } while (timeout--); 861 862 return -ETIMEDOUT; 863 } 864 865 static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op) 866 { 867 core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL); 868 869 return bcm_sf2_vlan_op_wait(priv); 870 } 871 872 static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds) 873 { 874 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 875 unsigned int port; 876 877 /* Clear all VLANs */ 878 bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR); 879 880 for (port = 0; port < priv->hw_params.num_ports; port++) { 881 if (!((1 << port) & ds->enabled_port_mask)) 882 continue; 883 884 core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port)); 885 } 886 } 887 888 static int bcm_sf2_sw_setup(struct dsa_switch *ds) 889 { 890 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 891 unsigned int port; 892 893 /* Enable all valid ports and disable those unused */ 894 for (port = 0; port < priv->hw_params.num_ports; port++) { 895 /* IMP port receives special treatment */ 896 if ((1 << port) & ds->enabled_port_mask) 897 bcm_sf2_port_setup(ds, port, NULL); 898 else if (dsa_is_cpu_port(ds, port)) 899 bcm_sf2_imp_setup(ds, port); 900 else 901 bcm_sf2_port_disable(ds, port, NULL); 902 } 903 904 bcm_sf2_sw_configure_vlan(ds); 905 906 return 0; 907 } 908 909 /* The SWITCH_CORE register space is managed by b53 but operates on a page + 910 * register basis so we need to translate that into an address that the 911 * bus-glue understands. 
912 */ 913 #define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2) 914 915 static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg, 916 u8 *val) 917 { 918 struct bcm_sf2_priv *priv = dev->priv; 919 920 *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg)); 921 922 return 0; 923 } 924 925 static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg, 926 u16 *val) 927 { 928 struct bcm_sf2_priv *priv = dev->priv; 929 930 *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg)); 931 932 return 0; 933 } 934 935 static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg, 936 u32 *val) 937 { 938 struct bcm_sf2_priv *priv = dev->priv; 939 940 *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg)); 941 942 return 0; 943 } 944 945 static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg, 946 u64 *val) 947 { 948 struct bcm_sf2_priv *priv = dev->priv; 949 950 *val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg)); 951 952 return 0; 953 } 954 955 static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg, 956 u8 value) 957 { 958 struct bcm_sf2_priv *priv = dev->priv; 959 960 core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); 961 962 return 0; 963 } 964 965 static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg, 966 u16 value) 967 { 968 struct bcm_sf2_priv *priv = dev->priv; 969 970 core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); 971 972 return 0; 973 } 974 975 static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg, 976 u32 value) 977 { 978 struct bcm_sf2_priv *priv = dev->priv; 979 980 core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); 981 982 return 0; 983 } 984 985 static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg, 986 u64 value) 987 { 988 struct bcm_sf2_priv *priv = dev->priv; 989 990 core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); 991 992 return 0; 993 } 994 995 static const struct b53_io_ops bcm_sf2_io_ops = { 996 .read8 = bcm_sf2_core_read8, 997 .read16 = bcm_sf2_core_read16, 998 .read32 = bcm_sf2_core_read32, 999 .read48 = bcm_sf2_core_read64, 1000 .read64 = bcm_sf2_core_read64, 1001 .write8 = bcm_sf2_core_write8, 1002 .write16 = bcm_sf2_core_write16, 1003 .write32 = bcm_sf2_core_write32, 1004 .write48 = bcm_sf2_core_write64, 1005 .write64 = bcm_sf2_core_write64, 1006 }; 1007 1008 static const struct dsa_switch_ops bcm_sf2_ops = { 1009 .get_tag_protocol = bcm_sf2_sw_get_tag_protocol, 1010 .setup = bcm_sf2_sw_setup, 1011 .get_strings = b53_get_strings, 1012 .get_ethtool_stats = b53_get_ethtool_stats, 1013 .get_sset_count = b53_get_sset_count, 1014 .get_phy_flags = bcm_sf2_sw_get_phy_flags, 1015 .adjust_link = bcm_sf2_sw_adjust_link, 1016 .fixed_link_update = bcm_sf2_sw_fixed_link_update, 1017 .suspend = bcm_sf2_sw_suspend, 1018 .resume = bcm_sf2_sw_resume, 1019 .get_wol = bcm_sf2_sw_get_wol, 1020 .set_wol = bcm_sf2_sw_set_wol, 1021 .port_enable = bcm_sf2_port_setup, 1022 .port_disable = bcm_sf2_port_disable, 1023 .get_mac_eee = bcm_sf2_sw_get_mac_eee, 1024 .set_mac_eee = bcm_sf2_sw_set_mac_eee, 1025 .port_bridge_join = b53_br_join, 1026 .port_bridge_leave = b53_br_leave, 1027 .port_stp_state_set = b53_br_set_stp_state, 1028 .port_fast_age = b53_br_fast_age, 1029 .port_vlan_filtering = b53_vlan_filtering, 1030 .port_vlan_prepare = b53_vlan_prepare, 1031 .port_vlan_add = b53_vlan_add, 1032 .port_vlan_del = b53_vlan_del, 1033 .port_fdb_dump = b53_fdb_dump, 1034 .port_fdb_add = b53_fdb_add, 1035 .port_fdb_del = b53_fdb_del, 1036 
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};

struct bcm_sf2_of_data {
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
	unsigned int num_cfp_rules;
};

/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};

static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};

static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};

static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};

static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);

static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;
	priv->num_cfp_rules = data->num_cfp_rules;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);
	mutex_init(&priv->cfp.lock);

	/* CFP rule #0 cannot be used for specific classifications, flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);

	bcm_sf2_identify_ports(priv, dn->child);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}

static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* Disable all ports and interrupts */
	priv->wol_ports_mask = 0;
	bcm_sf2_sw_suspend(priv->dev->ds);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}

static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g.: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);

static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");