// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/mii.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"

/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port, count = 0;

	for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
		if (dsa_is_cpu_port(ds, port))
			continue;
		if (priv->port_sts[port].enabled)
			count++;
	}

	return count;
}

static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned long new_rate;
	unsigned int ports_active;
	/* Frequencies in Hz */
	static const unsigned long rate_table[] = {
		59220000,
		60820000,
		62500000,
		62500000,
	};

	ports_active = bcm_sf2_num_active_ports(ds);
	if (ports_active == 0 || !priv->clk_mdiv)
		return;

	/* If we overflow our table, just use the recommended operational
	 * frequency
	 */
	if (ports_active > ARRAY_SIZE(rate_table))
		new_rate = 90000000;
	else
		new_rate = rate_table[ports_active - 1];
	clk_set_rate(priv->clk_mdiv, new_rate);
}

static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	if (port == 8) {
		if (priv->type == BCM4908_DEVICE_ID ||
		    priv->type == BCM7445_DEVICE_ID)
			offset = CORE_STS_OVERRIDE_IMP;
		else
			offset = CORE_STS_OVERRIDE_IMP2;

		/* Force link status for IMP port */
		reg = core_readl(priv, offset);
		reg |= (MII_SW_OR | LINK_STS);
		reg &= ~GMII_SPEED_UP_2G;
		core_writel(priv, reg, offset);

		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
		reg = core_readl(priv, CORE_IMP_CTL);
		reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_IMP_CTL);
	} else {
		reg = core_readl(priv, CORE_G_PCTL_PORT(port));
		reg &= ~(RX_DIS | TX_DIS);
		core_writel(priv, reg, CORE_G_PCTL_PORT(port));
	}

	priv->port_sts[port].enabled = true;
}

static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}

static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}

static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	if (!dsa_is_user_port(ds, port))
		return 0;

	priv->port_sts[port].enabled = true;

	bcm_sf2_recalc_clock(ds);

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable learning */
	reg = core_readl(priv, CORE_DIS_LEARN);
	reg &= ~BIT(port);
	core_writel(priv, reg, CORE_DIS_LEARN);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port)) {
		b53_brcm_hdr_setup(ds, port);

		/* Disable learning on ASP port */
		if (port == 7) {
			reg = core_readl(priv, CORE_DIS_LEARN);
			reg |= BIT(port);
			core_writel(priv, reg, CORE_DIS_LEARN);
		}
	}

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY
			 * will be in the halted state and phy_start()
			 * will call resume.
			 *
			 * The resume path does not configure back the
			 * autoneg settings, and since we hard reset the
			 * PHY manually here, we need to reset the state
			 * machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Disable learning while in WoL mode */
	if (priv->wol_ports_mask & (1 << port)) {
		reg = core_readl(priv, CORE_DIS_LEARN);
		reg |= BIT(port);
		core_writel(priv, reg, CORE_DIS_LEARN);
		return;
	}

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	b53_disable_port(ds, port);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	priv->port_sts[port].enabled = false;

	bcm_sf2_recalc_clock(ds);
}

static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}

static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from the Broadcom pseudo-PHY address, else send
	 * them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}

static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address, else send
	 * them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		return mdiobus_write_nested(priv->master_mii_bus, addr,
					    regnum, val);
}

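/* The first interrupt bank is only acknowledged here; per-port link
 * events for port 7 are handled by the second bank handler below.
 */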
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = true;
		dsa_port_phylink_mac_change(ds, 7, true);
	}
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = false;
		dsa_port_phylink_mac_change(ds, 7, false);
	}

	return IRQ_HANDLED;
}

static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;
	int ret;

	/* The watchdog reset does not work on 7278; we need to hit the
	 * "external" reset line through the reset controller.
	 */
	if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) {
		ret = reset_control_assert(priv->rcdev);
		if (ret)
			return ret;

		return reset_control_deassert(priv->rcdev);
	}

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	unsigned int port_num;
	struct property *prop;
	phy_interface_t mode;
	int err;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		err = of_get_phy_mode(port, &mode);
		if (err)
			continue;

		if (mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;

		/* Ensure that port 5 is not picked up as a DSA CPU port
		 * flavour but a regular port instead. We should be using
		 * devlink to be able to set the port flavour.
		 */
		if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
			prop = of_find_property(port, "ethernet", NULL);
			if (prop)
				of_remove_property(port, prop);
		}
	}
}

static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn, *child;
	struct phy_device *phydev;
	struct property *prop;
	static int index;
	int err, reg;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus) {
		of_node_put(dn);
		return -EPROBE_DEFER;
	}

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus) {
		of_node_put(dn);
		return -ENOMEM;
	}

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo-PHY as needing special treatment; all other
	 * PHY read/writes go to the master MDIO bus controller that comes
	 * with this switch, backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	/* We need to make sure that of_phy_connect() will not work by
	 * removing the 'phandle' and 'linux,phandle' properties and
	 * unregister the existing PHY device that was already registered.
	 */
	for_each_available_child_of_node(dn, child) {
		if (of_property_read_u32(child, "reg", &reg) ||
		    reg >= PHY_MAX_ADDR)
			continue;

		if (!(priv->indir_phy_mask & BIT(reg)))
			continue;

		prop = of_find_property(child, "phandle", NULL);
		if (prop)
			of_remove_property(child, prop);

		prop = of_find_property(child, "linux,phandle", NULL);
		if (prop)
			of_remove_property(child, prop);

		phydev = of_phy_find_device(child);
		if (phydev)
			phy_device_remove(phydev);
	}

	err = mdiobus_register(priv->slave_mii_bus);
	if (err && dn)
		of_node_put(dn);

	return err;
}

static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	of_node_put(priv->master_mii_dn);
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0, which is exactly the
	 * REG_PHY_REVISION register layout.
	 */

	return priv->hw_params.gphy_rev;
}

static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
				unsigned long *supported,
				struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (!phy_interface_mode_is_rgmii(state->interface) &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
	    state->interface != PHY_INTERFACE_MODE_MOCA) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		if (port != core_readl(priv, CORE_IMP0_PRT_ID))
			dev_err(ds->dev,
				"Unsupported interface: %d for port %d\n",
				state->interface, port);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  const struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	u32 reg;

	if (port == core_readl(priv, CORE_IMP0_PRT_ID))
		return;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		id_mode_dis = 1;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		port_mode = EXT_REVMII;
		break;
	default:
		/* Nothing required for all other PHYs: internal and MoCA */
		return;
	}

	/* Clear the id_mode_dis bit and the existing port mode, and let
	 * RGMII_MODE_EN be set by mac_link_{up,down}
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);

	reg |= port_mode;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
}

static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
				    phy_interface_t interface, bool link)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	if (!phy_interface_mode_is_rgmii(interface) &&
	    interface != PHY_INTERFACE_MODE_MII &&
	    interface != PHY_INTERFACE_MODE_REVMII)
		return;

	/* If the link is down, just disable the interface to conserve power */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	if (link)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
}

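/* On link down, clear the forced link status in the port override
 * register for external ports and turn the (R)GMII interface off to
 * conserve power.
 */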
static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     phy_interface_t interface)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg, offset;

	if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
		if (priv->type == BCM4908_DEVICE_ID ||
		    priv->type == BCM7445_DEVICE_ID)
			offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
		else
			offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

		reg = core_readl(priv, offset);
		reg &= ~LINK_STS;
		core_writel(priv, reg, offset);
	}

	bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}

static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
				   unsigned int mode,
				   phy_interface_t interface,
				   struct phy_device *phydev,
				   int speed, int duplex,
				   bool tx_pause, bool rx_pause)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;
	u32 reg, offset;

	bcm_sf2_sw_mac_link_set(ds, port, interface, true);

	if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
		if (priv->type == BCM4908_DEVICE_ID ||
		    priv->type == BCM7445_DEVICE_ID)
			offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
		else
			offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

		if (interface == PHY_INTERFACE_MODE_RGMII ||
		    interface == PHY_INTERFACE_MODE_RGMII_TXID ||
		    interface == PHY_INTERFACE_MODE_MII ||
		    interface == PHY_INTERFACE_MODE_REVMII) {
			reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
			reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

			if (tx_pause)
				reg |= TX_PAUSE_EN;
			if (rx_pause)
				reg |= RX_PAUSE_EN;

			reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		}

		reg = SW_OVERRIDE | LINK_STS;
		switch (speed) {
		case SPEED_1000:
			reg |= SPDSTS_1000 << SPEED_SHIFT;
			break;
		case SPEED_100:
			reg |= SPDSTS_100 << SPEED_SHIFT;
			break;
		}

		if (duplex == DUPLEX_FULL)
			reg |= DUPLX_MODE;

		core_writel(priv, reg, offset);
	}

	if (mode == MLO_AN_PHY && phydev)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}

static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
				   struct phylink_link_state *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	status->link = false;

	/* The MoCA port is special as we do not get link status from
	 * CORE_LNKSTS, which means that we need to force the link at the
	 * port override level to get the data to flow. We use what the
	 * interrupt handler determined earlier.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some versions of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and makes it go into the PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(dsa_to_port(ds, port)->slave);
		status->duplex = DUPLEX_FULL;
	} else {
		status->link = true;
	}
}

static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all ports physically present, including the IMP
	 * port; the other ones have already been disabled during
	 * bcm_sf2_sw_setup
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port);
	}

	if (!priv->wol_ports_mask)
		clk_disable_unprepare(priv->clk);

	return 0;
}

static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret;

	if (!priv->wol_ports_mask)
		clk_prepare_enable(priv->clk);

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	ret = bcm_sf2_cfp_resume(ds);
	if (ret)
		return ret;

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	ds->ops->setup(ds);

	return 0;
}

static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol = { };

	/* Get the parent device WoL settings */
	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}

static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	struct ethtool_wolinfo pwol = { };

	if (p->ethtool_ops->get_wol)
		p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}

static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port);
	}

	b53_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return b53_setup_devlink_resources(ds);
}

static void bcm_sf2_sw_teardown(struct dsa_switch *ds)
{
	dsa_devlink_resources_unregister(ds);
}

/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */
#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)

static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

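/* 48-bit accesses are serviced by the 64-bit helpers; all narrower
 * accesses go through the 32-bit MMIO accessors above.
 */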
static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8 = bcm_sf2_core_read8,
	.read16 = bcm_sf2_core_read16,
	.read32 = bcm_sf2_core_read32,
	.read48 = bcm_sf2_core_read64,
	.read64 = bcm_sf2_core_read64,
	.write8 = bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};

static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
				   u32 stringset, uint8_t *data)
{
	int cnt = b53_get_sset_count(ds, port, stringset);

	b53_get_strings(ds, port, stringset, data);
	bcm_sf2_cfp_get_strings(ds, port, stringset,
				data + cnt * ETH_GSTRING_LEN);
}

static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
					 uint64_t *data)
{
	int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS);

	b53_get_ethtool_stats(ds, port, data);
	bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt);
}

static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
				     int sset)
{
	int cnt = b53_get_sset_count(ds, port, sset);

	if (cnt < 0)
		return cnt;

	cnt += bcm_sf2_cfp_get_sset_count(ds, port, sset);

	return cnt;
}

static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol = b53_get_tag_protocol,
	.setup = bcm_sf2_sw_setup,
	.teardown = bcm_sf2_sw_teardown,
	.get_strings = bcm_sf2_sw_get_strings,
	.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count = bcm_sf2_sw_get_sset_count,
	.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
	.get_phy_flags = bcm_sf2_sw_get_phy_flags,
	.phylink_validate = bcm_sf2_sw_validate,
	.phylink_mac_config = bcm_sf2_sw_mac_config,
	.phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
	.phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
	.phylink_fixed_state = bcm_sf2_sw_fixed_state,
	.suspend = bcm_sf2_sw_suspend,
	.resume = bcm_sf2_sw_resume,
	.get_wol = bcm_sf2_sw_get_wol,
	.set_wol = bcm_sf2_sw_set_wol,
	.port_enable = bcm_sf2_port_setup,
	.port_disable = bcm_sf2_port_disable,
	.get_mac_eee = b53_get_mac_eee,
	.set_mac_eee = b53_set_mac_eee,
	.port_bridge_join = b53_br_join,
	.port_bridge_leave = b53_br_leave,
	.port_stp_state_set = b53_br_set_stp_state,
	.port_fast_age = b53_br_fast_age,
	.port_vlan_filtering = b53_vlan_filtering,
	.port_vlan_add = b53_vlan_add,
	.port_vlan_del = b53_vlan_del,
	.port_fdb_dump = b53_fdb_dump,
	.port_fdb_add = b53_fdb_add,
	.port_fdb_del = b53_fdb_del,
	.get_rxnfc = bcm_sf2_get_rxnfc,
	.set_rxnfc = bcm_sf2_set_rxnfc,
	.port_mirror_add = b53_mirror_add,
	.port_mirror_del = b53_mirror_del,
	.port_mdb_add = b53_mdb_add,
	.port_mdb_del = b53_mdb_del,
};

struct bcm_sf2_of_data {
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
	unsigned int num_cfp_rules;
};

static const u16 bcm_sf2_4908_reg_offsets[] = {
	[REG_SWITCH_CNTRL] = 0x00,
	[REG_SWITCH_STATUS] = 0x04,
	[REG_DIR_DATA_WRITE] = 0x08,
	[REG_DIR_DATA_READ] = 0x0c,
	[REG_SWITCH_REVISION] = 0x10,
	[REG_PHY_REVISION] = 0x14,
	[REG_SPHY_CNTRL] = 0x24,
	[REG_CROSSBAR] = 0xc8,
	[REG_RGMII_0_CNTRL] = 0xe0,
	[REG_RGMII_1_CNTRL] = 0xec,
	[REG_RGMII_2_CNTRL] = 0xf8,
	[REG_LED_0_CNTRL] = 0x40,
	[REG_LED_1_CNTRL] = 0x4c,
	[REG_LED_2_CNTRL] = 0x58,
};

static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
	.type = BCM4908_DEVICE_ID,
	.core_reg_align = 0,
	.reg_offsets = bcm_sf2_4908_reg_offsets,
	.num_cfp_rules = 0, /* FIXME */
};

/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL] = 0x00,
	[REG_SWITCH_STATUS] = 0x04,
	[REG_DIR_DATA_WRITE] = 0x08,
	[REG_DIR_DATA_READ] = 0x0C,
	[REG_SWITCH_REVISION] = 0x18,
	[REG_PHY_REVISION] = 0x1C,
	[REG_SPHY_CNTRL] = 0x2C,
	[REG_RGMII_0_CNTRL] = 0x34,
	[REG_RGMII_1_CNTRL] = 0x40,
	[REG_RGMII_2_CNTRL] = 0x4c,
	[REG_LED_0_CNTRL] = 0x90,
	[REG_LED_1_CNTRL] = 0x94,
	[REG_LED_2_CNTRL] = 0x98,
};

static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type = BCM7445_DEVICE_ID,
	.core_reg_align = 0,
	.reg_offsets = bcm_sf2_7445_reg_offsets,
	.num_cfp_rules = 256,
};

static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL] = 0x00,
	[REG_SWITCH_STATUS] = 0x04,
	[REG_DIR_DATA_WRITE] = 0x08,
	[REG_DIR_DATA_READ] = 0x0c,
	[REG_SWITCH_REVISION] = 0x10,
	[REG_PHY_REVISION] = 0x14,
	[REG_SPHY_CNTRL] = 0x24,
	[REG_RGMII_0_CNTRL] = 0xe0,
	[REG_RGMII_1_CNTRL] = 0xec,
	[REG_RGMII_2_CNTRL] = 0xf8,
	[REG_LED_0_CNTRL] = 0x40,
	[REG_LED_1_CNTRL] = 0x4c,
	[REG_LED_2_CNTRL] = 0x58,
};

static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type = BCM7278_DEVICE_ID,
	.core_reg_align = 1,
	.reg_offsets = bcm_sf2_7278_reg_offsets,
	.num_cfp_rules = 128,
};

static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm4908-switch",
	  .data = &bcm_sf2_4908_data
	},
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.8",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);

static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct device_node *ports;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;
	priv->num_cfp_rules = data->num_cfp_rules;

	priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
								"switch");
	if (PTR_ERR(priv->rcdev) == -EPROBE_DEFER)
		return PTR_ERR(priv->rcdev);

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->cfp.lock);
	INIT_LIST_HEAD(&priv->cfp.rules_list);

	/* CFP rule #0 cannot be used for specific classifications, flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);
	set_bit(0, priv->cfp.unique);

	/* Balance of_node_put() done by of_find_node_by_name() */
	of_node_get(dn);
	ports = of_find_node_by_name(dn, "ports");
	if (ports) {
		bcm_sf2_identify_ports(priv, ports);
		of_node_put(ports);
	}

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		*base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	clk_prepare_enable(priv->clk);

	priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
	if (IS_ERR(priv->clk_mdiv)) {
		ret = PTR_ERR(priv->clk_mdiv);
		goto out_clk;
	}

	clk_prepare_enable(priv->clk_mdiv);

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		goto out_clk_mdiv;
	}

	bcm_sf2_gphy_enable_set(priv->dev->ds, true);

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		goto out_clk_mdiv;
	}

	bcm_sf2_gphy_enable_set(priv->dev->ds, false);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", ds);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", ds);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
				  SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

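	/* Register the switch through the common b53 code, which also
	 * registers it with the DSA core.
	 */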
	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	dev_info(&pdev->dev,
		 "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
		 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		 priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
out_clk_mdiv:
	clk_disable_unprepare(priv->clk_mdiv);
out_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	priv->wol_ports_mask = 0;
	/* Disable interrupts */
	bcm_sf2_intr_disable(priv);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_cfp_exit(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);
	clk_disable_unprepare(priv->clk_mdiv);
	clk_disable_unprepare(priv->clk);
	if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
		reset_control_assert(priv->rcdev);

	return 0;
}

static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct bcm_sf2_priv *priv = dev_get_drvdata(dev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct bcm_sf2_priv *priv = dev_get_drvdata(dev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);

static struct platform_driver bcm_sf2_driver = {
	.probe = bcm_sf2_sw_probe,
	.remove = bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");