/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <linux/platform_data/b53.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"

static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, offset);
}

static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
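
/* Per-port interrupt lines are split across the two INTRL2 banks: port 0
 * lives on the first bank (INTRL2_0), while the remaining ports, including
 * the MoCA port 7, live on the second bank (INTRL2_1).
 */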

static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
					    int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
}

static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
					     int port)
{
	unsigned int off;

	switch (port) {
	case 7:
		off = P7_IRQ_OFF;
		break;
	case 0:
		/* Port 0 interrupts are located on the first bank */
		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
		return;
	default:
		off = P_IRQ_OFF(port);
		break;
	}

	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable learning */
	reg = core_readl(priv, CORE_DIS_LEARN);
	reg &= ~BIT(port);
	core_writel(priv, reg, CORE_DIS_LEARN);

	/* Enable Broadcom tags for that port if requested */
	if (priv->brcm_tag_mask & BIT(port))
		b53_brcm_hdr_setup(ds, port);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY will
			 * be in the halted state and phy_start() will call
			 * resume.
			 *
			 * The resume path does not restore the autoneg
			 * settings, and since we hard reset the PHY manually
			 * here, we need to reset its state machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set per-queue pause threshold to 32 */
	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));

	/* Set ACB threshold to 24 */
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
						    SF2_NUM_EGRESS_QUEUES + i));
	}

	return b53_enable_port(ds, port, phy);
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Disable learning while in WoL mode */
	if (priv->wol_ports_mask & (1 << port)) {
		reg = core_readl(priv, CORE_DIS_LEARN);
		reg |= BIT(port);
		core_writel(priv, reg, CORE_DIS_LEARN);
		return;
	}

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	b53_disable_port(ds, port, phy);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}

static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}

static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from the Broadcom pseudo-PHY address; otherwise,
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}

static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address; otherwise,
	 * send them to our master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);

	return 0;
}

static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}
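
/* The second interrupt bank also reports link up/down events for port 7
 * (MoCA); relay those to phylink so the port status tracks the hardware
 * state.
 */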

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = true;
		dsa_port_phylink_mac_change(ds, 7, true);
	}
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
		priv->port_sts[7].link = false;
		dsa_port_phylink_mac_change(ds, 7, false);
	}

	return IRQ_HANDLED;
}

static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	int mode;
	unsigned int port_num;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0)
			continue;

		if (mode == PHY_INTERFACE_MODE_INTERNAL)
			priv->int_phy_mask |= 1 << port_num;

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;

		if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
			priv->brcm_tag_mask |= 1 << port_num;
	}
}

static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct device_node *dn;
	static int index;
	int err;

	/* Find our integrated MDIO bus node */
	dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
	priv->master_mii_bus = of_mdio_find_bus(dn);
	if (!priv->master_mii_bus)
		return -EPROBE_DEFER;

	get_device(&priv->master_mii_bus->dev);
	priv->master_mii_dn = dn;

	priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
	if (!priv->slave_mii_bus)
		return -ENOMEM;

	priv->slave_mii_bus->priv = priv;
	priv->slave_mii_bus->name = "sf2 slave mii";
	priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
	priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
	snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
		 index++);
	priv->slave_mii_bus->dev.of_node = dn;

	/* Include the pseudo-PHY address to divert reads towards our
	 * workaround. This is only required for 7445D0, since 7445E0
	 * disconnects the internal switch pseudo-PHY such that we can use the
	 * regular SWITCH_MDIO master controller instead.
	 *
	 * Here we flag the pseudo-PHY as needing special treatment; all other
	 * PHY reads/writes go to the master MDIO bus controller that comes
	 * with this switch, backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
	else
		priv->indir_phy_mask = 0;

	ds->phys_mii_mask = priv->indir_phy_mask;
	ds->slave_mii_bus = priv->slave_mii_bus;
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

	err = of_mdiobus_register(priv->slave_mii_bus, dn);
	if (err && dn)
		of_node_put(dn);

	return err;
}

static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
	mdiobus_unregister(priv->slave_mii_bus);
	if (priv->master_mii_dn)
		of_node_put(priv->master_mii_dn);
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */

	return priv->hw_params.gphy_rev;
}

static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
				unsigned long *supported,
				struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (!phy_interface_mode_is_rgmii(state->interface) &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
	    state->interface != PHY_INTERFACE_MODE_MOCA) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
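
/* phylink MAC callbacks: mac_config selects the RGMII/MII port mode and
 * pause settings for external PHY interfaces, then forces speed, duplex and
 * link state through the per-port override register (SW_OVERRIDE).
 */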

static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  const struct phylink_link_state *state)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
	else
		offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		id_mode_dis = 1;
		/* fallthrough */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		port_mode = EXT_REVMII;
		break;
	default:
		/* all other PHYs: internal and MoCA */
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, let
	 * RGMII_MODE_EN be set by mac_link_{up,down}
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (state->pause & MLO_PAUSE_TXRX_MASK) {
		if (state->pause & MLO_PAUSE_TX)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (state->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (state->link)
		reg |= LINK_STS;
	if (state->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, offset);
}

static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
				    phy_interface_t interface, bool link)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	if (!phy_interface_mode_is_rgmii(interface) &&
	    interface != PHY_INTERFACE_MODE_MII &&
	    interface != PHY_INTERFACE_MODE_REVMII)
		return;

	/* If the link is down, just disable the interface to conserve power */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	if (link)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
}

static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     phy_interface_t interface)
{
	bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}

static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
				   unsigned int mode,
				   phy_interface_t interface,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_eee *p = &priv->dev->ports[port].eee;

	bcm_sf2_sw_mac_link_set(ds, port, interface, true);

	if (mode == MLO_AN_PHY && phydev)
		p->eee_enabled = b53_eee_init(ds, port, phydev);
}

static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
				   struct phylink_link_state *status)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	status->link = false;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We use what the interrupt handler
	 * determined before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some versions of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and makes it go into the PHY_FORCING state
		 * instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].slave);
		status->duplex = DUPLEX_FULL;
	} else {
		status->link = true;
	}
}

static void bcm_sf2_enable_acb(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Enable ACB globally */
	reg = acb_readl(priv, ACB_CONTROL);
	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	acb_writel(priv, reg, ACB_CONTROL);
	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
	reg |= ACB_EN | ACB_ALGORITHM;
	acb_writel(priv, reg, ACB_CONTROL);
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all ports physically present, including the IMP port;
	 * the other ones have already been disabled during
	 * bcm_sf2_sw_setup
	 */
	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}

static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
	}

	bcm_sf2_enable_acb(ds);

	return 0;
}

static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Get the parent device WoL settings */
	p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}

static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	struct ethtool_wolinfo pwol;

	p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}

static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int port;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if (dsa_is_user_port(ds, port))
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	b53_configure_vlan(ds);
	bcm_sf2_enable_acb(ds);

	return 0;
}

/* The SWITCH_CORE register space is managed by b53 but operates on a page +
 * register basis so we need to translate that into an address that the
 * bus-glue understands.
 */
#define SF2_PAGE_REG_MKADDR(page, reg)	((page) << 10 | (reg) << 2)

static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
			      u8 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
			       u16 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
			       u64 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
			       u8 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
				u16 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
				u64 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}

static const struct b53_io_ops bcm_sf2_io_ops = {
	.read8	= bcm_sf2_core_read8,
	.read16	= bcm_sf2_core_read16,
	.read32	= bcm_sf2_core_read32,
	.read48	= bcm_sf2_core_read64,
	.read64	= bcm_sf2_core_read64,
	.write8	= bcm_sf2_core_write8,
	.write16 = bcm_sf2_core_write16,
	.write32 = bcm_sf2_core_write32,
	.write48 = bcm_sf2_core_write64,
	.write64 = bcm_sf2_core_write64,
};
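
/* DSA switch operations: SF2-specific setup, phylink, Wake-on-LAN and CFP
 * (rxnfc) callbacks, with the shared b53 helpers handling bridging, VLAN,
 * FDB and mirroring.
 */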

static const struct dsa_switch_ops bcm_sf2_ops = {
	.get_tag_protocol	= b53_get_tag_protocol,
	.setup			= bcm_sf2_sw_setup,
	.get_strings		= b53_get_strings,
	.get_ethtool_stats	= b53_get_ethtool_stats,
	.get_sset_count		= b53_get_sset_count,
	.get_ethtool_phy_stats	= b53_get_ethtool_phy_stats,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.phylink_validate	= bcm_sf2_sw_validate,
	.phylink_mac_config	= bcm_sf2_sw_mac_config,
	.phylink_mac_link_down	= bcm_sf2_sw_mac_link_down,
	.phylink_mac_link_up	= bcm_sf2_sw_mac_link_up,
	.phylink_fixed_state	= bcm_sf2_sw_fixed_state,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_mac_eee		= b53_get_mac_eee,
	.set_mac_eee		= b53_set_mac_eee,
	.port_bridge_join	= b53_br_join,
	.port_bridge_leave	= b53_br_leave,
	.port_stp_state_set	= b53_br_set_stp_state,
	.port_fast_age		= b53_br_fast_age,
	.port_vlan_filtering	= b53_vlan_filtering,
	.port_vlan_prepare	= b53_vlan_prepare,
	.port_vlan_add		= b53_vlan_add,
	.port_vlan_del		= b53_vlan_del,
	.port_fdb_dump		= b53_fdb_dump,
	.port_fdb_add		= b53_fdb_add,
	.port_fdb_del		= b53_fdb_del,
	.get_rxnfc		= bcm_sf2_get_rxnfc,
	.set_rxnfc		= bcm_sf2_set_rxnfc,
	.port_mirror_add	= b53_mirror_add,
	.port_mirror_del	= b53_mirror_del,
};

struct bcm_sf2_of_data {
	u32 type;
	const u16 *reg_offsets;
	unsigned int core_reg_align;
	unsigned int num_cfp_rules;
};

/* Register offsets for the SWITCH_REG_* block */
static const u16 bcm_sf2_7445_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0C,
	[REG_SWITCH_REVISION]	= 0x18,
	[REG_PHY_REVISION]	= 0x1C,
	[REG_SPHY_CNTRL]	= 0x2C,
	[REG_RGMII_0_CNTRL]	= 0x34,
	[REG_RGMII_1_CNTRL]	= 0x40,
	[REG_RGMII_2_CNTRL]	= 0x4c,
	[REG_LED_0_CNTRL]	= 0x90,
	[REG_LED_1_CNTRL]	= 0x94,
	[REG_LED_2_CNTRL]	= 0x98,
};

static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
	.type		= BCM7445_DEVICE_ID,
	.core_reg_align	= 0,
	.reg_offsets	= bcm_sf2_7445_reg_offsets,
	.num_cfp_rules	= 256,
};

static const u16 bcm_sf2_7278_reg_offsets[] = {
	[REG_SWITCH_CNTRL]	= 0x00,
	[REG_SWITCH_STATUS]	= 0x04,
	[REG_DIR_DATA_WRITE]	= 0x08,
	[REG_DIR_DATA_READ]	= 0x0c,
	[REG_SWITCH_REVISION]	= 0x10,
	[REG_PHY_REVISION]	= 0x14,
	[REG_SPHY_CNTRL]	= 0x24,
	[REG_RGMII_0_CNTRL]	= 0xe0,
	[REG_RGMII_1_CNTRL]	= 0xec,
	[REG_RGMII_2_CNTRL]	= 0xf8,
	[REG_LED_0_CNTRL]	= 0x40,
	[REG_LED_1_CNTRL]	= 0x4c,
	[REG_LED_2_CNTRL]	= 0x58,
};

static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
	.type		= BCM7278_DEVICE_ID,
	.core_reg_align	= 1,
	.reg_offsets	= bcm_sf2_7278_reg_offsets,
	.num_cfp_rules	= 128,
};

static const struct of_device_id bcm_sf2_of_match[] = {
	{ .compatible = "brcm,bcm7445-switch-v4.0",
	  .data = &bcm_sf2_7445_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.0",
	  .data = &bcm_sf2_7278_data
	},
	{ .compatible = "brcm,bcm7278-switch-v4.8",
	  .data = &bcm_sf2_7278_data
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
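
/* Probe sequence: reset the switch core, register the MDIO buses, reset the
 * CFP block, request both INTRL2 interrupt lines, clear the MIB counters and
 * read back the port count and hardware revisions before registering with
 * the b53 core.
 */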

static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
	void __iomem **base;
	struct resource *r;
	unsigned int i;
	u32 reg, rev;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
	if (!dev)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	of_id = of_match_node(bcm_sf2_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	data = of_id->data;

	/* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
	priv->type = data->type;
	priv->reg_offsets = data->reg_offsets;
	priv->core_reg_align = data->core_reg_align;
	priv->num_cfp_rules = data->num_cfp_rules;

	/* Auto-detection using standard registers will not work, so
	 * provide an indication of what kind of device we are for
	 * b53_common to work with
	 */
	pdata->chip_id = priv->type;
	dev->pdata = pdata;

	priv->dev = dev;
	ds = dev->ds;
	ds->ops = &bcm_sf2_ops;

	/* Advertise the 8 egress queues */
	ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;

	dev_set_drvdata(&pdev->dev, priv);

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);
	mutex_init(&priv->cfp.lock);

	/* CFP rule #0 cannot be used for specific classifications, flag it as
	 * permanently used
	 */
	set_bit(0, priv->cfp.used);
	set_bit(0, priv->cfp.unique);

	bcm_sf2_identify_ports(priv, dn->child);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, i);
		*base = devm_ioremap_resource(&pdev->dev, r);
		if (IS_ERR(*base)) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			return PTR_ERR(*base);
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		return ret;
	}

	ret = bcm_sf2_mdio_register(ds);
	if (ret) {
		pr_err("failed to register MDIO bus\n");
		return ret;
	}

	ret = bcm_sf2_cfp_rst(priv);
	if (ret) {
		pr_err("failed to reset CFP\n");
		goto out_mdio;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
			       "switch_0", ds);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_mdio;
	}

	ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
			       "switch_1", ds);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_mdio;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
				  SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	ret = b53_switch_register(dev);
	if (ret)
		goto out_mdio;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_mdio:
	bcm_sf2_mdio_unregister(priv);
	return ret;
}

static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* Disable all ports and interrupts */
	priv->wol_ports_mask = 0;
	bcm_sf2_sw_suspend(priv->dev->ds);
	dsa_unregister_switch(priv->dev->ds);
	bcm_sf2_mdio_unregister(priv);

	return 0;
}

static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	/* For a kernel about to be kexec'd we want to keep the GPHY on for a
	 * successful MDIO bus scan to occur. If we did turn off the GPHY
	 * before (e.g: port_disable), this will also power it back on.
	 *
	 * Do not rely on kexec_in_progress, just power the PHY on.
	 */
	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(priv->dev->ds, true);
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_suspend(priv->dev->ds);
}

static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);

	return dsa_switch_resume(priv->dev->ds);
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);

static struct platform_driver bcm_sf2_driver = {
	.probe	= bcm_sf2_sw_probe,
	.remove	= bcm_sf2_sw_remove,
	.shutdown = bcm_sf2_sw_shutdown,
	.driver = {
		.name = "brcm-sf2",
		.of_match_table = bcm_sf2_of_match,
		.pm = &bcm_sf2_pm_ops,
	},
};
module_platform_driver(bcm_sf2_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");