// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel GSWIP switch driver for VRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <net/dsa.h>
#include <dt-bindings/mips/lantiq_rcu_gphy.h>

#include "lantiq_pce.h"

/* GSWIP MDIO Registers */
#define GSWIP_MDIO_GLOB			0x00
#define GSWIP_MDIO_GLOB_ENABLE		BIT(15)
#define GSWIP_MDIO_CTRL			0x08
#define GSWIP_MDIO_CTRL_BUSY		BIT(12)
#define GSWIP_MDIO_CTRL_RD		BIT(11)
#define GSWIP_MDIO_CTRL_WR		BIT(10)
#define GSWIP_MDIO_CTRL_PHYAD_MASK	0x1f
#define GSWIP_MDIO_CTRL_PHYAD_SHIFT	5
#define GSWIP_MDIO_CTRL_REGAD_MASK	0x1f
#define GSWIP_MDIO_READ			0x09
#define GSWIP_MDIO_WRITE		0x0A
#define GSWIP_MDIO_MDC_CFG0		0x0B
#define GSWIP_MDIO_MDC_CFG1		0x0C
#define GSWIP_MDIO_PHYp(p)		(0x15 - (p))
#define GSWIP_MDIO_PHY_LINK_MASK	0x6000
#define GSWIP_MDIO_PHY_LINK_AUTO	0x0000
#define GSWIP_MDIO_PHY_LINK_DOWN	0x4000
#define GSWIP_MDIO_PHY_LINK_UP		0x2000
#define GSWIP_MDIO_PHY_SPEED_MASK	0x1800
#define GSWIP_MDIO_PHY_SPEED_AUTO	0x1800
#define GSWIP_MDIO_PHY_SPEED_M10	0x0000
#define GSWIP_MDIO_PHY_SPEED_M100	0x0800
#define GSWIP_MDIO_PHY_SPEED_G1		0x1000
#define GSWIP_MDIO_PHY_FDUP_MASK	0x0600
#define GSWIP_MDIO_PHY_FDUP_AUTO	0x0000
#define GSWIP_MDIO_PHY_FDUP_EN		0x0200
#define GSWIP_MDIO_PHY_FDUP_DIS		0x0600
#define GSWIP_MDIO_PHY_FCONTX_MASK	0x0180
#define GSWIP_MDIO_PHY_FCONTX_AUTO	0x0000
#define GSWIP_MDIO_PHY_FCONTX_EN	0x0100
#define GSWIP_MDIO_PHY_FCONTX_DIS	0x0180
#define GSWIP_MDIO_PHY_FCONRX_MASK	0x0060
#define GSWIP_MDIO_PHY_FCONRX_AUTO	0x0000
#define GSWIP_MDIO_PHY_FCONRX_EN	0x0020
#define GSWIP_MDIO_PHY_FCONRX_DIS	0x0060
#define GSWIP_MDIO_PHY_ADDR_MASK	0x001f
#define GSWIP_MDIO_PHY_MASK		(GSWIP_MDIO_PHY_ADDR_MASK | \
					 GSWIP_MDIO_PHY_FCONRX_MASK | \
					 GSWIP_MDIO_PHY_FCONTX_MASK | \
					 GSWIP_MDIO_PHY_LINK_MASK | \
					 GSWIP_MDIO_PHY_SPEED_MASK | \
					 GSWIP_MDIO_PHY_FDUP_MASK)

/* GSWIP MII Registers */
#define GSWIP_MII_CFG0			0x00
#define GSWIP_MII_CFG1			0x02
#define GSWIP_MII_CFG5			0x04
#define GSWIP_MII_CFG_EN		BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS		BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP		0x0
#define GSWIP_MII_CFG_MODE_MIIM		0x1
#define GSWIP_MII_CFG_MODE_RMIIP	0x2
#define GSWIP_MII_CFG_MODE_RMIIM	0x3
#define GSWIP_MII_CFG_MODE_RGMII	0x4
#define GSWIP_MII_CFG_MODE_MASK		0xf
#define GSWIP_MII_CFG_RATE_M2P5		0x00
#define GSWIP_MII_CFG_RATE_M25		0x10
#define GSWIP_MII_CFG_RATE_M125		0x20
#define GSWIP_MII_CFG_RATE_M50		0x30
#define GSWIP_MII_CFG_RATE_AUTO		0x40
#define GSWIP_MII_CFG_RATE_MASK		0x70
#define GSWIP_MII_PCDU0			0x01
#define GSWIP_MII_PCDU1			0x03
#define GSWIP_MII_PCDU5			0x05
#define GSWIP_MII_PCDU_TXDLY_MASK	GENMASK(2, 0)
#define GSWIP_MII_PCDU_RXDLY_MASK	GENMASK(9, 7)

/* GSWIP Core Registers */
#define GSWIP_SWRES			0x000
#define GSWIP_SWRES_R1			BIT(1)	/* GSWIP Software reset */
#define GSWIP_SWRES_R0			BIT(0)	/* GSWIP Hardware reset */
#define GSWIP_VERSION			0x013
#define GSWIP_VERSION_REV_SHIFT		0
#define GSWIP_VERSION_REV_MASK		GENMASK(7, 0)
#define GSWIP_VERSION_MOD_SHIFT		8
#define GSWIP_VERSION_MOD_MASK		GENMASK(15, 8)
#define GSWIP_VERSION_2_0		0x100
#define GSWIP_VERSION_2_1		0x021
#define GSWIP_VERSION_2_2		0x122
#define GSWIP_VERSION_2_2_ETC		0x022

#define GSWIP_BM_RAM_VAL(x)		(0x043 - (x))
#define GSWIP_BM_RAM_ADDR		0x044
#define GSWIP_BM_RAM_CTRL		0x045
#define GSWIP_BM_RAM_CTRL_BAS		BIT(15)
#define GSWIP_BM_RAM_CTRL_OPMOD		BIT(5)
#define GSWIP_BM_RAM_CTRL_ADDR_MASK	GENMASK(4, 0)
#define GSWIP_BM_QUEUE_GCTRL		0x04A
#define GSWIP_BM_QUEUE_GCTRL_GL_MOD	BIT(10)
/* buffer management Port Configuration Register */
#define GSWIP_BM_PCFGp(p)		(0x080 + ((p) * 2))
#define GSWIP_BM_PCFG_CNTEN		BIT(0)	/* RMON Counter Enable */
#define GSWIP_BM_PCFG_IGCNT		BIT(1)	/* Ingress Special Tag RMON count */
/* buffer management Port Control Register */
#define GSWIP_BM_RMON_CTRLp(p)		(0x81 + ((p) * 2))
#define GSWIP_BM_CTRL_RMON_RAM1_RES	BIT(0)	/* Software Reset for RMON RAM 1 */
#define GSWIP_BM_CTRL_RMON_RAM2_RES	BIT(1)	/* Software Reset for RMON RAM 2 */

/* PCE */
#define GSWIP_PCE_TBL_KEY(x)		(0x447 - (x))
#define GSWIP_PCE_TBL_MASK		0x448
#define GSWIP_PCE_TBL_VAL(x)		(0x44D - (x))
#define GSWIP_PCE_TBL_ADDR		0x44E
#define GSWIP_PCE_TBL_CTRL		0x44F
#define GSWIP_PCE_TBL_CTRL_BAS		BIT(15)
#define GSWIP_PCE_TBL_CTRL_TYPE		BIT(13)
#define GSWIP_PCE_TBL_CTRL_VLD		BIT(12)
#define GSWIP_PCE_TBL_CTRL_KEYFORM	BIT(11)
#define GSWIP_PCE_TBL_CTRL_GMAP_MASK	GENMASK(10, 7)
#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK	GENMASK(6, 5)
#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD	0x00
#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR	0x20
#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD	0x40
#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR	0x60
#define GSWIP_PCE_TBL_CTRL_ADDR_MASK	GENMASK(4, 0)
#define GSWIP_PCE_PMAP1			0x453	/* Monitoring port map */
#define GSWIP_PCE_PMAP2			0x454	/* Default Multicast port map */
#define GSWIP_PCE_PMAP3			0x455	/* Default Unknown Unicast port map */
#define GSWIP_PCE_GCTRL_0		0x456
#define GSWIP_PCE_GCTRL_0_MC_VALID	BIT(3)
#define GSWIP_PCE_GCTRL_0_VLAN		BIT(14)	/* VLAN aware Switching */
#define GSWIP_PCE_GCTRL_1		0x457
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK	BIT(2)	/* MAC Address table lock */
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD	BIT(3)	/* MAC Address table lock forwarding mode */
#define GSWIP_PCE_PCTRL_0p(p)		(0x480 + ((p) * 0xA))
#define GSWIP_PCE_PCTRL_0_INGRESS	BIT(11)
#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN	0x0
#define GSWIP_PCE_PCTRL_0_PSTATE_RX	0x1
#define GSWIP_PCE_PCTRL_0_PSTATE_TX	0x2
#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING	0x3
#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING	0x7
#define GSWIP_PCE_PCTRL_0_PSTATE_MASK	GENMASK(2, 0)

#define GSWIP_MAC_FLEN			0x8C5
#define GSWIP_MAC_CTRL_2p(p)		(0x905 + ((p) * 0xC))
#define GSWIP_MAC_CTRL_2_MLEN		BIT(3)	/* Maximum Untagged Frame Length */

/* Ethernet Switch Fetch DMA Port Control Register */
#define GSWIP_FDMA_PCTRLp(p)		(0xA80 + ((p) * 0x6))
#define GSWIP_FDMA_PCTRL_EN		BIT(0)	/* FDMA Port Enable */
#define GSWIP_FDMA_PCTRL_STEN		BIT(1)	/* Special Tag Insertion Enable */
#define GSWIP_FDMA_PCTRL_VLANMOD_MASK	GENMASK(4, 3)	/* VLAN Modification Control */
#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT	3	/* VLAN Modification Control */
#define GSWIP_FDMA_PCTRL_VLANMOD_DIS	(0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO	(0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define GSWIP_FDMA_PCTRL_VLANMOD_ID	(0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH	(0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)

/* Ethernet Switch Store DMA Port Control Register */
#define GSWIP_SDMA_PCTRLp(p)		(0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN		BIT(0)	/* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN		BIT(1)	/* Flow Control Enable */
#define GSWIP_SDMA_PCTRL_PAUFWD		BIT(1)	/* Pause Frame Forwarding */

#define XRX200_GPHY_FW_ALIGN	(16 * 1024)

struct gswip_hw_info {
	int max_ports;
	int cpu_port;
};

struct xway_gphy_match_data {
	char *fe_firmware_name;
	char *ge_firmware_name;
};

struct gswip_gphy_fw {
	struct clk *clk_gate;
	struct reset_control *reset;
	u32 fw_addr_offset;
	char *fw_name;
};

struct gswip_priv {
	__iomem void *gswip;
	__iomem void *mdio;
	__iomem void *mii;
	const struct gswip_hw_info *hw_info;
	const struct xway_gphy_match_data *gphy_fw_name_cfg;
	struct dsa_switch *ds;
	struct device *dev;
	struct regmap *rcu_regmap;
	int num_gphy_fw;
	struct gswip_gphy_fw *gphy_fw;
};

struct gswip_rmon_cnt_desc {
	unsigned int size;
	unsigned int offset;
	const char *name;
};

#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}

static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
	/** Receive Packet Count (only packets that are accepted and not discarded). */
	MIB_DESC(1, 0x1F, "RxGoodPkts"),
	MIB_DESC(1, 0x23, "RxUnicastPkts"),
	MIB_DESC(1, 0x22, "RxMulticastPkts"),
	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
	MIB_DESC(1, 0x12, "Rx64BytePkts"),
	MIB_DESC(1, 0x13, "Rx127BytePkts"),
	MIB_DESC(1, 0x14, "Rx255BytePkts"),
	MIB_DESC(1, 0x15, "Rx511BytePkts"),
	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
	MIB_DESC(1, 0x18, "RxDroppedPkts"),
	MIB_DESC(1, 0x19, "RxFilteredPkts"),
	MIB_DESC(2, 0x24, "RxGoodBytes"),
	MIB_DESC(2, 0x26, "RxBadBytes"),
	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
	MIB_DESC(1, 0x0C, "TxGoodPkts"),
	MIB_DESC(1, 0x06, "TxUnicastPkts"),
	MIB_DESC(1, 0x07, "TxMulticastPkts"),
	MIB_DESC(1, 0x00, "Tx64BytePkts"),
	MIB_DESC(1, 0x01, "Tx127BytePkts"),
	MIB_DESC(1, 0x02, "Tx255BytePkts"),
	MIB_DESC(1, 0x03, "Tx511BytePkts"),
	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
	MIB_DESC(1, 0x08, "TxSingleCollCount"),
	MIB_DESC(1, 0x09, "TxMultCollCount"),
	MIB_DESC(1, 0x0A, "TxLateCollCount"),
	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
	MIB_DESC(1, 0x0D, "TxPauseCount"),
	MIB_DESC(1, 0x10, "TxDroppedPkts"),
	MIB_DESC(2, 0x0E, "TxGoodBytes"),
};

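/* Register access helpers (added descriptive comment): the register defines
 * above are word indices into the respective register block, so every
 * accessor below multiplies the offset by 4 before the 32-bit MMIO access.
 */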
static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->gswip + (offset * 4));
}

static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->gswip + (offset * 4));
}

static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
			      u32 offset)
{
	u32 val = gswip_switch_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_switch_w(priv, val, offset);
}

static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
				  u32 cleared)
{
	u32 val;

	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
				  (val & cleared) == 0, 20, 50000);
}

static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mdio + (offset * 4));
}

static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mdio + (offset * 4));
}

static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
			    u32 offset)
{
	u32 val = gswip_mdio_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_mdio_w(priv, val, offset);
}

static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
{
	return __raw_readl(priv->mii + (offset * 4));
}

static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->mii + (offset * 4));
}

static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
			   u32 offset)
{
	u32 val = gswip_mii_r(priv, offset);

	val &= ~(clear);
	val |= set;
	gswip_mii_w(priv, val, offset);
}

static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
			       int port)
{
	switch (port) {
	case 0:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
		break;
	case 1:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
		break;
	case 5:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
		break;
	}
}

static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
				int port)
{
	switch (port) {
	case 0:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
		break;
	case 1:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
		break;
	case 5:
		gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
		break;
	}
}

static int gswip_mdio_poll(struct gswip_priv *priv)
{
	int cnt = 100;

	while (likely(cnt--)) {
		u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);

		if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
			return 0;
		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

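/* MDIO bus accessors for the PHYs behind the switch-internal MDIO master
 * (added descriptive comment): gswip_mdio_poll() must report the controller
 * as idle before a new command is programmed into GSWIP_MDIO_CTRL.
 */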
static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	return 0;
}

static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}

static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
	struct dsa_switch *ds = priv->ds;

	ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
	if (!ds->slave_mii_bus)
		return -ENOMEM;

	ds->slave_mii_bus->priv = priv;
	ds->slave_mii_bus->read = gswip_mdio_rd;
	ds->slave_mii_bus->write = gswip_mdio_wr;
	ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
		 dev_name(priv->dev));
	ds->slave_mii_bus->parent = priv->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;

	return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
}

static int gswip_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;

	/* RMON Counter Enable for port */
	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));

	/* enable port fetch/store dma & VLAN Modification */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
			  GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));
	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
			  GSWIP_PCE_PCTRL_0p(port));

	if (!dsa_is_cpu_port(ds, port)) {
		u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
			      GSWIP_MDIO_PHY_SPEED_AUTO |
			      GSWIP_MDIO_PHY_FDUP_AUTO |
			      GSWIP_MDIO_PHY_FCONTX_AUTO |
			      GSWIP_MDIO_PHY_FCONRX_AUTO |
			      (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);

		gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
		/* Activate MDIO auto polling */
		gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
	}

	return 0;
}

static void gswip_port_disable(struct dsa_switch *ds, int port,
			       struct phy_device *phy)
{
	struct gswip_priv *priv = ds->priv;

	if (!dsa_is_cpu_port(ds, port)) {
		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
				GSWIP_MDIO_PHY_LINK_MASK,
				GSWIP_MDIO_PHYp(port));
		/* Deactivate MDIO auto polling */
		gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
	}

	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
			  GSWIP_SDMA_PCTRLp(port));
}

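/* PCE microcode download (added descriptive comment): the PCE table is not
 * memory mapped directly. Each row is staged in the GSWIP_PCE_TBL_* registers
 * and committed by setting the BAS bit, which the hardware clears again once
 * the table access has finished.
 */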
static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
	int i;
	int err;

	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);

	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
			       GSWIP_PCE_TBL_VAL(0));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
			       GSWIP_PCE_TBL_VAL(1));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
			       GSWIP_PCE_TBL_VAL(2));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
			       GSWIP_PCE_TBL_VAL(3));

		/* start the table access: */
		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
				  GSWIP_PCE_TBL_CTRL);
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			return err;
	}

	/* tell the switch that the microcode is loaded */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
			  GSWIP_PCE_GCTRL_0);

	return 0;
}

static int gswip_setup(struct dsa_switch *ds)
{
	struct gswip_priv *priv = ds->priv;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int i;
	int err;

	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
	usleep_range(5000, 10000);
	gswip_switch_w(priv, 0, GSWIP_SWRES);

	/* disable port fetch/store dma on all ports */
	for (i = 0; i < priv->hw_info->max_ports; i++)
		gswip_port_disable(ds, i, NULL);

	/* enable Switch */
	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);

	err = gswip_pce_load_microcode(priv);
	if (err) {
		dev_err(priv->dev, "writing PCE microcode failed, %i", err);
		return err;
	}

	/* Default unknown Broadcast/Multicast/Unicast port maps */
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);

	/* disable PHY auto polling */
	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
	/* Configure the MDIO Clock 2.5 MHz */
	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);

	/* Disable the xMII link */
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);

	/* enable special tag insertion on cpu port */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
			  GSWIP_FDMA_PCTRLp(cpu_port));

	gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
			  GSWIP_MAC_CTRL_2p(cpu_port));
	gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
			  GSWIP_BM_QUEUE_GCTRL);

	/* VLAN aware Switching */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);

	/* Mac Address Table Lock */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_1_MAC_GLOCK |
			  GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD,
			  GSWIP_PCE_GCTRL_1);

	gswip_port_enable(ds, cpu_port, NULL);
	return 0;
}

static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
						    int port)
{
	return DSA_TAG_PROTO_GSWIP;
}

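/* Port layout on the xRX200 family as encoded below (added descriptive
 * comment): ports 0 and 1 are the external xMII/RMII/RGMII interfaces,
 * ports 2-4 are wired to the internal GPHYs, port 5 can be used either as
 * RGMII or as an internal interface, and port 6 is the CPU port.
 */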
static void gswip_phylink_validate(struct dsa_switch *ds, int port,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	switch (port) {
	case 0:
	case 1:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_MII &&
		    state->interface != PHY_INTERFACE_MODE_REVMII &&
		    state->interface != PHY_INTERFACE_MODE_RMII)
			goto unsupported;
		break;
	case 2:
	case 3:
	case 4:
		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	case 5:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	default:
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII and Reverse MII, we support Gigabit,
	 * including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;

unsupported:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
	dev_err(ds->dev, "Unsupported interface: %d\n", state->interface);
	return;
}

static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
	struct gswip_priv *priv = ds->priv;
	u32 miicfg = 0;

	miicfg |= GSWIP_MII_CFG_LDCLKDIS;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_INTERNAL:
		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
		break;
	case PHY_INTERFACE_MODE_RMII:
		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
		break;
	default:
		dev_err(ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
				    GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_RXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
		break;
	default:
		break;
	}
}

static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int mode,
					phy_interface_t interface)
{
	struct gswip_priv *priv = ds->priv;

	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
}

static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface,
				      struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;

	/* Enable the xMII interface only for the external PHY */
	if (interface != PHY_INTERFACE_MODE_INTERNAL)
		gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}

static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			      uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
		strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
			ETH_GSTRING_LEN);
}

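/* RMON counter access (added descriptive comment): the counters live in the
 * BM RAM and are fetched through an indirect interface. The counter index is
 * written to GSWIP_BM_RAM_ADDR, the access is started via GSWIP_BM_RAM_CTRL,
 * and the 32-bit result is returned as two 16-bit halves in
 * GSWIP_BM_RAM_VAL(0) (low) and GSWIP_BM_RAM_VAL(1) (high).
 */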
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
				    u32 index)
{
	u32 result;
	int err;

	gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
	gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
			  GSWIP_BM_RAM_CTRL_OPMOD,
			  table | GSWIP_BM_RAM_CTRL_BAS,
			  GSWIP_BM_RAM_CTRL);

	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
				     GSWIP_BM_RAM_CTRL_BAS);
	if (err) {
		dev_err(priv->dev, "timeout while reading table: %u, index: %u",
			table, index);
		return 0;
	}

	result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
	result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;

	return result;
}

static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
				    uint64_t *data)
{
	struct gswip_priv *priv = ds->priv;
	const struct gswip_rmon_cnt_desc *rmon_cnt;
	int i;
	u64 high;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
		rmon_cnt = &gswip_rmon_cnt[i];

		data[i] = gswip_bcm_ram_entry_read(priv, port,
						   rmon_cnt->offset);
		if (rmon_cnt->size == 2) {
			high = gswip_bcm_ram_entry_read(priv, port,
							rmon_cnt->offset + 1);
			data[i] |= high << 32;
		}
	}
}

static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return 0;

	return ARRAY_SIZE(gswip_rmon_cnt);
}

static const struct dsa_switch_ops gswip_switch_ops = {
	.get_tag_protocol	= gswip_get_tag_protocol,
	.setup			= gswip_setup,
	.port_enable		= gswip_port_enable,
	.port_disable		= gswip_port_disable,
	.phylink_validate	= gswip_phylink_validate,
	.phylink_mac_config	= gswip_phylink_mac_config,
	.phylink_mac_link_down	= gswip_phylink_mac_link_down,
	.phylink_mac_link_up	= gswip_phylink_mac_link_up,
	.get_strings		= gswip_get_strings,
	.get_ethtool_stats	= gswip_get_ethtool_stats,
	.get_sset_count		= gswip_get_sset_count,
};

static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
};

static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
};

static const struct xway_gphy_match_data xrx300_gphy_data = {
	.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
	.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
};

static const struct of_device_id xway_gphy_match[] = {
	{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
	{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
	{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
	{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
	{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
	{},
};

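/* GPHY firmware handling (added descriptive comment): the embedded GPHYs
 * boot from a firmware image that the driver copies into a DMA buffer
 * aligned to a 16 kB boundary. The buffer address is written to the RCU
 * syscon block while the GPHY is held in reset, and the GPHY starts
 * executing the firmware once the reset is released.
 */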
static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
{
	struct device *dev = priv->dev;
	const struct firmware *fw;
	void *fw_addr;
	dma_addr_t dma_addr;
	dma_addr_t dev_addr;
	size_t size;
	int ret;

	ret = clk_prepare_enable(gphy_fw->clk_gate);
	if (ret)
		return ret;

	reset_control_assert(gphy_fw->reset);

	ret = request_firmware(&fw, gphy_fw->fw_name, dev);
	if (ret) {
		dev_err(dev, "failed to load firmware: %s, error: %i\n",
			gphy_fw->fw_name, ret);
		return ret;
	}

	/* GPHY cores need the firmware code in a persistent and contiguous
	 * memory area with a 16 kB boundary aligned start address.
	 */
	size = fw->size + XRX200_GPHY_FW_ALIGN;

	fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (fw_addr) {
		fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
		dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
		memcpy(fw_addr, fw->data, fw->size);
	} else {
		dev_err(dev, "failed to alloc firmware memory\n");
		release_firmware(fw);
		return -ENOMEM;
	}

	release_firmware(fw);

	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
	if (ret)
		return ret;

	reset_control_deassert(gphy_fw->reset);

	return ret;
}

static int gswip_gphy_fw_probe(struct gswip_priv *priv,
			       struct gswip_gphy_fw *gphy_fw,
			       struct device_node *gphy_fw_np, int i)
{
	struct device *dev = priv->dev;
	u32 gphy_mode;
	int ret;
	char gphyname[10];

	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);

	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
	if (IS_ERR(gphy_fw->clk_gate)) {
		dev_err(dev, "Failed to lookup gate clock\n");
		return PTR_ERR(gphy_fw->clk_gate);
	}

	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
	if (ret)
		return ret;

	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
	/* Default to GE mode */
	if (ret)
		gphy_mode = GPHY_MODE_GE;

	switch (gphy_mode) {
	case GPHY_MODE_FE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
		break;
	case GPHY_MODE_GE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
		break;
	default:
		dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
		return -EINVAL;
	}

	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
	if (IS_ERR(gphy_fw->reset)) {
		if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
			dev_err(dev, "Failed to lookup gphy reset\n");
		return PTR_ERR(gphy_fw->reset);
	}

	return gswip_gphy_fw_load(priv, gphy_fw);
}

static void gswip_gphy_fw_remove(struct gswip_priv *priv,
				 struct gswip_gphy_fw *gphy_fw)
{
	int ret;

	/* check if the device was fully probed */
	if (!gphy_fw->fw_name)
		return;

	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
	if (ret)
		dev_err(priv->dev, "can not reset GPHY FW pointer");

	clk_disable_unprepare(gphy_fw->clk_gate);

	reset_control_put(gphy_fw->reset);
}

static int gswip_gphy_fw_list(struct gswip_priv *priv,
			      struct device_node *gphy_fw_list_np, u32 version)
{
	struct device *dev = priv->dev;
	struct device_node *gphy_fw_np;
	const struct of_device_id *match;
	int err;
	int i = 0;

	/* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
	 * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
	 * needs a different GPHY firmware.
	 */
	if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
		switch (version) {
		case GSWIP_VERSION_2_0:
			priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
			break;
		case GSWIP_VERSION_2_1:
			priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
			break;
		default:
			dev_err(dev, "unknown GSWIP version: 0x%x", version);
			return -ENOENT;
		}
	}

	match = of_match_node(xway_gphy_match, gphy_fw_list_np);
	if (match && match->data)
		priv->gphy_fw_name_cfg = match->data;

	if (!priv->gphy_fw_name_cfg) {
		dev_err(dev, "GPHY compatible type not supported");
		return -ENOENT;
	}

	priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
	if (!priv->num_gphy_fw)
		return -ENOENT;

	priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
							   "lantiq,rcu");
	if (IS_ERR(priv->rcu_regmap))
		return PTR_ERR(priv->rcu_regmap);

	priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
					   sizeof(*priv->gphy_fw),
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->gphy_fw)
		return -ENOMEM;

	for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
		err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
					  gphy_fw_np, i);
		if (err)
			goto remove_gphy;
		i++;
	}

	return 0;

remove_gphy:
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}

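/* Probe glue. Illustrative sketch (added comment, not copied from any board
 * file) of the device tree layout the code below expects: three register
 * ranges for the switch core, MDIO and MII blocks, an optional
 * "lantiq,xrx200-mdio" child node for the built-in MDIO bus, and an optional
 * GPHY firmware container node with a "lantiq,rcu" phandle and one child per
 * GPHY. The GPHY gate clocks ("gphy0", "gphy1", ...) are looked up on the
 * switch device itself. All <...> values are placeholders:
 *
 *	switch@... {
 *		compatible = "lantiq,xrx200-gswip";
 *		reg = <...>,	// switch core
 *		      <...>,	// MDIO registers
 *		      <...>;	// MII registers
 *
 *		mdio {
 *			compatible = "lantiq,xrx200-mdio";
 *			// PHY nodes ...
 *		};
 *
 *		gphy-fw {
 *			compatible = "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw";
 *			lantiq,rcu = <...>;
 *
 *			gphy@... {
 *				reg = <...>;	// RCU offset of the FW address register
 *				resets = <...>;
 *				// optional: lantiq,gphy-mode = <GPHY_MODE_FE>;
 *			};
 *		};
 *	};
 */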
static int gswip_probe(struct platform_device *pdev)
{
	struct gswip_priv *priv;
	struct resource *gswip_res, *mdio_res, *mii_res;
	struct device_node *mdio_np, *gphy_fw_np;
	struct device *dev = &pdev->dev;
	int err;
	int i;
	u32 version;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->gswip = devm_ioremap_resource(dev, gswip_res);
	if (IS_ERR(priv->gswip))
		return PTR_ERR(priv->gswip);

	mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->mdio = devm_ioremap_resource(dev, mdio_res);
	if (IS_ERR(priv->mdio))
		return PTR_ERR(priv->mdio);

	mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	priv->mii = devm_ioremap_resource(dev, mii_res);
	if (IS_ERR(priv->mii))
		return PTR_ERR(priv->mii);

	priv->hw_info = of_device_get_match_data(dev);
	if (!priv->hw_info)
		return -EINVAL;

	priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
	if (!priv->ds)
		return -ENOMEM;

	priv->ds->priv = priv;
	priv->ds->ops = &gswip_switch_ops;
	priv->dev = dev;
	version = gswip_switch_r(priv, GSWIP_VERSION);

	/* load the GPHY firmware, if a firmware node is present */
	gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL,
					     "lantiq,gphy-fw");
	if (gphy_fw_np) {
		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
		if (err) {
			dev_err(dev, "gphy fw probe failed\n");
			return err;
		}
	}

	/* bring up the mdio bus */
	mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
					  "lantiq,xrx200-mdio");
	if (mdio_np) {
		err = gswip_mdio(priv, mdio_np);
		if (err) {
			dev_err(dev, "mdio probe failed\n");
			goto gphy_fw;
		}
	}

	err = dsa_register_switch(priv->ds);
	if (err) {
		dev_err(dev, "dsa switch register failed: %i\n", err);
		goto mdio_bus;
	}
	if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
		dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
			priv->hw_info->cpu_port);
		err = -EINVAL;
		goto mdio_bus;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
		 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
		 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
	return 0;

mdio_bus:
	if (mdio_np)
		mdiobus_unregister(priv->ds->slave_mii_bus);
gphy_fw:
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}

static int gswip_remove(struct platform_device *pdev)
{
	struct gswip_priv *priv = platform_get_drvdata(pdev);
	int i;

	if (!priv)
		return 0;

	/* disable the switch */
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);

	dsa_unregister_switch(priv->ds);

	if (priv->ds->slave_mii_bus)
		mdiobus_unregister(priv->ds->slave_mii_bus);

	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);

	return 0;
}

static const struct gswip_hw_info gswip_xrx200 = {
	.max_ports = 7,
	.cpu_port = 6,
};

static const struct of_device_id gswip_of_match[] = {
	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
	{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);

static struct platform_driver gswip_driver = {
	.probe = gswip_probe,
	.remove = gswip_remove,
	.driver = {
		.name = "gswip",
		.of_match_table = gswip_of_match,
	},
};

module_platform_driver(gswip_driver);

MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");