/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <miiphy.h>
#include <malloc.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <asm/io.h>
#include "designware.h"

#if !defined(CONFIG_PHYLIB)
# error "DesignWare Ether MAC requires PHYLIB - missing CONFIG_PHYLIB"
#endif

static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	}

	return -1;
}

static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			 u16 val)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int ret = -1, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	}

	return ret;
}

static int dw_mdio_init(char *name, struct eth_mac_regs *mac_regs_p)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -1;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);

	bus->priv = (void *)mac_regs_p;

	return mdio_register(bus);
}
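
/*
 * Set up the Tx descriptors as a chained ring: each descriptor points at
 * its own buffer and at the next descriptor, and the last one wraps back
 * to the first. The whole table is flushed to RAM before its base address
 * is handed to the DMA engine.
 */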
static void tx_descs_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS | \
				DESC_TXSTS_TXCHECKINSCTRL | \
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->tx_mac_descrtable,
			   (unsigned int)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

static void rx_descs_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to the GMAC we need to make sure that the
	 * zeros written there right after "priv" structure allocation have
	 * been flushed to RAM. Otherwise some of them may only reach RAM
	 * while the GMAC is already pushing received data to RAM via DMA,
	 * corrupting the incoming data.
	 */
	flush_dcache_range((unsigned int)rxbuffs, (unsigned int)rxbuffs +
			   RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) | \
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->rx_mac_descrtable,
			   (unsigned int)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

static int dw_write_hwaddr(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;
	u8 *mac_id = &dev->enetaddr[0];

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

static void dw_adjust_link(struct eth_mac_regs *mac_p,
			   struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
}

static void dw_eth_halt(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

static int dw_eth_init(struct eth_device *dev, bd_t *bis)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -1;
		}

		mdelay(100);
	}

	/*
	 * The soft reset above clears the HW address registers,
	 * so we have to set them here once again.
	 */
	dw_write_hwaddr(dev);

	rx_descs_init(dev);
	tx_descs_init(dev);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	if (phy_startup(priv->phydev)) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return -1;
	}

	dw_adjust_link(mac_p, priv->phydev);

	if (!priv->phydev->link)
		return -1;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}
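
/*
 * Transmit path: check that the CPU owns the current Tx descriptor, copy
 * the packet into its buffer, flush the buffer and the descriptor to RAM,
 * pass ownership back to the DMA engine, advance the ring index and poke
 * the Tx poll demand register to start the transfer.
 */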
static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end = data_start +
		roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -1;
	}

	memcpy(desc_p->dmamac_addr, packet, length);

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl |= (length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK;

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl |= ((length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST | \
			       DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

static int dw_eth_recv(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = 0;
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {

		length = (status & DESC_RXSTS_FRMLENMSK) >> \
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);

		NetReceive(desc_p->dmamac_addr, length);

		/*
		 * Make the current descriptor valid again and go to
		 * the next one
		 */
		desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

		/* Flush only status field - others weren't changed */
		flush_dcache_range(desc_start, desc_end);

		/* Test the wrap-around condition. */
		if (++desc_num >= CONFIG_RX_DESCR_NUM)
			desc_num = 0;
	}

	priv->rx_currdescnum = desc_num;

	return length;
}

static int dw_phy_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct phy_device *phydev;
	int mask = 0xffffffff;

#ifdef CONFIG_PHY_ADDR
	mask = 1 << CONFIG_PHY_ADDR;
#endif

	phydev = phy_find_by_mask(priv->bus, mask, priv->interface);
	if (!phydev)
		return -1;

	phy_connect_dev(phydev, dev);

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}
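
/*
 * Driver entry point: allocate the eth_device and the DMA-aligned private
 * state, register the device with the network stack and its MDIO bus with
 * PHYLIB, then probe and configure the attached PHY.
 */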
int designware_initialize(ulong base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors which need a
	 * strict buswidth alignment, memalign is used to allocate memory
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%lx", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->dev = dev;
	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(dev);
}
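
/*
 * Example (hypothetical board code, not part of this driver): a board
 * would typically register the controller from its board_eth_init()
 * implementation, passing the GMAC base address (DWMAC_BASE below is a
 * placeholder for the SoC-specific address) and the PHY interface mode:
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return designware_initialize(DWMAC_BASE,
 *					     PHY_INTERFACE_MODE_RGMII);
 *	}
 */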