/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <net.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>

#if !defined(CONFIG_PHYLIB)
# error XILINX_GEM_ETHERNET requires PHYLIB
#endif

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK	0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK	23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK	18 /* Shift bits for PHREG */

#define ZYNQ_GEM_RXBUF_EOF_MASK		0x00008000 /* End of frame. */
#define ZYNQ_GEM_RXBUF_SOF_MASK		0x00004000 /* Start of frame. */
#define ZYNQ_GEM_RXBUF_LEN_MASK		0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK	0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK		0x00000001 /* Used bit */
#define ZYNQ_GEM_RXBUF_ADD_MASK		0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK	0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK	0x00008000 /* Last buffer */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK	0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK	0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK	0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK	0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100		0x000000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000	0x000000400 /* 1Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN		0x000000002 /* Full Duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM		0x000020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x000080000 /* Div pclk by 32, 80MHz */
#define ZYNQ_GEM_NWCFG_MDCCLKDIV2	0x0000c0000 /* Div pclk by 48, 120MHz */

#define ZYNQ_GEM_NWCFG_INIT		(ZYNQ_GEM_NWCFG_FDEN | \
					ZYNQ_GEM_NWCFG_FSREM | \
					ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK	0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH		0x00000004 /* INCR4 AHB bursts */
/* Use full configured addressable space (8 Kb) */
#define ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use full configured addressable space (4 Kb) */
#define ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set with binary 00011000 to use 1536 byte (1 * max length frame/buffer) */
#define ZYNQ_GEM_DMACR_RXBUF		0x00180000

#define ZYNQ_GEM_DMACR_INIT		(ZYNQ_GEM_DMACR_BLENGTH | \
					ZYNQ_GEM_DMACR_RXSIZE | \
					ZYNQ_GEM_DMACR_TXSIZE | \
					ZYNQ_GEM_DMACR_RXBUF)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *	0x1000: 10Mbps full duplex support
 *	0x0800: 10Mbps half duplex support
 *	0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK	0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED	0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN		0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10	2500000UL
#define ZYNQ_GEM_FREQUENCY_100	25000000UL
#define ZYNQ_GEM_FREQUENCY_1000	125000000UL
/* Device registers */
struct zynq_gem_regs {
	u32 nwctrl; /* Network Control reg */
	u32 nwcfg; /* Network Config reg */
	u32 nwsr; /* Network Status reg */
	u32 reserved1;
	u32 dmacr; /* DMA Control reg */
	u32 txsr; /* TX Status reg */
	u32 rxqbase; /* RX Q Base address reg */
	u32 txqbase; /* TX Q Base address reg */
	u32 rxsr; /* RX Status reg */
	u32 reserved2[2];
	u32 idr; /* Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* PHY Maintenance reg */
	u32 reserved4[18];
	u32 hashl; /* Hash Low address reg */
	u32 hashh; /* Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* Specific1 addr low/high reg */
	u32 match[4]; /* Type ID1 Match reg */
	u32 reserved6[18];
	u32 stat[44]; /* Octets transmitted Low reg - stat start */
};

/* BD descriptors */
struct emac_bd {
	u32 addr; /* Buffer address (low bits carry wrap/used flags on RX) */
	u32 status;
};

#define RX_BUF 3
/* Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer BDs, so a 1MB BD space is enough.
 */
#define BD_SPACE	0x100000
/* BD separation space */
#define BD_SEPRN_SPACE	64

/* Initialized, rxbd_current, rx_first_buf must be 0 after init */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;
	struct emac_bd *rx_bd;
	char *rxbuffers;
	u32 rxbd_current;
	u32 rx_first_buf;
	int phyaddr;
	u32 emio;
	int init;
	struct phy_device *phydev;
	struct mii_dev *bus;
};

static inline int mdio_wait(struct eth_device *dev)
{
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
	u32 timeout = 200;

	/* Wait till MDIO interface is ready to accept a new transaction. */
	while (--timeout) {
		if (readl(&regs->nwsr) & ZYNQ_GEM_NWSR_MDIOIDLE_MASK)
			break;
		WATCHDOG_RESET();
	}

	if (!timeout) {
		printf("%s: Timeout\n", __func__);
		return 1;
	}

	return 0;
}

static u32 phy_setup_op(struct eth_device *dev, u32 phy_addr, u32 regnum,
							u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

	if (mdio_wait(dev))
		return 1;

	/* Construct mgtcr mask for the operation */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Write mgtcr and wait for completion */
	writel(mgtcr, &regs->phymntnc);

	if (mdio_wait(dev))
		return 1;

	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}

static u32 phyread(struct eth_device *dev, u32 phy_addr, u32 regnum, u16 *val)
{
	return phy_setup_op(dev, phy_addr, regnum,
				ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);
}

static u32 phywrite(struct eth_device *dev, u32 phy_addr, u32 regnum, u16 data)
{
	return phy_setup_op(dev, phy_addr, regnum,
				ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}
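/*
 * Worked example, for illustration only; the values follow directly from
 * the defines and helpers above. A read of register 1 on a PHY at
 * address 7 composes the maintenance word as:
 *
 *	ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000
 *	| ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000
 *	| (7 << 23)			0x03800000
 *	| (1 << 18)			0x00040000
 *	=				0x63860000
 *
 * assuming the caller's *data starts out as 0. Once the write to phymntnc
 * completes, the PHY register value is returned in the low 16 bits of
 * phymntnc and copied back through *data.
 */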
static void phy_detection(struct eth_device *dev)
{
	int i;
	u16 phyreg;
	struct zynq_gem_priv *priv = dev->priv;

	if (priv->phyaddr != -1) {
		phyread(dev, priv->phyaddr, PHY_DETECT_REG, &phyreg);
		if ((phyreg != 0xFFFF) &&
		    ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
			/* Found a valid PHY address */
			debug("Default phy address %d is valid\n",
			      priv->phyaddr);
			return;
		} else {
			debug("PHY address is not set up correctly %d\n",
			      priv->phyaddr);
			priv->phyaddr = -1;
		}
	}

	debug("detecting phy address\n");
	if (priv->phyaddr == -1) {
		/* detect the PHY address */
		for (i = 31; i >= 0; i--) {
			phyread(dev, i, PHY_DETECT_REG, &phyreg);
			if ((phyreg != 0xFFFF) &&
			    ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
				/* Found a valid PHY address */
				priv->phyaddr = i;
				debug("Found valid phy address, %d\n", i);
				return;
			}
		}
	}
	printf("PHY is not detected\n");
}

static int zynq_gem_setup_mac(struct eth_device *dev)
{
	u32 i, macaddrlow, macaddrhigh;
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

	/* Set the MAC bits [31:0] in BOT */
	macaddrlow = dev->enetaddr[0];
	macaddrlow |= dev->enetaddr[1] << 8;
	macaddrlow |= dev->enetaddr[2] << 16;
	macaddrlow |= dev->enetaddr[3] << 24;

	/* Set MAC bits [47:32] in TOP */
	macaddrhigh = dev->enetaddr[4];
	macaddrhigh |= dev->enetaddr[5] << 8;

	for (i = 0; i < 4; i++) {
		writel(0, &regs->laddr[i][LADDR_LOW]);
		writel(0, &regs->laddr[i][LADDR_HIGH]);
		/* Do not use MATCHx register */
		writel(0, &regs->match[i]);
	}

	writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
	writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

	return 0;
}
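/*
 * Example (the MAC value below is arbitrary, chosen for illustration):
 * for a MAC address of 00:0a:35:00:01:22, dev->enetaddr[] holds
 * { 0x00, 0x0a, 0x35, 0x00, 0x01, 0x22 }, so the function above programs
 *
 *	laddr[0][LADDR_LOW]  = 0x00350a00	(bytes 0-3, LSB first)
 *	laddr[0][LADDR_HIGH] = 0x00002201	(bytes 4-5, LSB first)
 */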
static int zynq_gem_init(struct eth_device *dev, bd_t *bis)
{
	u32 i;
	unsigned long clk_rate = 0;
	struct phy_device *phydev;
	const u32 stat_size = (sizeof(struct zynq_gem_regs) -
				offsetof(struct zynq_gem_regs, stat)) / 4;
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
	struct zynq_gem_priv *priv = dev->priv;
	const u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	if (!priv->init) {
		/* Disable all interrupts */
		writel(0xFFFFFFFF, &regs->idr);

		/* Disable the receiver & transmitter */
		writel(0, &regs->nwctrl);
		writel(0, &regs->txsr);
		writel(0, &regs->rxsr);
		writel(0, &regs->phymntnc);

		/* Clear the Hash registers for the mac address
		 * pointed by AddressPtr
		 */
		writel(0x0, &regs->hashl);
		/* Write bits [63:32] in TOP */
		writel(0x0, &regs->hashh);

		/* Clear all counters (reading a statistic register clears it) */
		for (i = 0; i < stat_size; i++)
			readl(&regs->stat[i]);

		/* Setup RxBD space */
		memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

		for (i = 0; i < RX_BUF; i++) {
			priv->rx_bd[i].status = 0xF0000000;
			priv->rx_bd[i].addr =
					((u32)(priv->rxbuffers) +
							(i * PKTSIZE_ALIGN));
		}
		/* WRAP bit to last BD */
		priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
		/* Write RxBDs to IP */
		writel((u32)priv->rx_bd, &regs->rxqbase);

		/* Setup for DMA Configuration register */
		writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

		/* Setup for Network Control register, MDIO, Rx and Tx enable */
		setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

		priv->init++;
	}

	phy_detection(dev);

	/* interface - look at tsec */
	phydev = phy_connect(priv->bus, priv->phyaddr, dev, 0);

	phydev->supported = supported | ADVERTISED_Pause |
			    ADVERTISED_Asym_Pause;
	phydev->advertising = phydev->supported;
	priv->phydev = phydev;
	phy_config(phydev);
	phy_startup(phydev);

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return -1;
	}

	switch (phydev->speed) {
	case SPEED_1000:
		writel(ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		clrsetbits_le32(&regs->nwcfg, ZYNQ_GEM_NWCFG_SPEED1000,
				ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED100);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

	/* Change the rclk and clk only when not using the EMIO interface */
	if (!priv->emio)
		zynq_slcr_gem_clk_setup(dev->iobase !=
					ZYNQ_GEM_BASEADDR0, clk_rate);

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
					ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}

static int zynq_gem_send(struct eth_device *dev, void *ptr, int len)
{
	u32 addr, size;
	struct zynq_gem_priv *priv = dev->priv;
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

	/* Set the TX queue base address */
	writel((u32)priv->tx_bd, &regs->txqbase);

	/* Setup Tx BD */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = (u32)ptr;
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
						ZYNQ_GEM_TXBUF_LAST_MASK;

	/* Flush the frame to memory so the DMA sees the data */
	addr = (u32)ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmit */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_UNDERRUN)
		printf("TX underrun\n");
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return 0;
}

/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct eth_device *dev)
{
	int frame_len;
	struct zynq_gem_priv *priv = dev->priv;
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;

	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return 0;

	if (!(current_bd->status &
			(ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return 0;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (frame_len) {
		u32 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
		u32 size;

		addr &= ~(ARCH_DMA_MINALIGN - 1);
		size = roundup(frame_len, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(addr, addr + size);

		NetReceive((u8 *)addr, frame_len);

		if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
			priv->rx_first_buf = priv->rxbd_current;
		} else {
			current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
			current_bd->status = 0xF0000000; /* FIXME */
		}

		if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
			first_bd = &priv->rx_bd[priv->rx_first_buf];
			first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
			first_bd->status = 0xF0000000;
		}

		if ((++priv->rxbd_current) >= RX_BUF)
			priv->rxbd_current = 0;
	}

	return frame_len;
}

static void zynq_gem_halt(struct eth_device *dev)
{
	struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

	clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
						ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

static int zynq_gem_miiphyread(const char *devname, uchar addr,
							uchar reg, ushort *val)
{
	struct eth_device *dev = eth_get_dev();
	int ret;

	ret = phyread(dev, addr, reg, val);
	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, *val);
	return ret;
}

static int zynq_gem_miiphy_write(const char *devname, uchar addr,
							uchar reg, ushort val)
{
	struct eth_device *dev = eth_get_dev();

	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val);
	return phywrite(dev, addr, reg, val);
}

int zynq_gem_initialize(bd_t *bis, int base_addr, int phy_addr, u32 emio)
{
	struct eth_device *dev;
	struct zynq_gem_priv *priv;
	void *bd_space;

	dev = calloc(1, sizeof(*dev));
	if (dev == NULL)
		return -1;

	dev->priv = calloc(1, sizeof(struct zynq_gem_priv));
	if (dev->priv == NULL) {
		free(dev);
		return -1;
	}
	priv = dev->priv;

	/* Align rxbuffers to ARCH_DMA_MINALIGN */
	priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
	memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);

	/* Align bd_space to 1MB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	mmu_set_region_dcache_behaviour((u32)bd_space, BD_SPACE, DCACHE_OFF);

	/* Initialize the bd spaces for tx and rx bd's */
	priv->tx_bd = (struct emac_bd *)bd_space;
	priv->rx_bd = (struct emac_bd *)((u32)bd_space + BD_SEPRN_SPACE);

	priv->phyaddr = phy_addr;
	priv->emio = emio;

	sprintf(dev->name, "Gem.%x", base_addr);

	dev->iobase = base_addr;

	dev->init = zynq_gem_init;
	dev->halt = zynq_gem_halt;
	dev->send = zynq_gem_send;
	dev->recv = zynq_gem_recv;
	dev->write_hwaddr = zynq_gem_setup_mac;

	eth_register(dev);

	miiphy_register(dev->name, zynq_gem_miiphyread, zynq_gem_miiphy_write);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return 1;
}
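/*
 * Usage sketch (not part of this driver): a board file would typically
 * register the controller from its board_eth_init() hook, roughly as below.
 * ZYNQ_GEM_BASEADDR0 is the base-address macro already referenced above;
 * the PHY address and EMIO flag are board-specific assumptions. Passing -1
 * as the PHY address lets phy_detection() scan the MDIO bus for a valid PHY,
 * and emio = 0 makes zynq_gem_init() program the MIO clocks via
 * zynq_slcr_gem_clk_setup().
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return zynq_gem_initialize(bis, ZYNQ_GEM_BASEADDR0, -1, 0);
 *	}
 */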