1 /* 2 * (C) Copyright 2011 Michal Simek 3 * 4 * Michal SIMEK <monstr@monstr.eu> 5 * 6 * Based on Xilinx gmac driver: 7 * (C) Copyright 2011 Xilinx 8 * 9 * SPDX-License-Identifier: GPL-2.0+ 10 */ 11 12 #include <common.h> 13 #include <net.h> 14 #include <config.h> 15 #include <malloc.h> 16 #include <asm/io.h> 17 #include <phy.h> 18 #include <miiphy.h> 19 #include <watchdog.h> 20 #include <asm/arch/hardware.h> 21 #include <asm/arch/sys_proto.h> 22 23 #if !defined(CONFIG_PHYLIB) 24 # error XILINX_GEM_ETHERNET requires PHYLIB 25 #endif 26 27 /* Bit/mask specification */ 28 #define ZYNQ_GEM_PHYMNTNC_OP_MASK 0x40020000 /* operation mask bits */ 29 #define ZYNQ_GEM_PHYMNTNC_OP_R_MASK 0x20000000 /* read operation */ 30 #define ZYNQ_GEM_PHYMNTNC_OP_W_MASK 0x10000000 /* write operation */ 31 #define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK 23 /* Shift bits for PHYAD */ 32 #define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK 18 /* Shift bits for PHREG */ 33 34 #define ZYNQ_GEM_RXBUF_EOF_MASK 0x00008000 /* End of frame. */ 35 #define ZYNQ_GEM_RXBUF_SOF_MASK 0x00004000 /* Start of frame. */ 36 #define ZYNQ_GEM_RXBUF_LEN_MASK 0x00003FFF /* Mask for length field */ 37 38 #define ZYNQ_GEM_RXBUF_WRAP_MASK 0x00000002 /* Wrap bit, last BD */ 39 #define ZYNQ_GEM_RXBUF_NEW_MASK 0x00000001 /* Used bit.. */ 40 #define ZYNQ_GEM_RXBUF_ADD_MASK 0xFFFFFFFC /* Mask for address */ 41 42 /* Wrap bit, last descriptor */ 43 #define ZYNQ_GEM_TXBUF_WRAP_MASK 0x40000000 44 #define ZYNQ_GEM_TXBUF_LAST_MASK 0x00008000 /* Last buffer */ 45 46 #define ZYNQ_GEM_NWCTRL_TXEN_MASK 0x00000008 /* Enable transmit */ 47 #define ZYNQ_GEM_NWCTRL_RXEN_MASK 0x00000004 /* Enable receive */ 48 #define ZYNQ_GEM_NWCTRL_MDEN_MASK 0x00000010 /* Enable MDIO port */ 49 #define ZYNQ_GEM_NWCTRL_STARTTX_MASK 0x00000200 /* Start tx (tx_go) */ 50 51 #define ZYNQ_GEM_NWCFG_SPEED100 0x000000001 /* 100 Mbps operation */ 52 #define ZYNQ_GEM_NWCFG_SPEED1000 0x000000400 /* 1Gbps operation */ 53 #define ZYNQ_GEM_NWCFG_FDEN 0x000000002 /* Full Duplex mode */ 54 #define ZYNQ_GEM_NWCFG_FSREM 0x000020000 /* FCS removal */ 55 #define ZYNQ_GEM_NWCFG_MDCCLKDIV 0x000080000 /* Div pclk by 32, 80MHz */ 56 #define ZYNQ_GEM_NWCFG_MDCCLKDIV2 0x0000c0000 /* Div pclk by 48, 120MHz */ 57 58 #define ZYNQ_GEM_NWCFG_INIT (ZYNQ_GEM_NWCFG_FDEN | \ 59 ZYNQ_GEM_NWCFG_FSREM | \ 60 ZYNQ_GEM_NWCFG_MDCCLKDIV) 61 62 #define ZYNQ_GEM_NWSR_MDIOIDLE_MASK 0x00000004 /* PHY management idle */ 63 64 #define ZYNQ_GEM_DMACR_BLENGTH 0x00000004 /* INCR4 AHB bursts */ 65 /* Use full configured addressable space (8 Kb) */ 66 #define ZYNQ_GEM_DMACR_RXSIZE 0x00000300 67 /* Use full configured addressable space (4 Kb) */ 68 #define ZYNQ_GEM_DMACR_TXSIZE 0x00000400 69 /* Set with binary 00011000 to use 1536 byte(1*max length frame/buffer) */ 70 #define ZYNQ_GEM_DMACR_RXBUF 0x00180000 71 72 #define ZYNQ_GEM_DMACR_INIT (ZYNQ_GEM_DMACR_BLENGTH | \ 73 ZYNQ_GEM_DMACR_RXSIZE | \ 74 ZYNQ_GEM_DMACR_TXSIZE | \ 75 ZYNQ_GEM_DMACR_RXBUF) 76 77 /* Use MII register 1 (MII status register) to detect PHY */ 78 #define PHY_DETECT_REG 1 79 80 /* Mask used to verify certain PHY features (or register contents) 81 * in the register above: 82 * 0x1000: 10Mbps full duplex support 83 * 0x0800: 10Mbps half duplex support 84 * 0x0008: Auto-negotiation support 85 */ 86 #define PHY_DETECT_MASK 0x1808 87 88 /* TX BD status masks */ 89 #define ZYNQ_GEM_TXBUF_FRMLEN_MASK 0x000007ff 90 #define ZYNQ_GEM_TXBUF_EXHAUSTED 0x08000000 91 #define ZYNQ_GEM_TXBUF_UNDERRUN 0x10000000 92 93 /* Device registers */ 94 struct zynq_gem_regs { 95 u32 nwctrl; /* Network Control reg */ 96 u32 nwcfg; /* Network Config reg */ 97 u32 nwsr; /* Network Status reg */ 98 u32 reserved1; 99 u32 dmacr; /* DMA Control reg */ 100 u32 txsr; /* TX Status reg */ 101 u32 rxqbase; /* RX Q Base address reg */ 102 u32 txqbase; /* TX Q Base address reg */ 103 u32 rxsr; /* RX Status reg */ 104 u32 reserved2[2]; 105 u32 idr; /* Interrupt Disable reg */ 106 u32 reserved3; 107 u32 phymntnc; /* Phy Maintaince reg */ 108 u32 reserved4[18]; 109 u32 hashl; /* Hash Low address reg */ 110 u32 hashh; /* Hash High address reg */ 111 #define LADDR_LOW 0 112 #define LADDR_HIGH 1 113 u32 laddr[4][LADDR_HIGH + 1]; /* Specific1 addr low/high reg */ 114 u32 match[4]; /* Type ID1 Match reg */ 115 u32 reserved6[18]; 116 u32 stat[44]; /* Octects transmitted Low reg - stat start */ 117 }; 118 119 /* BD descriptors */ 120 struct emac_bd { 121 u32 addr; /* Next descriptor pointer */ 122 u32 status; 123 }; 124 125 #define RX_BUF 3 126 /* Page table entries are set to 1MB, or multiples of 1MB 127 * (not < 1MB). driver uses less bd's so use 1MB bdspace. 128 */ 129 #define BD_SPACE 0x100000 130 /* BD separation space */ 131 #define BD_SEPRN_SPACE 64 132 133 /* Initialized, rxbd_current, rx_first_buf must be 0 after init */ 134 struct zynq_gem_priv { 135 struct emac_bd *tx_bd; 136 struct emac_bd *rx_bd; 137 char *rxbuffers; 138 u32 rxbd_current; 139 u32 rx_first_buf; 140 int phyaddr; 141 u32 emio; 142 int init; 143 struct phy_device *phydev; 144 struct mii_dev *bus; 145 }; 146 147 static inline int mdio_wait(struct eth_device *dev) 148 { 149 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 150 u32 timeout = 200; 151 152 /* Wait till MDIO interface is ready to accept a new transaction. */ 153 while (--timeout) { 154 if (readl(®s->nwsr) & ZYNQ_GEM_NWSR_MDIOIDLE_MASK) 155 break; 156 WATCHDOG_RESET(); 157 } 158 159 if (!timeout) { 160 printf("%s: Timeout\n", __func__); 161 return 1; 162 } 163 164 return 0; 165 } 166 167 static u32 phy_setup_op(struct eth_device *dev, u32 phy_addr, u32 regnum, 168 u32 op, u16 *data) 169 { 170 u32 mgtcr; 171 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 172 173 if (mdio_wait(dev)) 174 return 1; 175 176 /* Construct mgtcr mask for the operation */ 177 mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op | 178 (phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) | 179 (regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data; 180 181 /* Write mgtcr and wait for completion */ 182 writel(mgtcr, ®s->phymntnc); 183 184 if (mdio_wait(dev)) 185 return 1; 186 187 if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK) 188 *data = readl(®s->phymntnc); 189 190 return 0; 191 } 192 193 static u32 phyread(struct eth_device *dev, u32 phy_addr, u32 regnum, u16 *val) 194 { 195 return phy_setup_op(dev, phy_addr, regnum, 196 ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val); 197 } 198 199 static u32 phywrite(struct eth_device *dev, u32 phy_addr, u32 regnum, u16 data) 200 { 201 return phy_setup_op(dev, phy_addr, regnum, 202 ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data); 203 } 204 205 static void phy_detection(struct eth_device *dev) 206 { 207 int i; 208 u16 phyreg; 209 struct zynq_gem_priv *priv = dev->priv; 210 211 if (priv->phyaddr != -1) { 212 phyread(dev, priv->phyaddr, PHY_DETECT_REG, &phyreg); 213 if ((phyreg != 0xFFFF) && 214 ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) { 215 /* Found a valid PHY address */ 216 debug("Default phy address %d is valid\n", 217 priv->phyaddr); 218 return; 219 } else { 220 debug("PHY address is not setup correctly %d\n", 221 priv->phyaddr); 222 priv->phyaddr = -1; 223 } 224 } 225 226 debug("detecting phy address\n"); 227 if (priv->phyaddr == -1) { 228 /* detect the PHY address */ 229 for (i = 31; i >= 0; i--) { 230 phyread(dev, i, PHY_DETECT_REG, &phyreg); 231 if ((phyreg != 0xFFFF) && 232 ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) { 233 /* Found a valid PHY address */ 234 priv->phyaddr = i; 235 debug("Found valid phy address, %d\n", i); 236 return; 237 } 238 } 239 } 240 printf("PHY is not detected\n"); 241 } 242 243 static int zynq_gem_setup_mac(struct eth_device *dev) 244 { 245 u32 i, macaddrlow, macaddrhigh; 246 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 247 248 /* Set the MAC bits [31:0] in BOT */ 249 macaddrlow = dev->enetaddr[0]; 250 macaddrlow |= dev->enetaddr[1] << 8; 251 macaddrlow |= dev->enetaddr[2] << 16; 252 macaddrlow |= dev->enetaddr[3] << 24; 253 254 /* Set MAC bits [47:32] in TOP */ 255 macaddrhigh = dev->enetaddr[4]; 256 macaddrhigh |= dev->enetaddr[5] << 8; 257 258 for (i = 0; i < 4; i++) { 259 writel(0, ®s->laddr[i][LADDR_LOW]); 260 writel(0, ®s->laddr[i][LADDR_HIGH]); 261 /* Do not use MATCHx register */ 262 writel(0, ®s->match[i]); 263 } 264 265 writel(macaddrlow, ®s->laddr[0][LADDR_LOW]); 266 writel(macaddrhigh, ®s->laddr[0][LADDR_HIGH]); 267 268 return 0; 269 } 270 271 static int zynq_gem_init(struct eth_device *dev, bd_t * bis) 272 { 273 u32 i, rclk, clk = 0; 274 struct phy_device *phydev; 275 const u32 stat_size = (sizeof(struct zynq_gem_regs) - 276 offsetof(struct zynq_gem_regs, stat)) / 4; 277 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 278 struct zynq_gem_priv *priv = dev->priv; 279 const u32 supported = SUPPORTED_10baseT_Half | 280 SUPPORTED_10baseT_Full | 281 SUPPORTED_100baseT_Half | 282 SUPPORTED_100baseT_Full | 283 SUPPORTED_1000baseT_Half | 284 SUPPORTED_1000baseT_Full; 285 286 if (!priv->init) { 287 /* Disable all interrupts */ 288 writel(0xFFFFFFFF, ®s->idr); 289 290 /* Disable the receiver & transmitter */ 291 writel(0, ®s->nwctrl); 292 writel(0, ®s->txsr); 293 writel(0, ®s->rxsr); 294 writel(0, ®s->phymntnc); 295 296 /* Clear the Hash registers for the mac address 297 * pointed by AddressPtr 298 */ 299 writel(0x0, ®s->hashl); 300 /* Write bits [63:32] in TOP */ 301 writel(0x0, ®s->hashh); 302 303 /* Clear all counters */ 304 for (i = 0; i <= stat_size; i++) 305 readl(®s->stat[i]); 306 307 /* Setup RxBD space */ 308 memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd)); 309 310 for (i = 0; i < RX_BUF; i++) { 311 priv->rx_bd[i].status = 0xF0000000; 312 priv->rx_bd[i].addr = 313 ((u32)(priv->rxbuffers) + 314 (i * PKTSIZE_ALIGN)); 315 } 316 /* WRAP bit to last BD */ 317 priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK; 318 /* Write RxBDs to IP */ 319 writel((u32)priv->rx_bd, ®s->rxqbase); 320 321 /* Setup for DMA Configuration register */ 322 writel(ZYNQ_GEM_DMACR_INIT, ®s->dmacr); 323 324 /* Setup for Network Control register, MDIO, Rx and Tx enable */ 325 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK); 326 327 priv->init++; 328 } 329 330 phy_detection(dev); 331 332 /* interface - look at tsec */ 333 phydev = phy_connect(priv->bus, priv->phyaddr, dev, 0); 334 335 phydev->supported = supported | ADVERTISED_Pause | 336 ADVERTISED_Asym_Pause; 337 phydev->advertising = phydev->supported; 338 priv->phydev = phydev; 339 phy_config(phydev); 340 phy_startup(phydev); 341 342 if (!phydev->link) { 343 printf("%s: No link.\n", phydev->dev->name); 344 return -1; 345 } 346 347 switch (phydev->speed) { 348 case SPEED_1000: 349 writel(ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED1000, 350 ®s->nwcfg); 351 rclk = (0 << 4) | (1 << 0); 352 clk = (1 << 20) | (8 << 8) | (0 << 4) | (1 << 0); 353 break; 354 case SPEED_100: 355 clrsetbits_le32(®s->nwcfg, ZYNQ_GEM_NWCFG_SPEED1000, 356 ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED100); 357 rclk = 1 << 0; 358 clk = (5 << 20) | (8 << 8) | (0 << 4) | (1 << 0); 359 break; 360 case SPEED_10: 361 rclk = 1 << 0; 362 /* FIXME untested */ 363 clk = (5 << 20) | (8 << 8) | (0 << 4) | (1 << 0); 364 break; 365 } 366 367 /* Change the rclk and clk only not using EMIO interface */ 368 if (!priv->emio) 369 zynq_slcr_gem_clk_setup(dev->iobase != 370 ZYNQ_GEM_BASEADDR0, rclk, clk); 371 372 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK | 373 ZYNQ_GEM_NWCTRL_TXEN_MASK); 374 375 return 0; 376 } 377 378 static int zynq_gem_send(struct eth_device *dev, void *ptr, int len) 379 { 380 u32 addr, size; 381 struct zynq_gem_priv *priv = dev->priv; 382 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 383 384 /* setup BD */ 385 writel((u32)priv->tx_bd, ®s->txqbase); 386 387 /* Setup Tx BD */ 388 memset(priv->tx_bd, 0, sizeof(struct emac_bd)); 389 390 priv->tx_bd->addr = (u32)ptr; 391 priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) | 392 ZYNQ_GEM_TXBUF_LAST_MASK; 393 394 addr = (u32) ptr; 395 addr &= ~(ARCH_DMA_MINALIGN - 1); 396 size = roundup(len, ARCH_DMA_MINALIGN); 397 flush_dcache_range(addr, addr + size); 398 barrier(); 399 400 /* Start transmit */ 401 setbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK); 402 403 /* Read TX BD status */ 404 if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_UNDERRUN) 405 printf("TX underrun\n"); 406 if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED) 407 printf("TX buffers exhausted in mid frame\n"); 408 409 return 0; 410 } 411 412 /* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */ 413 static int zynq_gem_recv(struct eth_device *dev) 414 { 415 int frame_len; 416 struct zynq_gem_priv *priv = dev->priv; 417 struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current]; 418 struct emac_bd *first_bd; 419 420 if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK)) 421 return 0; 422 423 if (!(current_bd->status & 424 (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) { 425 printf("GEM: SOF or EOF not set for last buffer received!\n"); 426 return 0; 427 } 428 429 frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK; 430 if (frame_len) { 431 u32 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK; 432 addr &= ~(ARCH_DMA_MINALIGN - 1); 433 u32 size = roundup(frame_len, ARCH_DMA_MINALIGN); 434 invalidate_dcache_range(addr, addr + size); 435 436 NetReceive((u8 *)addr, frame_len); 437 438 if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) 439 priv->rx_first_buf = priv->rxbd_current; 440 else { 441 current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK; 442 current_bd->status = 0xF0000000; /* FIXME */ 443 } 444 445 if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) { 446 first_bd = &priv->rx_bd[priv->rx_first_buf]; 447 first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK; 448 first_bd->status = 0xF0000000; 449 } 450 451 if ((++priv->rxbd_current) >= RX_BUF) 452 priv->rxbd_current = 0; 453 } 454 455 return frame_len; 456 } 457 458 static void zynq_gem_halt(struct eth_device *dev) 459 { 460 struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase; 461 462 clrsetbits_le32(®s->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK | 463 ZYNQ_GEM_NWCTRL_TXEN_MASK, 0); 464 } 465 466 static int zynq_gem_miiphyread(const char *devname, uchar addr, 467 uchar reg, ushort *val) 468 { 469 struct eth_device *dev = eth_get_dev(); 470 int ret; 471 472 ret = phyread(dev, addr, reg, val); 473 debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, *val); 474 return ret; 475 } 476 477 static int zynq_gem_miiphy_write(const char *devname, uchar addr, 478 uchar reg, ushort val) 479 { 480 struct eth_device *dev = eth_get_dev(); 481 482 debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val); 483 return phywrite(dev, addr, reg, val); 484 } 485 486 int zynq_gem_initialize(bd_t *bis, int base_addr, int phy_addr, u32 emio) 487 { 488 struct eth_device *dev; 489 struct zynq_gem_priv *priv; 490 void *bd_space; 491 492 dev = calloc(1, sizeof(*dev)); 493 if (dev == NULL) 494 return -1; 495 496 dev->priv = calloc(1, sizeof(struct zynq_gem_priv)); 497 if (dev->priv == NULL) { 498 free(dev); 499 return -1; 500 } 501 priv = dev->priv; 502 503 /* Align rxbuffers to ARCH_DMA_MINALIGN */ 504 priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN); 505 memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN); 506 507 /* Align bd_space to 1MB */ 508 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); 509 mmu_set_region_dcache_behaviour((u32)bd_space, BD_SPACE, DCACHE_OFF); 510 511 /* Initialize the bd spaces for tx and rx bd's */ 512 priv->tx_bd = (struct emac_bd *)bd_space; 513 priv->rx_bd = (struct emac_bd *)((u32)bd_space + BD_SEPRN_SPACE); 514 515 priv->phyaddr = phy_addr; 516 priv->emio = emio; 517 518 sprintf(dev->name, "Gem.%x", base_addr); 519 520 dev->iobase = base_addr; 521 522 dev->init = zynq_gem_init; 523 dev->halt = zynq_gem_halt; 524 dev->send = zynq_gem_send; 525 dev->recv = zynq_gem_recv; 526 dev->write_hwaddr = zynq_gem_setup_mac; 527 528 eth_register(dev); 529 530 miiphy_register(dev->name, zynq_gem_miiphyread, zynq_gem_miiphy_write); 531 priv->bus = miiphy_get_dev_by_name(dev->name); 532 533 return 1; 534 } 535