// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 */

#include <clk.h>
#include <common.h>
#include <dm.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <console.h>
#include <malloc.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <watchdog.h>
#include <asm/system.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <linux/errno.h>

DECLARE_GLOBAL_DATA_PTR;

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK	0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK	23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK	18 /* Shift bits for PHREG */

#define ZYNQ_GEM_RXBUF_EOF_MASK		0x00008000 /* End of frame */
#define ZYNQ_GEM_RXBUF_SOF_MASK		0x00004000 /* Start of frame */
#define ZYNQ_GEM_RXBUF_LEN_MASK		0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK	0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK		0x00000001 /* Used bit */
#define ZYNQ_GEM_RXBUF_ADD_MASK		0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK	0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK	0x00008000 /* Last buffer */
#define ZYNQ_GEM_TXBUF_USED_MASK	0x80000000 /* Used by HW */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK	0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK	0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK	0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK	0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100		0x00000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000	0x00000400 /* 1Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN		0x00000002 /* Full Duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM		0x00020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_SGMII_ENBL	0x08000000 /* SGMII Enable */
#define ZYNQ_GEM_NWCFG_PCS_SEL		0x00000800 /* PCS select */
#ifdef CONFIG_ARM64
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x00100000 /* Div pclk by 64, max 160MHz */
#else
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x000c0000 /* Div pclk by 48, max 120MHz */
#endif

#ifdef CONFIG_ARM64
# define ZYNQ_GEM_DBUS_WIDTH	(1 << 21) /* 64 bit bus */
#else
# define ZYNQ_GEM_DBUS_WIDTH	(0 << 21) /* 32 bit bus */
#endif

#define ZYNQ_GEM_NWCFG_INIT		(ZYNQ_GEM_DBUS_WIDTH | \
					ZYNQ_GEM_NWCFG_FDEN | \
					ZYNQ_GEM_NWCFG_FSREM | \
					ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK	0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH		0x00000004 /* INCR4 AHB bursts */
/* Use full configured addressable space (8 Kb) */
#define ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use full configured addressable space (4 Kb) */
#define ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set with binary 00011000 to use 1536 byte (1 * max length frame/buffer) */
#define ZYNQ_GEM_DMACR_RXBUF		0x00180000

#if defined(CONFIG_PHYS_64BIT)
# define ZYNQ_GEM_DMA_BUS_WIDTH		BIT(30) /* 64 bit bus */
#else
# define ZYNQ_GEM_DMA_BUS_WIDTH		(0 << 30) /* 32 bit bus */
#endif

#define ZYNQ_GEM_DMACR_INIT	(ZYNQ_GEM_DMACR_BLENGTH | \
					ZYNQ_GEM_DMACR_RXSIZE | \
					ZYNQ_GEM_DMACR_TXSIZE | \
					ZYNQ_GEM_DMACR_RXBUF | \
					ZYNQ_GEM_DMA_BUS_WIDTH)

#define ZYNQ_GEM_TSR_DONE		0x00000020 /* Tx done mask */

#define ZYNQ_GEM_PCS_CTL_ANEG_ENBL	0x1000

#define ZYNQ_GEM_DCFG_DBG6_DMA_64B	BIT(23)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *	0x1000: 10Mbps full duplex support
 *	0x0800: 10Mbps half duplex support
 *	0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK	0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED	0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN		0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10	2500000UL
#define ZYNQ_GEM_FREQUENCY_100	25000000UL
#define ZYNQ_GEM_FREQUENCY_1000	125000000UL

/* Device registers */
struct zynq_gem_regs {
	u32 nwctrl; /* 0x0 - Network Control reg */
	u32 nwcfg; /* 0x4 - Network Config reg */
	u32 nwsr; /* 0x8 - Network Status reg */
	u32 reserved1;
	u32 dmacr; /* 0x10 - DMA Control reg */
	u32 txsr; /* 0x14 - TX Status reg */
	u32 rxqbase; /* 0x18 - RX Q Base address reg */
	u32 txqbase; /* 0x1c - TX Q Base address reg */
	u32 rxsr; /* 0x20 - RX Status reg */
	u32 reserved2[2];
	u32 idr; /* 0x2c - Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* 0x34 - PHY Maintenance reg */
	u32 reserved4[18];
	u32 hashl; /* 0x80 - Hash Low address reg */
	u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* 0x8c - Specific1 addr low/high reg */
	u32 match[4]; /* 0xa8 - Type ID1 Match reg */
	u32 reserved6[18];
#define STAT_SIZE	44
	u32 stat[STAT_SIZE]; /* 0x100 - Octets transmitted Low reg */
	u32 reserved9[20];
	u32 pcscntrl;
	u32 rserved12[36];
	u32 dcfg6; /* 0x294 - Design config reg6 */
	u32 reserved7[106];
	u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
	u32 reserved8[15];
	u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
	u32 reserved10[17];
	u32 upper_txqbase; /* 0x4C8 - Upper tx_q base addr */
	u32 reserved11[2];
	u32 upper_rxqbase; /* 0x4D4 - Upper rx_q base addr */
};

/* BD descriptors */
struct emac_bd {
	u32 addr; /* Next descriptor pointer */
	u32 status;
#if defined(CONFIG_PHYS_64BIT)
	u32 addr_hi;
	u32 reserved;
#endif
};

#define RX_BUF 32
/* Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer BDs, so use a 1MB BD space.
 */
#define BD_SPACE	0x100000
/* BD separation space */
#define BD_SEPRN_SPACE	(RX_BUF * sizeof(struct emac_bd))

/* Setup the first free TX descriptor */
#define TX_FREE_DESC	2

/* Initialized, rxbd_current, rx_first_buf must be 0 after init */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;
	struct emac_bd *rx_bd;
	char *rxbuffers;
	u32 rxbd_current;
	u32 rx_first_buf;
	int phyaddr;
	int init;
	struct zynq_gem_regs *iobase;
	phy_interface_t interface;
	struct phy_device *phydev;
	ofnode phy_of_node;
	struct mii_dev *bus;
	struct clk clk;
	u32 max_speed;
	bool int_pcs;
	bool dma_64bit;
};

static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
			u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = priv->iobase;
	int err;

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	/* Construct mgtcr mask for the operation */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Write mgtcr and wait for completion */
	writel(mgtcr, &regs->phymntnc);

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}

static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
		   u32 regnum, u16 *val)
{
	int ret;

	ret = phy_setup_op(priv, phy_addr, regnum,
			   ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);

	if (!ret)
		debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
		      phy_addr, regnum, *val);

	return ret;
}

static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
		    u32 regnum, u16 data)
{
	debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
	      regnum, data);

	return phy_setup_op(priv, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}

static int phy_detection(struct udevice *dev)
{
	int i;
	u16 phyreg = 0;
	struct zynq_gem_priv *priv = dev->priv;

	if (priv->phyaddr != -1) {
		phyread(priv, priv->phyaddr, PHY_DETECT_REG, &phyreg);
		if ((phyreg != 0xFFFF) &&
		    ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
			/* Found a valid PHY address */
			debug("Default phy address %d is valid\n",
			      priv->phyaddr);
			return 0;
		} else {
			debug("PHY address is not setup correctly %d\n",
			      priv->phyaddr);
			priv->phyaddr = -1;
		}
	}

	debug("detecting phy address\n");
	if (priv->phyaddr == -1) {
		/* detect the PHY address */
		for (i = 31; i >= 0; i--) {
			phyread(priv, i, PHY_DETECT_REG, &phyreg);
			if ((phyreg != 0xFFFF) &&
			    ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
				/* Found a valid PHY address */
				priv->phyaddr = i;
				debug("Found valid phy address, %d\n", i);
				return 0;
			}
		}
	}
	printf("PHY is not detected\n");
	return -1;
}

static int zynq_gem_setup_mac(struct udevice *dev)
{
	u32 i, macaddrlow, macaddrhigh;
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	/* Set the MAC bits [31:0] in BOT */
	macaddrlow = pdata->enetaddr[0];
	macaddrlow |= pdata->enetaddr[1] << 8;
	macaddrlow |= pdata->enetaddr[2] << 16;
	macaddrlow |= pdata->enetaddr[3] << 24;

	/* Set MAC bits [47:32] in TOP */
	macaddrhigh = pdata->enetaddr[4];
	macaddrhigh |= pdata->enetaddr[5] << 8;

	for (i = 0; i < 4; i++) {
		writel(0, &regs->laddr[i][LADDR_LOW]);
		writel(0, &regs->laddr[i][LADDR_HIGH]);
		/* Do not use MATCHx register */
		writel(0, &regs->match[i]);
	}

	writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
	writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

	return 0;
}

static int zynq_phy_init(struct udevice *dev)
{
	int ret;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	const u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* Enable only MDIO bus */
	writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, &regs->nwctrl);

	if ((priv->interface != PHY_INTERFACE_MODE_SGMII) &&
	    (priv->interface != PHY_INTERFACE_MODE_GMII)) {
		ret = phy_detection(dev);
		if (ret) {
			printf("GEM PHY init failed\n");
			return ret;
		}
	}

	priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
				   priv->interface);
	if (!priv->phydev)
		return -ENODEV;

	priv->phydev->supported &= supported | ADVERTISED_Pause |
				   ADVERTISED_Asym_Pause;
	if (priv->max_speed) {
		ret = phy_set_supported(priv->phydev, priv->max_speed);
		if (ret)
			return ret;
	}

	priv->phydev->advertising = priv->phydev->supported;
	priv->phydev->node = priv->phy_of_node;

	return phy_config(priv->phydev);
}

static int zynq_gem_init(struct udevice *dev)
{
	u32 i, nwconfig;
	int ret;
	unsigned long clk_rate = 0;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
	struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];

	if (readl(&regs->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
		priv->dma_64bit = true;
	else
		priv->dma_64bit = false;

#if defined(CONFIG_PHYS_64BIT)
	if (!priv->dma_64bit) {
		printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
		       __func__);
		return -EINVAL;
	}
#else
	if (priv->dma_64bit)
		debug("WARN: %s: Not using 64-bit dma even HW supports it\n",
		      __func__);
#endif

	if (!priv->init) {
		/* Disable all interrupts */
		writel(0xFFFFFFFF, &regs->idr);

		/* Disable the receiver & transmitter */
		writel(0, &regs->nwctrl);
		writel(0, &regs->txsr);
		writel(0, &regs->rxsr);
		writel(0, &regs->phymntnc);

		/* Clear the Hash registers for the mac address
		 * pointed by AddressPtr
		 */
		writel(0x0, &regs->hashl);
		/* Write bits [63:32] in TOP */
		writel(0x0, &regs->hashh);

		/* Clear all counters */
		for (i = 0; i < STAT_SIZE; i++)
			readl(&regs->stat[i]);

		/* Setup RxBD space */
		memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

		for (i = 0; i < RX_BUF; i++) {
			priv->rx_bd[i].status = 0xF0000000;
			priv->rx_bd[i].addr =
				(lower_32_bits((ulong)(priv->rxbuffers)
						+ (i * PKTSIZE_ALIGN)));
#if defined(CONFIG_PHYS_64BIT)
			priv->rx_bd[i].addr_hi =
				(upper_32_bits((ulong)(priv->rxbuffers)
						+ (i * PKTSIZE_ALIGN)));
#endif
		}
		/* WRAP bit to last BD */
		priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
		/* Write RxBDs to IP */
		writel(lower_32_bits((ulong)priv->rx_bd), &regs->rxqbase);
#if defined(CONFIG_PHYS_64BIT)
		writel(upper_32_bits((ulong)priv->rx_bd), &regs->upper_rxqbase);
#endif

		/* Setup for DMA Configuration register */
		writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

		/* Setup for Network Control register, MDIO, Rx and Tx enable */
		setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

		/* Disable the second priority queue */
		dummy_tx_bd->addr = 0;
#if defined(CONFIG_PHYS_64BIT)
		dummy_tx_bd->addr_hi = 0;
#endif
		dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
				ZYNQ_GEM_TXBUF_LAST_MASK |
				ZYNQ_GEM_TXBUF_USED_MASK;

		dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
				ZYNQ_GEM_RXBUF_NEW_MASK;
#if defined(CONFIG_PHYS_64BIT)
		dummy_rx_bd->addr_hi = 0;
#endif
		dummy_rx_bd->status = 0;

		writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
		writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);

		priv->init++;
	}

	ret = phy_startup(priv->phydev);
	if (ret)
		return ret;

	if (!priv->phydev->link) {
		printf("%s: No link.\n", priv->phydev->dev->name);
		return -1;
	}

	nwconfig = ZYNQ_GEM_NWCFG_INIT;

	/*
	 * Set SGMII enable PCS selection only if internal PCS/PMA
	 * core is used and interface is SGMII.
	 */
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
			    ZYNQ_GEM_NWCFG_PCS_SEL;
#ifdef CONFIG_ARM64
		writel(readl(&regs->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
		       &regs->pcscntrl);
#endif
	}

	switch (priv->phydev->speed) {
	case SPEED_1000:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

#if !defined(CONFIG_ARCH_VERSAL)
	ret = clk_set_rate(&priv->clk, clk_rate);
	if (IS_ERR_VALUE(ret) && ret != (unsigned long)-ENOSYS) {
		dev_err(dev, "failed to set tx clock rate\n");
		return ret;
	}

	ret = clk_enable(&priv->clk);
	if (ret && ret != -ENOSYS) {
		dev_err(dev, "failed to enable tx clock\n");
		return ret;
	}
#else
	debug("requested clk_rate %ld\n", clk_rate);
#endif

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
					ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}

static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
{
	dma_addr_t addr;
	u32 size;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *current_bd = &priv->tx_bd[1];

	/* Setup Tx BD */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = lower_32_bits((ulong)ptr);
#if defined(CONFIG_PHYS_64BIT)
	priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
#endif
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			       ZYNQ_GEM_TXBUF_LAST_MASK;
	/* Dummy descriptor to mark it as the last in descriptor chain */
	current_bd->addr = 0x0;
#if defined(CONFIG_PHYS_64BIT)
	current_bd->addr_hi = 0x0;
#endif
	current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			     ZYNQ_GEM_TXBUF_LAST_MASK |
			     ZYNQ_GEM_TXBUF_USED_MASK;

	/* setup BD */
	writel(lower_32_bits((ulong)priv->tx_bd), &regs->txqbase);
#if defined(CONFIG_PHYS_64BIT)
	writel(upper_32_bits((ulong)priv->tx_bd), &regs->upper_txqbase);
#endif

	addr = (ulong)ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);

	addr = (ulong)priv->rxbuffers;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup((RX_BUF * PKTSIZE_ALIGN), ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmit */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return wait_for_bit_le32(&regs->txsr, ZYNQ_GEM_TSR_DONE,
				 true, 20000, true);
}

/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
{
	int frame_len;
	dma_addr_t addr;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];

	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return -1;

	if (!(current_bd->status &
	      (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return -1;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (!frame_len) {
		printf("%s: Zero size packet?\n", __func__);
		return -1;
	}

#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		      | ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	addr &= ~(ARCH_DMA_MINALIGN - 1);

	*packetp = (uchar *)(uintptr_t)addr;

	return frame_len;
}

static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;

	if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
		priv->rx_first_buf = priv->rxbd_current;
	} else {
		current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		current_bd->status = 0xF0000000; /* FIXME */
	}

	if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
		first_bd = &priv->rx_bd[priv->rx_first_buf];
		first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		first_bd->status = 0xF0000000;
	}

	if ((++priv->rxbd_current) >= RX_BUF)
		priv->rxbd_current = 0;

	return 0;
}

static void zynq_gem_halt(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
			ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

__weak int zynq_board_read_rom_ethaddr(unsigned char *ethaddr)
{
	return -ENOSYS;
}

static int zynq_gem_read_rom_mac(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

	if (!pdata)
		return -ENOSYS;

	return zynq_board_read_rom_ethaddr(pdata->enetaddr);
}

static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
				int devad, int reg)
{
	struct zynq_gem_priv *priv = bus->priv;
	int ret;
	u16 val = 0;

	ret = phyread(priv, addr, reg, &val);
	debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);
	return val;
}

static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
				 int reg, u16 value)
{
	struct zynq_gem_priv *priv = bus->priv;

	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);
	return phywrite(priv, addr, reg, value);
}

static int zynq_gem_probe(struct udevice *dev)
{
	void *bd_space;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	int ret;

	/* Align rxbuffers to ARCH_DMA_MINALIGN */
	priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
	if (!priv->rxbuffers)
		return -ENOMEM;

	memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);

	/* Align bd_space to MMU_SECTION_SHIFT */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space)
		return -ENOMEM;

	mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
					BD_SPACE, DCACHE_OFF);

	/* Initialize the bd spaces for tx and rx bd's */
	priv->tx_bd = (struct emac_bd *)bd_space;
	priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);

	ret = clk_get_by_name(dev, "tx_clk", &priv->clk);
	if (ret < 0) {
		dev_err(dev, "failed to get clock\n");
		return -EINVAL;
	}

	priv->bus = mdio_alloc();
	priv->bus->read = zynq_gem_miiphy_read;
	priv->bus->write = zynq_gem_miiphy_write;
	priv->bus->priv = priv;

	ret = mdio_register_seq(priv->bus, dev->seq);
	if (ret)
		return ret;

	return zynq_phy_init(dev);
}

static int zynq_gem_remove(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops zynq_gem_ops = {
	.start = zynq_gem_init,
	.send = zynq_gem_send,
	.recv = zynq_gem_recv,
	.free_pkt = zynq_gem_free_pkt,
	.stop = zynq_gem_halt,
	.write_hwaddr = zynq_gem_setup_mac,
	.read_rom_hwaddr = zynq_gem_read_rom_mac,
};

static int zynq_gem_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args phandle_args;
	const char *phy_mode;

	pdata->iobase = (phys_addr_t)dev_read_addr(dev);
	priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
	/* Hardcode for now */
	priv->phyaddr = -1;

	if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					&phandle_args)) {
		debug("phy-handle does exist %s\n", dev->name);
		priv->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
		priv->phy_of_node = phandle_args.node;
		priv->max_speed = ofnode_read_u32_default(phandle_args.node,
							  "max-speed",
							  SPEED_1000);
	}

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}
	priv->interface = pdata->phy_interface;

	priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");

	printf("ZYNQ GEM: %lx, phyaddr %x, interface %s\n", (ulong)priv->iobase,
	       priv->phyaddr, phy_string_for_interface(priv->interface));

	return 0;
}

static const struct udevice_id zynq_gem_ids[] = {
	{ .compatible = "cdns,zynqmp-gem" },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "cdns,gem" },
	{ }
};

U_BOOT_DRIVER(zynq_gem) = {
	.name = "zynq_gem",
	.id = UCLASS_ETH,
	.of_match = zynq_gem_ids,
	.ofdata_to_platdata = zynq_gem_ofdata_to_platdata,
	.probe = zynq_gem_probe,
	.remove = zynq_gem_remove,
	.ops = &zynq_gem_ops,
	.priv_auto_alloc_size = sizeof(struct zynq_gem_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
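
/*
 * Example (illustrative only, not built as part of this driver): a board that
 * keeps its MAC address in board-specific storage can override the __weak
 * zynq_board_read_rom_ethaddr() defined above so that the .read_rom_hwaddr
 * hook in zynq_gem_ops returns that address. A minimal sketch, assuming a
 * hypothetical board helper read_mac_from_eeprom() that fills a 6-byte buffer
 * and returns 0 on success; such an override belongs in the board file, not
 * here:
 *
 *	int zynq_board_read_rom_ethaddr(unsigned char *ethaddr)
 *	{
 *		if (read_mac_from_eeprom(ethaddr))
 *			return -ENOSYS;
 *
 *		return is_valid_ethaddr(ethaddr) ? 0 : -EINVAL;
 *	}
 *
 * The Ethernet uclass then picks the address up through zynq_gem_read_rom_mac()
 * when no "ethaddr" environment variable is set.
 */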