1 /* 2 * Copyright 2014 Broadcom Corporation. 3 * 4 * SPDX-License-Identifier: GPL-2.0+ 5 */ 6 7 #ifdef BCM_GMAC_DEBUG 8 #ifndef DEBUG 9 #define DEBUG 10 #endif 11 #endif 12 13 #include <config.h> 14 #include <common.h> 15 #include <malloc.h> 16 #include <net.h> 17 #include <asm/io.h> 18 #include <phy.h> 19 20 #include "bcm-sf2-eth.h" 21 #include "bcm-sf2-eth-gmac.h" 22 23 #define SPINWAIT(exp, us) { \ 24 uint countdown = (us) + 9; \ 25 while ((exp) && (countdown >= 10)) {\ 26 udelay(10); \ 27 countdown -= 10; \ 28 } \ 29 } 30 31 static int gmac_disable_dma(struct eth_dma *dma, int dir); 32 static int gmac_enable_dma(struct eth_dma *dma, int dir); 33 34 /* DMA Descriptor */ 35 typedef struct { 36 /* misc control bits */ 37 uint32_t ctrl1; 38 /* buffer count and address extension */ 39 uint32_t ctrl2; 40 /* memory address of the date buffer, bits 31:0 */ 41 uint32_t addrlow; 42 /* memory address of the date buffer, bits 63:32 */ 43 uint32_t addrhigh; 44 } dma64dd_t; 45 46 uint32_t g_dmactrlflags; 47 48 static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags) 49 { 50 debug("%s enter\n", __func__); 51 52 g_dmactrlflags &= ~mask; 53 g_dmactrlflags |= flags; 54 55 /* If trying to enable parity, check if parity is actually supported */ 56 if (g_dmactrlflags & DMA_CTRL_PEN) { 57 uint32_t control; 58 59 control = readl(GMAC0_DMA_TX_CTRL_ADDR); 60 writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR); 61 if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) { 62 /* 63 * We *can* disable it, therefore it is supported; 64 * restore control register 65 */ 66 writel(control, GMAC0_DMA_TX_CTRL_ADDR); 67 } else { 68 /* Not supported, don't allow it to be enabled */ 69 g_dmactrlflags &= ~DMA_CTRL_PEN; 70 } 71 } 72 73 return g_dmactrlflags; 74 } 75 76 static inline void reg32_clear_bits(uint32_t reg, uint32_t value) 77 { 78 uint32_t v = readl(reg); 79 v &= ~(value); 80 writel(v, reg); 81 } 82 83 static inline void reg32_set_bits(uint32_t reg, uint32_t value) 84 { 85 uint32_t v = readl(reg); 86 v |= value; 87 writel(v, reg); 88 } 89 90 #ifdef BCM_GMAC_DEBUG 91 static void dma_tx_dump(struct eth_dma *dma) 92 { 93 dma64dd_t *descp = NULL; 94 uint8_t *bufp; 95 int i; 96 97 printf("TX DMA Register:\n"); 98 printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n", 99 readl(GMAC0_DMA_TX_CTRL_ADDR), 100 readl(GMAC0_DMA_TX_PTR_ADDR), 101 readl(GMAC0_DMA_TX_ADDR_LOW_ADDR), 102 readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR), 103 readl(GMAC0_DMA_TX_STATUS0_ADDR), 104 readl(GMAC0_DMA_TX_STATUS1_ADDR)); 105 106 printf("TX Descriptors:\n"); 107 for (i = 0; i < TX_BUF_NUM; i++) { 108 descp = (dma64dd_t *)(dma->tx_desc_aligned) + i; 109 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n", 110 descp->ctrl1, descp->ctrl2, 111 descp->addrhigh, descp->addrlow); 112 } 113 114 printf("TX Buffers:\n"); 115 /* Initialize TX DMA descriptor table */ 116 for (i = 0; i < TX_BUF_NUM; i++) { 117 bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE); 118 printf("buf%d:0x%x; ", i, (uint32_t)bufp); 119 } 120 printf("\n"); 121 } 122 123 static void dma_rx_dump(struct eth_dma *dma) 124 { 125 dma64dd_t *descp = NULL; 126 uint8_t *bufp; 127 int i; 128 129 printf("RX DMA Register:\n"); 130 printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n", 131 readl(GMAC0_DMA_RX_CTRL_ADDR), 132 readl(GMAC0_DMA_RX_PTR_ADDR), 133 readl(GMAC0_DMA_RX_ADDR_LOW_ADDR), 134 readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR), 135 readl(GMAC0_DMA_RX_STATUS0_ADDR), 136 readl(GMAC0_DMA_RX_STATUS1_ADDR)); 137 138 printf("RX Descriptors:\n"); 139 for (i = 0; i < RX_BUF_NUM; i++) { 140 descp = (dma64dd_t *)(dma->rx_desc_aligned) + i; 141 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n", 142 descp->ctrl1, descp->ctrl2, 143 descp->addrhigh, descp->addrlow); 144 } 145 146 printf("RX Buffers:\n"); 147 for (i = 0; i < RX_BUF_NUM; i++) { 148 bufp = dma->rx_buf + i * RX_BUF_SIZE; 149 printf("buf%d:0x%x; ", i, (uint32_t)bufp); 150 } 151 printf("\n"); 152 } 153 #endif 154 155 static int dma_tx_init(struct eth_dma *dma) 156 { 157 dma64dd_t *descp = NULL; 158 uint8_t *bufp; 159 int i; 160 uint32_t ctrl; 161 162 debug("%s enter\n", __func__); 163 164 /* clear descriptor memory */ 165 memset((void *)(dma->tx_desc_aligned), 0, 166 TX_BUF_NUM * sizeof(dma64dd_t)); 167 memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE); 168 169 /* Initialize TX DMA descriptor table */ 170 for (i = 0; i < TX_BUF_NUM; i++) { 171 descp = (dma64dd_t *)(dma->tx_desc_aligned) + i; 172 bufp = dma->tx_buf + i * TX_BUF_SIZE; 173 /* clear buffer memory */ 174 memset((void *)bufp, 0, TX_BUF_SIZE); 175 176 ctrl = 0; 177 /* if last descr set endOfTable */ 178 if (i == (TX_BUF_NUM-1)) 179 ctrl = D64_CTRL1_EOT; 180 descp->ctrl1 = ctrl; 181 descp->ctrl2 = 0; 182 descp->addrlow = (uint32_t)bufp; 183 descp->addrhigh = 0; 184 } 185 186 /* flush descriptor and buffer */ 187 descp = dma->tx_desc_aligned; 188 bufp = dma->tx_buf; 189 flush_dcache_range((unsigned long)descp, 190 (unsigned long)(descp + 191 sizeof(dma64dd_t) * TX_BUF_NUM)); 192 flush_dcache_range((unsigned long)(bufp), 193 (unsigned long)(bufp + TX_BUF_SIZE * TX_BUF_NUM)); 194 195 /* initialize the DMA channel */ 196 writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR); 197 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR); 198 199 /* now update the dma last descriptor */ 200 writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK, 201 GMAC0_DMA_TX_PTR_ADDR); 202 203 return 0; 204 } 205 206 static int dma_rx_init(struct eth_dma *dma) 207 { 208 uint32_t last_desc; 209 dma64dd_t *descp = NULL; 210 uint8_t *bufp; 211 uint32_t ctrl; 212 int i; 213 214 debug("%s enter\n", __func__); 215 216 /* clear descriptor memory */ 217 memset((void *)(dma->rx_desc_aligned), 0, 218 RX_BUF_NUM * sizeof(dma64dd_t)); 219 /* clear buffer memory */ 220 memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE); 221 222 /* Initialize RX DMA descriptor table */ 223 for (i = 0; i < RX_BUF_NUM; i++) { 224 descp = (dma64dd_t *)(dma->rx_desc_aligned) + i; 225 bufp = dma->rx_buf + i * RX_BUF_SIZE; 226 ctrl = 0; 227 /* if last descr set endOfTable */ 228 if (i == (RX_BUF_NUM - 1)) 229 ctrl = D64_CTRL1_EOT; 230 descp->ctrl1 = ctrl; 231 descp->ctrl2 = RX_BUF_SIZE; 232 descp->addrlow = (uint32_t)bufp; 233 descp->addrhigh = 0; 234 235 last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK) 236 + sizeof(dma64dd_t); 237 } 238 239 descp = dma->rx_desc_aligned; 240 bufp = dma->rx_buf; 241 /* flush descriptor and buffer */ 242 flush_dcache_range((unsigned long)descp, 243 (unsigned long)(descp + 244 sizeof(dma64dd_t) * RX_BUF_NUM)); 245 flush_dcache_range((unsigned long)(bufp), 246 (unsigned long)(bufp + RX_BUF_SIZE * RX_BUF_NUM)); 247 248 /* initailize the DMA channel */ 249 writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR); 250 writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR); 251 252 /* now update the dma last descriptor */ 253 writel(last_desc, GMAC0_DMA_RX_PTR_ADDR); 254 255 return 0; 256 } 257 258 static int dma_init(struct eth_dma *dma) 259 { 260 debug(" %s enter\n", __func__); 261 262 /* 263 * Default flags: For backwards compatibility both 264 * Rx Overflow Continue and Parity are DISABLED. 265 */ 266 dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0); 267 268 debug("rx burst len 0x%x\n", 269 (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK) 270 >> D64_RC_BL_SHIFT); 271 debug("tx burst len 0x%x\n", 272 (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK) 273 >> D64_XC_BL_SHIFT); 274 275 dma_tx_init(dma); 276 dma_rx_init(dma); 277 278 /* From end of chip_init() */ 279 /* enable the overflow continue feature and disable parity */ 280 dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */, 281 DMA_CTRL_ROC /* value */); 282 283 return 0; 284 } 285 286 static int dma_deinit(struct eth_dma *dma) 287 { 288 debug(" %s enter\n", __func__); 289 290 gmac_disable_dma(dma, MAC_DMA_RX); 291 gmac_disable_dma(dma, MAC_DMA_TX); 292 293 free(dma->tx_buf); 294 dma->tx_buf = NULL; 295 free(dma->tx_desc); 296 dma->tx_desc = NULL; 297 dma->tx_desc_aligned = NULL; 298 299 free(dma->rx_buf); 300 dma->rx_buf = NULL; 301 free(dma->rx_desc); 302 dma->rx_desc = NULL; 303 dma->rx_desc_aligned = NULL; 304 305 return 0; 306 } 307 308 int gmac_tx_packet(struct eth_dma *dma, void *packet, int length) 309 { 310 uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE; 311 312 /* kick off the dma */ 313 size_t len = length; 314 int txout = dma->cur_tx_index; 315 uint32_t flags; 316 dma64dd_t *descp = NULL; 317 uint32_t ctrl; 318 uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) + 319 sizeof(dma64dd_t)) & D64_XP_LD_MASK; 320 size_t buflen; 321 322 debug("%s enter\n", __func__); 323 324 /* load the buffer */ 325 memcpy(bufp, packet, len); 326 327 /* Add 4 bytes for Ethernet FCS/CRC */ 328 buflen = len + 4; 329 330 ctrl = (buflen & D64_CTRL2_BC_MASK); 331 332 /* the transmit will only be one frame or set SOF, EOF */ 333 /* also set int on completion */ 334 flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF; 335 336 /* txout points to the descriptor to uset */ 337 /* if last descriptor then set EOT */ 338 if (txout == (TX_BUF_NUM - 1)) { 339 flags |= D64_CTRL1_EOT; 340 last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK; 341 } 342 343 /* write the descriptor */ 344 descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout; 345 descp->addrlow = (uint32_t)bufp; 346 descp->addrhigh = 0; 347 descp->ctrl1 = flags; 348 descp->ctrl2 = ctrl; 349 350 /* flush descriptor and buffer */ 351 flush_dcache_range((unsigned long)descp, 352 (unsigned long)(descp + sizeof(dma64dd_t))); 353 flush_dcache_range((unsigned long)bufp, 354 (unsigned long)(bufp + TX_BUF_SIZE)); 355 356 /* now update the dma last descriptor */ 357 writel(last_desc, GMAC0_DMA_TX_PTR_ADDR); 358 359 /* tx dma should be enabled so packet should go out */ 360 361 /* update txout */ 362 dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1); 363 364 return 0; 365 } 366 367 bool gmac_check_tx_done(struct eth_dma *dma) 368 { 369 /* wait for tx to complete */ 370 uint32_t intstatus; 371 bool xfrdone = false; 372 373 debug("%s enter\n", __func__); 374 375 intstatus = readl(GMAC0_INT_STATUS_ADDR); 376 377 debug("int(0x%x)\n", intstatus); 378 if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) { 379 xfrdone = true; 380 /* clear the int bits */ 381 intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3); 382 writel(intstatus, GMAC0_INT_STATUS_ADDR); 383 } else { 384 debug("Tx int(0x%x)\n", intstatus); 385 } 386 387 return xfrdone; 388 } 389 390 int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf) 391 { 392 void *bufp, *datap; 393 size_t rcvlen = 0, buflen = 0; 394 uint32_t stat0 = 0, stat1 = 0; 395 uint32_t control, offset; 396 uint8_t statbuf[HWRXOFF*2]; 397 398 int index, curr, active; 399 dma64dd_t *descp = NULL; 400 401 /* udelay(50); */ 402 403 /* 404 * this api will check if a packet has been received. 405 * If so it will return the address of the buffer and current 406 * descriptor index will be incremented to the 407 * next descriptor. Once done with the frame the buffer should be 408 * added back onto the descriptor and the lastdscr should be updated 409 * to this descriptor. 410 */ 411 index = dma->cur_rx_index; 412 offset = (uint32_t)(dma->rx_desc_aligned); 413 stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK; 414 stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK; 415 curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t); 416 active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t); 417 418 /* check if any frame */ 419 if (index == curr) 420 return -1; 421 422 debug("received packet\n"); 423 debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active); 424 /* remove warning */ 425 if (index == active) 426 ; 427 428 /* get the packet pointer that corresponds to the rx descriptor */ 429 bufp = dma->rx_buf + index * RX_BUF_SIZE; 430 431 descp = (dma64dd_t *)(dma->rx_desc_aligned) + index; 432 /* flush descriptor and buffer */ 433 flush_dcache_range((unsigned long)descp, 434 (unsigned long)(descp + sizeof(dma64dd_t))); 435 flush_dcache_range((unsigned long)bufp, 436 (unsigned long)(bufp + RX_BUF_SIZE)); 437 438 buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK); 439 440 stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR); 441 stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR); 442 443 debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n", 444 (uint32_t)bufp, index, buflen, stat0, stat1); 445 446 dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1); 447 448 /* get buffer offset */ 449 control = readl(GMAC0_DMA_RX_CTRL_ADDR); 450 offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT; 451 rcvlen = *(uint16_t *)bufp; 452 453 debug("Received %d bytes\n", rcvlen); 454 /* copy status into temp buf then copy data from rx buffer */ 455 memcpy(statbuf, bufp, offset); 456 datap = (void *)((uint32_t)bufp + offset); 457 memcpy(buf, datap, rcvlen); 458 459 /* update descriptor that is being added back on ring */ 460 descp->ctrl2 = RX_BUF_SIZE; 461 descp->addrlow = (uint32_t)bufp; 462 descp->addrhigh = 0; 463 /* flush descriptor */ 464 flush_dcache_range((unsigned long)descp, 465 (unsigned long)(descp + sizeof(dma64dd_t))); 466 467 /* set the lastdscr for the rx ring */ 468 writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR); 469 470 return (int)rcvlen; 471 } 472 473 static int gmac_disable_dma(struct eth_dma *dma, int dir) 474 { 475 int status; 476 477 debug("%s enter\n", __func__); 478 479 if (dir == MAC_DMA_TX) { 480 /* address PR8249/PR7577 issue */ 481 /* suspend tx DMA first */ 482 writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR); 483 SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) & 484 D64_XS0_XS_MASK)) != 485 D64_XS0_XS_DISABLED) && 486 (status != D64_XS0_XS_IDLE) && 487 (status != D64_XS0_XS_STOPPED), 10000); 488 489 /* 490 * PR2414 WAR: DMA engines are not disabled until 491 * transfer finishes 492 */ 493 writel(0, GMAC0_DMA_TX_CTRL_ADDR); 494 SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) & 495 D64_XS0_XS_MASK)) != 496 D64_XS0_XS_DISABLED), 10000); 497 498 /* wait for the last transaction to complete */ 499 udelay(2); 500 501 status = (status == D64_XS0_XS_DISABLED); 502 } else { 503 /* 504 * PR2414 WAR: DMA engines are not disabled until 505 * transfer finishes 506 */ 507 writel(0, GMAC0_DMA_RX_CTRL_ADDR); 508 SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) & 509 D64_RS0_RS_MASK)) != 510 D64_RS0_RS_DISABLED), 10000); 511 512 status = (status == D64_RS0_RS_DISABLED); 513 } 514 515 return status; 516 } 517 518 static int gmac_enable_dma(struct eth_dma *dma, int dir) 519 { 520 uint32_t control; 521 522 debug("%s enter\n", __func__); 523 524 if (dir == MAC_DMA_TX) { 525 dma->cur_tx_index = 0; 526 527 /* 528 * These bits 20:18 (burstLen) of control register can be 529 * written but will take effect only if these bits are 530 * valid. So this will not affect previous versions 531 * of the DMA. They will continue to have those bits set to 0. 532 */ 533 control = readl(GMAC0_DMA_TX_CTRL_ADDR); 534 535 control |= D64_XC_XE; 536 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0) 537 control |= D64_XC_PD; 538 539 writel(control, GMAC0_DMA_TX_CTRL_ADDR); 540 541 /* initailize the DMA channel */ 542 writel((uint32_t)(dma->tx_desc_aligned), 543 GMAC0_DMA_TX_ADDR_LOW_ADDR); 544 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR); 545 } else { 546 dma->cur_rx_index = 0; 547 548 control = (readl(GMAC0_DMA_RX_CTRL_ADDR) & 549 D64_RC_AE) | D64_RC_RE; 550 551 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0) 552 control |= D64_RC_PD; 553 554 if (g_dmactrlflags & DMA_CTRL_ROC) 555 control |= D64_RC_OC; 556 557 /* 558 * These bits 20:18 (burstLen) of control register can be 559 * written but will take effect only if these bits are 560 * valid. So this will not affect previous versions 561 * of the DMA. They will continue to have those bits set to 0. 562 */ 563 control &= ~D64_RC_BL_MASK; 564 /* Keep default Rx burstlen */ 565 control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK; 566 control |= HWRXOFF << D64_RC_RO_SHIFT; 567 568 writel(control, GMAC0_DMA_RX_CTRL_ADDR); 569 570 /* 571 * the rx descriptor ring should have 572 * the addresses set properly; 573 * set the lastdscr for the rx ring 574 */ 575 writel(((uint32_t)(dma->rx_desc_aligned) + 576 (RX_BUF_NUM - 1) * RX_BUF_SIZE) & 577 D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR); 578 } 579 580 return 0; 581 } 582 583 bool gmac_mii_busywait(unsigned int timeout) 584 { 585 uint32_t tmp = 0; 586 587 while (timeout > 10) { 588 tmp = readl(GMAC_MII_CTRL_ADDR); 589 if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) { 590 udelay(10); 591 timeout -= 10; 592 } else { 593 break; 594 } 595 } 596 return tmp & (1 << GMAC_MII_BUSY_SHIFT); 597 } 598 599 int gmac_miiphy_read(const char *devname, unsigned char phyaddr, 600 unsigned char reg, unsigned short *value) 601 { 602 uint32_t tmp = 0; 603 604 (void)devname; 605 606 /* Busy wait timeout is 1ms */ 607 if (gmac_mii_busywait(1000)) { 608 error("%s: Prepare MII read: MII/MDIO busy\n", __func__); 609 return -1; 610 } 611 612 /* Read operation */ 613 tmp = GMAC_MII_DATA_READ_CMD; 614 tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) | 615 (reg << GMAC_MII_PHY_REG_SHIFT); 616 debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg); 617 writel(tmp, GMAC_MII_DATA_ADDR); 618 619 if (gmac_mii_busywait(1000)) { 620 error("%s: MII read failure: MII/MDIO busy\n", __func__); 621 return -1; 622 } 623 624 *value = readl(GMAC_MII_DATA_ADDR) & 0xffff; 625 debug("MII read data 0x%x\n", *value); 626 return 0; 627 } 628 629 int gmac_miiphy_write(const char *devname, unsigned char phyaddr, 630 unsigned char reg, unsigned short value) 631 { 632 uint32_t tmp = 0; 633 634 (void)devname; 635 636 /* Busy wait timeout is 1ms */ 637 if (gmac_mii_busywait(1000)) { 638 error("%s: Prepare MII write: MII/MDIO busy\n", __func__); 639 return -1; 640 } 641 642 /* Write operation */ 643 tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff); 644 tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) | 645 (reg << GMAC_MII_PHY_REG_SHIFT)); 646 debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n", 647 tmp, phyaddr, reg, value); 648 writel(tmp, GMAC_MII_DATA_ADDR); 649 650 if (gmac_mii_busywait(1000)) { 651 error("%s: MII write failure: MII/MDIO busy\n", __func__); 652 return -1; 653 } 654 655 return 0; 656 } 657 658 void gmac_init_reset(void) 659 { 660 debug("%s enter\n", __func__); 661 662 /* set command config reg CC_SR */ 663 reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR); 664 udelay(GMAC_RESET_DELAY); 665 } 666 667 void gmac_clear_reset(void) 668 { 669 debug("%s enter\n", __func__); 670 671 /* clear command config reg CC_SR */ 672 reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR); 673 udelay(GMAC_RESET_DELAY); 674 } 675 676 static void gmac_enable_local(bool en) 677 { 678 uint32_t cmdcfg; 679 680 debug("%s enter\n", __func__); 681 682 /* read command config reg */ 683 cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR); 684 685 /* put mac in reset */ 686 gmac_init_reset(); 687 688 cmdcfg |= CC_SR; 689 690 /* first deassert rx_ena and tx_ena while in reset */ 691 cmdcfg &= ~(CC_RE | CC_TE); 692 /* write command config reg */ 693 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR); 694 695 /* bring mac out of reset */ 696 gmac_clear_reset(); 697 698 /* if not enable exit now */ 699 if (!en) 700 return; 701 702 /* enable the mac transmit and receive paths now */ 703 udelay(2); 704 cmdcfg &= ~CC_SR; 705 cmdcfg |= (CC_RE | CC_TE); 706 707 /* assert rx_ena and tx_ena when out of reset to enable the mac */ 708 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR); 709 710 return; 711 } 712 713 int gmac_enable(void) 714 { 715 gmac_enable_local(1); 716 717 /* clear interrupts */ 718 writel(I_INTMASK, GMAC0_INT_STATUS_ADDR); 719 return 0; 720 } 721 722 int gmac_disable(void) 723 { 724 gmac_enable_local(0); 725 return 0; 726 } 727 728 int gmac_set_speed(int speed, int duplex) 729 { 730 uint32_t cmdcfg; 731 uint32_t hd_ena; 732 uint32_t speed_cfg; 733 734 hd_ena = duplex ? 0 : CC_HD; 735 if (speed == 1000) { 736 speed_cfg = 2; 737 } else if (speed == 100) { 738 speed_cfg = 1; 739 } else if (speed == 10) { 740 speed_cfg = 0; 741 } else { 742 error("%s: Invalid GMAC speed(%d)!\n", __func__, speed); 743 return -1; 744 } 745 746 cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR); 747 cmdcfg &= ~(CC_ES_MASK | CC_HD); 748 cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena); 749 750 printf("Change GMAC speed to %dMB\n", speed); 751 debug("GMAC speed cfg 0x%x\n", cmdcfg); 752 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR); 753 754 return 0; 755 } 756 757 int gmac_set_mac_addr(unsigned char *mac) 758 { 759 /* set our local address */ 760 debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n", 761 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 762 writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR); 763 writew(htons(*(uint32_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR); 764 765 return 0; 766 } 767 768 int gmac_mac_init(struct eth_device *dev) 769 { 770 struct eth_info *eth = (struct eth_info *)(dev->priv); 771 struct eth_dma *dma = &(eth->dma); 772 773 uint32_t tmp; 774 uint32_t cmdcfg; 775 int chipid; 776 777 debug("%s enter\n", __func__); 778 779 /* Always use GMAC0 */ 780 printf("Using GMAC%d\n", 0); 781 782 /* Reset AMAC0 core */ 783 writel(0, AMAC0_IDM_RESET_ADDR); 784 tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR); 785 /* Set clock */ 786 tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT); 787 tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT); 788 /* Set Tx clock */ 789 tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT); 790 writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR); 791 792 /* reset gmac */ 793 /* 794 * As AMAC is just reset, NO need? 795 * set eth_data into loopback mode to ensure no rx traffic 796 * gmac_loopback(eth_data, TRUE); 797 * ET_TRACE(("%s gmac loopback\n", __func__)); 798 * udelay(1); 799 */ 800 801 cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR); 802 cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML | 803 CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI | 804 CC_PAD_EN | CC_PF); 805 cmdcfg |= (CC_PROM | CC_NLC | CC_CFE); 806 /* put mac in reset */ 807 gmac_init_reset(); 808 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR); 809 gmac_clear_reset(); 810 811 /* enable clear MIB on read */ 812 reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR); 813 /* PHY: set smi_master to drive mdc_clk */ 814 reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE); 815 816 /* clear persistent sw intstatus */ 817 writel(0, GMAC0_INT_STATUS_ADDR); 818 819 if (dma_init(dma) < 0) { 820 error("%s: GMAC dma_init failed\n", __func__); 821 goto err_exit; 822 } 823 824 chipid = CHIPID; 825 printf("%s: Chip ID: 0x%x\n", __func__, chipid); 826 827 /* set switch bypass mode */ 828 tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR); 829 tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); 830 831 /* Switch mode */ 832 /* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */ 833 834 writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR); 835 836 tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR); 837 tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT); 838 writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR); 839 840 /* Set MDIO to internal GPHY */ 841 tmp = readl(GMAC_MII_CTRL_ADDR); 842 /* Select internal MDC/MDIO bus*/ 843 tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT); 844 /* select MDC/MDIO connecting to on-chip internal PHYs */ 845 tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT); 846 /* 847 * give bit[6:0](MDCDIV) with required divisor to set 848 * the MDC clock frequency, 66MHZ/0x1A=2.5MHZ 849 */ 850 tmp |= 0x1A; 851 852 writel(tmp, GMAC_MII_CTRL_ADDR); 853 854 if (gmac_mii_busywait(1000)) { 855 error("%s: Configure MDIO: MII/MDIO busy\n", __func__); 856 goto err_exit; 857 } 858 859 /* Configure GMAC0 */ 860 /* enable one rx interrupt per received frame */ 861 writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR); 862 863 /* read command config reg */ 864 cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR); 865 /* enable 802.3x tx flow control (honor received PAUSE frames) */ 866 cmdcfg &= ~CC_RPI; 867 /* enable promiscuous mode */ 868 cmdcfg |= CC_PROM; 869 /* Disable loopback mode */ 870 cmdcfg &= ~CC_ML; 871 /* set the speed */ 872 cmdcfg &= ~(CC_ES_MASK | CC_HD); 873 /* Set to 1Gbps and full duplex by default */ 874 cmdcfg |= (2 << CC_ES_SHIFT); 875 876 /* put mac in reset */ 877 gmac_init_reset(); 878 /* write register */ 879 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR); 880 /* bring mac out of reset */ 881 gmac_clear_reset(); 882 883 /* set max frame lengths; account for possible vlan tag */ 884 writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR); 885 886 return 0; 887 888 err_exit: 889 dma_deinit(dma); 890 return -1; 891 } 892 893 int gmac_add(struct eth_device *dev) 894 { 895 struct eth_info *eth = (struct eth_info *)(dev->priv); 896 struct eth_dma *dma = &(eth->dma); 897 void *tmp; 898 899 /* 900 * Desc has to be 16-byte aligned ? 901 * If it is 8-byte aligned by malloc, fail Tx 902 */ 903 tmp = malloc(sizeof(dma64dd_t) * TX_BUF_NUM + 8); 904 if (tmp == NULL) { 905 printf("%s: Failed to allocate TX desc Buffer\n", __func__); 906 return -1; 907 } 908 909 dma->tx_desc = (void *)tmp; 910 dma->tx_desc_aligned = (void *)(((uint32_t)tmp) & (~0xf)); 911 debug("TX Descriptor Buffer: %p; length: 0x%x\n", 912 dma->tx_desc_aligned, sizeof(dma64dd_t) * TX_BUF_NUM); 913 914 tmp = malloc(TX_BUF_SIZE * TX_BUF_NUM); 915 if (tmp == NULL) { 916 printf("%s: Failed to allocate TX Data Buffer\n", __func__); 917 free(dma->tx_desc); 918 return -1; 919 } 920 dma->tx_buf = (uint8_t *)tmp; 921 debug("TX Data Buffer: %p; length: 0x%x\n", 922 dma->tx_buf, TX_BUF_SIZE * TX_BUF_NUM); 923 924 /* Desc has to be 16-byte aligned ? */ 925 tmp = malloc(sizeof(dma64dd_t) * RX_BUF_NUM + 8); 926 if (tmp == NULL) { 927 printf("%s: Failed to allocate RX Descriptor\n", __func__); 928 free(dma->tx_desc); 929 free(dma->tx_buf); 930 return -1; 931 } 932 dma->rx_desc = tmp; 933 dma->rx_desc_aligned = (void *)(((uint32_t)tmp) & (~0xf)); 934 debug("RX Descriptor Buffer: %p, length: 0x%x\n", 935 dma->rx_desc_aligned, sizeof(dma64dd_t) * RX_BUF_NUM); 936 937 tmp = malloc(RX_BUF_SIZE * RX_BUF_NUM); 938 if (tmp == NULL) { 939 printf("%s: Failed to allocate RX Data Buffer\n", __func__); 940 free(dma->tx_desc); 941 free(dma->tx_buf); 942 free(dma->rx_desc); 943 return -1; 944 } 945 dma->rx_buf = tmp; 946 debug("RX Data Buffer: %p; length: 0x%x\n", 947 dma->rx_buf, RX_BUF_SIZE * RX_BUF_NUM); 948 949 g_dmactrlflags = 0; 950 951 eth->phy_interface = PHY_INTERFACE_MODE_GMII; 952 953 dma->tx_packet = gmac_tx_packet; 954 dma->check_tx_done = gmac_check_tx_done; 955 956 dma->check_rx_done = gmac_check_rx_done; 957 958 dma->enable_dma = gmac_enable_dma; 959 dma->disable_dma = gmac_disable_dma; 960 961 eth->miiphy_read = gmac_miiphy_read; 962 eth->miiphy_write = gmac_miiphy_write; 963 964 eth->mac_init = gmac_mac_init; 965 eth->disable_mac = gmac_disable; 966 eth->enable_mac = gmac_enable; 967 eth->set_mac_addr = gmac_set_mac_addr; 968 eth->set_mac_speed = gmac_set_speed; 969 970 return 0; 971 } 972