/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>

/*
 * The U-Boot networking stack is a little weird. It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use. We'll
 * allocate our own, but we need one such buffer in case a packet
 * wraps around the DMA ring so that we have to copy it.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header. This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an Ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding"). MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */

#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>
#include <asm-generic/errno.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE		4096
#define MACB_RX_RING_SIZE		(MACB_RX_BUFFER_SIZE / 128)
#define MACB_TX_RING_SIZE		16
#define MACB_TX_TIMEOUT			1000
#define MACB_AUTONEG_TIMEOUT		5000000

struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define DMA_DESC_BYTES(n)		(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE		(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE		(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	struct macb_dma_desc	*dummy_desc;
	unsigned long		dummy_desc_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;
	struct mii_dev		*bus;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}
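
/*
 * macb_mdio_write() above and macb_mdio_read() below compose IEEE 802.3
 * Clause 22 management frames in the MAN register: SOF=1 starts the
 * frame, RW selects write (1) or read (2), and CODE=2 supplies the fixed
 * code bits required by the frame format.  A minimal sketch of how a
 * caller might use them (illustrative only, not part of this driver):
 *
 *	u16 bmcr = macb_mdio_read(macb, MII_BMCR);
 *	macb_mdio_write(macb, MII_BMCR, bmcr | BMCR_ANRESTART);
 */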

static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

void __weak arch_get_mdio_control(const char *name)
{
	return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	*value = macb_mdio_read(macb, reg);

	return 0;
}

int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	macb_mdio_write(macb, reg, value);

	return 0;
}
#endif

#define RX	1
#define TX	0
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
					MACB_RX_DMA_DESC_SIZE);
	else
		invalidate_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
					MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
				   MACB_RX_DMA_DESC_SIZE);
	else
		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
				   MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
			   MACB_RX_BUFFER_SIZE);
}

static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}
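
/*
 * Cache maintenance convention used by the helpers above: descriptor
 * rings and the RX buffer are flushed before the controller is expected
 * to read them, and invalidated before the CPU reads back data written
 * by DMA.  For example, a descriptor update on a system with data cache
 * enabled looks like:
 *
 *	macb->tx_ring[i].ctrl = ctrl;
 *	macb_flush_ring_desc(macb, TX);		(CPU -> device)
 *	...
 *	macb_invalidate_ring_desc(macb, TX);	(device -> CPU)
 */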

#if defined(CONFIG_CMD_NET)

static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_flush_ring_desc(macb, TX);
	/* Do we need to check that paddr and length are dcache line aligned? */
	flush_dcache_range(paddr, paddr + length);
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", netdev->name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n",
			       netdev->name);
	} else {
		printf("%s: TX timeout\n", netdev->name);
	}

	/* No one cares anyway */
	return 0;
}

static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	macb_invalidate_ring_desc(macb, RX);
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}

static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned int rx_tail = macb->rx_tail;
	void *buffer;
	int length;
	int wrapped = 0;
	u32 status;

	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (!(macb->rx_ring[rx_tail].addr & RXADDR_USED))
			return -1;

		status = macb->rx_ring[rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, rx_tail);
			wrapped = 0;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				buffer = (void *)net_rx_packets[0];
			}

			net_process_received_packet(buffer, length);
			if (++rx_tail >= MACB_RX_RING_SIZE)
				rx_tail = 0;
			reclaim_rx_buffers(macb, rx_tail);
		} else {
			if (++rx_tail >= MACB_RX_RING_SIZE) {
				wrapped = 1;
				rx_tail = 0;
			}
		}
		barrier();
	}

	return 0;
}
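
/*
 * Note on macb_recv() above: a frame larger than 128 bytes spans several
 * consecutive RX buffers, and only the descriptor of the last buffer
 * carries RXBUF_FRAME_END together with the total frame length.  If the
 * frame additionally wraps past the end of the ring, the two pieces are
 * copied into net_rx_packets[0] so the networking core sees one
 * contiguous packet.
 */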

static void macb_phy_reset(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", netdev->name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", netdev->name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       netdev->name, status);
}

#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", macb->netdev.name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", macb->netdev.name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */

static int macb_phy_init(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(netdev->name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb))
		return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", netdev->name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
	/* TODO: consider other PHY interface modes */
	phydev = phy_connect(macb->bus, macb->phy_addr, netdev,
			     PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb);

		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       netdev->name, status);
		return 0;
	}

	/* First check for GMAC */
	if (macb_is_gem(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);

		if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
			duplex = ((lpa & LPA_1000FULL) ? 1 : 0);

			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
			       netdev->name,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			ncfgr |= GEM_BIT(GBE);

			if (duplex)
				ncfgr |= MACB_BIT(FD);

			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* Fall back to EMAC-style 10/100 link checking */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       netdev->name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}
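
/*
 * Worked example for macb_phy_init() above (values are illustrative): if
 * the link partner advertisement resolves to 100 Mbit/s full duplex,
 * mii_nway_result() reports ADVERTISE_100FULL, so both MACB_BIT(SPD) and
 * MACB_BIT(FD) end up set in NCFGR; a 10 Mbit/s half-duplex result would
 * leave both bits clear.  On GEM, a 1000BASE-T link partner sets
 * GEM_BIT(GBE) instead of the SPD bit.
 */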
"full" : "half", 519 lpa); 520 521 ncfgr = macb_readl(macb, NCFGR); 522 ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE)); 523 if (speed) 524 ncfgr |= MACB_BIT(SPD); 525 if (duplex) 526 ncfgr |= MACB_BIT(FD); 527 macb_writel(macb, NCFGR, ncfgr); 528 529 return 1; 530 } 531 532 static int gmac_init_multi_queues(struct macb_device *macb) 533 { 534 int i, num_queues = 1; 535 u32 queue_mask; 536 537 /* bit 0 is never set but queue 0 always exists */ 538 queue_mask = gem_readl(macb, DCFG6) & 0xff; 539 queue_mask |= 0x1; 540 541 for (i = 1; i < MACB_MAX_QUEUES; i++) 542 if (queue_mask & (1 << i)) 543 num_queues++; 544 545 macb->dummy_desc->ctrl = TXBUF_USED; 546 macb->dummy_desc->addr = 0; 547 flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma + 548 MACB_TX_DUMMY_DMA_DESC_SIZE); 549 550 for (i = 1; i < num_queues; i++) 551 gem_writel_queue_TBQP(macb, macb->dummy_desc_dma, i - 1); 552 553 return 0; 554 } 555 556 static int macb_init(struct eth_device *netdev, bd_t *bd) 557 { 558 struct macb_device *macb = to_macb(netdev); 559 unsigned long paddr; 560 int i; 561 562 /* 563 * macb_halt should have been called at some point before now, 564 * so we'll assume the controller is idle. 565 */ 566 567 /* initialize DMA descriptors */ 568 paddr = macb->rx_buffer_dma; 569 for (i = 0; i < MACB_RX_RING_SIZE; i++) { 570 if (i == (MACB_RX_RING_SIZE - 1)) 571 paddr |= RXADDR_WRAP; 572 macb->rx_ring[i].addr = paddr; 573 macb->rx_ring[i].ctrl = 0; 574 paddr += 128; 575 } 576 macb_flush_ring_desc(macb, RX); 577 macb_flush_rx_buffer(macb); 578 579 for (i = 0; i < MACB_TX_RING_SIZE; i++) { 580 macb->tx_ring[i].addr = 0; 581 if (i == (MACB_TX_RING_SIZE - 1)) 582 macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP; 583 else 584 macb->tx_ring[i].ctrl = TXBUF_USED; 585 } 586 macb_flush_ring_desc(macb, TX); 587 588 macb->rx_tail = 0; 589 macb->tx_head = 0; 590 macb->tx_tail = 0; 591 592 macb_writel(macb, RBQP, macb->rx_ring_dma); 593 macb_writel(macb, TBQP, macb->tx_ring_dma); 594 595 if (macb_is_gem(macb)) { 596 /* Check the multi queue and initialize the queue for tx */ 597 gmac_init_multi_queues(macb); 598 599 /* 600 * When the GMAC IP with GE feature, this bit is used to 601 * select interface between RGMII and GMII. 602 * When the GMAC IP without GE feature, this bit is used 603 * to select interface between RMII and MII. 604 */ 605 #if defined(CONFIG_RGMII) || defined(CONFIG_RMII) 606 gem_writel(macb, UR, GEM_BIT(RGMII)); 607 #else 608 gem_writel(macb, UR, 0); 609 #endif 610 } else { 611 /* choose RMII or MII mode. This depends on the board */ 612 #ifdef CONFIG_RMII 613 #ifdef CONFIG_AT91FAMILY 614 macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN)); 615 #else 616 macb_writel(macb, USRIO, 0); 617 #endif 618 #else 619 #ifdef CONFIG_AT91FAMILY 620 macb_writel(macb, USRIO, MACB_BIT(CLKEN)); 621 #else 622 macb_writel(macb, USRIO, MACB_BIT(MII)); 623 #endif 624 #endif /* CONFIG_RMII */ 625 } 626 627 if (!macb_phy_init(macb)) 628 return -1; 629 630 /* Enable TX and RX */ 631 macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE)); 632 633 return 0; 634 } 635 636 static void macb_halt(struct eth_device *netdev) 637 { 638 struct macb_device *macb = to_macb(netdev); 639 u32 ncr, tsr; 640 641 /* Halt the controller and wait for any ongoing transmission to end. 

static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

static int macb_write_hwaddr(struct eth_device *dev)
{
	struct macb_device *macb = to_macb(dev);
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = dev->enetaddr[0] | dev->enetaddr[1] << 8 |
			dev->enetaddr[2] << 16 | dev->enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = dev->enetaddr[4] | dev->enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);
	return 0;
}

static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}
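
/*
 * Example of the divider selection above (the pclk value is illustrative):
 * with a 133 MHz peripheral clock both helpers pick the divide-by-64
 * setting, giving an MDC frequency of roughly 2.08 MHz, which stays below
 * the 2.5 MHz maximum that IEEE 802.3 specifies for the MDIO bus.
 */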

/*
 * Get the DMA bus width field of the network configuration register that
 * we should program.  We decode the design configuration register to find
 * the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb_device *macb)
{
	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;
	u32 ncfgr;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);
	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
					      &macb->dummy_desc_dma);

	/* TODO: check that rx_ring_dma and tx_ring_dma are dcache line aligned */

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}

#endif
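
/*
 * Typical use from board code (a sketch only; the controller id, base
 * address and PHY address below are hypothetical and board-specific):
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return macb_eth_initialize(0, (void *)MACB0_BASE_ADDR, 0x01);
 *	}
 */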