/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>

/*
 * The u-boot networking stack is a little weird.  It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use.  We'll
 * allocate our own, but we need one such buffer in case a packet
 * wraps around the DMA ring so that we have to copy it.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header.  This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding").  MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */

#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>
#include <asm-generic/errno.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE		4096
#define MACB_RX_RING_SIZE		(MACB_RX_BUFFER_SIZE / 128)
#define MACB_TX_RING_SIZE		16
#define MACB_TX_TIMEOUT			1000
#define MACB_AUTONEG_TIMEOUT		5000000

struct macb_dma_desc {
        u32     addr;
        u32     ctrl;
};

#define DMA_DESC_BYTES(n)		(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE		(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE		(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))
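
/*
 * Layout of the two descriptor words: for RX descriptors the address
 * word carries the buffer address plus the USED (ownership) and WRAP
 * bits, and the control word carries the receive status.  For TX
 * descriptors the address word is just the buffer address, while the
 * control word carries the frame length, the transmit status and the
 * USED/WRAP bits.
 */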

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
        void                    *regs;

        unsigned int            rx_tail;
        unsigned int            tx_head;
        unsigned int            tx_tail;

        void                    *rx_buffer;
        void                    *tx_buffer;
        struct macb_dma_desc    *rx_ring;
        struct macb_dma_desc    *tx_ring;

        unsigned long           rx_buffer_dma;
        unsigned long           rx_ring_dma;
        unsigned long           tx_ring_dma;

        struct macb_dma_desc    *dummy_desc;
        unsigned long           dummy_desc_dma;

        const struct device     *dev;
        struct eth_device       netdev;
        unsigned short          phy_addr;
        struct mii_dev          *bus;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

static int macb_is_gem(struct macb_device *macb)
{
        return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

#ifndef cpu_is_sama5d2
#define cpu_is_sama5d2() 0
#endif

#ifndef cpu_is_sama5d4
#define cpu_is_sama5d4() 0
#endif

static int gem_is_gigabit_capable(struct macb_device *macb)
{
        /*
         * The GEM controllers embedded in SAMA5D2 and SAMA5D4 are
         * configured to support only 10/100.
         */
        return macb_is_gem(macb) && !cpu_is_sama5d2() && !cpu_is_sama5d4();
}

static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
        unsigned long netctl;
        unsigned long netstat;
        unsigned long frame;

        netctl = macb_readl(macb, NCR);
        netctl |= MACB_BIT(MPE);
        macb_writel(macb, NCR, netctl);

        frame = (MACB_BF(SOF, 1)
                 | MACB_BF(RW, 1)
                 | MACB_BF(PHYA, macb->phy_addr)
                 | MACB_BF(REGA, reg)
                 | MACB_BF(CODE, 2)
                 | MACB_BF(DATA, value));
        macb_writel(macb, MAN, frame);

        do {
                netstat = macb_readl(macb, NSR);
        } while (!(netstat & MACB_BIT(IDLE)));

        netctl = macb_readl(macb, NCR);
        netctl &= ~MACB_BIT(MPE);
        macb_writel(macb, NCR, netctl);
}

static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
        unsigned long netctl;
        unsigned long netstat;
        unsigned long frame;

        netctl = macb_readl(macb, NCR);
        netctl |= MACB_BIT(MPE);
        macb_writel(macb, NCR, netctl);

        frame = (MACB_BF(SOF, 1)
                 | MACB_BF(RW, 2)
                 | MACB_BF(PHYA, macb->phy_addr)
                 | MACB_BF(REGA, reg)
                 | MACB_BF(CODE, 2));
        macb_writel(macb, MAN, frame);

        do {
                netstat = macb_readl(macb, NSR);
        } while (!(netstat & MACB_BIT(IDLE)));

        frame = macb_readl(macb, MAN);

        netctl = macb_readl(macb, NCR);
        netctl &= ~MACB_BIT(MPE);
        macb_writel(macb, NCR, netctl);

        return MACB_BFEXT(DATA, frame);
}

void __weak arch_get_mdio_control(const char *name)
{
        return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
{
        struct eth_device *dev = eth_get_dev_by_name(devname);
        struct macb_device *macb = to_macb(dev);

        if (macb->phy_addr != phy_adr)
                return -1;

        arch_get_mdio_control(devname);
        *value = macb_mdio_read(macb, reg);

        return 0;
}

int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
{
        struct eth_device *dev = eth_get_dev_by_name(devname);
        struct macb_device *macb = to_macb(dev);

        if (macb->phy_addr != phy_adr)
                return -1;

        arch_get_mdio_control(devname);
        macb_mdio_write(macb, reg, value);

        return 0;
}
#endif

#define RX	1
#define TX	0
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
        if (rx)
                invalidate_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
                                        MACB_RX_DMA_DESC_SIZE);
        else
                invalidate_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
                                        MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
        if (rx)
                flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
                                   MACB_RX_DMA_DESC_SIZE);
        else
                flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
                                   MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
        flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
                           MACB_RX_BUFFER_SIZE);
}

static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
        invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
                                MACB_RX_BUFFER_SIZE);
}

#if defined(CONFIG_CMD_NET)
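
/*
 * Transmit a single packet: hand one descriptor to the controller by
 * writing its control word without the USED bit, kick TSTART, then
 * busy-wait (bounded by MACB_TX_TIMEOUT) until the controller sets
 * USED again to signal that the buffer may be reused.
 */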
static int macb_send(struct eth_device *netdev, void *packet, int length)
{
        struct macb_device *macb = to_macb(netdev);
        unsigned long paddr, ctrl;
        unsigned int tx_head = macb->tx_head;
        int i;

        paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

        ctrl = length & TXBUF_FRMLEN_MASK;
        ctrl |= TXBUF_FRAME_END;
        if (tx_head == (MACB_TX_RING_SIZE - 1)) {
                ctrl |= TXBUF_WRAP;
                macb->tx_head = 0;
        } else {
                macb->tx_head++;
        }

        macb->tx_ring[tx_head].ctrl = ctrl;
        macb->tx_ring[tx_head].addr = paddr;
        barrier();
        macb_flush_ring_desc(macb, TX);
        /* Do we need to check whether paddr and length are dcache line aligned? */
        flush_dcache_range(paddr, paddr + length);
        macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

        /*
         * I guess this is necessary because the networking core may
         * re-use the transmit buffer as soon as we return...
         */
        for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
                barrier();
                macb_invalidate_ring_desc(macb, TX);
                ctrl = macb->tx_ring[tx_head].ctrl;
                if (ctrl & TXBUF_USED)
                        break;
                udelay(1);
        }

        dma_unmap_single(packet, length, paddr);

        if (i <= MACB_TX_TIMEOUT) {
                if (ctrl & TXBUF_UNDERRUN)
                        printf("%s: TX underrun\n", netdev->name);
                if (ctrl & TXBUF_EXHAUSTED)
                        printf("%s: TX buffers exhausted in mid frame\n",
                               netdev->name);
        } else {
                printf("%s: TX timeout\n", netdev->name);
        }

        /* No one cares anyway */
        return 0;
}
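
/*
 * Receive side: each frame arrives split across one or more 128-byte
 * buffers/descriptors.  A frame that wraps past the end of the ring is
 * copied into net_rx_packets[0] so the stack sees one contiguous
 * buffer.  Consumed descriptors are handed back to the controller by
 * clearing their USED bits in reclaim_rx_buffers().
 */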
static void reclaim_rx_buffers(struct macb_device *macb,
                               unsigned int new_tail)
{
        unsigned int i;

        i = macb->rx_tail;

        macb_invalidate_ring_desc(macb, RX);
        while (i > new_tail) {
                macb->rx_ring[i].addr &= ~RXADDR_USED;
                i++;
                if (i >= MACB_RX_RING_SIZE)
                        i = 0;
        }

        while (i < new_tail) {
                macb->rx_ring[i].addr &= ~RXADDR_USED;
                i++;
        }

        barrier();
        macb_flush_ring_desc(macb, RX);
        macb->rx_tail = new_tail;
}

static int macb_recv(struct eth_device *netdev)
{
        struct macb_device *macb = to_macb(netdev);
        unsigned int rx_tail = macb->rx_tail;
        void *buffer;
        int length;
        int wrapped = 0;
        u32 status;

        for (;;) {
                macb_invalidate_ring_desc(macb, RX);

                if (!(macb->rx_ring[rx_tail].addr & RXADDR_USED))
                        return -1;

                status = macb->rx_ring[rx_tail].ctrl;
                if (status & RXBUF_FRAME_START) {
                        if (rx_tail != macb->rx_tail)
                                reclaim_rx_buffers(macb, rx_tail);
                        wrapped = 0;
                }

                if (status & RXBUF_FRAME_END) {
                        buffer = macb->rx_buffer + 128 * macb->rx_tail;
                        length = status & RXBUF_FRMLEN_MASK;

                        macb_invalidate_rx_buffer(macb);
                        if (wrapped) {
                                unsigned int headlen, taillen;

                                headlen = 128 * (MACB_RX_RING_SIZE
                                                 - macb->rx_tail);
                                taillen = length - headlen;
                                memcpy((void *)net_rx_packets[0],
                                       buffer, headlen);
                                memcpy((void *)net_rx_packets[0] + headlen,
                                       macb->rx_buffer, taillen);
                                buffer = (void *)net_rx_packets[0];
                        }

                        net_process_received_packet(buffer, length);
                        if (++rx_tail >= MACB_RX_RING_SIZE)
                                rx_tail = 0;
                        reclaim_rx_buffers(macb, rx_tail);
                } else {
                        if (++rx_tail >= MACB_RX_RING_SIZE) {
                                wrapped = 1;
                                rx_tail = 0;
                        }
                }
                barrier();
        }

        return 0;
}

static void macb_phy_reset(struct macb_device *macb)
{
        struct eth_device *netdev = &macb->netdev;
        int i;
        u16 status, adv;

        adv = ADVERTISE_CSMA | ADVERTISE_ALL;
        macb_mdio_write(macb, MII_ADVERTISE, adv);
        printf("%s: Starting autonegotiation...\n", netdev->name);
        macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
                                         | BMCR_ANRESTART));

        for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
                status = macb_mdio_read(macb, MII_BMSR);
                if (status & BMSR_ANEGCOMPLETE)
                        break;
                udelay(100);
        }

        if (status & BMSR_ANEGCOMPLETE)
                printf("%s: Autonegotiation complete\n", netdev->name);
        else
                printf("%s: Autonegotiation timed out (status=0x%04x)\n",
                       netdev->name, status);
}

#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb)
{
        int i;
        u16 phy_id;

        /* Search for PHY... */
        for (i = 0; i < 32; i++) {
                macb->phy_addr = i;
                phy_id = macb_mdio_read(macb, MII_PHYSID1);
                if (phy_id != 0xffff) {
                        printf("%s: PHY present at %d\n", macb->netdev.name, i);
                        return 1;
                }
        }

        /* PHY isn't up to snuff */
        printf("%s: PHY not found\n", macb->netdev.name);

        return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */

static int macb_phy_init(struct macb_device *macb)
{
        struct eth_device *netdev = &macb->netdev;
#ifdef CONFIG_PHYLIB
        struct phy_device *phydev;
#endif
        u32 ncfgr;
        u16 phy_id, status, adv, lpa;
        int media, speed, duplex;
        int i;

        arch_get_mdio_control(netdev->name);
#ifdef CONFIG_MACB_SEARCH_PHY
        /* Auto-detect phy_addr */
        if (!macb_phy_find(macb))
                return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

        /* Check if the PHY is up to snuff... */
        phy_id = macb_mdio_read(macb, MII_PHYSID1);
        if (phy_id == 0xffff) {
                printf("%s: No PHY present\n", netdev->name);
                return 0;
        }

#ifdef CONFIG_PHYLIB
        /* need to consider other phy interface mode */
        phydev = phy_connect(macb->bus, macb->phy_addr, netdev,
                             PHY_INTERFACE_MODE_RGMII);
        if (!phydev) {
                printf("phy_connect failed\n");
                return -ENODEV;
        }

        phy_config(phydev);
#endif

        status = macb_mdio_read(macb, MII_BMSR);
        if (!(status & BMSR_LSTATUS)) {
                /* Try to re-negotiate if we don't have link already. */
                macb_phy_reset(macb);

                for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
                        status = macb_mdio_read(macb, MII_BMSR);
                        if (status & BMSR_LSTATUS)
                                break;
                        udelay(100);
                }
        }

        if (!(status & BMSR_LSTATUS)) {
                printf("%s: link down (status: 0x%04x)\n",
                       netdev->name, status);
                return 0;
        }

        /* First check for GMAC and that it is GiB capable */
        if (gem_is_gigabit_capable(macb)) {
                lpa = macb_mdio_read(macb, MII_STAT1000);

                if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
                        duplex = ((lpa & LPA_1000FULL) ? 1 : 0);

                        printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
                               netdev->name,
                               duplex ? "full" : "half",
                               lpa);

                        ncfgr = macb_readl(macb, NCFGR);
                        ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
                        ncfgr |= GEM_BIT(GBE);

                        if (duplex)
                                ncfgr |= MACB_BIT(FD);

                        macb_writel(macb, NCFGR, ncfgr);

                        return 1;
                }
        }

        /* fall back for EMAC checking */
        adv = macb_mdio_read(macb, MII_ADVERTISE);
        lpa = macb_mdio_read(macb, MII_LPA);
        media = mii_nway_result(lpa & adv);
        speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
                 ? 1 : 0);
        duplex = (media & ADVERTISE_FULL) ? 1 : 0;
        printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
               netdev->name,
               speed ? "100" : "10",
               duplex ? "full" : "half",
               lpa);

        ncfgr = macb_readl(macb, NCFGR);
        ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
        if (speed)
                ncfgr |= MACB_BIT(SPD);
        if (duplex)
                ncfgr |= MACB_BIT(FD);
        macb_writel(macb, NCFGR, ncfgr);

        return 1;
}

static int gmac_init_multi_queues(struct macb_device *macb)
{
        int i, num_queues = 1;
        u32 queue_mask;

        /* bit 0 is never set but queue 0 always exists */
        queue_mask = gem_readl(macb, DCFG6) & 0xff;
        queue_mask |= 0x1;

        for (i = 1; i < MACB_MAX_QUEUES; i++)
                if (queue_mask & (1 << i))
                        num_queues++;

        macb->dummy_desc->ctrl = TXBUF_USED;
        macb->dummy_desc->addr = 0;
        flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
                           MACB_TX_DUMMY_DMA_DESC_SIZE);

        for (i = 1; i < num_queues; i++)
                gem_writel_queue_TBQP(macb, macb->dummy_desc_dma, i - 1);

        return 0;
}
static int macb_init(struct eth_device *netdev, bd_t *bd)
{
        struct macb_device *macb = to_macb(netdev);
        unsigned long paddr;
        int i;

        /*
         * macb_halt should have been called at some point before now,
         * so we'll assume the controller is idle.
         */

        /* initialize DMA descriptors */
        paddr = macb->rx_buffer_dma;
        for (i = 0; i < MACB_RX_RING_SIZE; i++) {
                if (i == (MACB_RX_RING_SIZE - 1))
                        paddr |= RXADDR_WRAP;
                macb->rx_ring[i].addr = paddr;
                macb->rx_ring[i].ctrl = 0;
                paddr += 128;
        }
        macb_flush_ring_desc(macb, RX);
        macb_flush_rx_buffer(macb);

        for (i = 0; i < MACB_TX_RING_SIZE; i++) {
                macb->tx_ring[i].addr = 0;
                if (i == (MACB_TX_RING_SIZE - 1))
                        macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
                else
                        macb->tx_ring[i].ctrl = TXBUF_USED;
        }
        macb_flush_ring_desc(macb, TX);

        macb->rx_tail = 0;
        macb->tx_head = 0;
        macb->tx_tail = 0;

        macb_writel(macb, RBQP, macb->rx_ring_dma);
        macb_writel(macb, TBQP, macb->tx_ring_dma);

        if (macb_is_gem(macb)) {
                /* Check the multi queue and initialize the queue for tx */
                gmac_init_multi_queues(macb);

                /*
                 * When the GMAC IP has the GE feature, this bit selects
                 * between the RGMII and GMII interfaces.
                 * When the GMAC IP lacks the GE feature, it selects
                 * between the RMII and MII interfaces.
                 */
#if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
                gem_writel(macb, UR, GEM_BIT(RGMII));
#else
                gem_writel(macb, UR, 0);
#endif
        } else {
                /* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
                macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
                macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
                macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
                macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
        }

        if (!macb_phy_init(macb))
                return -1;

        /* Enable TX and RX */
        macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

        return 0;
}

static void macb_halt(struct eth_device *netdev)
{
        struct macb_device *macb = to_macb(netdev);
        u32 ncr, tsr;

        /* Halt the controller and wait for any ongoing transmission to end. */
        ncr = macb_readl(macb, NCR);
        ncr |= MACB_BIT(THALT);
        macb_writel(macb, NCR, ncr);

        do {
                tsr = macb_readl(macb, TSR);
        } while (tsr & MACB_BIT(TGO));

        /* Disable TX and RX, and clear statistics */
        macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

static int macb_write_hwaddr(struct eth_device *dev)
{
        struct macb_device *macb = to_macb(dev);
        u32 hwaddr_bottom;
        u16 hwaddr_top;

        /* set hardware address */
        hwaddr_bottom = dev->enetaddr[0] | dev->enetaddr[1] << 8 |
                        dev->enetaddr[2] << 16 | dev->enetaddr[3] << 24;
        macb_writel(macb, SA1B, hwaddr_bottom);
        hwaddr_top = dev->enetaddr[4] | dev->enetaddr[5] << 8;
        macb_writel(macb, SA1T, hwaddr_top);
        return 0;
}
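
/*
 * Pick the MDC clock divisor from the peripheral clock rate.  The
 * thresholds below are chosen so that MDC (pclk / divisor) stays around
 * or below the 2.5 MHz maximum allowed by IEEE 802.3 Clause 22 for
 * typical peripheral clock rates; e.g. a 133 MHz pclk ends up with a
 * /64 divisor, i.e. roughly 2.1 MHz.
 */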
static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
        u32 config;
        unsigned long macb_hz = get_macb_pclk_rate(id);

        if (macb_hz < 20000000)
                config = MACB_BF(CLK, MACB_CLK_DIV8);
        else if (macb_hz < 40000000)
                config = MACB_BF(CLK, MACB_CLK_DIV16);
        else if (macb_hz < 80000000)
                config = MACB_BF(CLK, MACB_CLK_DIV32);
        else
                config = MACB_BF(CLK, MACB_CLK_DIV64);

        return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
        u32 config;
        unsigned long macb_hz = get_macb_pclk_rate(id);

        if (macb_hz < 20000000)
                config = GEM_BF(CLK, GEM_CLK_DIV8);
        else if (macb_hz < 40000000)
                config = GEM_BF(CLK, GEM_CLK_DIV16);
        else if (macb_hz < 80000000)
                config = GEM_BF(CLK, GEM_CLK_DIV32);
        else if (macb_hz < 120000000)
                config = GEM_BF(CLK, GEM_CLK_DIV48);
        else if (macb_hz < 160000000)
                config = GEM_BF(CLK, GEM_CLK_DIV64);
        else
                config = GEM_BF(CLK, GEM_CLK_DIV96);

        return config;
}

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program.  The width is decoded from the design configuration
 * register, which reports the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb_device *macb)
{
        switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
        case 4:
                return GEM_BF(DBW, GEM_DBW128);
        case 2:
                return GEM_BF(DBW, GEM_DBW64);
        case 1:
        default:
                return GEM_BF(DBW, GEM_DBW32);
        }
}

int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
        struct macb_device *macb;
        struct eth_device *netdev;
        u32 ncfgr;

        macb = malloc(sizeof(struct macb_device));
        if (!macb) {
                printf("Error: Failed to allocate memory for MACB%d\n", id);
                return -1;
        }
        memset(macb, 0, sizeof(struct macb_device));

        netdev = &macb->netdev;

        macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
                                             &macb->rx_buffer_dma);
        macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
                                           &macb->rx_ring_dma);
        macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
                                           &macb->tx_ring_dma);
        macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
                                              &macb->dummy_desc_dma);

        /* TODO: check that rx_ring_dma and tx_ring_dma are dcache line aligned */

        macb->regs = regs;
        macb->phy_addr = phy_addr;

        if (macb_is_gem(macb))
                sprintf(netdev->name, "gmac%d", id);
        else
                sprintf(netdev->name, "macb%d", id);

        netdev->init = macb_init;
        netdev->halt = macb_halt;
        netdev->send = macb_send;
        netdev->recv = macb_recv;
        netdev->write_hwaddr = macb_write_hwaddr;

        /*
         * Do some basic initialization so that we at least can talk
         * to the PHY
         */
        if (macb_is_gem(macb)) {
                ncfgr = gem_mdc_clk_div(id, macb);
                ncfgr |= macb_dbw(macb);
        } else {
                ncfgr = macb_mdc_clk_div(id, macb);
        }

        macb_writel(macb, NCFGR, ncfgr);

        eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
        miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
        macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
        return 0;
}

#endif
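
/*
 * Typical board hookup (a sketch only; the register base macro and the
 * PHY address below are board-specific assumptions, not part of this
 * driver):
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return macb_eth_initialize(0, (void *)ATMEL_BASE_EMAC, 0x00);
 *	}
 */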