/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <common.h>

/*
 * The U-Boot networking stack is a little weird.  It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use.  We'll
 * allocate our own, but we need one such buffer in case a packet
 * wraps around the DMA ring so that we have to copy it.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header.  This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an Ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding").  MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */

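/*
 * As a sketch of the board-side configuration described above (the
 * header file name is board-specific, not part of this driver), a
 * board's configuration header would typically contain:
 *
 *	#define CONFIG_SYS_RX_ETH_BUFFER	1
 *
 * so that the networking core only allocates a single RX buffer; the
 * driver below manages its own 128-byte DMA buffers instead.
 */
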
#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>
#include <asm-generic/errno.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE	4096
#define MACB_RX_RING_SIZE	(MACB_RX_BUFFER_SIZE / 128)
#define MACB_TX_RING_SIZE	16
#define MACB_TX_TIMEOUT		1000
#define MACB_AUTONEG_TIMEOUT	5000000

struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define DMA_DESC_BYTES(n)	(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;
	struct mii_dev		*bus;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

void __weak arch_get_mdio_control(const char *name)
{
	return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	*value = macb_mdio_read(macb, reg);

	return 0;
}

int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	macb_mdio_write(macb, reg, value);

	return 0;
}
#endif

#define RX	1
#define TX	0
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
					MACB_RX_DMA_DESC_SIZE);
	else
		invalidate_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
					MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
				   MACB_RX_DMA_DESC_SIZE);
	else
		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
				   MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
			   MACB_RX_BUFFER_SIZE);
}

static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}

#if defined(CONFIG_CMD_NET)

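/*
 * Transmit path, as a brief summary of the code below: the packet is
 * handed to the controller through a single TX descriptor (address +
 * control word), the descriptor ring and the packet buffer are flushed
 * from the data cache, TSTART kicks off the DMA, and we then poll the
 * descriptor's TXBUF_USED bit (with a timeout) because the networking
 * core may reuse the transmit buffer as soon as we return.
 */
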
static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_flush_ring_desc(macb, TX);
	/* Do we need to check whether paddr and length are dcache line aligned? */
	flush_dcache_range(paddr, paddr + length);
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", netdev->name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n",
			       netdev->name);
	} else {
		printf("%s: TX timeout\n", netdev->name);
	}

	/* No one cares anyway */
	return 0;
}

static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	macb_invalidate_ring_desc(macb, RX);
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}

static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned int rx_tail = macb->rx_tail;
	void *buffer;
	int length;
	int wrapped = 0;
	u32 status;

	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (!(macb->rx_ring[rx_tail].addr & RXADDR_USED))
			return -1;

		status = macb->rx_ring[rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, rx_tail);
			wrapped = 0;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				buffer = (void *)net_rx_packets[0];
			}

			net_process_received_packet(buffer, length);
			if (++rx_tail >= MACB_RX_RING_SIZE)
				rx_tail = 0;
			reclaim_rx_buffers(macb, rx_tail);
		} else {
			if (++rx_tail >= MACB_RX_RING_SIZE) {
				wrapped = 1;
				rx_tail = 0;
			}
		}
		barrier();
	}

	return 0;
}

static void macb_phy_reset(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", netdev->name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", netdev->name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       netdev->name, status);
}

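/*
 * PHY address auto-detection (below) is compiled in only when the board
 * asks for it.  As a sketch, a board that does not know its PHY address
 * at build time would add to its configuration header:
 *
 *	#define CONFIG_MACB_SEARCH_PHY
 *
 * and macb_phy_init() will then scan MDIO addresses 0-31 for a
 * responding PHY before bringing the link up.
 */
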
#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", macb->netdev.name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", macb->netdev.name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */


static int macb_phy_init(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(netdev->name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb))
		return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", netdev->name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
	/* TODO: consider other PHY interface modes as well */
	phydev = phy_connect(macb->bus, macb->phy_addr, netdev,
			     PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb);

		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       netdev->name, status);
		return 0;
	}

	/* First check for GMAC */
	if (macb_is_gem(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);

		if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
			duplex = ((lpa & LPA_1000FULL) ? 1 : 0);

			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
			       netdev->name,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			ncfgr |= GEM_BIT(GBE);

			if (duplex)
				ncfgr |= MACB_BIT(FD);

			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* Fall back to EMAC-style (10/100) link checking */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       netdev->name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}

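/*
 * macb_init() below builds the DMA rings from scratch: every 128-byte
 * slice of the RX buffer gets its own descriptor (with RXADDR_WRAP on
 * the last one), all TX descriptors start out owned by software
 * (TXBUF_USED), and the ring base addresses are programmed into RBQP
 * and TBQP before the PHY is brought up and TX/RX are enabled.
 */
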
static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
		/*
		 * On GMAC IP with the gigabit (GE) feature, this bit
		 * selects between the RGMII and GMII interfaces.
		 * On GMAC IP without the GE feature, it selects
		 * between RMII and MII.
		 */
#if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
		gem_writel(macb, UR, GEM_BIT(RGMII));
#else
		gem_writel(macb, UR, 0);
#endif
	} else {
		/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
	}

	if (!macb_phy_init(macb))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

static int macb_write_hwaddr(struct eth_device *dev)
{
	struct macb_device *macb = to_macb(dev);
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = dev->enetaddr[0] | dev->enetaddr[1] << 8 |
			dev->enetaddr[2] << 16 | dev->enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = dev->enetaddr[4] | dev->enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);
	return 0;
}

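/*
 * The two helpers below pick the MDC clock divider from the peripheral
 * clock rate reported by get_macb_pclk_rate().  As a worked example:
 * a 100 MHz peripheral clock is above the 80 MHz threshold, so
 * macb_mdc_clk_div() selects MACB_CLK_DIV64, while the same clock on a
 * GEM falls in the 80-120 MHz band and gem_mdc_clk_div() selects
 * GEM_CLK_DIV48.
 */
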
static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program.  We find the width by decoding the design configuration
 * register, which reports the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb_device *macb)
{
	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

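/*
 * macb_eth_initialize() below is the board-facing entry point.  As a
 * sketch (the base-address macro and PHY address here are board-specific
 * assumptions, not part of this driver), a board's board_eth_init()
 * would typically do something like:
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return macb_eth_initialize(0, (void *)MACB0_BASE_ADDR, 0x01);
 *	}
 *
 * where MACB0_BASE_ADDR is the controller's register base and 0x01 is
 * the PHY address on the MDIO bus (any value works when
 * CONFIG_MACB_SEARCH_PHY is enabled, since the address is auto-detected).
 */
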
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;
	u32 ncfgr;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);

	/* TODO: check that rx_ring_dma and tx_ring_dma are dcache line aligned */

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}

#endif