/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>

/*
 * The u-boot networking stack is a little weird.  It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use.  We'll
 * allocate our own, but we still need one buffer from the core for the
 * case where a packet wraps around the DMA ring and has to be copied
 * into a contiguous buffer.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header.  This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an Ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding").  MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */
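
/*
 * A minimal sketch of the board-configuration side of the above.  The
 * exact header and option set are board-specific assumptions, not
 * something this driver defines:
 *
 *	#define CONFIG_MACB
 *	#define CONFIG_SYS_RX_ETH_BUFFER	1
 *	#define CONFIG_RMII			(only if the PHY is wired over RMII)
 *	#define CONFIG_MACB_SEARCH_PHY		(probe all 32 addresses for the PHY)
 */
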
#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>
#include <asm-generic/errno.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE	4096
#define MACB_RX_RING_SIZE	(MACB_RX_BUFFER_SIZE / 128)
#define MACB_TX_RING_SIZE	16
#define MACB_TX_TIMEOUT		1000
#define MACB_AUTONEG_TIMEOUT	5000000

struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define DMA_DESC_BYTES(n)	(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;
	struct mii_dev		*bus;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}
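
/*
 * The MAN frames built above follow IEEE 802.3 Clause 22: SOF = 1 is the
 * "01" start-of-frame pattern, RW = 1 encodes a write and RW = 2 a read,
 * CODE = 2 is the "10" turnaround, followed by 16 data bits.  A minimal
 * illustrative use of these helpers, mirroring what macb_phy_init() does
 * further down:
 *
 *	u16 id = macb_mdio_read(macb, MII_PHYSID1);
 *	if (id == 0xffff)
 *		printf("no PHY responding at address %u\n", macb->phy_addr);
 */
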
void __weak arch_get_mdio_control(const char *name)
{
	return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	*value = macb_mdio_read(macb, reg);

	return 0;
}

int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	macb_mdio_write(macb, reg, value);

	return 0;
}
#endif

#define RX	1
#define TX	0
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
					MACB_RX_DMA_DESC_SIZE);
	else
		invalidate_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
					MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
				   MACB_RX_DMA_DESC_SIZE);
	else
		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
				   MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
			   MACB_RX_BUFFER_SIZE);
}

static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}

#if defined(CONFIG_CMD_NET)

static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_flush_ring_desc(macb, TX);
	/* Do we need to check whether paddr and length are dcache line aligned? */
	flush_dcache_range(paddr, paddr + length);
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", netdev->name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n",
			       netdev->name);
	} else {
		printf("%s: TX timeout\n", netdev->name);
	}

	/* No one cares anyway */
	return 0;
}

static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	macb_invalidate_ring_desc(macb, RX);
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}

static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned int rx_tail = macb->rx_tail;
	void *buffer;
	int length;
	int wrapped = 0;
	u32 status;

	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (!(macb->rx_ring[rx_tail].addr & RXADDR_USED))
			return -1;

		status = macb->rx_ring[rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, rx_tail);
			wrapped = 0;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)NetRxPackets[0],
				       buffer, headlen);
				memcpy((void *)NetRxPackets[0] + headlen,
				       macb->rx_buffer, taillen);
				buffer = (void *)NetRxPackets[0];
			}

			NetReceive(buffer, length);
			if (++rx_tail >= MACB_RX_RING_SIZE)
				rx_tail = 0;
			reclaim_rx_buffers(macb, rx_tail);
		} else {
			if (++rx_tail >= MACB_RX_RING_SIZE) {
				wrapped = 1;
				rx_tail = 0;
			}
		}
		barrier();
	}

	return 0;
}
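
/*
 * Worked example of the wrap handling above (sizes follow from the defines
 * at the top of this file): the RX ring is MACB_RX_RING_SIZE = 4096 / 128
 * = 32 buffers of 128 bytes laid out back to back in rx_buffer.  A frame
 * that starts near the end of the ring continues in buffer 0, so it is
 * stitched into the core's NetRxPackets[0] buffer.  For instance, a
 * 300-byte frame starting in ring entry 31 is reassembled as
 *
 *	headlen = 128 * (32 - 31) = 128 bytes from the tail of rx_buffer,
 *	taillen = 300 - 128 = 172 bytes from the start of rx_buffer.
 */
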
static void macb_phy_reset(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", netdev->name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", netdev->name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       netdev->name, status);
}

#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", macb->netdev.name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", macb->netdev.name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */

static int macb_phy_init(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(netdev->name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb))
		return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", netdev->name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
	/* TODO: this assumes RGMII; other PHY interface modes should be handled */
	phydev = phy_connect(macb->bus, macb->phy_addr, netdev,
			     PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb);

		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       netdev->name, status);
		return 0;
	}

	/* First check for GMAC */
	if (macb_is_gem(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);

		if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
			duplex = ((lpa & LPA_1000FULL) ? 1 : 0);

			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
			       netdev->name,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			ncfgr |= GEM_BIT(GBE);

			if (duplex)
				ncfgr |= MACB_BIT(FD);

			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* fall back to EMAC checking */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       netdev->name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}

static int macb_write_hwaddr(struct eth_device *dev);
static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
		/*
		 * On a GMAC IP with the GE feature, this bit selects
		 * between the RGMII and GMII interfaces.  Without the
		 * GE feature, it selects between RMII and MII.
		 */
#if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
		gem_writel(macb, UR, GEM_BIT(RGMII));
#else
		gem_writel(macb, UR, 0);
#endif
	} else {
		/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
	}

	/* update the ethaddr */
	if (is_valid_ether_addr(netdev->enetaddr)) {
		macb_write_hwaddr(netdev);
	} else {
		printf("%s: MAC address is not valid\n", netdev->name);
		return -1;
	}

	if (!macb_phy_init(macb))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

static int macb_write_hwaddr(struct eth_device *dev)
{
	struct macb_device *macb = to_macb(dev);
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = dev->enetaddr[0] | dev->enetaddr[1] << 8 |
			dev->enetaddr[2] << 16 | dev->enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = dev->enetaddr[4] | dev->enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);
	return 0;
}

/* Pick an MDC clock divider for the given pclk rate; IEEE 802.3 caps MDC at 2.5 MHz. */
static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program.  We find the width by decoding the design configuration
 * register, which reports the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb_device *macb)
{
	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}
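
/*
 * macb_eth_initialize() below is the board-facing entry point.  A minimal
 * sketch of how a board file might register the controller; the base-address
 * macro and PHY address 0 are board-specific assumptions, not something this
 * driver defines:
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return macb_eth_initialize(0, (void *)ATMEL_BASE_EMAC0, 0);
 *	}
 */
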
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;
	u32 ncfgr;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);

	/* TODO: check that rx_ring_dma and tx_ring_dma are dcache line aligned */

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}

#endif