/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <dm.h>

/*
 * The u-boot networking stack is a little weird.  It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use.  We'll
 * allocate our own, but we still need one buffer from the core for
 * the case where a packet wraps around the DMA ring and has to be
 * copied out.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header.  This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an Ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding").  MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */

#include <net.h>
#ifndef CONFIG_DM_ETH
#include <netdev.h>
#endif
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>
#include <asm-generic/errno.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE		4096
#define MACB_RX_RING_SIZE		(MACB_RX_BUFFER_SIZE / 128)
#define MACB_TX_RING_SIZE		16
#define MACB_TX_TIMEOUT		1000
#define MACB_AUTONEG_TIMEOUT	5000000

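/*
 * Hardware DMA descriptor: one buffer address word and one
 * control/status word, shared with the controller.
 */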
struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define DMA_DESC_BYTES(n)	(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))

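/* Bits in the receive descriptor address and control/status words */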
#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

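/* Bits in the transmit descriptor control/status word */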
#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;
	unsigned int		next_rx_tail;
	bool			wrapped;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	struct macb_dma_desc	*dummy_desc;
	unsigned long		dummy_desc_dma;

	const struct device	*dev;
#ifndef CONFIG_DM_ETH
	struct eth_device	netdev;
#endif
	unsigned short		phy_addr;
	struct mii_dev		*bus;
};
#ifndef CONFIG_DM_ETH
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)
#endif

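/*
 * The module ID field of the MID register tells the 10/100 MACB core
 * apart from the gigabit-capable GEM core.
 */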
static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

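/* Default to "no" on platforms that don't provide these SoC checks. */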
#ifndef cpu_is_sama5d2
#define cpu_is_sama5d2() 0
#endif

#ifndef cpu_is_sama5d4
#define cpu_is_sama5d4() 0
#endif

static int gem_is_gigabit_capable(struct macb_device *macb)
{
	/*
	 * The GEM controllers embedded in SAMA5D2 and SAMA5D4 are
	 * configured to support only 10/100.
	 */
	return macb_is_gem(macb) && !cpu_is_sama5d2() && !cpu_is_sama5d4();
}

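/*
 * MDIO access goes through the MAN register: enable the management
 * port, write an MII management frame (start-of-frame, read/write
 * opcode, PHY and register address, data), poll NSR until the IDLE
 * bit is set, then disable the management port again.
 */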
static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

void __weak arch_get_mdio_control(const char *name)
{
	return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
{
#ifdef CONFIG_DM_ETH
	struct udevice *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = dev_get_priv(dev);
#else
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);
#endif

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	*value = macb_mdio_read(macb, reg);

	return 0;
}

int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
{
#ifdef CONFIG_DM_ETH
	struct udevice *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = dev_get_priv(dev);
#else
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);
#endif

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	macb_mdio_write(macb, reg, value);

	return 0;
}
#endif

#define RX	1
#define TX	0
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
			MACB_RX_DMA_DESC_SIZE);
	else
		invalidate_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
			MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
			MACB_RX_DMA_DESC_SIZE);
	else
		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
			MACB_TX_DMA_DESC_SIZE);
}

static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}

static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}

#if defined(CONFIG_CMD_NET)

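/*
 * Queue one frame on the TX ring, start the transmitter and busy-wait
 * until the controller hands the descriptor back (TXBUF_USED) or the
 * timeout expires.
 */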
static int _macb_send(struct macb_device *macb, const char *name, void *packet,
		      int length)
{
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_flush_ring_desc(macb, TX);
	/* Do we need to check whether paddr and length are dcache line aligned? */
	flush_dcache_range(paddr, paddr + ALIGN(length, ARCH_DMA_MINALIGN));
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n", name);
	} else {
		printf("%s: TX timeout\n", name);
	}

	/* No one cares anyway */
	return 0;
}

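/*
 * Return the receive descriptors between rx_tail and new_tail to the
 * controller by clearing their RXADDR_USED bits, then advance rx_tail.
 */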
static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	macb_invalidate_ring_desc(macb, RX);
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}

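/*
 * A frame may span several 128-byte buffers. Walk the descriptors from
 * next_rx_tail until one marked RXBUF_FRAME_END is found; if the frame
 * wrapped past the end of the ring, reassemble it into the core's
 * receive buffer instead of returning it in place.
 */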
static int _macb_recv(struct macb_device *macb, uchar **packetp)
{
	unsigned int next_rx_tail = macb->next_rx_tail;
	void *buffer;
	int length;
	u32 status;

	macb->wrapped = false;
	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (!(macb->rx_ring[next_rx_tail].addr & RXADDR_USED))
			return -EAGAIN;

		status = macb->rx_ring[next_rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (next_rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, next_rx_tail);
			macb->wrapped = false;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (macb->wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				*packetp = (void *)net_rx_packets[0];
			} else {
				*packetp = buffer;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE)
				next_rx_tail = 0;
			macb->next_rx_tail = next_rx_tail;
			return length;
		} else {
			if (++next_rx_tail >= MACB_RX_RING_SIZE) {
				macb->wrapped = true;
				next_rx_tail = 0;
			}
		}
		barrier();
	}
}

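/* Restart autonegotiation and wait for it to complete or time out. */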
static void macb_phy_reset(struct macb_device *macb, const char *name)
{
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       name, status);
}

#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb, const char *name)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */


static int macb_phy_init(struct macb_device *macb, const char *name)
{
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb, name))
		return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
	/* need to consider other phy interface modes */
	phydev = phy_connect(macb->bus, macb->phy_addr, &macb->netdev,
			     PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb, name);

		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       name, status);
		return 0;
	}

	/* First check for GMAC and that it is gigabit capable */
	if (gem_is_gigabit_capable(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);

		if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
			duplex = ((lpa & LPA_1000FULL) ? 1 : 0);

			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
			       name,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			ncfgr |= GEM_BIT(GBE);

			if (duplex)
				ncfgr |= MACB_BIT(FD);

			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* fall back to EMAC checking */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}

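/*
 * The GEM may implement several transmit queues; only queue 0 is used
 * here, so point every other queue at a single descriptor that is
 * permanently marked as used.
 */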
static int gmac_init_multi_queues(struct macb_device *macb)
{
	int i, num_queues = 1;
	u32 queue_mask;

	/* bit 0 is never set but queue 0 always exists */
	queue_mask = gem_readl(macb, DCFG6) & 0xff;
	queue_mask |= 0x1;

	for (i = 1; i < MACB_MAX_QUEUES; i++)
		if (queue_mask & (1 << i))
			num_queues++;

	macb->dummy_desc->ctrl = TXBUF_USED;
	macb->dummy_desc->addr = 0;
	flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
			MACB_TX_DUMMY_DMA_DESC_SIZE);

	for (i = 1; i < num_queues; i++)
		gem_writel_queue_TBQP(macb, macb->dummy_desc_dma, i - 1);

	return 0;
}

static int _macb_init(struct macb_device *macb, const char *name)
{
	unsigned long paddr;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;
	macb->next_rx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
		/* Check for multiple TX queues and initialize the unused ones */
629 		gmac_init_multi_queues(macb);
630 
		/*
		 * When the GMAC IP has the GE feature, this bit selects the
		 * interface between RGMII and GMII.
		 * When the GMAC IP does not have the GE feature, this bit
		 * selects between RMII and MII.
		 */
#if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
		gem_writel(macb, UR, GEM_BIT(RGMII));
#else
		gem_writel(macb, UR, 0);
#endif
	} else {
	/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
	macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
	macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
	macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
	macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
	}

	if (!macb_phy_init(macb, name))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

static void _macb_halt(struct macb_device *macb)
{
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

static int _macb_write_hwaddr(struct macb_device *macb, unsigned char *enetaddr)
{
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = enetaddr[0] | enetaddr[1] << 8 |
			enetaddr[2] << 16 | enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = enetaddr[4] | enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);
	return 0;
}

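/*
 * Pick the MDC clock divider in NCFGR from the peripheral clock rate
 * so the management interface runs at a legal speed.
 */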
static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb_device *macb)
{
	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

static void _macb_eth_initialize(struct macb_device *macb)
{
	int id = 0;	/* This is not used by functions we call */
	u32 ncfgr;

	/* TODO: we need to check that rx/tx_ring_dma is dcache line aligned */
	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);
	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
					   &macb->dummy_desc_dma);

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);
}

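/* Legacy (non-driver-model) glue around the _macb_* helpers */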
#ifndef CONFIG_DM_ETH
static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_send(macb, netdev->name, packet, length);
}

static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	uchar *packet;
	int length;

	macb->wrapped = false;
	for (;;) {
		macb->next_rx_tail = macb->rx_tail;
		length = _macb_recv(macb, &packet);
		if (length >= 0) {
			net_process_received_packet(packet, length);
			reclaim_rx_buffers(macb, macb->next_rx_tail);
		} else if (length < 0) {
			return length;
		}
	}
}

static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_init(macb, netdev->name);
}

static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	_macb_halt(macb);
}

static int macb_write_hwaddr(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_write_hwaddr(macb, netdev->enetaddr);
}

int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	_macb_eth_initialize(macb);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}
#endif /* !CONFIG_DM_ETH */

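/* Driver-model (CONFIG_DM_ETH) glue around the _macb_* helpers */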
#ifdef CONFIG_DM_ETH

static int macb_start(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);

	return _macb_init(macb, dev->name);
}

static int macb_send(struct udevice *dev, void *packet, int length)
{
	struct macb_device *macb = dev_get_priv(dev);

	return _macb_send(macb, dev->name, packet, length);
}

static int macb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct macb_device *macb = dev_get_priv(dev);

	macb->next_rx_tail = macb->rx_tail;
	macb->wrapped = false;

	return _macb_recv(macb, packetp);
}

static int macb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct macb_device *macb = dev_get_priv(dev);

	reclaim_rx_buffers(macb, macb->next_rx_tail);

	return 0;
}

static void macb_stop(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);

	_macb_halt(macb);
}

static int macb_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct macb_device *macb = dev_get_priv(dev);

	return _macb_write_hwaddr(macb, plat->enetaddr);
}

static const struct eth_ops macb_eth_ops = {
	.start	= macb_start,
	.send	= macb_send,
	.recv	= macb_recv,
	.stop	= macb_stop,
	.free_pkt	= macb_free_pkt,
	.write_hwaddr	= macb_write_hwaddr,
};

static int macb_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct macb_device *macb = dev_get_priv(dev);

	macb->regs = (void *)pdata->iobase;

	_macb_eth_initialize(macb);
#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(dev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(dev->name);
#endif

	return 0;
}

static int macb_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

	pdata->iobase = dev_get_addr(dev);
	return 0;
}

static const struct udevice_id macb_eth_ids[] = {
	{ .compatible = "cdns,macb" },
	{ }
};

U_BOOT_DRIVER(eth_macb) = {
	.name	= "eth_macb",
	.id	= UCLASS_ETH,
	.of_match = macb_eth_ids,
	.ofdata_to_platdata = macb_eth_ofdata_to_platdata,
	.probe	= macb_eth_probe,
	.ops	= &macb_eth_ops,
	.priv_auto_alloc_size = sizeof(struct macb_device),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
#endif

#endif
973