xref: /openbmc/u-boot/drivers/net/macb.c (revision 1304f4bb)
1 /*
2  * Copyright (C) 2005-2006 Atmel Corporation
3  *
4  * SPDX-License-Identifier:	GPL-2.0+
5  */
6 #include <common.h>
7 #include <clk.h>
8 #include <dm.h>
9 
10 /*
11  * The u-boot networking stack is a little weird.  It seems like the
12  * networking core allocates receive buffers up front without any
13  * regard to the hardware that's supposed to actually receive those
14  * packets.
15  *
16  * The MACB receives packets into 128-byte receive buffers, so the
17  * buffers allocated by the core aren't very practical to use.  We'll
18  * allocate our own, but we still need one buffer from the core in case
19  * a packet wraps around the end of the DMA ring and has to be copied.
20  *
21  * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
22  * configuration header.  This way, the core allocates one RX buffer
23  * and one TX buffer, each of which can hold an Ethernet packet of
24  * maximum size.
25  *
26  * For some reason, the networking core unconditionally specifies a
27  * 32-byte packet "alignment" (which really should be called
28  * "padding").  MACB shouldn't need that, but we'll refrain from any
29  * core modifications here...
30  */
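/*
 * For example, a board would request that single core-allocated buffer by
 * adding something like the following to its board configuration header
 * (illustrative; by U-Boot convention the header is include/configs/<board>.h):
 *
 *	#define CONFIG_SYS_RX_ETH_BUFFER	1
 */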
31 
32 #include <net.h>
33 #ifndef CONFIG_DM_ETH
34 #include <netdev.h>
35 #endif
36 #include <malloc.h>
37 #include <miiphy.h>
38 
39 #include <linux/mii.h>
40 #include <asm/io.h>
41 #include <asm/dma-mapping.h>
42 #include <asm/arch/clk.h>
43 #include <linux/errno.h>
44 
45 #include "macb.h"
46 
47 DECLARE_GLOBAL_DATA_PTR;
48 
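/*
 * A single 4096-byte RX buffer is carved into 128-byte hardware receive
 * slots, so the RX ring needs 4096 / 128 = 32 descriptors.
 */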
49 #define MACB_RX_BUFFER_SIZE		4096
50 #define MACB_RX_RING_SIZE		(MACB_RX_BUFFER_SIZE / 128)
51 #define MACB_TX_RING_SIZE		16
52 #define MACB_TX_TIMEOUT		1000
53 #define MACB_AUTONEG_TIMEOUT	5000000
54 
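/*
 * Hardware DMA descriptor: a buffer address word (whose low bits carry the
 * used/wrap flags on RX) and a control/status word.
 */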
55 struct macb_dma_desc {
56 	u32	addr;
57 	u32	ctrl;
58 };
59 
60 #define DMA_DESC_BYTES(n)	((n) * sizeof(struct macb_dma_desc))
61 #define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
62 #define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
63 #define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))
64 
65 #define RXADDR_USED		0x00000001
66 #define RXADDR_WRAP		0x00000002
67 
68 #define RXBUF_FRMLEN_MASK	0x00000fff
69 #define RXBUF_FRAME_START	0x00004000
70 #define RXBUF_FRAME_END		0x00008000
71 #define RXBUF_TYPEID_MATCH	0x00400000
72 #define RXBUF_ADDR4_MATCH	0x00800000
73 #define RXBUF_ADDR3_MATCH	0x01000000
74 #define RXBUF_ADDR2_MATCH	0x02000000
75 #define RXBUF_ADDR1_MATCH	0x04000000
76 #define RXBUF_BROADCAST		0x80000000
77 
78 #define TXBUF_FRMLEN_MASK	0x000007ff
79 #define TXBUF_FRAME_END		0x00008000
80 #define TXBUF_NOCRC		0x00010000
81 #define TXBUF_EXHAUSTED		0x08000000
82 #define TXBUF_UNDERRUN		0x10000000
83 #define TXBUF_MAXRETRY		0x20000000
84 #define TXBUF_WRAP		0x40000000
85 #define TXBUF_USED		0x80000000
86 
87 struct macb_device {
88 	void			*regs;
89 
90 	unsigned int		rx_tail;
91 	unsigned int		tx_head;
92 	unsigned int		tx_tail;
93 	unsigned int		next_rx_tail;
94 	bool			wrapped;
95 
96 	void			*rx_buffer;
97 	void			*tx_buffer;
98 	struct macb_dma_desc	*rx_ring;
99 	struct macb_dma_desc	*tx_ring;
100 
101 	unsigned long		rx_buffer_dma;
102 	unsigned long		rx_ring_dma;
103 	unsigned long		tx_ring_dma;
104 
105 	struct macb_dma_desc	*dummy_desc;
106 	unsigned long		dummy_desc_dma;
107 
108 	const struct device	*dev;
109 #ifndef CONFIG_DM_ETH
110 	struct eth_device	netdev;
111 #endif
112 	unsigned short		phy_addr;
113 	struct mii_dev		*bus;
114 
115 #ifdef CONFIG_DM_ETH
116 #ifdef CONFIG_CLK
117 	unsigned long		pclk_rate;
118 #endif
119 	phy_interface_t		phy_interface;
120 #endif
121 };
122 #ifndef CONFIG_DM_ETH
123 #define to_macb(_nd) container_of(_nd, struct macb_device, netdev)
124 #endif
125 
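/* The Cadence GEM variant reports module ID 0x2 in the MID register. */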
126 static int macb_is_gem(struct macb_device *macb)
127 {
128 	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
129 }
130 
131 #ifndef cpu_is_sama5d2
132 #define cpu_is_sama5d2() 0
133 #endif
134 
135 #ifndef cpu_is_sama5d4
136 #define cpu_is_sama5d4() 0
137 #endif
138 
139 static int gem_is_gigabit_capable(struct macb_device *macb)
140 {
141 	/*
142 	 * The GEM controllers embedded in SAMA5D2 and SAMA5D4 are
143 	 * configured to support only 10/100.
144 	 */
145 	return macb_is_gem(macb) && !cpu_is_sama5d2() && !cpu_is_sama5d4();
146 }
147 
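/*
 * MDIO access goes through the MAN register: enable the management port,
 * write a Clause 22 frame (SOF, read/write opcode, PHY and register
 * addresses, turnaround code), busy-wait for the IDLE bit in NSR, then
 * disable the management port again.
 */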
148 static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
149 {
150 	unsigned long netctl;
151 	unsigned long netstat;
152 	unsigned long frame;
153 
154 	netctl = macb_readl(macb, NCR);
155 	netctl |= MACB_BIT(MPE);
156 	macb_writel(macb, NCR, netctl);
157 
158 	frame = (MACB_BF(SOF, 1)
159 		 | MACB_BF(RW, 1)
160 		 | MACB_BF(PHYA, macb->phy_addr)
161 		 | MACB_BF(REGA, reg)
162 		 | MACB_BF(CODE, 2)
163 		 | MACB_BF(DATA, value));
164 	macb_writel(macb, MAN, frame);
165 
166 	do {
167 		netstat = macb_readl(macb, NSR);
168 	} while (!(netstat & MACB_BIT(IDLE)));
169 
170 	netctl = macb_readl(macb, NCR);
171 	netctl &= ~MACB_BIT(MPE);
172 	macb_writel(macb, NCR, netctl);
173 }
174 
175 static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
176 {
177 	unsigned long netctl;
178 	unsigned long netstat;
179 	unsigned long frame;
180 
181 	netctl = macb_readl(macb, NCR);
182 	netctl |= MACB_BIT(MPE);
183 	macb_writel(macb, NCR, netctl);
184 
185 	frame = (MACB_BF(SOF, 1)
186 		 | MACB_BF(RW, 2)
187 		 | MACB_BF(PHYA, macb->phy_addr)
188 		 | MACB_BF(REGA, reg)
189 		 | MACB_BF(CODE, 2));
190 	macb_writel(macb, MAN, frame);
191 
192 	do {
193 		netstat = macb_readl(macb, NSR);
194 	} while (!(netstat & MACB_BIT(IDLE)));
195 
196 	frame = macb_readl(macb, MAN);
197 
198 	netctl = macb_readl(macb, NCR);
199 	netctl &= ~MACB_BIT(MPE);
200 	macb_writel(macb, NCR, netctl);
201 
202 	return MACB_BFEXT(DATA, frame);
203 }
204 
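/*
 * Weak no-op hook; a board can override it to prepare MDIO access for the
 * named interface (for example, to switch an external MDIO mux).
 */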
205 void __weak arch_get_mdio_control(const char *name)
206 {
207 	return;
208 }
209 
210 #if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
211 
212 int macb_miiphy_read(struct mii_dev *bus, int phy_adr, int devad, int reg)
213 {
214 	u16 value = 0;
215 #ifdef CONFIG_DM_ETH
216 	struct udevice *dev = eth_get_dev_by_name(bus->name);
217 	struct macb_device *macb = dev_get_priv(dev);
218 #else
219 	struct eth_device *dev = eth_get_dev_by_name(bus->name);
220 	struct macb_device *macb = to_macb(dev);
221 #endif
222 
223 	if (macb->phy_addr != phy_adr)
224 		return -1;
225 
226 	arch_get_mdio_control(bus->name);
227 	value = macb_mdio_read(macb, reg);
228 
229 	return value;
230 }
231 
232 int macb_miiphy_write(struct mii_dev *bus, int phy_adr, int devad, int reg,
233 		      u16 value)
234 {
235 #ifdef CONFIG_DM_ETH
236 	struct udevice *dev = eth_get_dev_by_name(bus->name);
237 	struct macb_device *macb = dev_get_priv(dev);
238 #else
239 	struct eth_device *dev = eth_get_dev_by_name(bus->name);
240 	struct macb_device *macb = to_macb(dev);
241 #endif
242 
243 	if (macb->phy_addr != phy_adr)
244 		return -1;
245 
246 	arch_get_mdio_control(bus->name);
247 	macb_mdio_write(macb, reg, value);
248 
249 	return 0;
250 }
251 #endif
252 
253 #define RX	1
254 #define TX	0
255 static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
256 {
257 	if (rx)
258 		invalidate_dcache_range(macb->rx_ring_dma,
259 			ALIGN(macb->rx_ring_dma + MACB_RX_DMA_DESC_SIZE,
260 			      PKTALIGN));
261 	else
262 		invalidate_dcache_range(macb->tx_ring_dma,
263 			ALIGN(macb->tx_ring_dma + MACB_TX_DMA_DESC_SIZE,
264 			      PKTALIGN));
265 }
266 
267 static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
268 {
269 	if (rx)
270 		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
271 				   ALIGN(MACB_RX_DMA_DESC_SIZE, PKTALIGN));
272 	else
273 		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
274 				   ALIGN(MACB_TX_DMA_DESC_SIZE, PKTALIGN));
275 }
276 
277 static inline void macb_flush_rx_buffer(struct macb_device *macb)
278 {
279 	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
280 			   ALIGN(MACB_RX_BUFFER_SIZE, PKTALIGN));
281 }
282 
283 static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
284 {
285 	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
286 				ALIGN(MACB_RX_BUFFER_SIZE, PKTALIGN));
287 }
288 
289 #if defined(CONFIG_CMD_NET)
290 
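/*
 * Queue one frame on the TX ring, start the transmitter, and busy-wait for
 * up to MACB_TX_TIMEOUT microseconds for the controller to hand the
 * descriptor back to software.
 */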
291 static int _macb_send(struct macb_device *macb, const char *name, void *packet,
292 		      int length)
293 {
294 	unsigned long paddr, ctrl;
295 	unsigned int tx_head = macb->tx_head;
296 	int i;
297 
298 	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);
299 
300 	ctrl = length & TXBUF_FRMLEN_MASK;
301 	ctrl |= TXBUF_FRAME_END;
302 	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
303 		ctrl |= TXBUF_WRAP;
304 		macb->tx_head = 0;
305 	} else {
306 		macb->tx_head++;
307 	}
308 
309 	macb->tx_ring[tx_head].ctrl = ctrl;
310 	macb->tx_ring[tx_head].addr = paddr;
311 	barrier();
312 	macb_flush_ring_desc(macb, TX);
313 	/* Do we need to check that paddr and length are dcache-line aligned? */
314 	flush_dcache_range(paddr, paddr + ALIGN(length, ARCH_DMA_MINALIGN));
315 	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));
316 
317 	/*
318 	 * Wait for the controller to hand the descriptor back (TXBUF_USED):
319 	 * the networking core may reuse the transmit buffer as soon as we return.
320 	 */
321 	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
322 		barrier();
323 		macb_invalidate_ring_desc(macb, TX);
324 		ctrl = macb->tx_ring[tx_head].ctrl;
325 		if (ctrl & TXBUF_USED)
326 			break;
327 		udelay(1);
328 	}
329 
330 	dma_unmap_single(packet, length, paddr);
331 
332 	if (i <= MACB_TX_TIMEOUT) {
333 		if (ctrl & TXBUF_UNDERRUN)
334 			printf("%s: TX underrun\n", name);
335 		if (ctrl & TXBUF_EXHAUSTED)
336 			printf("%s: TX buffers exhausted in mid frame\n", name);
337 	} else {
338 		printf("%s: TX timeout\n", name);
339 	}
340 
341 	/* No one cares anyway */
342 	return 0;
343 }
344 
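/*
 * Return the RX descriptors between rx_tail and new_tail to the controller
 * by clearing their RXADDR_USED bits, then advance rx_tail.
 */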
345 static void reclaim_rx_buffers(struct macb_device *macb,
346 			       unsigned int new_tail)
347 {
348 	unsigned int i;
349 
350 	i = macb->rx_tail;
351 
352 	macb_invalidate_ring_desc(macb, RX);
353 	while (i > new_tail) {
354 		macb->rx_ring[i].addr &= ~RXADDR_USED;
355 		i++;
356 		if (i >= MACB_RX_RING_SIZE)
357 			i = 0;
358 	}
359 
360 	while (i < new_tail) {
361 		macb->rx_ring[i].addr &= ~RXADDR_USED;
362 		i++;
363 	}
364 
365 	barrier();
366 	macb_flush_ring_desc(macb, RX);
367 	macb->rx_tail = new_tail;
368 }
369 
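/*
 * Scan the RX ring starting at next_rx_tail.  Return the frame length and a
 * pointer to the frame data, or -EAGAIN if no complete frame is pending.
 * A frame that wraps past the end of the ring is copied into
 * net_rx_packets[0] so that the caller always sees contiguous data.
 */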
370 static int _macb_recv(struct macb_device *macb, uchar **packetp)
371 {
372 	unsigned int next_rx_tail = macb->next_rx_tail;
373 	void *buffer;
374 	int length;
375 	u32 status;
376 
377 	macb->wrapped = false;
378 	for (;;) {
379 		macb_invalidate_ring_desc(macb, RX);
380 
381 		if (!(macb->rx_ring[next_rx_tail].addr & RXADDR_USED))
382 			return -EAGAIN;
383 
384 		status = macb->rx_ring[next_rx_tail].ctrl;
385 		if (status & RXBUF_FRAME_START) {
386 			if (next_rx_tail != macb->rx_tail)
387 				reclaim_rx_buffers(macb, next_rx_tail);
388 			macb->wrapped = false;
389 		}
390 
391 		if (status & RXBUF_FRAME_END) {
392 			buffer = macb->rx_buffer + 128 * macb->rx_tail;
393 			length = status & RXBUF_FRMLEN_MASK;
394 
395 			macb_invalidate_rx_buffer(macb);
396 			if (macb->wrapped) {
397 				unsigned int headlen, taillen;
398 
399 				headlen = 128 * (MACB_RX_RING_SIZE
400 						 - macb->rx_tail);
401 				taillen = length - headlen;
402 				memcpy((void *)net_rx_packets[0],
403 				       buffer, headlen);
404 				memcpy((void *)net_rx_packets[0] + headlen,
405 				       macb->rx_buffer, taillen);
406 				*packetp = (void *)net_rx_packets[0];
407 			} else {
408 				*packetp = buffer;
409 			}
410 
411 			if (++next_rx_tail >= MACB_RX_RING_SIZE)
412 				next_rx_tail = 0;
413 			macb->next_rx_tail = next_rx_tail;
414 			return length;
415 		} else {
416 			if (++next_rx_tail >= MACB_RX_RING_SIZE) {
417 				macb->wrapped = true;
418 				next_rx_tail = 0;
419 			}
420 		}
421 		barrier();
422 	}
423 }
424 
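/*
 * Restart autonegotiation, advertising all 10/100 modes, and poll BMSR for
 * completion for up to MACB_AUTONEG_TIMEOUT microseconds.
 */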
425 static void macb_phy_reset(struct macb_device *macb, const char *name)
426 {
427 	int i;
428 	u16 status, adv;
429 
430 	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
431 	macb_mdio_write(macb, MII_ADVERTISE, adv);
432 	printf("%s: Starting autonegotiation...\n", name);
433 	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
434 					 | BMCR_ANRESTART));
435 
436 	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
437 		status = macb_mdio_read(macb, MII_BMSR);
438 		if (status & BMSR_ANEGCOMPLETE)
439 			break;
440 		udelay(100);
441 	}
442 
443 	if (status & BMSR_ANEGCOMPLETE)
444 		printf("%s: Autonegotiation complete\n", name);
445 	else
446 		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
447 		       name, status);
448 }
449 
450 #ifdef CONFIG_MACB_SEARCH_PHY
451 static int macb_phy_find(struct macb_device *macb, const char *name)
452 {
453 	int i;
454 	u16 phy_id;
455 
456 	/* Search for PHY... */
457 	for (i = 0; i < 32; i++) {
458 		macb->phy_addr = i;
459 		phy_id = macb_mdio_read(macb, MII_PHYSID1);
460 		if (phy_id != 0xffff) {
461 			printf("%s: PHY present at %d\n", name, i);
462 			return 1;
463 		}
464 	}
465 
466 	/* PHY isn't up to snuff */
467 	printf("%s: PHY not found\n", name);
468 
469 	return 0;
470 }
471 #endif /* CONFIG_MACB_SEARCH_PHY */
472 
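/*
 * Bring up the PHY: optionally scan for its address, restart
 * autonegotiation if the link is down, then program NCFGR with the
 * negotiated speed and duplex.  Returns non-zero once the link is up.
 */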
473 #ifdef CONFIG_DM_ETH
474 static int macb_phy_init(struct udevice *dev, const char *name)
475 #else
476 static int macb_phy_init(struct macb_device *macb, const char *name)
477 #endif
478 {
479 #ifdef CONFIG_DM_ETH
480 	struct macb_device *macb = dev_get_priv(dev);
481 #endif
482 #ifdef CONFIG_PHYLIB
483 	struct phy_device *phydev;
484 #endif
485 	u32 ncfgr;
486 	u16 phy_id, status, adv, lpa;
487 	int media, speed, duplex;
488 	int i;
489 
490 	arch_get_mdio_control(name);
491 #ifdef CONFIG_MACB_SEARCH_PHY
492 	/* Auto-detect phy_addr */
493 	if (!macb_phy_find(macb, name))
494 		return 0;
495 #endif /* CONFIG_MACB_SEARCH_PHY */
496 
497 	/* Check if the PHY is up to snuff... */
498 	phy_id = macb_mdio_read(macb, MII_PHYSID1);
499 	if (phy_id == 0xffff) {
500 		printf("%s: No PHY present\n", name);
501 		return 0;
502 	}
503 
504 #ifdef CONFIG_PHYLIB
505 #ifdef CONFIG_DM_ETH
506 	phydev = phy_connect(macb->bus, macb->phy_addr, dev,
507 			     macb->phy_interface);
508 #else
509 	/* TODO: consider other PHY interface modes here */
510 	phydev = phy_connect(macb->bus, macb->phy_addr, &macb->netdev,
511 			     PHY_INTERFACE_MODE_RGMII);
512 #endif
513 	if (!phydev) {
514 		printf("phy_connect failed\n");
515 		return -ENODEV;
516 	}
517 
518 	phy_config(phydev);
519 #endif
520 
521 	status = macb_mdio_read(macb, MII_BMSR);
522 	if (!(status & BMSR_LSTATUS)) {
523 		/* Try to re-negotiate if we don't have link already. */
524 		macb_phy_reset(macb, name);
525 
526 		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
527 			status = macb_mdio_read(macb, MII_BMSR);
528 			if (status & BMSR_LSTATUS)
529 				break;
530 			udelay(100);
531 		}
532 	}
533 
534 	if (!(status & BMSR_LSTATUS)) {
535 		printf("%s: link down (status: 0x%04x)\n",
536 		       name, status);
537 		return 0;
538 	}
539 
540 	/* First check for a GMAC and that it is gigabit capable */
541 	if (gem_is_gigabit_capable(macb)) {
542 		lpa = macb_mdio_read(macb, MII_STAT1000);
543 
544 		if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
545 			duplex = ((lpa & LPA_1000FULL) ? 1 : 0);
546 
547 			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
548 			       name,
549 			       duplex ? "full" : "half",
550 			       lpa);
551 
552 			ncfgr = macb_readl(macb, NCFGR);
553 			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
554 			ncfgr |= GEM_BIT(GBE);
555 
556 			if (duplex)
557 				ncfgr |= MACB_BIT(FD);
558 
559 			macb_writel(macb, NCFGR, ncfgr);
560 
561 			return 1;
562 		}
563 	}
564 
565 	/* Fall back to the 10/100 (EMAC) link negotiation results */
566 	adv = macb_mdio_read(macb, MII_ADVERTISE);
567 	lpa = macb_mdio_read(macb, MII_LPA);
568 	media = mii_nway_result(lpa & adv);
569 	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
570 		 ? 1 : 0);
571 	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
572 	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
573 	       name,
574 	       speed ? "100" : "10",
575 	       duplex ? "full" : "half",
576 	       lpa);
577 
578 	ncfgr = macb_readl(macb, NCFGR);
579 	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
580 	if (speed)
581 		ncfgr |= MACB_BIT(SPD);
582 	if (duplex)
583 		ncfgr |= MACB_BIT(FD);
584 	macb_writel(macb, NCFGR, ncfgr);
585 
586 	return 1;
587 }
588 
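/*
 * GEM controllers may implement several priority queues.  Point every queue
 * other than queue 0 at a single permanently-"used" dummy TX descriptor so
 * that the extra queues never transmit anything.
 */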
589 static int gmac_init_multi_queues(struct macb_device *macb)
590 {
591 	int i, num_queues = 1;
592 	u32 queue_mask;
593 
594 	/* Bit 0 of DCFG6 is never set, but queue 0 always exists */
595 	queue_mask = gem_readl(macb, DCFG6) & 0xff;
596 	queue_mask |= 0x1;
597 
598 	for (i = 1; i < MACB_MAX_QUEUES; i++)
599 		if (queue_mask & (1 << i))
600 			num_queues++;
601 
602 	macb->dummy_desc->ctrl = TXBUF_USED;
603 	macb->dummy_desc->addr = 0;
604 	flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
605 			ALIGN(MACB_TX_DUMMY_DMA_DESC_SIZE, PKTALIGN));
606 
607 	for (i = 1; i < num_queues; i++)
608 		gem_writel_queue_TBQP(macb, macb->dummy_desc_dma, i - 1);
609 
610 	return 0;
611 }
612 
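/*
 * (Re)initialize the RX and TX descriptor rings, program the ring base
 * registers, select the PHY interface mode, bring up the PHY, and finally
 * enable the transmitter and receiver.
 */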
613 #ifdef CONFIG_DM_ETH
614 static int _macb_init(struct udevice *dev, const char *name)
615 #else
616 static int _macb_init(struct macb_device *macb, const char *name)
617 #endif
618 {
619 #ifdef CONFIG_DM_ETH
620 	struct macb_device *macb = dev_get_priv(dev);
621 #endif
622 	unsigned long paddr;
623 	int i;
624 
625 	/*
626 	 * macb_halt should have been called at some point before now,
627 	 * so we'll assume the controller is idle.
628 	 */
629 
630 	/* initialize DMA descriptors */
631 	paddr = macb->rx_buffer_dma;
632 	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
633 		if (i == (MACB_RX_RING_SIZE - 1))
634 			paddr |= RXADDR_WRAP;
635 		macb->rx_ring[i].addr = paddr;
636 		macb->rx_ring[i].ctrl = 0;
637 		paddr += 128;
638 	}
639 	macb_flush_ring_desc(macb, RX);
640 	macb_flush_rx_buffer(macb);
641 
642 	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
643 		macb->tx_ring[i].addr = 0;
644 		if (i == (MACB_TX_RING_SIZE - 1))
645 			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
646 		else
647 			macb->tx_ring[i].ctrl = TXBUF_USED;
648 	}
649 	macb_flush_ring_desc(macb, TX);
650 
651 	macb->rx_tail = 0;
652 	macb->tx_head = 0;
653 	macb->tx_tail = 0;
654 	macb->next_rx_tail = 0;
655 
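	/* Tell the controller where the RX and TX descriptor rings live */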
656 	macb_writel(macb, RBQP, macb->rx_ring_dma);
657 	macb_writel(macb, TBQP, macb->tx_ring_dma);
658 
659 	if (macb_is_gem(macb)) {
660 		/* Check for multiple queues and initialize the extra TX queues */
661 		gmac_init_multi_queues(macb);
662 
663 		/*
664 		 * When the GMAC IP has the GE feature, this bit selects
665 		 * the interface between RGMII and GMII.
666 		 * When the GMAC IP lacks the GE feature, this bit selects
667 		 * the interface between RMII and MII.
668 		 */
669 #ifdef CONFIG_DM_ETH
670 		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII)
671 			gem_writel(macb, UR, GEM_BIT(RGMII));
672 		else
673 			gem_writel(macb, UR, 0);
674 #else
675 #if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
676 		gem_writel(macb, UR, GEM_BIT(RGMII));
677 #else
678 		gem_writel(macb, UR, 0);
679 #endif
680 #endif
681 	} else {
682 		/* Choose RMII or MII mode.  This depends on the board. */
683 #ifdef CONFIG_DM_ETH
684 #ifdef CONFIG_AT91FAMILY
685 		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII) {
686 			macb_writel(macb, USRIO,
687 				    MACB_BIT(RMII) | MACB_BIT(CLKEN));
688 		} else {
689 			macb_writel(macb, USRIO, MACB_BIT(CLKEN));
690 		}
691 #else
692 		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII)
693 			macb_writel(macb, USRIO, 0);
694 		else
695 			macb_writel(macb, USRIO, MACB_BIT(MII));
696 #endif
697 #else
698 #ifdef CONFIG_RMII
699 #ifdef CONFIG_AT91FAMILY
700 	macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
701 #else
702 	macb_writel(macb, USRIO, 0);
703 #endif
704 #else
705 #ifdef CONFIG_AT91FAMILY
706 	macb_writel(macb, USRIO, MACB_BIT(CLKEN));
707 #else
708 	macb_writel(macb, USRIO, MACB_BIT(MII));
709 #endif
710 #endif /* CONFIG_RMII */
711 #endif
712 	}
713 
714 #ifdef CONFIG_DM_ETH
715 	if (!macb_phy_init(dev, name))
716 #else
717 	if (!macb_phy_init(macb, name))
718 #endif
719 		return -1;
720 
721 	/* Enable TX and RX */
722 	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));
723 
724 	return 0;
725 }
726 
727 static void _macb_halt(struct macb_device *macb)
728 {
729 	u32 ncr, tsr;
730 
731 	/* Halt the controller and wait for any ongoing transmission to end. */
732 	ncr = macb_readl(macb, NCR);
733 	ncr |= MACB_BIT(THALT);
734 	macb_writel(macb, NCR, ncr);
735 
736 	do {
737 		tsr = macb_readl(macb, TSR);
738 	} while (tsr & MACB_BIT(TGO));
739 
740 	/* Disable TX and RX, and clear statistics */
741 	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
742 }
743 
744 static int _macb_write_hwaddr(struct macb_device *macb, unsigned char *enetaddr)
745 {
746 	u32 hwaddr_bottom;
747 	u16 hwaddr_top;
748 
749 	/* set hardware address */
750 	hwaddr_bottom = enetaddr[0] | enetaddr[1] << 8 |
751 			enetaddr[2] << 16 | enetaddr[3] << 24;
752 	macb_writel(macb, SA1B, hwaddr_bottom);
753 	hwaddr_top = enetaddr[4] | enetaddr[5] << 8;
754 	macb_writel(macb, SA1T, hwaddr_top);
755 	return 0;
756 }
757 
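/*
 * Choose the MDC clock divider for the given peripheral clock so that the
 * MDIO management clock stays within the IEEE 802.3 limit of 2.5 MHz.
 */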
758 static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
759 {
760 	u32 config;
761 #if defined(CONFIG_DM_ETH) && defined(CONFIG_CLK)
762 	unsigned long macb_hz = macb->pclk_rate;
763 #else
764 	unsigned long macb_hz = get_macb_pclk_rate(id);
765 #endif
766 
767 	if (macb_hz < 20000000)
768 		config = MACB_BF(CLK, MACB_CLK_DIV8);
769 	else if (macb_hz < 40000000)
770 		config = MACB_BF(CLK, MACB_CLK_DIV16);
771 	else if (macb_hz < 80000000)
772 		config = MACB_BF(CLK, MACB_CLK_DIV32);
773 	else
774 		config = MACB_BF(CLK, MACB_CLK_DIV64);
775 
776 	return config;
777 }
778 
779 static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
780 {
781 	u32 config;
782 
783 #if defined(CONFIG_DM_ETH) && defined(CONFIG_CLK)
784 	unsigned long macb_hz = macb->pclk_rate;
785 #else
786 	unsigned long macb_hz = get_macb_pclk_rate(id);
787 #endif
788 
789 	if (macb_hz < 20000000)
790 		config = GEM_BF(CLK, GEM_CLK_DIV8);
791 	else if (macb_hz < 40000000)
792 		config = GEM_BF(CLK, GEM_CLK_DIV16);
793 	else if (macb_hz < 80000000)
794 		config = GEM_BF(CLK, GEM_CLK_DIV32);
795 	else if (macb_hz < 120000000)
796 		config = GEM_BF(CLK, GEM_CLK_DIV48);
797 	else if (macb_hz < 160000000)
798 		config = GEM_BF(CLK, GEM_CLK_DIV64);
799 	else
800 		config = GEM_BF(CLK, GEM_CLK_DIV96);
801 
802 	return config;
803 }
804 
805 /*
806  * Get the DMA bus width field of the network configuration register that we
807  * should program. We find the width from decoding the design configuration
808  * register to find the maximum supported data bus width.
809  */
810 static u32 macb_dbw(struct macb_device *macb)
811 {
812 	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
813 	case 4:
814 		return GEM_BF(DBW, GEM_DBW128);
815 	case 2:
816 		return GEM_BF(DBW, GEM_DBW64);
817 	case 1:
818 	default:
819 		return GEM_BF(DBW, GEM_DBW32);
820 	}
821 }
822 
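/*
 * One-time setup: allocate the DMA-coherent RX buffer, descriptor rings and
 * dummy TX descriptor, then program just enough of NCFGR (MDC clock divider
 * and, on GEM, data bus width) to be able to talk to the PHY.
 */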
823 static void _macb_eth_initialize(struct macb_device *macb)
824 {
825 	int id = 0;	/* This is not used by functions we call */
826 	u32 ncfgr;
827 
828 	/* TODO: check that rx_ring_dma and tx_ring_dma are dcache-line aligned */
829 	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
830 					     &macb->rx_buffer_dma);
831 	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
832 					   &macb->rx_ring_dma);
833 	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
834 					   &macb->tx_ring_dma);
835 	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
836 					   &macb->dummy_desc_dma);
837 
838 	/*
839 	 * Do some basic initialization so that we at least can talk
840 	 * to the PHY
841 	 */
842 	if (macb_is_gem(macb)) {
843 		ncfgr = gem_mdc_clk_div(id, macb);
844 		ncfgr |= macb_dbw(macb);
845 	} else {
846 		ncfgr = macb_mdc_clk_div(id, macb);
847 	}
848 
849 	macb_writel(macb, NCFGR, ncfgr);
850 }
851 
852 #ifndef CONFIG_DM_ETH
853 static int macb_send(struct eth_device *netdev, void *packet, int length)
854 {
855 	struct macb_device *macb = to_macb(netdev);
856 
857 	return _macb_send(macb, netdev->name, packet, length);
858 }
859 
860 static int macb_recv(struct eth_device *netdev)
861 {
862 	struct macb_device *macb = to_macb(netdev);
863 	uchar *packet;
864 	int length;
865 
866 	macb->wrapped = false;
867 	for (;;) {
868 		macb->next_rx_tail = macb->rx_tail;
869 		length = _macb_recv(macb, &packet);
870 		if (length >= 0) {
871 			net_process_received_packet(packet, length);
872 			reclaim_rx_buffers(macb, macb->next_rx_tail);
873 		} else {
874 			return length;
875 		}
876 	}
877 }
878 
879 static int macb_init(struct eth_device *netdev, bd_t *bd)
880 {
881 	struct macb_device *macb = to_macb(netdev);
882 
883 	return _macb_init(macb, netdev->name);
884 }
885 
886 static void macb_halt(struct eth_device *netdev)
887 {
888 	struct macb_device *macb = to_macb(netdev);
889 
890 	return _macb_halt(macb);
891 }
892 
893 static int macb_write_hwaddr(struct eth_device *netdev)
894 {
895 	struct macb_device *macb = to_macb(netdev);
896 
897 	return _macb_write_hwaddr(macb, netdev->enetaddr);
898 }
899 
900 int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
901 {
902 	struct macb_device *macb;
903 	struct eth_device *netdev;
904 
905 	macb = malloc(sizeof(struct macb_device));
906 	if (!macb) {
907 		printf("Error: Failed to allocate memory for MACB%d\n", id);
908 		return -1;
909 	}
910 	memset(macb, 0, sizeof(struct macb_device));
911 
912 	netdev = &macb->netdev;
913 
914 	macb->regs = regs;
915 	macb->phy_addr = phy_addr;
916 
917 	if (macb_is_gem(macb))
918 		sprintf(netdev->name, "gmac%d", id);
919 	else
920 		sprintf(netdev->name, "macb%d", id);
921 
922 	netdev->init = macb_init;
923 	netdev->halt = macb_halt;
924 	netdev->send = macb_send;
925 	netdev->recv = macb_recv;
926 	netdev->write_hwaddr = macb_write_hwaddr;
927 
928 	_macb_eth_initialize(macb);
929 
930 	eth_register(netdev);
931 
932 #if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
933 	int retval;
934 	struct mii_dev *mdiodev = mdio_alloc();
935 	if (!mdiodev)
936 		return -ENOMEM;
937 	strncpy(mdiodev->name, netdev->name, MDIO_NAME_LEN);
938 	mdiodev->read = macb_miiphy_read;
939 	mdiodev->write = macb_miiphy_write;
940 
941 	retval = mdio_register(mdiodev);
942 	if (retval < 0)
943 		return retval;
944 	macb->bus = miiphy_get_dev_by_name(netdev->name);
945 #endif
946 	return 0;
947 }
948 #endif /* !CONFIG_DM_ETH */
949 
950 #ifdef CONFIG_DM_ETH
951 
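/* Driver-model wrappers around the common _macb_* helpers above. */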
952 static int macb_start(struct udevice *dev)
953 {
954 	return _macb_init(dev, dev->name);
955 }
956 
957 static int macb_send(struct udevice *dev, void *packet, int length)
958 {
959 	struct macb_device *macb = dev_get_priv(dev);
960 
961 	return _macb_send(macb, dev->name, packet, length);
962 }
963 
964 static int macb_recv(struct udevice *dev, int flags, uchar **packetp)
965 {
966 	struct macb_device *macb = dev_get_priv(dev);
967 
968 	macb->next_rx_tail = macb->rx_tail;
969 	macb->wrapped = false;
970 
971 	return _macb_recv(macb, packetp);
972 }
973 
974 static int macb_free_pkt(struct udevice *dev, uchar *packet, int length)
975 {
976 	struct macb_device *macb = dev_get_priv(dev);
977 
978 	reclaim_rx_buffers(macb, macb->next_rx_tail);
979 
980 	return 0;
981 }
982 
983 static void macb_stop(struct udevice *dev)
984 {
985 	struct macb_device *macb = dev_get_priv(dev);
986 
987 	_macb_halt(macb);
988 }
989 
990 static int macb_write_hwaddr(struct udevice *dev)
991 {
992 	struct eth_pdata *plat = dev_get_platdata(dev);
993 	struct macb_device *macb = dev_get_priv(dev);
994 
995 	return _macb_write_hwaddr(macb, plat->enetaddr);
996 }
997 
998 static const struct eth_ops macb_eth_ops = {
999 	.start	= macb_start,
1000 	.send	= macb_send,
1001 	.recv	= macb_recv,
1002 	.stop	= macb_stop,
1003 	.free_pkt	= macb_free_pkt,
1004 	.write_hwaddr	= macb_write_hwaddr,
1005 };
1006 
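/*
 * With CONFIG_CLK, look up and enable the device's first clock and record
 * its rate for the MDC clock-divider calculation.
 */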
1007 #ifdef CONFIG_CLK
1008 static int macb_enable_clk(struct udevice *dev)
1009 {
1010 	struct macb_device *macb = dev_get_priv(dev);
1011 	struct clk clk;
1012 	ulong clk_rate;
1013 	int ret;
1014 
1015 	ret = clk_get_by_index(dev, 0, &clk);
1016 	if (ret)
1017 		return -EINVAL;
1018 
1019 	ret = clk_enable(&clk);
1020 	if (ret)
1021 		return ret;
1022 
1023 	clk_rate = clk_get_rate(&clk);
1024 	if (!clk_rate)
1025 		return -EINVAL;
1026 
1027 	macb->pclk_rate = clk_rate;
1028 
1029 	return 0;
1030 }
1031 #endif
1032 
1033 static int macb_eth_probe(struct udevice *dev)
1034 {
1035 	struct eth_pdata *pdata = dev_get_platdata(dev);
1036 	struct macb_device *macb = dev_get_priv(dev);
1037 	const char *phy_mode;
1038 
1039 	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
1040 			       NULL);
1041 	if (phy_mode)
1042 		macb->phy_interface = phy_get_interface_by_name(phy_mode);
1043 	if (macb->phy_interface == -1) {
1044 		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
1045 		return -EINVAL;
1046 	}
1047 
1048 	macb->regs = (void *)pdata->iobase;
1049 
1050 #ifdef CONFIG_CLK
1051 	int ret = macb_enable_clk(dev);
1052 	if (ret)
1053 		return ret;
1054 #endif
1055 
1056 	_macb_eth_initialize(macb);
1057 
1058 #if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
1059 	int retval;
1060 	struct mii_dev *mdiodev = mdio_alloc();
1061 	if (!mdiodev)
1062 		return -ENOMEM;
1063 	strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
1064 	mdiodev->read = macb_miiphy_read;
1065 	mdiodev->write = macb_miiphy_write;
1066 
1067 	retval = mdio_register(mdiodev);
1068 	if (retval < 0)
1069 		return retval;
1070 	macb->bus = miiphy_get_dev_by_name(dev->name);
1071 #endif
1072 
1073 	return 0;
1074 }
1075 
1076 static int macb_eth_ofdata_to_platdata(struct udevice *dev)
1077 {
1078 	struct eth_pdata *pdata = dev_get_platdata(dev);
1079 
1080 	pdata->iobase = dev_get_addr(dev);
1081 	return 0;
1082 }
1083 
1084 static const struct udevice_id macb_eth_ids[] = {
1085 	{ .compatible = "cdns,macb" },
1086 	{ }
1087 };
1088 
1089 U_BOOT_DRIVER(eth_macb) = {
1090 	.name	= "eth_macb",
1091 	.id	= UCLASS_ETH,
1092 	.of_match = macb_eth_ids,
1093 	.ofdata_to_platdata = macb_eth_ofdata_to_platdata,
1094 	.probe	= macb_eth_probe,
1095 	.ops	= &macb_eth_ops,
1096 	.priv_auto_alloc_size = sizeof(struct macb_device),
1097 	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
1098 };
1099 #endif
1100 
1101 #endif
1102