/*
 * Copyright 2014-2017 Broadcom.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#ifdef BCM_GMAC_DEBUG
#ifndef DEBUG
#define DEBUG
#endif
#endif

#include <config.h>
#include <common.h>
#include <malloc.h>
#include <net.h>
#include <asm/io.h>
#include <phy.h>

#include "bcm-sf2-eth.h"
#include "bcm-sf2-eth-gmac.h"

#define SPINWAIT(exp, us) { \
	uint countdown = (us) + 9; \
	while ((exp) && (countdown >= 10)) { \
		udelay(10); \
		countdown -= 10; \
	} \
}
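
/*
 * Usage sketch (illustrative only; SOME_STATUS_REG and BUSY_BIT are
 * placeholders, not registers defined in this driver): poll until a
 * busy bit clears, giving up after roughly 10 ms.
 *
 *	uint32_t status;
 *
 *	SPINWAIT(((status = readl(SOME_STATUS_REG)) & BUSY_BIT), 10000);
 *
 * SPINWAIT re-evaluates `exp` every 10 us until it turns false or the
 * `us` budget is spent.
 */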

#define RX_BUF_SIZE_ALIGNED	ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define TX_BUF_SIZE_ALIGNED	ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define DESCP_SIZE_ALIGNED	ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)

static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);

/* DMA Descriptor */
typedef struct {
	/* misc control bits */
	uint32_t	ctrl1;
	/* buffer count and address extension */
	uint32_t	ctrl2;
	/* memory address of the data buffer, bits 31:0 */
	uint32_t	addrlow;
	/* memory address of the data buffer, bits 63:32 */
	uint32_t	addrhigh;
} dma64dd_t;
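
/*
 * Each TX/RX ring is a contiguous array of these 16-byte descriptors.
 * The init code below marks the last entry with D64_CTRL1_EOT so the
 * DMA engine wraps back to the first descriptor at the end of the
 * table.
 */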

uint32_t g_dmactrlflags;

static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
	debug("%s enter\n", __func__);

	g_dmactrlflags &= ~mask;
	g_dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (g_dmactrlflags & DMA_CTRL_PEN) {
		uint32_t control;

		control = readl(GMAC0_DMA_TX_CTRL_ADDR);
		writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
		if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
			/*
			 * We *can* disable it, therefore it is supported;
			 * restore control register
			 */
			writel(control, GMAC0_DMA_TX_CTRL_ADDR);
		} else {
			/* Not supported, don't allow it to be enabled */
			g_dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	return g_dmactrlflags;
}
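
/*
 * Example (sketch): dma_init() below uses this helper twice. A call
 * such as
 *
 *	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, DMA_CTRL_ROC);
 *
 * clears both flags from g_dmactrlflags, then sets Rx Overflow
 * Continue while leaving parity disabled.
 */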

static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v &= ~(value);
	writel(v, reg);
}

static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v |= value;
	writel(v, reg);
}

#ifdef BCM_GMAC_DEBUG
static void dma_tx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("TX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_TX_CTRL_ADDR),
	       readl(GMAC0_DMA_TX_PTR_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_TX_STATUS0_ADDR),
	       readl(GMAC0_DMA_TX_STATUS1_ADDR));

	printf("TX Descriptors:\n");
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("TX Buffers:\n");
	for (i = 0; i < TX_BUF_NUM; i++) {
		bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}

static void dma_rx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("RX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_RX_CTRL_ADDR),
	       readl(GMAC0_DMA_RX_PTR_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_RX_STATUS0_ADDR),
	       readl(GMAC0_DMA_RX_STATUS1_ADDR));

	printf("RX Descriptors:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("RX Buffers:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}
#endif

static int dma_tx_init(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;
	uint32_t ctrl;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->tx_desc_aligned), 0,
	       TX_BUF_NUM * DESCP_SIZE_ALIGNED);
	memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);

	/* Initialize TX DMA descriptor table */
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
		/* clear buffer memory */
		memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);

		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (TX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = 0;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;
	}

	/* flush descriptor and buffer */
	descp = dma->tx_desc_aligned;
	bufp = dma->tx_buf;
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp +
			   TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* initialize the DMA channel */
	writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
	       GMAC0_DMA_TX_PTR_ADDR);

	return 0;
}

static int dma_rx_init(struct eth_dma *dma)
{
	uint32_t last_desc;
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	uint32_t ctrl;
	int i;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->rx_desc_aligned), 0,
	       RX_BUF_NUM * DESCP_SIZE_ALIGNED);
	/* clear buffer memory */
	memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);

	/* Initialize RX DMA descriptor table */
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (RX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;

		last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
				+ sizeof(dma64dd_t);
	}

	descp = dma->rx_desc_aligned;
	bufp = dma->rx_buf;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)(bufp),
			   (unsigned long)bufp +
			   RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	/* initialize the DMA channel */
	writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);

	return 0;
}
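
/*
 * Note on the value written to GMAC0_DMA_RX_PTR_ADDR above: last_desc
 * ends up pointing one descriptor past the final ring entry, which
 * tells the engine that every RX descriptor is available for incoming
 * frames.
 */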

static int dma_init(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	/*
	 * Default flags: For backwards compatibility both
	 * Rx Overflow Continue and Parity are DISABLED.
	 */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	debug("rx burst len 0x%x\n",
	      (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
	      >> D64_RC_BL_SHIFT);
	debug("tx burst len 0x%x\n",
	      (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
	      >> D64_XC_BL_SHIFT);

	dma_tx_init(dma);
	dma_rx_init(dma);

	/* From end of chip_init() */
	/* enable the overflow continue feature and disable parity */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
		      DMA_CTRL_ROC /* value */);

	return 0;
}

static int dma_deinit(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	gmac_disable_dma(dma, MAC_DMA_RX);
	gmac_disable_dma(dma, MAC_DMA_TX);

	free(dma->tx_buf);
	dma->tx_buf = NULL;
	free(dma->tx_desc_aligned);
	dma->tx_desc_aligned = NULL;

	free(dma->rx_buf);
	dma->rx_buf = NULL;
	free(dma->rx_desc_aligned);
	dma->rx_desc_aligned = NULL;

	return 0;
}

int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
{
	uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;

	/* kick off the dma */
	size_t len = length;
	int txout = dma->cur_tx_index;
	uint32_t flags;
	dma64dd_t *descp = NULL;
	uint32_t ctrl;
	uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
			      sizeof(dma64dd_t)) & D64_XP_LD_MASK;
	size_t buflen;

	debug("%s enter\n", __func__);

	/* load the buffer */
	memcpy(bufp, packet, len);

	/* Add 4 bytes for Ethernet FCS/CRC */
	buflen = len + 4;

	ctrl = (buflen & D64_CTRL2_BC_MASK);

	/*
	 * The transmit is always a single frame, so set SOF and EOF;
	 * also request an interrupt on completion.
	 */
	flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;

	/* txout points to the descriptor to use */
	/* if last descriptor then set EOT */
	if (txout == (TX_BUF_NUM - 1)) {
		flags |= D64_CTRL1_EOT;
		last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
	}

	/* write the descriptor */
	descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	descp->ctrl1 = flags;
	descp->ctrl2 = ctrl;

	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)dma->tx_desc_aligned,
			   (unsigned long)dma->tx_desc_aligned +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);

	/* now update the dma last descriptor */
	writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);

	/* tx dma should be enabled so packet should go out */

	/* update txout */
	dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);

	return 0;
}

bool gmac_check_tx_done(struct eth_dma *dma)
{
	/* wait for tx to complete */
	uint32_t intstatus;
	bool xfrdone = false;

	debug("%s enter\n", __func__);

	intstatus = readl(GMAC0_INT_STATUS_ADDR);

	debug("int(0x%x)\n", intstatus);
	if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
		xfrdone = true;
		/* clear the int bits */
		intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
		writel(intstatus, GMAC0_INT_STATUS_ADDR);
	} else {
		debug("Tx int(0x%x)\n", intstatus);
	}

	return xfrdone;
}
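
/*
 * A minimal send sketch using the two functions above, where `pkt`
 * and `len` are the caller's frame and the ~10 ms poll budget is
 * illustrative, not a value taken from this driver:
 *
 *	int i;
 *
 *	gmac_tx_packet(dma, pkt, len);
 *	for (i = 0; i < 1000; i++) {
 *		if (gmac_check_tx_done(dma))
 *			break;
 *		udelay(10);
 *	}
 */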

int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
	void *bufp, *datap;
	size_t rcvlen = 0, buflen = 0;
	uint32_t stat0 = 0, stat1 = 0;
	uint32_t control, offset;
	uint8_t statbuf[HWRXOFF*2];

	int index, curr, active;
	dma64dd_t *descp = NULL;

	/* udelay(50); */

	/*
	 * This function checks whether a packet has been received.
	 * If so, the frame is copied to the caller's buffer, the
	 * current descriptor index is advanced to the next descriptor,
	 * the buffer is added back onto the ring, and the lastdscr
	 * register is updated to point at this descriptor.
	 */
	index = dma->cur_rx_index;
	offset = (uint32_t)(dma->rx_desc_aligned);
	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
	curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
	active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

	/* check if any frame */
	if (index == curr)
		return -1;

	debug("received packet\n");
	debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
	/* remove warning */
	if (index == active)
		;

	/* get the packet pointer that corresponds to the rx descriptor */
	bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

	descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

	buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

	debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
	      (uint32_t)bufp, index, buflen, stat0, stat1);

	dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

	/* get buffer offset */
	control = readl(GMAC0_DMA_RX_CTRL_ADDR);
	offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
	rcvlen = *(uint16_t *)bufp;

	debug("Received %d bytes\n", rcvlen);
	/* copy status into temp buf then copy data from rx buffer */
	memcpy(statbuf, bufp, offset);
	datap = (void *)((uint32_t)bufp + offset);
	memcpy(buf, datap, rcvlen);

	/* update descriptor that is being added back on ring */
	descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	/* flush descriptor */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	/* set the lastdscr for the rx ring */
	writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

	return (int)rcvlen;
}
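
/*
 * RX buffer layout, as consumed above: the hardware prepends a status
 * header of `offset` bytes (HWRXOFF by default, programmed into the
 * receive-offset field in gmac_enable_dma()) to each frame, and the
 * first 16-bit word of that header holds the received frame length.
 */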

static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
	int status;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		/* address PR8249/PR7577 issue */
		/* suspend tx DMA first */
		writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED) &&
			 (status != D64_XS0_XS_IDLE) &&
			 (status != D64_XS0_XS_STOPPED), 10000);

		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED), 10000);

		/* wait for the last transaction to complete */
		udelay(2);

		status = (status == D64_XS0_XS_DISABLED);
	} else {
		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_RX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
				     D64_RS0_RS_MASK)) !=
			  D64_RS0_RS_DISABLED), 10000);

		status = (status == D64_RS0_RS_DISABLED);
	}

	return status;
}

static int gmac_enable_dma(struct eth_dma *dma, int dir)
{
	uint32_t control;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		dma->cur_tx_index = 0;

		/*
		 * Bits 20:18 (burstLen) of the control register can
		 * always be written, but take effect only on DMA
		 * revisions that implement them; older versions keep
		 * those bits at 0.
		 */
		control = readl(GMAC0_DMA_TX_CTRL_ADDR);

		control |= D64_XC_XE;
		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_XC_PD;

		writel(control, GMAC0_DMA_TX_CTRL_ADDR);

		/* initialize the DMA channel */
		writel((uint32_t)(dma->tx_desc_aligned),
		       GMAC0_DMA_TX_ADDR_LOW_ADDR);
		writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
	} else {
		dma->cur_rx_index = 0;

		control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
			   D64_RC_AE) | D64_RC_RE;

		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (g_dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		/*
		 * Bits 20:18 (burstLen) of the control register can
		 * always be written, but take effect only on DMA
		 * revisions that implement them; older versions keep
		 * those bits at 0.
		 */
		control &= ~D64_RC_BL_MASK;
		/* Keep default Rx burstlen */
		control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
		control |= HWRXOFF << D64_RC_RO_SHIFT;

		writel(control, GMAC0_DMA_RX_CTRL_ADDR);

		/*
		 * the rx descriptor ring should have
		 * the addresses set properly;
		 * set the lastdscr for the rx ring
		 */
		writel(((uint32_t)(dma->rx_desc_aligned) +
			(RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) &
		       D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
	}

	return 0;
}

bool gmac_mii_busywait(unsigned int timeout)
{
	uint32_t tmp = 0;

	while (timeout > 10) {
		tmp = readl(GMAC_MII_CTRL_ADDR);
		if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
			udelay(10);
			timeout -= 10;
		} else {
			break;
		}
	}
	return tmp & (1 << GMAC_MII_BUSY_SHIFT);
}
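
/*
 * gmac_mii_busywait() returns true when the MII interface is still
 * busy after the timeout (given in microseconds, polled in 10 us
 * steps), so callers treat a true return as an error.
 */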

int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
{
	uint32_t tmp = 0;
	u16 value = 0;

	/* Busy wait timeout is 1 ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Read operation */
	tmp = GMAC_MII_DATA_READ_CMD;
	tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
		(reg << GMAC_MII_PHY_REG_SHIFT);
	debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
	debug("MII read data 0x%x\n", value);
	return value;
}

int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
		      u16 value)
{
	uint32_t tmp = 0;

	/* Busy wait timeout is 1 ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Write operation */
	tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
	tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
		(reg << GMAC_MII_PHY_REG_SHIFT));
	debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
	      tmp, phyaddr, reg, value);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	return 0;
}
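
/*
 * Example (sketch): reading a PHY's ID registers through the
 * accessors above. The PHY address 0 and the MII_PHYSID* register
 * numbers from <linux/mii.h> are illustrative:
 *
 *	int id1 = gmac_miiphy_read(bus, 0, MDIO_DEVAD_NONE, MII_PHYSID1);
 *	int id2 = gmac_miiphy_read(bus, 0, MDIO_DEVAD_NONE, MII_PHYSID2);
 */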

void gmac_init_reset(void)
{
	debug("%s enter\n", __func__);

	/* set command config reg CC_SR */
	reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}

void gmac_clear_reset(void)
{
	debug("%s enter\n", __func__);

	/* clear command config reg CC_SR */
	reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}

static void gmac_enable_local(bool en)
{
	uint32_t cmdcfg;

	debug("%s enter\n", __func__);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);

	/* put mac in reset */
	gmac_init_reset();

	cmdcfg |= CC_SR;

	/* first deassert rx_ena and tx_ena while in reset */
	cmdcfg &= ~(CC_RE | CC_TE);
	/* write command config reg */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	/* bring mac out of reset */
	gmac_clear_reset();

	/* if not enabling, exit now */
	if (!en)
		return;

	/* enable the mac transmit and receive paths now */
	udelay(2);
	cmdcfg &= ~CC_SR;
	cmdcfg |= (CC_RE | CC_TE);

	/* assert rx_ena and tx_ena when out of reset to enable the mac */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
}

int gmac_enable(void)
{
	gmac_enable_local(1);

	/* clear interrupts */
	writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
	return 0;
}

int gmac_disable(void)
{
	gmac_enable_local(0);
	return 0;
}

int gmac_set_speed(int speed, int duplex)
{
	uint32_t cmdcfg;
	uint32_t hd_ena;
	uint32_t speed_cfg;

	hd_ena = duplex ? 0 : CC_HD;
	if (speed == 1000) {
		speed_cfg = 2;
	} else if (speed == 100) {
		speed_cfg = 1;
	} else if (speed == 10) {
		speed_cfg = 0;
	} else {
		pr_err("%s: Invalid GMAC speed (%d)!\n", __func__, speed);
		return -1;
	}

	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);

	printf("Change GMAC speed to %d Mbps\n", speed);
	debug("GMAC speed cfg 0x%x\n", cmdcfg);
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	return 0;
}
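
/*
 * For example, gmac_set_speed(1000, 1) programs the MAC for 1 Gbps
 * full duplex, matching the default set up in gmac_mac_init().
 */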

int gmac_set_mac_addr(unsigned char *mac)
{
	/* set our local address */
	debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	      mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
	/* read only 16 bits here; a 32-bit read would overrun mac[] */
	writew(htons(*(uint16_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);

	return 0;
}
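
/*
 * The first four bytes of the MAC address go to the MSB register and
 * the last two to the LSB register; htonl()/htons() put them in
 * network byte order, which is how the hardware expects them.
 */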

int gmac_mac_init(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);

	uint32_t tmp;
	uint32_t cmdcfg;
	int chipid;

	debug("%s enter\n", __func__);

	/* Always use GMAC0 */
	printf("Using GMAC%d\n", 0);

	/* Reset AMAC0 core */
	writel(0, AMAC0_IDM_RESET_ADDR);
	tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
	/* Set clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
	tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
	/* Set Tx clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
	writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

	/* reset gmac */
	/*
	 * AMAC was just reset above, so this should not be needed:
	 * set eth_data into loopback mode to ensure no rx traffic
	 * gmac_loopback(eth_data, TRUE);
	 * ET_TRACE(("%s gmac loopback\n", __func__));
	 * udelay(1);
	 */

	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
		    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
		    CC_PAD_EN | CC_PF);
	cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
	/* put mac in reset */
	gmac_init_reset();
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	gmac_clear_reset();

	/* enable clear MIB on read */
	reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
	/* PHY: set smi_master to drive mdc_clk */
	reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

	/* clear persistent sw intstatus */
	writel(0, GMAC0_INT_STATUS_ADDR);

	if (dma_init(dma) < 0) {
		pr_err("%s: GMAC dma_init failed\n", __func__);
		goto err_exit;
	}

	chipid = CHIPID;
	printf("%s: Chip ID: 0x%x\n", __func__, chipid);

	/* set switch bypass mode */
	tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
	tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

	/* Switch mode */
	/* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

	writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

	tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
	tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
	writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

	/* Set MDIO to internal GPHY */
	tmp = readl(GMAC_MII_CTRL_ADDR);
	/* Select internal MDC/MDIO bus */
	tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
	/* select MDC/MDIO connecting to on-chip internal PHYs */
	tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
	/*
	 * program bits [6:0] (MDCDIV) with the divisor that sets the
	 * MDC clock frequency: 66 MHz / 0x1A ~= 2.5 MHz
	 */
	tmp |= 0x1A;

	writel(tmp, GMAC_MII_CTRL_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
		goto err_exit;
	}

	/* Configure GMAC0 */
	/* enable one rx interrupt per received frame */
	writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	/* enable 802.3x tx flow control (honor received PAUSE frames) */
	cmdcfg &= ~CC_RPI;
	/* enable promiscuous mode */
	cmdcfg |= CC_PROM;
	/* Disable loopback mode */
	cmdcfg &= ~CC_ML;
	/* set the speed */
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	/* Set to 1 Gbps and full duplex by default */
	cmdcfg |= (2 << CC_ES_SHIFT);

	/* put mac in reset */
	gmac_init_reset();
	/* write register */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	/* bring mac out of reset */
	gmac_clear_reset();

	/* set max frame lengths; account for possible vlan tag */
	writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

	return 0;

err_exit:
	dma_deinit(dma);
	return -1;
}

int gmac_add(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);
	void *tmp;

	/*
	 * Descriptors have to be 16-byte aligned, but for the dcache
	 * flush they must also be aligned to ARCH_DMA_MINALIGN.
	 */
	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate TX desc Buffer\n", __func__);
		return -1;
	}

	dma->tx_desc_aligned = (void *)tmp;
	debug("TX Descriptor Buffer: %p; length: 0x%x\n",
	      dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);

	tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate TX Data Buffer\n", __func__);
		free(dma->tx_desc_aligned);
		return -1;
	}
	dma->tx_buf = (uint8_t *)tmp;
	debug("TX Data Buffer: %p; length: 0x%x\n",
	      dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* Descriptors have to be 16-byte aligned */
	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate RX Descriptor\n", __func__);
		free(dma->tx_desc_aligned);
		free(dma->tx_buf);
		return -1;
	}
	dma->rx_desc_aligned = (void *)tmp;
	debug("RX Descriptor Buffer: %p, length: 0x%x\n",
	      dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate RX Data Buffer\n", __func__);
		free(dma->tx_desc_aligned);
		free(dma->tx_buf);
		free(dma->rx_desc_aligned);
		return -1;
	}
	dma->rx_buf = (uint8_t *)tmp;
	debug("RX Data Buffer: %p; length: 0x%x\n",
	      dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	g_dmactrlflags = 0;

	eth->phy_interface = PHY_INTERFACE_MODE_GMII;

	dma->tx_packet = gmac_tx_packet;
	dma->check_tx_done = gmac_check_tx_done;

	dma->check_rx_done = gmac_check_rx_done;

	dma->enable_dma = gmac_enable_dma;
	dma->disable_dma = gmac_disable_dma;

	eth->miiphy_read = gmac_miiphy_read;
	eth->miiphy_write = gmac_miiphy_write;

	eth->mac_init = gmac_mac_init;
	eth->disable_mac = gmac_disable;
	eth->enable_mac = gmac_enable;
	eth->set_mac_addr = gmac_set_mac_addr;
	eth->set_mac_speed = gmac_set_speed;

	return 0;
}