/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * Designware ethernet IP driver for u-boot
 */

#include <common.h>
#include <miiphy.h>
#include <malloc.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <asm/io.h>
#include "designware.h"

#if !defined(CONFIG_PHYLIB)
# error "DesignWare Ether MAC requires PHYLIB - missing CONFIG_PHYLIB"
#endif

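/*
 * Read a PHY register via the GMAC's MDIO interface: program the PHY and
 * register address, set MII_BUSY and poll until the controller clears it,
 * then return the contents of the MII data register (-1 on timeout).
 */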
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	}

	return -1;
}

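/*
 * Write a PHY register via the GMAC's MDIO interface: load the MII data
 * register, issue the write command and poll MII_BUSY until the transfer
 * completes (returns 0) or the timeout expires (returns -1).
 */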
static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			u16 val)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int ret = -1, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	}

	return ret;
}

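/*
 * Allocate and register an MDIO bus for this MAC, using the MAC register
 * block as the bus private data for the read/write accessors above.
 */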
static int dw_mdio_init(char *name, struct eth_mac_regs *mac_regs_p)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -1;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	/* Don't use the caller-supplied name as a format string */
	snprintf(bus->name, sizeof(bus->name), "%s", name);

	bus->priv = (void *)mac_regs_p;

	return mdio_register(bus);
}

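/*
 * Set up the Tx descriptor ring: each descriptor points at its buffer and
 * at the next descriptor (chained mode), the last one wraps back to the
 * first, and the whole table is flushed before being handed to the DMA.
 */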
static void tx_descs_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS | \
				DESC_TXSTS_TXCHECKINSCTRL | \
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->tx_mac_descrtable,
			   (unsigned int)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

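/*
 * Set up the Rx descriptor ring: every descriptor gets a receive buffer,
 * is chained to the next one and is handed to the DMA by setting the OWN
 * bit, with the last descriptor wrapping back to the first.
 */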
static void rx_descs_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to the GMAC we need to make sure that the
	 * zeros written there right after the "priv" structure allocation
	 * have been flushed to RAM. Otherwise some of them could be written
	 * back later, while the GMAC is already pushing received data to RAM
	 * via DMA, corrupting the incoming frames.
	 */
	flush_dcache_range((unsigned int)rxbuffs, (unsigned int)rxbuffs +
			   RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) | \
				      DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->rx_mac_descrtable,
			   (unsigned int)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

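/*
 * Program the station MAC address into the MAC address 0 high/low
 * registers, with the first (least significant) bytes in the low register.
 */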
static int dw_write_hwaddr(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;
	u8 *mac_id = &dev->enetaddr[0];

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

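/*
 * Mirror the negotiated PHY link parameters (speed and duplex) into the
 * MAC configuration register and report the result on the console.
 */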
static void dw_adjust_link(struct eth_mac_regs *mac_p,
			   struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
}

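/*
 * Stop the MAC receiver/transmitter and the DMA engine, then shut down
 * the attached PHY.
 */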
static void dw_eth_halt(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

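/*
 * Bring the controller up: soft-reset the DMA, reprogram the MAC address,
 * rebuild both descriptor rings, configure the bus and operation modes,
 * start the PHY and finally enable the MAC receiver and transmitter.
 */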
static int dw_eth_init(struct eth_device *dev, bd_t *bis)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -1;
		}

		mdelay(100);
	}

	/*
	 * The soft reset above clears the HW address registers, so the
	 * address has to be programmed here once again.
	 */
	dw_write_hwaddr(dev);

	rx_descs_init(dev);
	tx_descs_init(dev);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	if (phy_startup(priv->phydev)) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return -1;
	}

	dw_adjust_link(mac_p, priv->phydev);

	if (!priv->phydev->link)
		return -1;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}

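/*
 * Queue one frame on the current Tx descriptor: copy the packet into the
 * descriptor's buffer, flush it to RAM, hand the descriptor to the DMA
 * and kick transmission via the Tx poll demand register.
 */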
static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end = data_start +
		roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -1;
	}

	memcpy(desc_p->dmamac_addr, packet, length);

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl |= (length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK;

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl |= ((length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST | \
			       DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

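/*
 * Poll the current Rx descriptor: if the DMA has released it, pass the
 * received frame to the network stack, give the descriptor back to the
 * DMA and advance to the next one. Returns the frame length, or 0 if
 * nothing was received.
 */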
static int dw_eth_recv(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = 0;
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {
		length = (status & DESC_RXSTS_FRMLENMSK) >> \
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);

		NetReceive(desc_p->dmamac_addr, length);

		/*
		 * Make the current descriptor valid again and go to
		 * the next one
		 */
		desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

		/* Flush the descriptor - only the status field was changed */
		flush_dcache_range(desc_start, desc_end);

		/* Test the wrap-around condition. */
		if (++desc_num >= CONFIG_RX_DESCR_NUM)
			desc_num = 0;
	}

	priv->rx_currdescnum = desc_num;

	return length;
}

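/*
 * Find and connect the PHY on the MDIO bus (optionally restricted to
 * CONFIG_PHY_ADDR), limit it to gigabit features and configure it.
 */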
static int dw_phy_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct phy_device *phydev;
	int mask = 0xffffffff;

#ifdef CONFIG_PHY_ADDR
	mask = 1 << CONFIG_PHY_ADDR;
#endif

	phydev = phy_find_by_mask(priv->bus, mask, priv->interface);
	if (!phydev)
		return -1;

	phy_connect_dev(phydev, dev);

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

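/*
 * Probe entry point called from board code: allocate the device and the
 * DMA-aligned private data, register the ethernet device and its MDIO
 * bus, then set up the PHY.
 */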
int designware_initialize(ulong base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors, which need
	 * strict bus-width alignment, memalign is used to allocate memory.
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%lx", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->dev = dev;
	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(dev);
}
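
/*
 * Typical use (illustrative sketch only, not part of this driver): a board
 * would register the controller from its board_eth_init() hook. The base
 * address and PHY interface below are made-up example values.
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return designware_initialize(0xff700000,
 *					     PHY_INTERFACE_MODE_RGMII);
 *	}
 */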