// SPDX-License-Identifier: GPL-2.0+
/*
 * drivers/net/ravb.c
 * This file is the driver for the Renesas Ethernet AVB.
 *
 * Copyright (C) 2015-2017 Renesas Electronics Corporation
 *
 * Based on the SuperH Ethernet driver.
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <miiphy.h>
#include <malloc.h>
#include <linux/mii.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <asm/gpio.h>

DECLARE_GLOBAL_DATA_PTR;

/* Registers */
#define RAVB_REG_CCC 0x000
#define RAVB_REG_DBAT 0x004
#define RAVB_REG_CSR 0x00C
#define RAVB_REG_APSR 0x08C
#define RAVB_REG_RCR 0x090
#define RAVB_REG_TGC 0x300
#define RAVB_REG_TCCR 0x304
#define RAVB_REG_RIC0 0x360
#define RAVB_REG_RIC1 0x368
#define RAVB_REG_RIC2 0x370
#define RAVB_REG_TIC 0x378
#define RAVB_REG_ECMR 0x500
#define RAVB_REG_RFLR 0x508
#define RAVB_REG_ECSIPR 0x518
#define RAVB_REG_PIR 0x520
#define RAVB_REG_GECMR 0x5b0
#define RAVB_REG_MAHR 0x5c0
#define RAVB_REG_MALR 0x5c8

#define CCC_OPC_CONFIG BIT(0)
#define CCC_OPC_OPERATION BIT(1)
#define CCC_BOC BIT(20)

#define CSR_OPS 0x0000000F
#define CSR_OPS_CONFIG BIT(1)

#define TCCR_TSRQ0 BIT(0)

#define RFLR_RFL_MIN 0x05EE

#define PIR_MDI BIT(3)
#define PIR_MDO BIT(2)
#define PIR_MMD BIT(1)
#define PIR_MDC BIT(0)

#define ECMR_TRCCM BIT(26)
#define ECMR_RZPF BIT(20)
#define ECMR_PFR BIT(18)
#define ECMR_RXF BIT(17)
#define ECMR_RE BIT(6)
#define ECMR_TE BIT(5)
#define ECMR_DM BIT(1)
#define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

/* DMA Descriptors */
#define RAVB_NUM_BASE_DESC 16
#define RAVB_NUM_TX_DESC 8
#define RAVB_NUM_RX_DESC 8

#define RAVB_TX_QUEUE_OFFSET 0
#define RAVB_RX_QUEUE_OFFSET 4

#define RAVB_DESC_DT(n) ((n) << 28)
#define RAVB_DESC_DT_FSINGLE RAVB_DESC_DT(0x7)
#define RAVB_DESC_DT_LINKFIX RAVB_DESC_DT(0x9)
#define RAVB_DESC_DT_EOS RAVB_DESC_DT(0xa)
#define RAVB_DESC_DT_FEMPTY RAVB_DESC_DT(0xc)
#define RAVB_DESC_DT_EEMPTY RAVB_DESC_DT(0x3)
#define RAVB_DESC_DT_MASK RAVB_DESC_DT(0xf)

#define RAVB_DESC_DS(n) (((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK 0xfff

#define RAVB_RX_DESC_MSC_MC BIT(23)
#define RAVB_RX_DESC_MSC_CEEF BIT(22)
#define RAVB_RX_DESC_MSC_CRL BIT(21)
#define RAVB_RX_DESC_MSC_FRE BIT(20)
#define RAVB_RX_DESC_MSC_RTLF BIT(19)
#define RAVB_RX_DESC_MSC_RTSF BIT(18)
#define RAVB_RX_DESC_MSC_RFE BIT(17)
#define RAVB_RX_DESC_MSC_CRC BIT(16)
#define RAVB_RX_DESC_MSC_MASK (0xff << 16)

#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS 1000

struct ravb_desc {
	u32 ctrl;
	u32 dptr;
};

struct ravb_rxdesc {
	struct ravb_desc data;
	struct ravb_desc link;
	u8 __pad[48];
	u8 packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
	struct ravb_desc base_desc[RAVB_NUM_BASE_DESC];
	struct ravb_desc tx_desc[RAVB_NUM_TX_DESC];
	struct ravb_rxdesc rx_desc[RAVB_NUM_RX_DESC];
	u32 rx_desc_idx;
	u32 tx_desc_idx;

	struct phy_device *phydev;
	struct mii_dev *bus;
	void __iomem *iobase;
	struct clk clk;
	struct gpio_desc reset_gpio;
};

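/*
 * Cache maintenance helpers: flush makes CPU writes visible to the DMAC,
 * invalidate (rounded out to ARCH_DMA_MINALIGN) discards stale cache lines
 * before the CPU reads data the DMAC has written.
 */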
static inline void ravb_flush_dcache(u32 addr, u32 len)
{
	flush_dcache_range(addr, addr + len);
}

static inline void ravb_invalidate_dcache(u32 addr, u32 len)
{
	u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);
}

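/*
 * Queue one packet on the next TX descriptor, request transmission on
 * queue 0 via TCCR.TSRQ0 and poll until the DMAC releases the descriptor
 * (type no longer FSINGLE) or the timeout expires.
 */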
static int ravb_send(struct udevice *dev, void *packet, int len)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
	unsigned int start;

	/* Update TX descriptor */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/* Wait until packet is transmitted */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
			break;
		udelay(10);
	}

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
		return -ETIMEDOUT;

	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);
	return 0;
}

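/*
 * Check the current RX descriptor; once the DMAC has filled it, drop frames
 * flagged with receive errors and otherwise hand the payload to the caller.
 */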
static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
	int len;
	u8 *packet;

	/* Check if the rx descriptor is ready */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
		return -EAGAIN;

	/* Check for errors */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
		return -EAGAIN;
	}

	len = desc->data.ctrl & RAVB_DESC_DS_MASK;
	packet = (u8 *)(uintptr_t)desc->data.dptr;
	ravb_invalidate_dcache((uintptr_t)packet, len);

	*packetp = packet;
	return len;
}

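/*
 * Hand the just-processed RX descriptor back to the DMAC (mark it FEMPTY)
 * and advance to the next descriptor in the ring.
 */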
static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

	return 0;
}

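/* Switch the AVB-DMAC into CONFIG mode and wait for CSR.OPS to confirm it. */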
static int ravb_reset(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Set config mode */
	writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

	/* Check the operating mode is changed to the config mode. */
	return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
				 CSR_OPS_CONFIG, true, 100, true);
}

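/*
 * Build the descriptor base address table: every entry is initialised as an
 * end-of-set descriptor, and the table address is programmed into DBAT.
 */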
static void ravb_base_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}

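/*
 * Set up the TX descriptor ring: all entries empty, the last one a LINKFIX
 * descriptor looping back to the start, then link the ring into the base
 * descriptor table.
 */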
static void ravb_tx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the descriptors */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list. */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

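/*
 * Set up the RX descriptor ring: each entry owns a packet buffer and links
 * to the next entry, with the last link looping back to the first.
 */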
static void ravb_rx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
	int i;

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
	}

	/* Mark the end of the descriptors */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the rx descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

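/*
 * Pulse the optional PHY reset GPIO, probe the PHY on the MDIO bus, trim the
 * advertised link modes to what this MAC supports and run the PHY driver's
 * configuration step.
 */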
static int ravb_phy_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct phy_device *phydev;
	int mask = 0xffffffff, reg;

	if (dm_gpio_is_valid(&eth->reset_gpio)) {
		dm_gpio_set_value(&eth->reset_gpio, 1);
		mdelay(20);
		dm_gpio_set_value(&eth->reset_gpio, 0);
		mdelay(1);
	}

	phydev = phy_find_by_mask(eth->bus, mask, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	eth->phydev = phydev;

	phydev->supported &= SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			     SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause |
			     SUPPORTED_Asym_Pause;

	if (pdata->max_speed != 1000) {
		phydev->supported &= ~SUPPORTED_1000baseT_Full;
		reg = phy_read(phydev, -1, MII_CTRL1000);
		reg &= ~(BIT(9) | BIT(8));
		phy_write(phydev, -1, MII_CTRL1000, reg);
	}

	phy_config(phydev);

	return 0;
}

/* Set MAC address */
static int ravb_write_hwaddr(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	unsigned char *mac = pdata->enetaddr;

	writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
	       eth->iobase + RAVB_REG_MAHR);

	writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);

	return 0;
}

/* E-MAC init function */
static int ravb_mac_init(struct ravb_priv *eth)
{
	/* Disable MAC Interrupt */
	writel(0, eth->iobase + RAVB_REG_ECSIPR);

	/* Set receive frame length limit (RFLR) */
	writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);

	return 0;
}

/* AVB-DMAC init function */
static int ravb_dmac_init(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	int ret = 0;

	/* Set CONFIG mode */
	ret = ravb_reset(dev);
	if (ret)
		return ret;

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	/* AVB rx set */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* FIFO size set */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns */
	if (pdata->max_speed == 1000)
		writel(BIT(14), eth->iobase + RAVB_REG_APSR);

	return 0;
}

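/*
 * Bring up the DMAC and E-MAC, start the PHY, then program GECMR and ECMR
 * to match the negotiated speed and duplex.
 */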
static int ravb_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy = eth->phydev;
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
	int ret;

	/* Configure AVB-DMAC register */
	ravb_dmac_init(dev);

	/* Configure E-MAC registers */
	ravb_mac_init(eth);
	ravb_write_hwaddr(dev);

	ret = phy_startup(phy);
	if (ret)
		return ret;

	/* Set the transfer speed */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex)
		mask |= ECMR_DM;

	writel(mask, eth->iobase + RAVB_REG_ECMR);

	phy->drv->writeext(phy, -1, 0x02, 0x08, (0x0f << 5) | 0x19);

	return 0;
}

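/*
 * eth_ops.start: reset into CONFIG mode, rebuild the descriptor rings,
 * configure DMAC, MAC and PHY, then switch the DMAC to OPERATION mode.
 */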
static int ravb_start(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	int ret;

	ret = ravb_reset(dev);
	if (ret)
		return ret;

	ravb_base_desc_init(eth);
	ravb_tx_desc_init(eth);
	ravb_rx_desc_init(eth);

	ret = ravb_config(dev);
	if (ret)
		return ret;

	/* Setting the control will start the AVB-DMAC process. */
	writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);

	return 0;
}

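/* eth_ops.stop: shut down the PHY and put the DMAC back into CONFIG mode. */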
static void ravb_stop(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	phy_shutdown(eth->phydev);
	ravb_reset(dev);
}

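/*
 * Map the register block, look up the clock and the optional PHY reset GPIO
 * (either on the PHY node or on the MAC node), register the bit-banged MDIO
 * bus, then enable the clock and configure the PHY.
 */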
static int ravb_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ofnode_phandle_args phandle_args;
	struct mii_dev *mdiodev;
	void __iomem *iobase;
	int ret;

	iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
	eth->iobase = iobase;

	ret = clk_get_by_index(dev, 0, &eth->clk);
	if (ret < 0)
		goto err_mdio_alloc;

	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phandle_args);
	if (!ret) {
		gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
					   &eth->reset_gpio, GPIOD_IS_OUT);
	}

	if (!dm_gpio_is_valid(&eth->reset_gpio)) {
		gpio_request_by_name(dev, "reset-gpios", 0, &eth->reset_gpio,
				     GPIOD_IS_OUT);
	}

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		goto err_mdio_alloc;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), dev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	eth->bus = miiphy_get_dev_by_name(dev->name);

	/* Bring up PHY */
	ret = clk_enable(&eth->clk);
	if (ret)
		goto err_mdio_register;

	ret = ravb_reset(dev);
	if (ret)
		goto err_mdio_reset;

	ret = ravb_phy_config(dev);
	if (ret)
		goto err_mdio_reset;

	return 0;

err_mdio_reset:
	clk_disable(&eth->clk);
err_mdio_register:
	mdio_free(mdiodev);
err_mdio_alloc:
	unmap_physmem(eth->iobase, MAP_NOCACHE);
	return ret;
}

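/* Undo probe: release the clock, PHY, MDIO bus and reset GPIO, unmap MMIO. */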
static int ravb_remove(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	clk_disable(&eth->clk);

	free(eth->phydev);
	mdio_unregister(eth->bus);
	mdio_free(eth->bus);
	if (dm_gpio_is_valid(&eth->reset_gpio))
		dm_gpio_free(dev, &eth->reset_gpio);
	unmap_physmem(eth->iobase, MAP_NOCACHE);

	return 0;
}

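/*
 * Bit-banged MDIO callbacks: the MDIO bus signals (MDC, MDO, MDI and the
 * MMD direction control) are driven through the PIR register.
 */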
int ravb_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

	return 0;
}

int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct ravb_priv *eth = bus->priv;

	*v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;

	return 0;
}

int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

	return 0;
}

int ravb_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name = "ravb",
		.init = ravb_bb_init,
		.mdio_active = ravb_bb_mdio_active,
		.mdio_tristate = ravb_bb_mdio_tristate,
		.set_mdio = ravb_bb_set_mdio,
		.get_mdio = ravb_bb_get_mdio,
		.set_mdc = ravb_bb_set_mdc,
		.delay = ravb_bb_delay,
	},
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);

static const struct eth_ops ravb_ops = {
	.start = ravb_start,
	.send = ravb_send,
	.recv = ravb_recv,
	.free_pkt = ravb_free_pkt,
	.stop = ravb_stop,
	.write_hwaddr = ravb_write_hwaddr,
};

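/* Parse the DT node: register base address, phy-mode and optional max-speed. */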
int ravb_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;
	int ret = 0;

	pdata->iobase = devfdt_get_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, dev->name);

	return ret;
}

static const struct udevice_id ravb_ids[] = {
	{ .compatible = "renesas,etheravb-r8a7795" },
	{ .compatible = "renesas,etheravb-r8a7796" },
	{ .compatible = "renesas,etheravb-r8a77965" },
	{ .compatible = "renesas,etheravb-r8a77970" },
	{ .compatible = "renesas,etheravb-r8a77990" },
	{ .compatible = "renesas,etheravb-r8a77995" },
	{ .compatible = "renesas,etheravb-rcar-gen3" },
	{ }
};

U_BOOT_DRIVER(eth_ravb) = {
	.name = "ravb",
	.id = UCLASS_ETH,
	.of_match = ravb_ids,
	.ofdata_to_platdata = ravb_ofdata_to_platdata,
	.probe = ravb_probe,
	.remove = ravb_remove,
	.ops = &ravb_ops,
	.priv_auto_alloc_size = sizeof(struct ravb_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};