/*
 * drivers/net/ravb.c
 * This file is the driver for the Renesas Ethernet AVB.
 *
 * Copyright (C) 2015-2017 Renesas Electronics Corporation
 *
 * Based on the SuperH Ethernet driver.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <miiphy.h>
#include <malloc.h>
#include <linux/mii.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <asm/gpio.h>

DECLARE_GLOBAL_DATA_PTR;

/* Registers */
#define RAVB_REG_CCC		0x000
#define RAVB_REG_DBAT		0x004
#define RAVB_REG_CSR		0x00C
#define RAVB_REG_APSR		0x08C
#define RAVB_REG_RCR		0x090
#define RAVB_REG_TGC		0x300
#define RAVB_REG_TCCR		0x304
#define RAVB_REG_RIC0		0x360
#define RAVB_REG_RIC1		0x368
#define RAVB_REG_RIC2		0x370
#define RAVB_REG_TIC		0x378
#define RAVB_REG_ECMR		0x500
#define RAVB_REG_RFLR		0x508
#define RAVB_REG_ECSIPR		0x518
#define RAVB_REG_PIR		0x520
#define RAVB_REG_GECMR		0x5b0
#define RAVB_REG_MAHR		0x5c0
#define RAVB_REG_MALR		0x5c8

#define CCC_OPC_CONFIG		BIT(0)
#define CCC_OPC_OPERATION	BIT(1)
#define CCC_BOC			BIT(20)

#define CSR_OPS			0x0000000F
#define CSR_OPS_CONFIG		BIT(1)

#define TCCR_TSRQ0		BIT(0)

#define RFLR_RFL_MIN		0x05EE

#define PIR_MDI			BIT(3)
#define PIR_MDO			BIT(2)
#define PIR_MMD			BIT(1)
#define PIR_MDC			BIT(0)

#define ECMR_TRCCM		BIT(26)
#define ECMR_RZPF		BIT(20)
#define ECMR_PFR		BIT(18)
#define ECMR_RXF		BIT(17)
#define ECMR_RE			BIT(6)
#define ECMR_TE			BIT(5)
#define ECMR_DM			BIT(1)
#define ECMR_CHG_DM		(ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

/* DMA Descriptors */
#define RAVB_NUM_BASE_DESC	16
#define RAVB_NUM_TX_DESC	8
#define RAVB_NUM_RX_DESC	8

#define RAVB_TX_QUEUE_OFFSET	0
#define RAVB_RX_QUEUE_OFFSET	4

#define RAVB_DESC_DT(n)		((n) << 28)
#define RAVB_DESC_DT_FSINGLE	RAVB_DESC_DT(0x7)
#define RAVB_DESC_DT_LINKFIX	RAVB_DESC_DT(0x9)
#define RAVB_DESC_DT_EOS	RAVB_DESC_DT(0xa)
#define RAVB_DESC_DT_FEMPTY	RAVB_DESC_DT(0xc)
#define RAVB_DESC_DT_EEMPTY	RAVB_DESC_DT(0x3)
#define RAVB_DESC_DT_MASK	RAVB_DESC_DT(0xf)

#define RAVB_DESC_DS(n)		(((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK	0xfff

#define RAVB_RX_DESC_MSC_MC	BIT(23)
#define RAVB_RX_DESC_MSC_CEEF	BIT(22)
#define RAVB_RX_DESC_MSC_CRL	BIT(21)
#define RAVB_RX_DESC_MSC_FRE	BIT(20)
#define RAVB_RX_DESC_MSC_RTLF	BIT(19)
#define RAVB_RX_DESC_MSC_RTSF	BIT(18)
#define RAVB_RX_DESC_MSC_RFE	BIT(17)
#define RAVB_RX_DESC_MSC_CRC	BIT(16)
#define RAVB_RX_DESC_MSC_MASK	(0xff << 16)

#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS	1000

struct ravb_desc {
	u32	ctrl;
	u32	dptr;
};

struct ravb_rxdesc {
	struct ravb_desc	data;
	struct ravb_desc	link;
	u8			__pad[48];
	u8			packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
	struct ravb_desc	base_desc[RAVB_NUM_BASE_DESC];
	struct ravb_desc	tx_desc[RAVB_NUM_TX_DESC];
	struct ravb_rxdesc	rx_desc[RAVB_NUM_RX_DESC];
	u32			rx_desc_idx;
	u32			tx_desc_idx;

	struct phy_device	*phydev;
	struct mii_dev		*bus;
	void __iomem		*iobase;
	struct clk		clk;
	struct gpio_desc	reset_gpio;
};

static inline void ravb_flush_dcache(u32 addr, u32 len)
{
	flush_dcache_range(addr, addr + len);
}

static inline void ravb_invalidate_dcache(u32 addr, u32 len)
{
	u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);
}
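/*
 * Frame handling uses single-buffer descriptors: each transmitted frame is
 * described by one FSINGLE descriptor, and each receive slot is one FEMPTY
 * descriptor whose buffer is the packet[] area embedded in struct
 * ravb_rxdesc. Both rings are closed by a LINKFIX descriptor pointing back
 * to the first entry, and the caches are flushed/invalidated around every
 * descriptor access because the AVB-DMAC reads and writes the descriptors
 * and buffers directly.
 */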
static int ravb_send(struct udevice *dev, void *packet, int len)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
	unsigned int start;

	/* Update TX descriptor */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/* Wait until the packet is transmitted */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
			break;
		udelay(10);
	}

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
		return -ETIMEDOUT;

	/* The last entry of the ring is the LINKFIX descriptor, so skip it */
	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);

	return 0;
}

static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
	int len;
	u8 *packet;

	/* Check if the RX descriptor is ready */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
		return -EAGAIN;

	/* Check for errors */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
		return -EAGAIN;
	}

	len = desc->data.ctrl & RAVB_DESC_DS_MASK;
	packet = (u8 *)(uintptr_t)desc->data.dptr;
	ravb_invalidate_dcache((uintptr_t)packet, len);

	*packetp = packet;
	return len;
}

static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make the current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

	return 0;
}

static int ravb_reset(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Set config mode */
	writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

	/* Wait until the operating mode has changed to config mode */
	return wait_for_bit(dev->name, (void *)eth->iobase + RAVB_REG_CSR,
			    CSR_OPS_CONFIG, true, 100, true);
}
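/*
 * The descriptor base address table registered through DBAT provides one
 * link descriptor per queue. Only two entries are used here: entry
 * RAVB_TX_QUEUE_OFFSET (0) is linked to the TX ring and entry
 * RAVB_RX_QUEUE_OFFSET (4) to the RX ring; the remaining entries are left
 * as EOS descriptors so the AVB-DMAC ignores them.
 */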
static void ravb_base_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}

static void ravb_tx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the ring by linking back to its head */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

static void ravb_rx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
	int i;

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		/* Arm each slot as FEMPTY so the DMAC can store a frame into it */
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_FEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
	}

	/* Mark the end of the ring by linking back to its head */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the RX descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}
static int ravb_phy_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct phy_device *phydev;
	int mask = 0xffffffff, reg;

	/* Pulse the optional PHY reset GPIO before probing the PHY */
	if (dm_gpio_is_valid(&eth->reset_gpio)) {
		dm_gpio_set_value(&eth->reset_gpio, 1);
		mdelay(20);
		dm_gpio_set_value(&eth->reset_gpio, 0);
		mdelay(1);
	}

	phydev = phy_find_by_mask(eth->bus, mask, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	eth->phydev = phydev;

	/* 10BASE-T is not supported by the Ethernet AVB MAC */
	phydev->supported &= ~(SUPPORTED_10baseT_Full
			       | SUPPORTED_10baseT_Half);
	if (pdata->max_speed != 1000) {
		phydev->supported &= ~(SUPPORTED_1000baseT_Half
				       | SUPPORTED_1000baseT_Full);
		reg = phy_read(phydev, -1, MII_CTRL1000);
		reg &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
		phy_write(phydev, -1, MII_CTRL1000, reg);
	}

	phy_config(phydev);

	return 0;
}

/* Set MAC address */
static int ravb_write_hwaddr(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	unsigned char *mac = pdata->enetaddr;

	writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
	       eth->iobase + RAVB_REG_MAHR);

	writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);

	return 0;
}

/* E-MAC init function */
static int ravb_mac_init(struct ravb_priv *eth)
{
	/* Disable MAC interrupts */
	writel(0, eth->iobase + RAVB_REG_ECSIPR);

	/* Set the receive frame length limit */
	writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);

	return 0;
}

/* AVB-DMAC init function */
static int ravb_dmac_init(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	int ret = 0;

	/* Set CONFIG mode */
	ret = ravb_reset(dev);
	if (ret)
		return ret;

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	/* Set AVB RX configuration */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* FIFO size set */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns */
	if (pdata->max_speed == 1000)
		writel(BIT(14), eth->iobase + RAVB_REG_APSR);

	return 0;
}

static int ravb_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy;
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
	int ret;

	/* Configure AVB-DMAC registers */
	ravb_dmac_init(dev);

	/* Configure E-MAC registers */
	ravb_mac_init(eth);
	ravb_write_hwaddr(dev);

	/* Configure the PHY */
	ret = ravb_phy_config(dev);
	if (ret)
		return ret;

	phy = eth->phydev;

	ret = phy_startup(phy);
	if (ret)
		return ret;

	/* Set the transfer speed */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);

	/* Check if full duplex mode is supported by the PHY */
	if (phy->duplex)
		mask |= ECMR_DM;

	writel(mask, eth->iobase + RAVB_REG_ECMR);

	/*
	 * Tune PHY-specific extended registers (clock skew) through the PHY
	 * driver, if it provides the writeext callback.
	 */
	if (phy->drv->writeext)
		phy->drv->writeext(phy, -1, 0x02, 0x08, (0x0f << 5) | 0x19);

	return 0;
}
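/*
 * Bring-up sequence: enable the module clock, switch the AVB-DMAC into
 * config mode, build the descriptor rings, program the DMAC, E-MAC and PHY,
 * and finally switch the AVB-DMAC into operation mode so DMA can start.
 */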
int ravb_start(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	int ret;

	ret = clk_enable(&eth->clk);
	if (ret)
		return ret;

	ret = ravb_reset(dev);
	if (ret)
		goto err;

	ravb_base_desc_init(eth);
	ravb_tx_desc_init(eth);
	ravb_rx_desc_init(eth);

	ret = ravb_config(dev);
	if (ret)
		goto err;

	/* Setting the operation mode starts the AVB-DMAC process */
	writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);

	return 0;

err:
	clk_disable(&eth->clk);
	return ret;
}

static void ravb_stop(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	ravb_reset(dev);
	clk_disable(&eth->clk);
}

static int ravb_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	struct mii_dev *mdiodev;
	void __iomem *iobase;
	int ret;

	iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
	eth->iobase = iobase;

	ret = clk_get_by_index(dev, 0, &eth->clk);
	if (ret < 0)
		goto err_mdio_alloc;

	gpio_request_by_name(dev, "reset-gpios", 0, &eth->reset_gpio,
			     GPIOD_IS_OUT);

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		goto err_mdio_alloc;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), "%s", dev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	eth->bus = miiphy_get_dev_by_name(dev->name);

	return 0;

err_mdio_register:
	mdio_free(mdiodev);
err_mdio_alloc:
	unmap_physmem(eth->iobase, MAP_NOCACHE);
	return ret;
}

static int ravb_remove(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	free(eth->phydev);
	mdio_unregister(eth->bus);
	mdio_free(eth->bus);
	if (dm_gpio_is_valid(&eth->reset_gpio))
		dm_gpio_free(dev, &eth->reset_gpio);
	unmap_physmem(eth->iobase, MAP_NOCACHE);

	return 0;
}
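/*
 * The MDIO bus is bit-banged through the PIR register: PIR_MMD selects the
 * MDIO pin direction (output when set), PIR_MDO drives the data line,
 * PIR_MDI reads it back and PIR_MDC toggles the management clock. The
 * generic bb_miiphy framework builds the MDIO framing on top of these
 * callbacks. Only a single bus slot (bb_miiphy_buses[0]) is provided, so
 * one AVB instance at a time is served through this MDIO path.
 */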
int ravb_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

	return 0;
}

int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct ravb_priv *eth = bus->priv;

	*v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;

	return 0;
}

int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

	return 0;
}

int ravb_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "ravb",
		.init		= ravb_bb_init,
		.mdio_active	= ravb_bb_mdio_active,
		.mdio_tristate	= ravb_bb_mdio_tristate,
		.set_mdio	= ravb_bb_set_mdio,
		.get_mdio	= ravb_bb_get_mdio,
		.set_mdc	= ravb_bb_set_mdc,
		.delay		= ravb_bb_delay,
	},
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);

static const struct eth_ops ravb_ops = {
	.start		= ravb_start,
	.send		= ravb_send,
	.recv		= ravb_recv,
	.free_pkt	= ravb_free_pkt,
	.stop		= ravb_stop,
	.write_hwaddr	= ravb_write_hwaddr,
};

int ravb_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;
	int ret = 0;

	pdata->iobase = devfdt_get_addr(dev);

	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, "%s", dev->name);

	return ret;
}

static const struct udevice_id ravb_ids[] = {
	{ .compatible = "renesas,etheravb-r8a7795" },
	{ .compatible = "renesas,etheravb-r8a7796" },
	{ .compatible = "renesas,etheravb-r8a77970" },
	{ .compatible = "renesas,etheravb-rcar-gen3" },
	{ }
};

U_BOOT_DRIVER(eth_ravb) = {
	.name			= "ravb",
	.id			= UCLASS_ETH,
	.of_match		= ravb_ids,
	.ofdata_to_platdata	= ravb_ofdata_to_platdata,
	.probe			= ravb_probe,
	.remove			= ravb_remove,
	.ops			= &ravb_ops,
	.priv_auto_alloc_size	= sizeof(struct ravb_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags			= DM_FLAG_ALLOC_PRIV_DMA,
};
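/*
 * Illustrative device tree fragment for one board integration of this
 * driver. Node name, addresses, clock specifier and PHY wiring below are
 * hypothetical; consult the SoC/board .dts and the etheravb binding for the
 * real values. Only properties this driver actually parses are shown:
 * reg, clocks (index 0), phy-mode, max-speed and reset-gpios.
 *
 *	avb: ethernet@e6800000 {
 *		compatible = "renesas,etheravb-r8a7795",
 *			     "renesas,etheravb-rcar-gen3";
 *		reg = <0 0xe6800000 0 0x800>;
 *		clocks = <&cpg CPG_MOD 812>;
 *		phy-mode = "rgmii-id";
 *		max-speed = <1000>;
 *		reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
 *	};
 */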