/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013 Renesas Electronics Corporation
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <asm/errno.h>
#include <asm/io.h>

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
		flush_dcache_range((u32)addr, (u32)(addr + len - 1))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)		\
	{	\
		u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
		u32 start, end;	\
		\
		start = (u32)addr;	\
		end = start + len;	\
		start &= ~(line_size - 1);	\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
		\
		invalidate_dcache_range(start, end);	\
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000

int sh_eth_send(struct eth_device *dev, void *packet, int len)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* The packet must be aligned to a 4 byte boundary */
	if ((int)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
		       __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(eth, EDTRR) & EDTRR_TRNS))
		sh_eth_write(eth, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

int sh_eth_recv(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, len = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	uchar *packet;

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (!(port_info->rx_desc_cur->rd0 & RD_RACT)) {
		/* Check for errors */
		if (!(port_info->rx_desc_cur->rd0 & RD_RFE)) {
			len = port_info->rx_desc_cur->rd1 & 0xffff;
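			/* Hand the received frame to the network stack */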
			packet = (uchar *)
				ADDR_TO_P2(port_info->rx_desc_cur->rd2);
			invalidate_cache(packet, len);
			NetReceive(packet, len);
		}

		/* Make current descriptor available again */
		if (port_info->rx_desc_cur->rd0 & RD_RDLE)
			port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
		else
			port_info->rx_desc_cur->rd0 = RD_RACT;
		/* Point to the next descriptor */
		port_info->rx_desc_cur++;
		if (port_info->rx_desc_cur >=
		    port_info->rx_desc_base + NUM_RX_DESC)
			port_info->rx_desc_cur = port_info->rx_desc_base;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(eth, EDRRR) & EDRRR_R))
		sh_eth_write(eth, EDRRR_R, EDRRR);

	return len;
}

static int sh_eth_reset(struct sh_eth_dev *eth)
{
#if defined(SH_ETH_TYPE_GETHER)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(eth, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(eth, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(eth, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(eth, sh_eth_read(eth, EDMR) | EDMR_SRST, EDMR);
	udelay(3000);
	sh_eth_write(eth, sh_eth_read(eth, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	u32 tmp_addr;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be TX_DESC_SIZE bytes aligned
	 */
	port_info->tx_desc_malloc = malloc(NUM_TX_DESC *
					   sizeof(struct tx_desc_s) +
					   TX_DESC_SIZE - 1);
	if (!port_info->tx_desc_malloc) {
		printf(SHETHER_NAME ": malloc failed\n");
		ret = -ENOMEM;
		goto err;
	}

	tmp_addr = (u32) (((int)port_info->tx_desc_malloc + TX_DESC_SIZE - 1) &
			  ~(TX_DESC_SIZE - 1));
	flush_cache_wback(tmp_addr, NUM_TX_DESC * sizeof(struct tx_desc_s));
	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base = (struct tx_desc_s *)ADDR_TO_P2(tmp_addr);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(eth, 0x01, TDFFR);	/* Last descriptor bit */
#endif

err:
	return ret;
}

static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct rx_desc_s *cur_rx_desc;
	u32 tmp_addr;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be RX_DESC_SIZE bytes aligned
	 */
	port_info->rx_desc_malloc = malloc(NUM_RX_DESC *
					   sizeof(struct rx_desc_s) +
					   RX_DESC_SIZE - 1);
	if (!port_info->rx_desc_malloc) {
		printf(SHETHER_NAME ": malloc failed\n");
		ret = -ENOMEM;
		goto err;
	}

	tmp_addr = (u32) (((int)port_info->rx_desc_malloc + RX_DESC_SIZE - 1) &
			  ~(RX_DESC_SIZE - 1));
	flush_cache_wback(tmp_addr, NUM_RX_DESC * sizeof(struct rx_desc_s));
	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base = (struct rx_desc_s *)ADDR_TO_P2(tmp_addr);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be 32 bytes aligned and in
	 * P2 area
	 */
	port_info->rx_buf_malloc = malloc(
		NUM_RX_DESC * MAX_BUF_SIZE + RX_BUF_ALIGNE_SIZE - 1);
	if (!port_info->rx_buf_malloc) {
		printf(SHETHER_NAME ": malloc failed\n");
		ret = -ENOMEM;
		goto err_buf_malloc;
	}

	tmp_addr = (u32)(((int)port_info->rx_buf_malloc
			  + (RX_BUF_ALIGNE_SIZE - 1)) &
			 ~(RX_BUF_ALIGNE_SIZE - 1));
	port_info->rx_buf_base = (u8 *)ADDR_TO_P2(tmp_addr);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32) ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(eth, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_malloc:
	free(port_info->rx_desc_malloc);
	port_info->rx_desc_malloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	int port = eth->port;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (port_info->tx_desc_malloc) {
		free(port_info->tx_desc_malloc);
		port_info->tx_desc_malloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	int port = eth->port;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (port_info->rx_desc_malloc) {
		free(port_info->rx_desc_malloc);
		port_info->rx_desc_malloc = NULL;
	}

	if (port_info->rx_buf_malloc) {
		free(port_info->rx_buf_malloc);
		port_info->rx_buf_malloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;

err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

static int sh_eth_phy_config(struct sh_eth_dev *eth)
{
	int port = eth->port, ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phydev;

	phydev = phy_connect(
			miiphy_get_dev_by_name(dev->name),
			port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

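/*
 * One-time controller setup: program the E-DMAC and E-MAC registers,
 * set the MAC address, bring up the PHY and apply the negotiated
 * speed and duplex settings.
 */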
static int sh_eth_config(struct sh_eth_dev *eth, bd_t *bd)
{
	int port = eth->port, ret = 0;
	u32 val;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phy;

	/* Configure e-dmac registers */
	sh_eth_write(eth, (sh_eth_read(eth, EDMR) & ~EMDR_DESC_R) |
			(EMDR_DESC | EDMR_EL), EDMR);

	sh_eth_write(eth, 0, EESIPR);
	sh_eth_write(eth, 0, TRSCER);
	sh_eth_write(eth, 0, TFTR);
	sh_eth_write(eth, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(eth, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(eth, 0, RPADIR);
#endif
	sh_eth_write(eth, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(eth, 0, ECSIPR);

	/* Set Mac address */
	val = dev->enetaddr[0] << 24 | dev->enetaddr[1] << 16 |
	      dev->enetaddr[2] << 8 | dev->enetaddr[3];
	sh_eth_write(eth, val, MAHR);

	val = dev->enetaddr[4] << 8 | dev->enetaddr[5];
	sh_eth_write(eth, val, MALR);

	sh_eth_write(eth, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(eth, 0, PIPR);
	sh_eth_write(eth, APR_AP, APR);
	sh_eth_write(eth, MPR_MP, MPR);
	sh_eth_write(eth, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(eth, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_R8A7790) || defined(CONFIG_R8A7791)
	sh_eth_write(eth, sh_eth_read(eth, RMIIMR) | 0x1, RMIIMR);
#endif
	/* Configure phy */
	ret = sh_eth_phy_config(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_cfg;
	}
	phy = port_info->phydev;
	ret = phy_startup(phy);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 1, RTRATE);
#elif defined(CONFIG_CPU_SH7724) || defined(CONFIG_R8A7790) || \
		defined(CONFIG_R8A7791)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(eth, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE|ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE), ECMR);
	}

	return ret;

err_phy_cfg:
	return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled
	 * when we have something to transmit
	 */
	sh_eth_write(eth, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
	sh_eth_write(eth, ~EDRRR_R, EDRRR);
}

int sh_eth_init(struct eth_device *dev, bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = dev->priv;

	ret = sh_eth_reset(eth);
	if (ret)
		goto err;

	ret = sh_eth_desc_init(eth);
	if (ret)
		goto err;

	ret = sh_eth_config(eth, bd);
	if (ret)
		goto err_config;

	sh_eth_start(eth);

	return ret;

err_config:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);

err:
	return ret;
}

void sh_eth_halt(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	sh_eth_stop(eth);
}

int sh_eth_initialize(bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = NULL;
	struct eth_device *dev = NULL;

	eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
	if (!eth) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	memset(dev, 0, sizeof(struct eth_device));
	memset(eth, 0, sizeof(struct sh_eth_dev));

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;

	dev->priv = (void *)eth;
	dev->iobase = 0;
	dev->init = sh_eth_init;
	dev->halt = sh_eth_halt;
	dev->send = sh_eth_send;
	dev->recv = sh_eth_recv;
	eth->port_info[eth->port].dev = dev;

	sprintf(dev->name, SHETHER_NAME);

	/* Register the device with the Ethernet subsystem */
	eth_register(dev);

	bb_miiphy_buses[0].priv = eth;
	miiphy_register(dev->name, bb_miiphy_read, bb_miiphy_write);

	if (!eth_getenv_enetaddr("ethaddr", dev->enetaddr))
		puts("Please set MAC address\n");

	return ret;

err:
	if (dev)
		free(dev);

	if (eth)
		free(eth);

	printf(SHETHER_NAME ": Failed\n");
	return ret;
}

/******* for bb_miiphy *******/
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;

	/* Drive the MDIO line from the MAC (output direction) */
	sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;

	/* Release the MDIO line so the PHY can drive it (input direction) */
	sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;

	if (v)
		sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDO, PIR);
	else
		sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDO, PIR);

	return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;

	*v = (sh_eth_read(eth, PIR) & PIR_MDI) >> 3;

	return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;

	if (v)
		sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDC, PIR);
	else
		sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDC, PIR);

	return 0;
}

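/* Settling delay between bit-banged MDIO bus transitions */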
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);