/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013 Renesas Electronics Corporation
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <asm/errno.h>
#include <asm/io.h>

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
	flush_dcache_range((u32)addr, (u32)(addr + len - 1))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)					\
	{								\
		u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;		\
		u32 start, end;						\
									\
		start = (u32)addr;					\
		end = start + len;					\
		start &= ~(line_size - 1);				\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
									\
		invalidate_dcache_range(start, end);			\
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000

int sh_eth_send(struct eth_device *dev, void *packet, int len)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* packet must be on a 4 byte boundary */
	if ((int)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
		       __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(eth, EDTRR) & EDTRR_TRNS))
		sh_eth_write(eth, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

int sh_eth_recv(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, len = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	uchar *packet;

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (!(port_info->rx_desc_cur->rd0 & RD_RACT)) {
		/* Check for errors */
		if (!(port_info->rx_desc_cur->rd0 & RD_RFE)) {
			len = port_info->rx_desc_cur->rd1 & 0xffff;
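			/*
			 * rd2 holds the physical address of the receive
			 * buffer; convert it to a P2 (non-cacheable) pointer
			 * before handing the frame to the network stack.
			 */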
			packet = (uchar *)
				ADDR_TO_P2(port_info->rx_desc_cur->rd2);
			invalidate_cache(packet, len);
			NetReceive(packet, len);
		}

		/* Make current descriptor available again */
		if (port_info->rx_desc_cur->rd0 & RD_RDLE)
			port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
		else
			port_info->rx_desc_cur->rd0 = RD_RACT;
		/* Point to the next descriptor */
		port_info->rx_desc_cur++;
		if (port_info->rx_desc_cur >=
		    port_info->rx_desc_base + NUM_RX_DESC)
			port_info->rx_desc_cur = port_info->rx_desc_base;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(eth, EDRRR) & EDRRR_R))
		sh_eth_write(eth, EDRRR_R, EDRRR);

	return len;
}

static int sh_eth_reset(struct sh_eth_dev *eth)
{
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(eth, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(eth, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(eth, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(eth, sh_eth_read(eth, EDMR) | EDMR_SRST, EDMR);
	udelay(3000);
	sh_eth_write(eth, sh_eth_read(eth, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	u32 tmp_addr;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be TX_DESC_SIZE bytes aligned
	 */
	port_info->tx_desc_malloc = malloc(NUM_TX_DESC *
					   sizeof(struct tx_desc_s) +
					   TX_DESC_SIZE - 1);
	if (!port_info->tx_desc_malloc) {
		printf(SHETHER_NAME ": malloc failed\n");
		ret = -ENOMEM;
		goto err;
	}

	tmp_addr = (u32) (((int)port_info->tx_desc_malloc + TX_DESC_SIZE - 1) &
			  ~(TX_DESC_SIZE - 1));
	flush_cache_wback(tmp_addr, NUM_TX_DESC * sizeof(struct tx_desc_s));
	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base = (struct tx_desc_s *)ADDR_TO_P2(tmp_addr);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(eth, 0x01, TDFFR);	/* Last descriptor bit */
#endif

err:
	return ret;
}

static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct rx_desc_s *cur_rx_desc;
	u32 tmp_addr;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be RX_DESC_SIZE bytes aligned
	 */
	port_info->rx_desc_malloc = malloc(NUM_RX_DESC *
					   sizeof(struct rx_desc_s) +
					   RX_DESC_SIZE - 1);
	if (!port_info->rx_desc_malloc) {
		printf(SHETHER_NAME ": malloc failed\n");
		ret = -ENOMEM;
		goto err;
	}

	tmp_addr = (u32) (((int)port_info->rx_desc_malloc + RX_DESC_SIZE - 1) &
			  ~(RX_DESC_SIZE - 1));
	flush_cache_wback(tmp_addr, NUM_RX_DESC * sizeof(struct rx_desc_s));
	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base = (struct rx_desc_s *)ADDR_TO_P2(tmp_addr);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be 32 bytes aligned and in
	 * P2 area
	 */
	port_info->rx_buf_malloc = malloc(
		NUM_RX_DESC * MAX_BUF_SIZE + RX_BUF_ALIGNE_SIZE - 1);
	if (!port_info->rx_buf_malloc) {
		printf(SHETHER_NAME ": malloc failed\n");
		ret = -ENOMEM;
		goto err_buf_malloc;
	}

	tmp_addr = (u32)(((int)port_info->rx_buf_malloc
			  + (RX_BUF_ALIGNE_SIZE - 1)) &
			 ~(RX_BUF_ALIGNE_SIZE - 1));
	port_info->rx_buf_base = (u8 *)ADDR_TO_P2(tmp_addr);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32) ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(eth, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_malloc:
	free(port_info->rx_desc_malloc);
	port_info->rx_desc_malloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	int port = eth->port;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (port_info->tx_desc_malloc) {
		free(port_info->tx_desc_malloc);
		port_info->tx_desc_malloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	int port = eth->port;
	struct sh_eth_info *port_info = &eth->port_info[port];

	if (port_info->rx_desc_malloc) {
		free(port_info->rx_desc_malloc);
		port_info->rx_desc_malloc = NULL;
	}

	if (port_info->rx_buf_malloc) {
		free(port_info->rx_buf_malloc);
		port_info->rx_buf_malloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;
err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

static int sh_eth_phy_config(struct sh_eth_dev *eth)
{
	int port = eth->port, ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phydev;

	phydev = phy_connect(
			miiphy_get_dev_by_name(dev->name),
			port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
	port_info->phydev = phydev;
	phy_config(phydev);
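	/*
	 * phy_config() only sets up the PHY here; the link is brought up
	 * later by phy_startup() in sh_eth_config().
	 */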

	return ret;
}

static int sh_eth_config(struct sh_eth_dev *eth, bd_t *bd)
{
	int port = eth->port, ret = 0;
	u32 val;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phy;

	/* Configure e-dmac registers */
	sh_eth_write(eth, (sh_eth_read(eth, EDMR) & ~EMDR_DESC_R) |
			(EMDR_DESC | EDMR_EL), EDMR);

	sh_eth_write(eth, 0, EESIPR);
	sh_eth_write(eth, 0, TRSCER);
	sh_eth_write(eth, 0, TFTR);
	sh_eth_write(eth, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(eth, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, 0, RPADIR);
#endif
	sh_eth_write(eth, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(eth, 0, ECSIPR);

	/* Set MAC address */
	val = dev->enetaddr[0] << 24 | dev->enetaddr[1] << 16 |
	      dev->enetaddr[2] << 8 | dev->enetaddr[3];
	sh_eth_write(eth, val, MAHR);

	val = dev->enetaddr[4] << 8 | dev->enetaddr[5];
	sh_eth_write(eth, val, MALR);

	sh_eth_write(eth, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(eth, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, APR_AP, APR);
	sh_eth_write(eth, MPR_MP, MPR);
	sh_eth_write(eth, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(eth, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_R8A7790) || defined(CONFIG_R8A7791) || \
	defined(CONFIG_R8A7794)
	sh_eth_write(eth, sh_eth_read(eth, RMIIMR) | 0x1, RMIIMR);
#endif
	/* Configure phy */
	ret = sh_eth_phy_config(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_cfg;
	}
	phy = port_info->phydev;
	ret = phy_startup(phy);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 1, RTRATE);
#elif defined(CONFIG_CPU_SH7724) || defined(CONFIG_R8A7790) || \
		defined(CONFIG_R8A7791) || defined(CONFIG_R8A7794)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(eth, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(eth, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(eth, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE|ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(eth, val | (ECMR_CHG_DM|ECMR_RE|ECMR_TE), ECMR);
	}

	return ret;

err_phy_cfg:
	return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled
	 * when we have something to transmit
	 */
	sh_eth_write(eth, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
	sh_eth_write(eth, ~EDRRR_R, EDRRR);
}

int sh_eth_init(struct eth_device *dev, bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = dev->priv;

	ret = sh_eth_reset(eth);
	if (ret)
		goto err;

	ret = sh_eth_desc_init(eth);
	if (ret)
		goto err;

	ret = sh_eth_config(eth, bd);
	if (ret)
		goto err_config;

	sh_eth_start(eth);

	return ret;

err_config:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);

err:
	return ret;
}

void sh_eth_halt(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	sh_eth_stop(eth);
}

int sh_eth_initialize(bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = NULL;
	struct eth_device *dev = NULL;

	eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
	if (!eth) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	memset(dev, 0, sizeof(struct eth_device));
	memset(eth, 0, sizeof(struct sh_eth_dev));

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;

	dev->priv = (void *)eth;
	dev->iobase = 0;
	dev->init = sh_eth_init;
	dev->halt = sh_eth_halt;
	dev->send = sh_eth_send;
	dev->recv = sh_eth_recv;
	eth->port_info[eth->port].dev = dev;

	sprintf(dev->name, SHETHER_NAME);

	/* Register the device with the Ethernet subsystem */
	eth_register(dev);

	bb_miiphy_buses[0].priv = eth;
	miiphy_register(dev->name, bb_miiphy_read, bb_miiphy_write);

	if (!eth_getenv_enetaddr("ethaddr", dev->enetaddr))
		puts("Please set MAC address\n");

	return ret;

err:
	if (dev)
		free(dev);

	if (eth)
		free(eth);

	printf(SHETHER_NAME ": Failed\n");
	return ret;
}

/******* for bb_miiphy *******/
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;

	sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;

	sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;

	if (v)
		sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDO, PIR);
	else
		sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDO, PIR);

	return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;

	*v = (sh_eth_read(eth, PIR) & PIR_MDI) >> 3;

	return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;

	if (v)
		sh_eth_write(eth, sh_eth_read(eth, PIR) | PIR_MDC, PIR);
	else
		sh_eth_write(eth, sh_eth_read(eth, PIR) & ~PIR_MDC, PIR);

	return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);