// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * EP93xx ethernet network device driver
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 * Dedicated to Marija Kulikova.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <linux/platform_data/eth-ep93xx.h>

#define DRV_MODULE_NAME		"ep93xx-eth"
#define DRV_MODULE_VERSION	"0.1"

#define RX_QUEUE_ENTRIES	64
#define TX_QUEUE_ENTRIES	8

#define MAX_PKT_SIZE		2044
#define PKT_BUF_SIZE		2048

#define REG_RXCTL		0x0000
#define REG_RXCTL_DEFAULT	0x00073800
#define REG_TXCTL		0x0004
#define REG_TXCTL_ENABLE	0x00000001
#define REG_MIICMD		0x0010
#define REG_MIICMD_READ		0x00008000
#define REG_MIICMD_WRITE	0x00004000
#define REG_MIIDATA		0x0014
#define REG_MIISTS		0x0018
#define REG_MIISTS_BUSY		0x00000001
#define REG_SELFCTL		0x0020
#define REG_SELFCTL_RESET	0x00000001
#define REG_INTEN		0x0024
#define REG_INTEN_TX		0x00000008
#define REG_INTEN_RX		0x00000007
#define REG_INTSTSP		0x0028
#define REG_INTSTS_TX		0x00000008
#define REG_INTSTS_RX		0x00000004
#define REG_INTSTSC		0x002c
#define REG_AFP			0x004c
#define REG_INDAD0		0x0050
#define REG_INDAD1		0x0051
#define REG_INDAD2		0x0052
#define REG_INDAD3		0x0053
#define REG_INDAD4		0x0054
#define REG_INDAD5		0x0055
#define REG_GIINTMSK		0x0064
#define REG_GIINTMSK_ENABLE	0x00008000
#define REG_BMCTL		0x0080
#define REG_BMCTL_ENABLE_TX	0x00000100
#define REG_BMCTL_ENABLE_RX	0x00000001
#define REG_BMSTS		0x0084
#define REG_BMSTS_RX_ACTIVE	0x00000008
#define REG_RXDQBADD		0x0090
#define REG_RXDQBLEN		0x0094
#define REG_RXDCURADD		0x0098
#define REG_RXDENQ		0x009c
#define REG_RXSTSQBADD		0x00a0
#define REG_RXSTSQBLEN		0x00a4
#define REG_RXSTSQCURADD	0x00a8
#define REG_RXSTSENQ		0x00ac
#define REG_TXDQBADD		0x00b0
#define REG_TXDQBLEN		0x00b4
#define REG_TXDQCURADD		0x00b8
#define REG_TXDENQ		0x00bc
#define REG_TXSTSQBADD		0x00c0
#define REG_TXSTSQBLEN		0x00c4
#define REG_TXSTSQCURADD	0x00c8
#define REG_MAXFRMLEN		0x00e8

struct ep93xx_rdesc
{
	u32	buf_addr;
	u32	rdesc1;
};

#define RDESC1_NSOF		0x80000000
#define RDESC1_BUFFER_INDEX	0x7fff0000
#define RDESC1_BUFFER_LENGTH	0x0000ffff

struct ep93xx_rstat
{
	u32	rstat0;
	u32	rstat1;
};

#define RSTAT0_RFP		0x80000000
#define RSTAT0_RWE		0x40000000
#define RSTAT0_EOF		0x20000000
#define RSTAT0_EOB		0x10000000
#define RSTAT0_AM		0x00c00000
#define RSTAT0_RX_ERR		0x00200000
#define RSTAT0_OE		0x00100000
#define RSTAT0_FE		0x00080000
#define RSTAT0_RUNT		0x00040000
#define RSTAT0_EDATA		0x00020000
#define RSTAT0_CRCE		0x00010000
#define RSTAT0_CRCI		0x00008000
#define RSTAT0_HTI		0x00003f00
#define RSTAT1_RFP		0x80000000
#define RSTAT1_BUFFER_INDEX	0x7fff0000
#define RSTAT1_FRAME_LENGTH	0x0000ffff

struct ep93xx_tdesc
{
	u32	buf_addr;
	u32	tdesc1;
};

#define TDESC1_EOF		0x80000000
#define TDESC1_BUFFER_INDEX	0x7fff0000
#define TDESC1_BUFFER_ABORT	0x00008000
#define TDESC1_BUFFER_LENGTH	0x00000fff
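
/*
 * The MAC fills in one TX status entry per transmitted frame:
 * TSTAT0_TXFP marks an entry as processed, TSTAT0_TXWE reports an
 * error-free transmission, and TSTAT0_NCOLL carries the collision
 * count (consumed in ep93xx_tx_complete()).
 */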
struct ep93xx_tstat
{
	u32	tstat0;
};

#define TSTAT0_TXFP		0x80000000
#define TSTAT0_TXWE		0x40000000
#define TSTAT0_FA		0x20000000
#define TSTAT0_LCRS		0x10000000
#define TSTAT0_OW		0x04000000
#define TSTAT0_TXU		0x02000000
#define TSTAT0_ECOLL		0x01000000
#define TSTAT0_NCOLL		0x001f0000
#define TSTAT0_BUFFER_INDEX	0x00007fff

struct ep93xx_descs
{
	struct ep93xx_rdesc	rdesc[RX_QUEUE_ENTRIES];
	struct ep93xx_tdesc	tdesc[TX_QUEUE_ENTRIES];
	struct ep93xx_rstat	rstat[RX_QUEUE_ENTRIES];
	struct ep93xx_tstat	tstat[TX_QUEUE_ENTRIES];
};

struct ep93xx_priv
{
	struct resource		*res;
	void __iomem		*base_addr;
	int			irq;

	struct ep93xx_descs	*descs;
	dma_addr_t		descs_dma_addr;

	void			*rx_buf[RX_QUEUE_ENTRIES];
	void			*tx_buf[TX_QUEUE_ENTRIES];

	spinlock_t		rx_lock;
	unsigned int		rx_pointer;
	unsigned int		tx_clean_pointer;
	unsigned int		tx_pointer;
	spinlock_t		tx_pending_lock;
	unsigned int		tx_pending;

	struct net_device	*dev;
	struct napi_struct	napi;

	struct mii_if_info	mii;
	u8			mdc_divisor;
};

#define rdb(ep, off)		__raw_readb((ep)->base_addr + (off))
#define rdw(ep, off)		__raw_readw((ep)->base_addr + (off))
#define rdl(ep, off)		__raw_readl((ep)->base_addr + (off))
#define wrb(ep, off, val)	__raw_writeb((val), (ep)->base_addr + (off))
#define wrw(ep, off, val)	__raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val)	__raw_writel((val), (ep)->base_addr + (off))

static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int data;
	int i;

	wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);

	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_info("mdio read timed out\n");
		data = 0xffff;
	} else {
		data = rdl(ep, REG_MIIDATA);
	}

	return data;
}

static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int i;

	wrl(ep, REG_MIIDATA, data);
	wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);

	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
			break;
		msleep(1);
	}

	if (i == 10)
		pr_info("mdio write timed out\n");
}
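
/*
 * RX uses two rings that live in one coherent allocation: the
 * descriptor queue points the MAC at the pre-mapped PKT_BUF_SIZE
 * buffers, and the status queue receives one rstat0/rstat1 pair per
 * completed frame.  Frame data is copied into a freshly allocated skb
 * so the DMA buffers themselves can be re-enqueued unchanged.
 */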
static int ep93xx_rx(struct net_device *dev, int budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int processed = 0;

	while (processed < budget) {
		int entry;
		struct ep93xx_rstat *rstat;
		u32 rstat0;
		u32 rstat1;
		int length;
		struct sk_buff *skb;

		entry = ep->rx_pointer;
		rstat = ep->descs->rstat + entry;

		rstat0 = rstat->rstat0;
		rstat1 = rstat->rstat1;
		if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP))
			break;

		rstat->rstat0 = 0;
		rstat->rstat1 = 0;

		if (!(rstat0 & RSTAT0_EOF))
			pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
		if (!(rstat0 & RSTAT0_EOB))
			pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
		if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
			pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);

		if (!(rstat0 & RSTAT0_RWE)) {
			dev->stats.rx_errors++;
			if (rstat0 & RSTAT0_OE)
				dev->stats.rx_fifo_errors++;
			if (rstat0 & RSTAT0_FE)
				dev->stats.rx_frame_errors++;
			if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
				dev->stats.rx_length_errors++;
			if (rstat0 & RSTAT0_CRCE)
				dev->stats.rx_crc_errors++;
			goto err;
		}

		length = rstat1 & RSTAT1_FRAME_LENGTH;
		if (length > MAX_PKT_SIZE) {
			pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
			goto err;
		}

		/* Strip FCS. */
		if (rstat0 & RSTAT0_CRCI)
			length -= 4;

		skb = netdev_alloc_skb(dev, length + 2);
		if (likely(skb != NULL)) {
			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];

			skb_reserve(skb, 2);
			dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
						length, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
			dma_sync_single_for_device(dev->dev.parent,
						   rxd->buf_addr, length,
						   DMA_FROM_DEVICE);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, dev);

			napi_gro_receive(&ep->napi, skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
		} else {
			dev->stats.rx_dropped++;
		}

err:
		ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
		processed++;
	}

	return processed;
}

static int ep93xx_poll(struct napi_struct *napi, int budget)
{
	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
	struct net_device *dev = ep->dev;
	int rx;

	rx = ep93xx_rx(dev, budget);
	if (rx < budget && napi_complete_done(napi, rx)) {
		spin_lock_irq(&ep->rx_lock);
		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
		spin_unlock_irq(&ep->rx_lock);
	}

	if (rx) {
		wrw(ep, REG_RXDENQ, rx);
		wrw(ep, REG_RXSTSENQ, rx);
	}

	return rx;
}

static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	struct ep93xx_tdesc *txd;
	int entry;

	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	entry = ep->tx_pointer;
	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);

	txd = &ep->descs->tdesc[entry];

	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
				DMA_TO_DEVICE);
	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
				   DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	spin_lock_irq(&ep->tx_pending_lock);
	ep->tx_pending++;
	if (ep->tx_pending == TX_QUEUE_ENTRIES)
		netif_stop_queue(dev);
	spin_unlock_irq(&ep->tx_pending_lock);

	wrl(ep, REG_TXDENQ, 1);

	return NETDEV_TX_OK;
}
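
/*
 * Reclaim TX entries the MAC has marked done (TSTAT0_TXFP), account
 * statistics, and restart the queue if the ring had filled up.  Runs
 * from the interrupt handler, hence the plain spin_lock() on
 * tx_pending_lock.
 */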
static void ep93xx_tx_complete(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int wake;

	wake = 0;

	spin_lock(&ep->tx_pending_lock);
	while (1) {
		int entry;
		struct ep93xx_tstat *tstat;
		u32 tstat0;

		entry = ep->tx_clean_pointer;
		tstat = ep->descs->tstat + entry;

		tstat0 = tstat->tstat0;
		if (!(tstat0 & TSTAT0_TXFP))
			break;

		tstat->tstat0 = 0;

		if (tstat0 & TSTAT0_FA)
			pr_crit("frame aborted %.8x\n", tstat0);
		if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
			pr_crit("entry mismatch %.8x\n", tstat0);

		if (tstat0 & TSTAT0_TXWE) {
			int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;

			dev->stats.tx_packets++;
			dev->stats.tx_bytes += length;
		} else {
			dev->stats.tx_errors++;
		}

		if (tstat0 & TSTAT0_OW)
			dev->stats.tx_window_errors++;
		if (tstat0 & TSTAT0_TXU)
			dev->stats.tx_fifo_errors++;
		dev->stats.collisions += (tstat0 >> 16) & 0x1f;

		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
		if (ep->tx_pending == TX_QUEUE_ENTRIES)
			wake = 1;
		ep->tx_pending--;
	}
	spin_unlock(&ep->tx_pending_lock);

	if (wake)
		netif_wake_queue(dev);
}

static irqreturn_t ep93xx_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ep93xx_priv *ep = netdev_priv(dev);
	u32 status;

	status = rdl(ep, REG_INTSTSC);
	if (status == 0)
		return IRQ_NONE;

	if (status & REG_INTSTS_RX) {
		spin_lock(&ep->rx_lock);
		if (likely(napi_schedule_prep(&ep->napi))) {
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->rx_lock);
	}

	if (status & REG_INTSTS_TX)
		ep93xx_tx_complete(dev);

	return IRQ_HANDLED;
}

static void ep93xx_free_buffers(struct ep93xx_priv *ep)
{
	struct device *dev = ep->dev->dev.parent;
	int i;

	if (!ep->descs)
		return;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		dma_addr_t d;

		d = ep->descs->rdesc[i].buf_addr;
		if (d)
			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);

		kfree(ep->rx_buf[i]);
	}

	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
		dma_addr_t d;

		d = ep->descs->tdesc[i].buf_addr;
		if (d)
			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);

		kfree(ep->tx_buf[i]);
	}

	dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
			  ep->descs_dma_addr);
	ep->descs = NULL;
}

static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
	struct device *dev = ep->dev->dev.parent;
	int i;

	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
				       &ep->descs_dma_addr, GFP_KERNEL);
	if (ep->descs == NULL)
		return 1;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		void *buf;
		dma_addr_t d;

		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
		if (buf == NULL)
			goto err;

		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, d)) {
			kfree(buf);
			goto err;
		}

		ep->rx_buf[i] = buf;
		ep->descs->rdesc[i].buf_addr = d;
		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
	}

	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
		void *buf;
		dma_addr_t d;

		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
		if (buf == NULL)
			goto err;

		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, d)) {
			kfree(buf);
			goto err;
		}

		ep->tx_buf[i] = buf;
		ep->descs->tdesc[i].buf_addr = d;
	}

	return 0;

err:
	ep93xx_free_buffers(ep);
	return 1;
}
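
/*
 * Bring the MAC up: soft reset, program the MDC divisor, point the
 * four queue base/current/length register sets at the coherent
 * descriptor block, enable bus-master DMA and interrupts, pre-enqueue
 * every RX entry, and finally program the station address and frame
 * length limit before enabling the receiver and transmitter.
 */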
static int ep93xx_start_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	unsigned long addr;
	int i;

	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to reset\n");
		return 1;
	}

	wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9));

	/* Does the PHY support preamble suppress? */
	if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0)
		wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8));

	/* Receive descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc);
	wrl(ep, REG_RXDQBADD, addr);
	wrl(ep, REG_RXDCURADD, addr);
	wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));

	/* Receive status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat);
	wrl(ep, REG_RXSTSQBADD, addr);
	wrl(ep, REG_RXSTSQCURADD, addr);
	wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat));

	/* Transmit descriptor ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc);
	wrl(ep, REG_TXDQBADD, addr);
	wrl(ep, REG_TXDQCURADD, addr);
	wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc));

	/* Transmit status ring. */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat);
	wrl(ep, REG_TXSTSQBADD, addr);
	wrl(ep, REG_TXSTSQCURADD, addr);
	wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat));

	wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX);
	wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
	wrl(ep, REG_GIINTMSK, 0);

	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to start\n");
		return 1;
	}

	wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES);
	wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES);

	wrb(ep, REG_INDAD0, dev->dev_addr[0]);
	wrb(ep, REG_INDAD1, dev->dev_addr[1]);
	wrb(ep, REG_INDAD2, dev->dev_addr[2]);
	wrb(ep, REG_INDAD3, dev->dev_addr[3]);
	wrb(ep, REG_INDAD4, dev->dev_addr[4]);
	wrb(ep, REG_INDAD5, dev->dev_addr[5]);
	wrl(ep, REG_AFP, 0);

	wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE);

	wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT);
	wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE);

	return 0;
}

static void ep93xx_stop_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int i;

	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10)
		pr_crit("hw failed to reset\n");
}

static int ep93xx_open(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int err;

	if (ep93xx_alloc_buffers(ep))
		return -ENOMEM;

	napi_enable(&ep->napi);

	if (ep93xx_start_hw(dev)) {
		napi_disable(&ep->napi);
		ep93xx_free_buffers(ep);
		return -EIO;
	}

	spin_lock_init(&ep->rx_lock);
	ep->rx_pointer = 0;
	ep->tx_clean_pointer = 0;
	ep->tx_pointer = 0;
	spin_lock_init(&ep->tx_pending_lock);
	ep->tx_pending = 0;

	err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
	if (err) {
		napi_disable(&ep->napi);
		ep93xx_stop_hw(dev);
		ep93xx_free_buffers(ep);
		return err;
	}

	wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);

	netif_start_queue(dev);

	return 0;
}

static int ep93xx_close(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	napi_disable(&ep->napi);
	netif_stop_queue(dev);

	wrl(ep, REG_GIINTMSK, 0);
	free_irq(ep->irq, dev);
	ep93xx_stop_hw(dev);
	ep93xx_free_buffers(ep);

	return 0;
}
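
/*
 * PHY management is delegated to the generic MII library, which calls
 * back into ep93xx_mdio_read()/ep93xx_mdio_write() through the hooks
 * set up in ep93xx_eth_probe().
 */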
static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
}

static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static int ep93xx_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	mii_ethtool_get_link_ksettings(&ep->mii, cmd);

	return 0;
}

static int ep93xx_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	return mii_ethtool_set_link_ksettings(&ep->mii, cmd);
}

static int ep93xx_nway_reset(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	return mii_nway_restart(&ep->mii);
}

static u32 ep93xx_get_link(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	return mii_link_ok(&ep->mii);
}

static const struct ethtool_ops ep93xx_ethtool_ops = {
	.get_drvinfo		= ep93xx_get_drvinfo,
	.nway_reset		= ep93xx_nway_reset,
	.get_link		= ep93xx_get_link,
	.get_link_ksettings	= ep93xx_get_link_ksettings,
	.set_link_ksettings	= ep93xx_set_link_ksettings,
};

static const struct net_device_ops ep93xx_netdev_ops = {
	.ndo_open		= ep93xx_open,
	.ndo_stop		= ep93xx_close,
	.ndo_start_xmit		= ep93xx_xmit,
	.ndo_do_ioctl		= ep93xx_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
{
	struct net_device *dev;

	dev = alloc_etherdev(sizeof(struct ep93xx_priv));
	if (dev == NULL)
		return NULL;

	memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);

	dev->ethtool_ops = &ep93xx_ethtool_ops;
	dev->netdev_ops = &ep93xx_netdev_ops;

	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;

	return dev;
}
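
/*
 * ep93xx_eth_remove() doubles as the error path of ep93xx_eth_probe(),
 * so each teardown step below must cope with state that was never set
 * up (NULL drvdata, base_addr or mem region).
 */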
static int ep93xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ep93xx_priv *ep;

	dev = platform_get_drvdata(pdev);
	if (dev == NULL)
		return 0;

	ep = netdev_priv(dev);

	/* @@@ Force down. */
	unregister_netdev(dev);
	ep93xx_free_buffers(ep);

	if (ep->base_addr != NULL)
		iounmap(ep->base_addr);

	if (ep->res != NULL) {
		release_resource(ep->res);
		kfree(ep->res);
	}

	free_netdev(dev);

	return 0;
}

static int ep93xx_eth_probe(struct platform_device *pdev)
{
	struct ep93xx_eth_data *data;
	struct net_device *dev;
	struct ep93xx_priv *ep;
	struct resource *mem;
	int irq;
	int err;

	if (pdev == NULL)
		return -ENODEV;
	data = dev_get_platdata(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!mem || irq < 0)
		return -ENXIO;

	dev = ep93xx_dev_alloc(data);
	if (dev == NULL) {
		err = -ENOMEM;
		goto err_out;
	}
	ep = netdev_priv(dev);
	ep->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);
	netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);

	platform_set_drvdata(pdev, dev);

	ep->res = request_mem_region(mem->start, resource_size(mem),
				     dev_name(&pdev->dev));
	if (ep->res == NULL) {
		dev_err(&pdev->dev, "Could not reserve memory region\n");
		err = -ENOMEM;
		goto err_out;
	}

	ep->base_addr = ioremap(mem->start, resource_size(mem));
	if (ep->base_addr == NULL) {
		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
		err = -EIO;
		goto err_out;
	}
	ep->irq = irq;

	ep->mii.phy_id = data->phy_id;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;
	ep->mii.dev = dev;
	ep->mii.mdio_read = ep93xx_mdio_read;
	ep->mii.mdio_write = ep93xx_mdio_write;
	ep->mdc_divisor = 40;	/* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto err_out;
	}

	printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
	       dev->name, ep->irq, dev->dev_addr);

	return 0;

err_out:
	ep93xx_eth_remove(pdev);
	return err;
}

static struct platform_driver ep93xx_eth_driver = {
	.probe		= ep93xx_eth_probe,
	.remove		= ep93xx_eth_remove,
	.driver		= {
		.name	= "ep93xx-eth",
	},
};

module_platform_driver(ep93xx_eth_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");