/*
 * RDC R6040 Fast Ethernet MAC support
 *
 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
 * Copyright (C) 2007
 *	Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
 * Copyright (C) 2007-2012 Florian Fainelli <f.fainelli@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include <linux/phy.h>

#include <asm/processor.h>

#define DRV_NAME	"r6040"
#define DRV_VERSION	"0.29"
#define DRV_RELDATE	"04Jul2016"
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(6000 * HZ / 1000)

/* RDC MAC I/O Size */
#define R6040_IO_SIZE	256

/* MAX RDC MAC */
#define MAX_MAC		2

/* MAC registers */
#define MCR0		0x00	/* Control register 0 */
#define  MCR0_RCVEN	0x0002	/* Receive enable */
#define  MCR0_PROMISC	0x0020	/* Promiscuous mode */
#define  MCR0_HASH_EN	0x0100	/* Enable multicast hash table function */
#define  MCR0_XMTEN	0x1000	/* Transmission enable */
#define  MCR0_FD	0x8000	/* Full/Half duplex */
#define MCR1		0x04	/* Control register 1 */
#define  MAC_RST	0x0001	/* Reset the MAC */
#define MBCR		0x08	/* Bus control */
#define MT_ICR		0x0C	/* TX interrupt control */
#define MR_ICR		0x10	/* RX interrupt control */
#define MTPR		0x14	/* TX poll command register */
#define  TM2TX		0x0001	/* Trigger MAC to transmit */
#define MR_BSR		0x18	/* RX buffer size */
#define MR_DCR		0x1A	/* RX descriptor control */
#define MLSR		0x1C	/* Last status */
#define  TX_FIFO_UNDR	0x0200	/* TX FIFO under-run */
#define  TX_EXCEEDC	0x2000	/* Transmit exceed collision */
#define  TX_LATEC	0x4000	/* Transmit late collision */
#define MMDIO		0x20	/* MDIO control register */
#define  MDIO_WRITE	0x4000	/* MDIO write */
#define  MDIO_READ	0x2000	/* MDIO read */
#define MMRD		0x24	/* MDIO read data register */
#define MMWD		0x28	/* MDIO write data register */
#define MTD_SA0		0x2C	/* TX descriptor start address 0 */
#define MTD_SA1		0x30	/* TX descriptor start address 1 */
#define MRD_SA0		0x34	/* RX descriptor start address 0 */
#define MRD_SA1		0x38	/* RX descriptor start address 1 */
#define MISR		0x3C	/* Status register */
#define MIER		0x40	/* INT enable register */
#define  MSK_INT	0x0000	/* Mask off interrupts */
#define  RX_FINISH	0x0001	/* RX finished */
#define  RX_NO_DESC	0x0002	/* No RX descriptor available */
#define  RX_FIFO_FULL	0x0004	/* RX FIFO full */
#define  RX_EARLY	0x0008	/* RX early */
#define  TX_FINISH	0x0010	/* TX finished */
#define  TX_EARLY	0x0080	/* TX early */
#define  EVENT_OVRFL	0x0100	/* Event counter overflow */
#define  LINK_CHANGED	0x0200	/* PHY link changed */
#define ME_CISR		0x44	/* Event counter INT status */
#define ME_CIER		0x48	/* Event counter INT enable */
#define MR_CNT		0x50	/* Successfully received packet counter */
#define ME_CNT0		0x52	/* Event counter 0 */
#define ME_CNT1		0x54	/* Event counter 1 */
#define ME_CNT2		0x56	/* Event counter 2 */
#define ME_CNT3		0x58	/* Event counter 3 */
#define MT_CNT		0x5A	/* Successfully transmit packet counter */
#define ME_CNT4		0x5C	/* Event counter 4 */
#define MP_CNT		0x5E	/* Pause frame counter register */
#define MAR0		0x60	/* Hash table 0 */
#define MAR1		0x62	/* Hash table 1 */
#define MAR2		0x64	/* Hash table 2 */
#define MAR3		0x66	/* Hash table 3 */
#define MID_0L		0x68	/* Multicast address MID0 Low */
#define MID_0M		0x6A	/* Multicast address MID0 Medium */
#define MID_0H		0x6C	/* Multicast address MID0 High */
#define MID_1L		0x70	/* MID1 Low */
#define MID_1M		0x72	/* MID1 Medium */
#define MID_1H		0x74	/* MID1 High */
#define MID_2L		0x78	/* MID2 Low */
#define MID_2M		0x7A	/* MID2 Medium */
#define MID_2H		0x7C	/* MID2 High */
#define MID_3L		0x80	/* MID3 Low */
#define MID_3M		0x82	/* MID3 Medium */
#define MID_3H		0x84	/* MID3 High */
#define PHY_CC		0x88	/* PHY status change configuration register */
#define  SCEN		0x8000	/* PHY status change enable */
#define  PHYAD_SHIFT	8	/* PHY address shift */
#define  TMRDIV_SHIFT	0	/* Timer divider shift */
#define PHY_ST		0x8A	/* PHY status register */
#define MAC_SM		0xAC	/* MAC status machine */
#define  MAC_SM_RST	0x0002	/* MAC status machine reset */
#define MAC_ID		0xBE	/* Identifier register */

#define TX_DCNT		0x80	/* TX descriptor count */
#define RX_DCNT		0x80	/* RX descriptor count */
#define MAX_BUF_SIZE	0x600
#define RX_DESC_SIZE	(RX_DCNT * sizeof(struct r6040_descriptor))
#define TX_DESC_SIZE	(TX_DCNT * sizeof(struct r6040_descriptor))
#define MBCR_DEFAULT	0x012A	/* MAC Bus Control Register */
#define MCAST_MAX	3	/* Max number multicast addresses to filter */

#define MAC_DEF_TIMEOUT	2048	/* Default MAC read/write operation timeout */

/* Descriptor status */
#define DSC_OWNER_MAC	0x8000	/* MAC is the owner of this descriptor */
#define DSC_RX_OK	0x4000	/* RX was successful */
#define DSC_RX_ERR	0x0800	/* RX PHY error */
#define DSC_RX_ERR_DRI	0x0400	/* RX dribble packet */
#define DSC_RX_ERR_BUF	0x0200	/* RX length exceeds buffer size */
#define DSC_RX_ERR_LONG	0x0100	/* RX length > maximum packet length */
#define DSC_RX_ERR_RUNT	0x0080	/* RX packet length < 64 byte */
#define DSC_RX_ERR_CRC	0x0040	/* RX CRC error */
#define DSC_RX_BCAST	0x0020	/* RX broadcast (no error) */
#define DSC_RX_MCAST	0x0010	/* RX multicast (no error) */
#define DSC_RX_MCH_HIT	0x0008	/* RX multicast hit in hash table (no error) */
#define DSC_RX_MIDH_HIT	0x0004	/* RX MID table hit (no error) */
#define DSC_RX_IDX_MID_MASK 3	/* RX mask for the index of matched MIDx */

MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
	"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
	"Florian Fainelli <f.fainelli@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);

/* RX and TX interrupts that we handle */
#define RX_INTS			(RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
#define TX_INTS			(TX_FINISH)
#define INT_MASK		(RX_INTS | TX_INTS)
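/* Each descriptor is a 32-byte slot shared with the MAC: status, len, buf
 * and ndesc mirror the hardware view of the descriptor (ownership/error
 * bits, frame length, buffer DMA address and next-descriptor DMA address),
 * while vbufp, vndescp and skb_ptr are the driver's virtual-address
 * bookkeeping kept in the same slot.
 */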
struct r6040_descriptor {
	u16	status, len;		/* 0-3 */
	__le32	buf;			/* 4-7 */
	__le32	ndesc;			/* 8-B */
	u32	rev1;			/* C-F */
	char	*vbufp;			/* 10-13 */
	struct r6040_descriptor *vndescp;	/* 14-17 */
	struct sk_buff *skb_ptr;	/* 18-1B */
	u32	rev2;			/* 1C-1F */
} __aligned(32);

struct r6040_private {
	spinlock_t lock;		/* driver lock */
	struct pci_dev *pdev;
	struct r6040_descriptor *rx_insert_ptr;
	struct r6040_descriptor *rx_remove_ptr;
	struct r6040_descriptor *tx_insert_ptr;
	struct r6040_descriptor *tx_remove_ptr;
	struct r6040_descriptor *rx_ring;
	struct r6040_descriptor *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	u16	tx_free_desc;
	u16	mcr0;
	struct net_device *dev;
	struct mii_bus *mii_bus;
	struct napi_struct napi;
	void __iomem *base;
	int old_link;
	int old_duplex;
};

static char version[] = DRV_NAME
	": RDC R6040 NAPI net driver,"
	"version "DRV_VERSION " (" DRV_RELDATE ")";

/* Read a word data from PHY Chip */
static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
	int limit = MAC_DEF_TIMEOUT;
	u16 cmd;

	iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
	/* Wait for the read bit to be cleared */
	while (limit--) {
		cmd = ioread16(ioaddr + MMDIO);
		if (!(cmd & MDIO_READ))
			break;
		udelay(1);
	}

	if (limit < 0)
		return -ETIMEDOUT;

	return ioread16(ioaddr + MMRD);
}

/* Write a word data from PHY Chip */
static int r6040_phy_write(void __iomem *ioaddr,
			   int phy_addr, int reg, u16 val)
{
	int limit = MAC_DEF_TIMEOUT;
	u16 cmd;

	iowrite16(val, ioaddr + MMWD);
	/* Write the command to the MDIO bus */
	iowrite16(MDIO_WRITE + reg + (phy_addr << 8), ioaddr + MMDIO);
	/* Wait for the write bit to be cleared */
	while (limit--) {
		cmd = ioread16(ioaddr + MMDIO);
		if (!(cmd & MDIO_WRITE))
			break;
		udelay(1);
	}

	return (limit < 0) ? -ETIMEDOUT : 0;
}

static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *dev = bus->priv;
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	return r6040_phy_read(ioaddr, phy_addr, reg);
}

static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
			       int reg, u16 value)
{
	struct net_device *dev = bus->priv;
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	return r6040_phy_write(ioaddr, phy_addr, reg, value);
}
static void r6040_free_txbufs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_DCNT; i++) {
		if (lp->tx_insert_ptr->skb_ptr) {
			pci_unmap_single(lp->pdev,
				le32_to_cpu(lp->tx_insert_ptr->buf),
				MAX_BUF_SIZE, PCI_DMA_TODEVICE);
			dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
			lp->tx_insert_ptr->skb_ptr = NULL;
		}
		lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
	}
}

static void r6040_free_rxbufs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < RX_DCNT; i++) {
		if (lp->rx_insert_ptr->skb_ptr) {
			pci_unmap_single(lp->pdev,
				le32_to_cpu(lp->rx_insert_ptr->buf),
				MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
			lp->rx_insert_ptr->skb_ptr = NULL;
		}
		lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
	}
}
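/* Link the descriptors into a circular ring: ndesc holds the DMA address
 * of the next descriptor for the MAC, vndescp the corresponding virtual
 * address for the driver, and the last descriptor points back to the first.
 */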
static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
				 dma_addr_t desc_dma, int size)
{
	struct r6040_descriptor *desc = desc_ring;
	dma_addr_t mapping = desc_dma;

	while (size-- > 0) {
		mapping += sizeof(*desc);
		desc->ndesc = cpu_to_le32(mapping);
		desc->vndescp = desc + 1;
		desc++;
	}
	desc--;
	desc->ndesc = cpu_to_le32(desc_dma);
	desc->vndescp = desc_ring;
}

static void r6040_init_txbufs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);

	lp->tx_free_desc = TX_DCNT;

	lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
	r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
}

static int r6040_alloc_rxbufs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct r6040_descriptor *desc;
	struct sk_buff *skb;
	int rc;

	lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
	r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);

	/* Allocate skbs for the rx descriptors */
	desc = lp->rx_ring;
	do {
		skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
		if (!skb) {
			rc = -ENOMEM;
			goto err_exit;
		}
		desc->skb_ptr = skb;
		desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
					desc->skb_ptr->data,
					MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
		desc->status = DSC_OWNER_MAC;
		desc = desc->vndescp;
	} while (desc != lp->rx_ring);

	return 0;

err_exit:
	/* Deallocate all previously allocated skbs */
	r6040_free_rxbufs(dev);
	return rc;
}

static void r6040_reset_mac(struct r6040_private *lp)
{
	void __iomem *ioaddr = lp->base;
	int limit = MAC_DEF_TIMEOUT;
	u16 cmd;

	iowrite16(MAC_RST, ioaddr + MCR1);
	while (limit--) {
		cmd = ioread16(ioaddr + MCR1);
		if (cmd & MAC_RST)
			break;
	}

	/* Reset internal state machine */
	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
	iowrite16(0, ioaddr + MAC_SM);
	mdelay(5);
}

static void r6040_init_mac_regs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	/* Mask Off Interrupt */
	iowrite16(MSK_INT, ioaddr + MIER);

	/* Reset RDC MAC */
	r6040_reset_mac(lp);

	/* MAC Bus Control Register */
	iowrite16(MBCR_DEFAULT, ioaddr + MBCR);

	/* Buffer Size Register */
	iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);

	/* Write TX ring start address */
	iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
	iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);

	/* Write RX ring start address */
	iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
	iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);

	/* Set interrupt waiting time and packet numbers */
	iowrite16(0, ioaddr + MT_ICR);
	iowrite16(0, ioaddr + MR_ICR);

	/* Enable interrupts */
	iowrite16(INT_MASK, ioaddr + MIER);

	/* Enable TX and RX */
	iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);

	/* Let TX poll the descriptors
	 * we may got called by r6040_tx_timeout which has left
	 * some unsent tx buffers */
	iowrite16(TM2TX, ioaddr + MTPR);
}

static void r6040_tx_timeout(struct net_device *dev)
{
	struct r6040_private *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	netdev_warn(dev, "transmit timed out, int enable %4.4x "
		"status %4.4x\n",
		ioread16(ioaddr + MIER),
		ioread16(ioaddr + MISR));

	dev->stats.tx_errors++;

	/* Reset MAC and re-init all registers */
	r6040_init_mac_regs(dev);
}

static struct net_device_stats *r6040_get_stats(struct net_device *dev)
{
	struct r6040_private *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
	dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
	spin_unlock_irqrestore(&priv->lock, flags);

	return &dev->stats;
}

/* Stop RDC MAC and Free the allocated resource */
static void r6040_down(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 *adrp;

	/* Stop MAC */
	iowrite16(MSK_INT, ioaddr + MIER);	/* Mask Off Interrupt */

	/* Reset RDC MAC */
	r6040_reset_mac(lp);

	/* Restore MAC Address to MIDx */
	adrp = (u16 *) dev->dev_addr;
	iowrite16(adrp[0], ioaddr + MID_0L);
	iowrite16(adrp[1], ioaddr + MID_0M);
	iowrite16(adrp[2], ioaddr + MID_0H);
}

static int r6040_close(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct pci_dev *pdev = lp->pdev;

	phy_stop(dev->phydev);
	napi_disable(&lp->napi);
	netif_stop_queue(dev);

	spin_lock_irq(&lp->lock);
	r6040_down(dev);

	/* Free RX buffer */
	r6040_free_rxbufs(dev);

	/* Free TX buffer */
	r6040_free_txbufs(dev);

	spin_unlock_irq(&lp->lock);

	free_irq(dev->irq, dev);

	/* Free Descriptor memory */
	if (lp->rx_ring) {
		pci_free_consistent(pdev,
				RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
	}

	if (lp->tx_ring) {
		pci_free_consistent(pdev,
				TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
		lp->tx_ring = NULL;
	}

	return 0;
}

static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	if (!dev->phydev)
		return -EINVAL;

	return phy_mii_ioctl(dev->phydev, rq, cmd);
}
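/* NAPI receive: walk the RX ring while descriptors have been handed back
 * by the MAC (DSC_OWNER_MAC cleared), pass good frames up the stack and
 * replace their skb with a freshly allocated one, count errors, and hand
 * every processed descriptor back to the MAC.
 */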
static int r6040_rx(struct net_device *dev, int limit)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr = priv->rx_remove_ptr;
	struct sk_buff *skb_ptr, *new_skb;
	int count = 0;
	u16 err;

	/* Limit not reached and the descriptor belongs to the CPU */
	while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
		/* Read the descriptor status */
		err = descptr->status;
		/* Global error status set */
		if (err & DSC_RX_ERR) {
			/* RX dribble */
			if (err & DSC_RX_ERR_DRI)
				dev->stats.rx_frame_errors++;
			/* Buffer length exceeded */
			if (err & DSC_RX_ERR_BUF)
				dev->stats.rx_length_errors++;
			/* Packet too long */
			if (err & DSC_RX_ERR_LONG)
				dev->stats.rx_length_errors++;
			/* Packet < 64 bytes */
			if (err & DSC_RX_ERR_RUNT)
				dev->stats.rx_length_errors++;
			/* CRC error */
			if (err & DSC_RX_ERR_CRC) {
				spin_lock(&priv->lock);
				dev->stats.rx_crc_errors++;
				spin_unlock(&priv->lock);
			}
			goto next_descr;
		}

		/* Packet successfully received */
		new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
		if (!new_skb) {
			dev->stats.rx_dropped++;
			goto next_descr;
		}
		skb_ptr = descptr->skb_ptr;
		skb_ptr->dev = priv->dev;

		/* Do not count the CRC */
		skb_put(skb_ptr, descptr->len - 4);
		pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
				 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);

		/* Send to upper layer */
		netif_receive_skb(skb_ptr);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += descptr->len - 4;

		/* put new skb into descriptor */
		descptr->skb_ptr = new_skb;
		descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
					descptr->skb_ptr->data,
					MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));

next_descr:
		/* put the descriptor back to the MAC */
		descptr->status = DSC_OWNER_MAC;
		descptr = descptr->vndescp;
		count++;
	}
	priv->rx_remove_ptr = descptr;

	return count;
}

static void r6040_tx(struct net_device *dev)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = priv->base;
	struct sk_buff *skb_ptr;
	u16 err;

	spin_lock(&priv->lock);
	descptr = priv->tx_remove_ptr;
	while (priv->tx_free_desc < TX_DCNT) {
		/* Check for errors */
		err = ioread16(ioaddr + MLSR);

		if (err & TX_FIFO_UNDR)
			dev->stats.tx_fifo_errors++;
		if (err & (TX_EXCEEDC | TX_LATEC))
			dev->stats.tx_carrier_errors++;

		if (descptr->status & DSC_OWNER_MAC)
			break; /* Not complete */
		skb_ptr = descptr->skb_ptr;

		/* Statistic Counter */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb_ptr->len;

		pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
			skb_ptr->len, PCI_DMA_TODEVICE);
		/* Free buffer */
		dev_kfree_skb(skb_ptr);
		descptr->skb_ptr = NULL;
		/* To next descriptor */
		descptr = descptr->vndescp;
		priv->tx_free_desc++;
	}
	priv->tx_remove_ptr = descptr;

	if (priv->tx_free_desc)
		netif_wake_queue(dev);
	spin_unlock(&priv->lock);
}
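/* NAPI poll: reclaim completed TX descriptors first, then receive up to
 * @budget packets; when less than the budget was used, complete NAPI and
 * re-enable the RX/TX interrupts that the interrupt handler masked off.
 */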
static int r6040_poll(struct napi_struct *napi, int budget)
{
	struct r6040_private *priv =
		container_of(napi, struct r6040_private, napi);
	struct net_device *dev = priv->dev;
	void __iomem *ioaddr = priv->base;
	int work_done;

	r6040_tx(dev);

	work_done = r6040_rx(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* Enable RX/TX interrupt */
		iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
			  ioaddr + MIER);
	}
	return work_done;
}

/* The RDC interrupt handler. */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 misr, status;

	/* Save MIER */
	misr = ioread16(ioaddr + MIER);
	/* Mask off RDC MAC interrupt */
	iowrite16(MSK_INT, ioaddr + MIER);
	/* Read MISR status and clear */
	status = ioread16(ioaddr + MISR);

	if (status == 0x0000 || status == 0xffff) {
		/* Restore RDC MAC interrupt */
		iowrite16(misr, ioaddr + MIER);
		return IRQ_NONE;
	}

	/* RX interrupt request */
	if (status & (RX_INTS | TX_INTS)) {
		if (status & RX_NO_DESC) {
			/* RX descriptor unavailable */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}
		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&lp->napi))) {
			/* Mask off RX interrupt */
			misr &= ~(RX_INTS | TX_INTS);
			__napi_schedule_irqoff(&lp->napi);
		}
	}

	/* Restore RDC MAC interrupt */
	iowrite16(misr, ioaddr + MIER);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void r6040_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	r6040_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/* Init RDC MAC */
static int r6040_up(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int ret;

	/* Initialise and alloc RX/TX buffers */
	r6040_init_txbufs(dev);
	ret = r6040_alloc_rxbufs(dev);
	if (ret)
		return ret;

	/* improve performance (by RDC guys) */
	r6040_phy_write(ioaddr, 30, 17,
			(r6040_phy_read(ioaddr, 30, 17) | 0x4000));
	r6040_phy_write(ioaddr, 30, 17,
			~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
	r6040_phy_write(ioaddr, 0, 19, 0x0000);
	r6040_phy_write(ioaddr, 0, 30, 0x01F0);

	/* Initialize all MAC registers */
	r6040_init_mac_regs(dev);

	phy_start(dev->phydev);

	return 0;
}

/* Read/set MAC address routines */
static void r6040_mac_address(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 *adrp;

	/* Reset MAC */
	r6040_reset_mac(lp);

	/* Restore MAC Address */
	adrp = (u16 *) dev->dev_addr;
	iowrite16(adrp[0], ioaddr + MID_0L);
	iowrite16(adrp[1], ioaddr + MID_0M);
	iowrite16(adrp[2], ioaddr + MID_0H);
}

static int r6040_open(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	int ret;

	/* Request IRQ and Register interrupt handler */
	ret = request_irq(dev->irq, r6040_interrupt,
		IRQF_SHARED, dev->name, dev);
	if (ret)
		goto out;

	/* Set MAC address */
	r6040_mac_address(dev);

	/* Allocate Descriptor memory */
	lp->rx_ring =
		pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
	if (!lp->rx_ring) {
		ret = -ENOMEM;
		goto err_free_irq;
	}

	lp->tx_ring =
		pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
	if (!lp->tx_ring) {
		ret = -ENOMEM;
		goto err_free_rx_ring;
	}

	ret = r6040_up(dev);
	if (ret)
		goto err_free_tx_ring;

	napi_enable(&lp->napi);
	netif_start_queue(dev);

	return 0;

err_free_tx_ring:
	pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
			lp->tx_ring_dma);
err_free_rx_ring:
	pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
			lp->rx_ring_dma);
err_free_irq:
	free_irq(dev->irq, dev);
out:
	return ret;
}
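/* Queue one skb on the TX ring: pad short frames to ETH_ZLEN, fill the
 * next free descriptor, hand it to the MAC (DSC_OWNER_MAC) and kick the
 * transmitter via MTPR unless more frames are expected (xmit_more).
 */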
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = lp->base;
	unsigned long flags;

	if (skb_put_padto(skb, ETH_ZLEN) < 0)
		return NETDEV_TX_OK;

	/* Critical Section */
	spin_lock_irqsave(&lp->lock, flags);

	/* TX resource check */
	if (!lp->tx_free_desc) {
		spin_unlock_irqrestore(&lp->lock, flags);
		netif_stop_queue(dev);
		netdev_err(dev, ": no tx descriptor\n");
		return NETDEV_TX_BUSY;
	}

	/* Set TX descriptor & Transmit it */
	lp->tx_free_desc--;
	descptr = lp->tx_insert_ptr;
	descptr->len = skb->len;
	descptr->skb_ptr = skb;
	descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	descptr->status = DSC_OWNER_MAC;

	skb_tx_timestamp(skb);

	/* Trigger the MAC to check the TX descriptor */
	if (!skb->xmit_more || netif_queue_stopped(dev))
		iowrite16(TM2TX, ioaddr + MTPR);
	lp->tx_insert_ptr = descptr->vndescp;

	/* If no tx resource, stop */
	if (!lp->tx_free_desc)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}

static void r6040_multicast_list(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned long flags;
	struct netdev_hw_addr *ha;
	int i;
	u16 *adrp;
	u16 hash_table[4] = { 0 };

	spin_lock_irqsave(&lp->lock, flags);

	/* Keep our MAC Address */
	adrp = (u16 *)dev->dev_addr;
	iowrite16(adrp[0], ioaddr + MID_0L);
	iowrite16(adrp[1], ioaddr + MID_0M);
	iowrite16(adrp[2], ioaddr + MID_0H);

	/* Clear AMCP & PROM bits */
	lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);

	/* Promiscuous mode */
	if (dev->flags & IFF_PROMISC)
		lp->mcr0 |= MCR0_PROMISC;

	/* Enable multicast hash table function to
	 * receive all multicast packets. */
	else if (dev->flags & IFF_ALLMULTI) {
		lp->mcr0 |= MCR0_HASH_EN;

		for (i = 0; i < MCAST_MAX ; i++) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
		}

		for (i = 0; i < 4; i++)
			hash_table[i] = 0xffff;
	}
	/* Use internal multicast address registers if the number of
	 * multicast addresses is not greater than MCAST_MAX. */
	else if (netdev_mc_count(dev) <= MCAST_MAX) {
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			u16 *adrp = (u16 *) ha->addr;
			iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
			iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
			iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
			i++;
		}
		while (i < MCAST_MAX) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
			i++;
		}
	}
	/* Otherwise, Enable multicast hash table function. */
	else {
		u32 crc;

		lp->mcr0 |= MCR0_HASH_EN;

		for (i = 0; i < MCAST_MAX ; i++) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
		}

		/* Build multicast hash table */
		netdev_for_each_mc_addr(ha, dev) {
			u8 *addrs = ha->addr;

			crc = ether_crc(ETH_ALEN, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
	}

	iowrite16(lp->mcr0, ioaddr + MCR0);

	/* Fill the MAC hash tables with their values */
	if (lp->mcr0 & MCR0_HASH_EN) {
		iowrite16(hash_table[0], ioaddr + MAR0);
		iowrite16(hash_table[1], ioaddr + MAR1);
		iowrite16(hash_table[2], ioaddr + MAR2);
		iowrite16(hash_table[3], ioaddr + MAR3);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
static void netdev_get_drvinfo(struct net_device *dev,
			struct ethtool_drvinfo *info)
{
	struct r6040_private *rp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops r6040_netdev_ops = {
	.ndo_open		= r6040_open,
	.ndo_stop		= r6040_close,
	.ndo_start_xmit		= r6040_start_xmit,
	.ndo_get_stats		= r6040_get_stats,
	.ndo_set_rx_mode	= r6040_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= r6040_ioctl,
	.ndo_tx_timeout		= r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= r6040_poll_controller,
#endif
};

static void r6040_adjust_link(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int status_changed = 0;
	void __iomem *ioaddr = lp->base;

	BUG_ON(!phydev);

	if (lp->old_link != phydev->link) {
		status_changed = 1;
		lp->old_link = phydev->link;
	}

	/* reflect duplex change */
	if (phydev->link && (lp->old_duplex != phydev->duplex)) {
		lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
		iowrite16(lp->mcr0, ioaddr);

		status_changed = 1;
		lp->old_duplex = phydev->duplex;
	}

	if (status_changed)
		phy_print_status(phydev);
}

static int r6040_mii_probe(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(lp->mii_bus);
	if (!phydev) {
		dev_err(&lp->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(dev, phydev_name(phydev), &r6040_adjust_link,
			     PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&lp->pdev->dev, "could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phy_set_max_speed(phydev, SPEED_100);

	lp->old_link = 0;
	lp->old_duplex = -1;

	phy_attached_info(phydev);

	return 0;
}
static int r6040_init_one(struct pci_dev *pdev,
			  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct r6040_private *lp;
	void __iomem *ioaddr;
	int err, io_size = R6040_IO_SIZE;
	static int card_idx = -1;
	int bar = 0;
	u16 *adrp;

	pr_info("%s\n", version);

	err = pci_enable_device(pdev);
	if (err)
		goto err_out;

	/* this should always be supported */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
		goto err_out_disable_dev;
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
		goto err_out_disable_dev;
	}

	/* IO Size check */
	if (pci_resource_len(pdev, bar) < io_size) {
		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
		err = -EIO;
		goto err_out_disable_dev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct r6040_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_disable_dev;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);

	if (err) {
		dev_err(&pdev->dev, "Failed to request PCI regions\n");
		goto err_out_free_dev;
	}

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed for device\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* If PHY status change register is still set to zero it means the
	 * bootloader didn't initialize it, so we set it to:
	 * - enable phy status change
	 * - enable all phy addresses
	 * - set to lowest timer divider */
	if (ioread16(ioaddr + PHY_CC) == 0)
		iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
				7 << TMRDIV_SHIFT, ioaddr + PHY_CC);

	/* Init system & device */
	lp->base = ioaddr;
	dev->irq = pdev->irq;

	spin_lock_init(&lp->lock);
	pci_set_drvdata(pdev, dev);

	/* Set MAC address */
	card_idx++;

	adrp = (u16 *)dev->dev_addr;
	adrp[0] = ioread16(ioaddr + MID_0L);
	adrp[1] = ioread16(ioaddr + MID_0M);
	adrp[2] = ioread16(ioaddr + MID_0H);

	/* Some bootloader/BIOSes do not initialize
	 * MAC address, warn about that */
	if (!(adrp[0] || adrp[1] || adrp[2])) {
		netdev_warn(dev, "MAC address not initialized, "
					"generating random\n");
		eth_hw_addr_random(dev);
	}

	/* Link new device into r6040_root_dev */
	lp->pdev = pdev;
	lp->dev = dev;

	/* Init RDC private data */
	lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;

	/* The RDC-specific entries in the device structure. */
	dev->netdev_ops = &r6040_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &lp->napi, r6040_poll, 64);

	lp->mii_bus = mdiobus_alloc();
	if (!lp->mii_bus) {
		dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out_unmap;
	}

	lp->mii_bus->priv = dev;
	lp->mii_bus->read = r6040_mdiobus_read;
	lp->mii_bus->write = r6040_mdiobus_write;
	lp->mii_bus->name = "r6040_eth_mii";
	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		dev_name(&pdev->dev), card_idx);

	err = mdiobus_register(lp->mii_bus);
	if (err) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_out_mdio;
	}

	err = r6040_mii_probe(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_out_mdio_unregister;
	}

	/* Register net device. After this dev->name assign */
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register net device\n");
		goto err_out_mdio_unregister;
	}
	return 0;

err_out_mdio_unregister:
	mdiobus_unregister(lp->mii_bus);
err_out_mdio:
	mdiobus_free(lp->mii_bus);
err_out_unmap:
	netif_napi_del(&lp->napi);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_dev:
	free_netdev(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out:
	return err;
}
static void r6040_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct r6040_private *lp = netdev_priv(dev);

	unregister_netdev(dev);
	mdiobus_unregister(lp->mii_bus);
	mdiobus_free(lp->mii_bus);
	netif_napi_del(&lp->napi);
	pci_iounmap(pdev, lp->base);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}

static const struct pci_device_id r6040_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, r6040_pci_tbl);

static struct pci_driver r6040_driver = {
	.name		= DRV_NAME,
	.id_table	= r6040_pci_tbl,
	.probe		= r6040_init_one,
	.remove		= r6040_remove_one,
};

module_pci_driver(r6040_driver);