1 /*************************************************************************** 2 * 3 * Copyright (C) 2007,2008 SMSC 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; either version 2 8 * of the License, or (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, see <http://www.gnu.org/licenses/>. 17 * 18 *************************************************************************** 19 */ 20 21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22 23 #include <linux/interrupt.h> 24 #include <linux/kernel.h> 25 #include <linux/netdevice.h> 26 #include <linux/phy.h> 27 #include <linux/pci.h> 28 #include <linux/if_vlan.h> 29 #include <linux/dma-mapping.h> 30 #include <linux/crc32.h> 31 #include <linux/slab.h> 32 #include <linux/module.h> 33 #include <asm/unaligned.h> 34 #include "smsc9420.h" 35 36 #define DRV_NAME "smsc9420" 37 #define DRV_MDIONAME "smsc9420-mdio" 38 #define DRV_DESCRIPTION "SMSC LAN9420 driver" 39 #define DRV_VERSION "1.01" 40 41 MODULE_LICENSE("GPL"); 42 MODULE_VERSION(DRV_VERSION); 43 44 struct smsc9420_dma_desc { 45 u32 status; 46 u32 length; 47 u32 buffer1; 48 u32 buffer2; 49 }; 50 51 struct smsc9420_ring_info { 52 struct sk_buff *skb; 53 dma_addr_t mapping; 54 }; 55 56 struct smsc9420_pdata { 57 void __iomem *ioaddr; 58 struct pci_dev *pdev; 59 struct net_device *dev; 60 61 struct smsc9420_dma_desc *rx_ring; 62 struct smsc9420_dma_desc *tx_ring; 63 struct smsc9420_ring_info *tx_buffers; 64 struct smsc9420_ring_info *rx_buffers; 65 dma_addr_t rx_dma_addr; 66 dma_addr_t tx_dma_addr; 
67 int tx_ring_head, tx_ring_tail; 68 int rx_ring_head, rx_ring_tail; 69 70 spinlock_t int_lock; 71 spinlock_t phy_lock; 72 73 struct napi_struct napi; 74 75 bool software_irq_signal; 76 bool rx_csum; 77 u32 msg_enable; 78 79 struct phy_device *phy_dev; 80 struct mii_bus *mii_bus; 81 int last_duplex; 82 int last_carrier; 83 }; 84 85 static const struct pci_device_id smsc9420_id_table[] = { 86 { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, }, 87 { 0, } 88 }; 89 90 MODULE_DEVICE_TABLE(pci, smsc9420_id_table); 91 92 #define SMSC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 93 94 static uint smsc_debug; 95 static uint debug = -1; 96 module_param(debug, uint, 0); 97 MODULE_PARM_DESC(debug, "debug level"); 98 99 static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) 100 { 101 return ioread32(pd->ioaddr + offset); 102 } 103 104 static inline void 105 smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value) 106 { 107 iowrite32(value, pd->ioaddr + offset); 108 } 109 110 static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) 111 { 112 /* to ensure PCI write completion, we must perform a PCI read */ 113 smsc9420_reg_read(pd, ID_REV); 114 } 115 116 static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx) 117 { 118 struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; 119 unsigned long flags; 120 u32 addr; 121 int i, reg = -EIO; 122 123 spin_lock_irqsave(&pd->phy_lock, flags); 124 125 /* confirm MII not busy */ 126 if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { 127 netif_warn(pd, drv, pd->dev, "MII is busy???\n"); 128 goto out; 129 } 130 131 /* set the address, index & direction (read from PHY) */ 132 addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | 133 MII_ACCESS_MII_READ_; 134 smsc9420_reg_write(pd, MII_ACCESS, addr); 135 136 /* wait for read to complete with 50us timeout */ 137 for (i = 0; i < 5; i++) { 138 if (!(smsc9420_reg_read(pd, 
MII_ACCESS) & 139 MII_ACCESS_MII_BUSY_)) { 140 reg = (u16)smsc9420_reg_read(pd, MII_DATA); 141 goto out; 142 } 143 udelay(10); 144 } 145 146 netif_warn(pd, drv, pd->dev, "MII busy timeout!\n"); 147 148 out: 149 spin_unlock_irqrestore(&pd->phy_lock, flags); 150 return reg; 151 } 152 153 static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx, 154 u16 val) 155 { 156 struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; 157 unsigned long flags; 158 u32 addr; 159 int i, reg = -EIO; 160 161 spin_lock_irqsave(&pd->phy_lock, flags); 162 163 /* confirm MII not busy */ 164 if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { 165 netif_warn(pd, drv, pd->dev, "MII is busy???\n"); 166 goto out; 167 } 168 169 /* put the data to write in the MAC */ 170 smsc9420_reg_write(pd, MII_DATA, (u32)val); 171 172 /* set the address, index & direction (write to PHY) */ 173 addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | 174 MII_ACCESS_MII_WRITE_; 175 smsc9420_reg_write(pd, MII_ACCESS, addr); 176 177 /* wait for write to complete with 50us timeout */ 178 for (i = 0; i < 5; i++) { 179 if (!(smsc9420_reg_read(pd, MII_ACCESS) & 180 MII_ACCESS_MII_BUSY_)) { 181 reg = 0; 182 goto out; 183 } 184 udelay(10); 185 } 186 187 netif_warn(pd, drv, pd->dev, "MII busy timeout!\n"); 188 189 out: 190 spin_unlock_irqrestore(&pd->phy_lock, flags); 191 return reg; 192 } 193 194 /* Returns hash bit number for given MAC address 195 * Example: 196 * 01 00 5E 00 00 01 -> returns bit number 31 */ 197 static u32 smsc9420_hash(u8 addr[ETH_ALEN]) 198 { 199 return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f; 200 } 201 202 static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd) 203 { 204 int timeout = 100000; 205 206 BUG_ON(!pd); 207 208 if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { 209 netif_dbg(pd, drv, pd->dev, "%s: Eeprom busy\n", __func__); 210 return -EIO; 211 } 212 213 smsc9420_reg_write(pd, E2P_CMD, 214 (E2P_CMD_EPC_BUSY_ | 
E2P_CMD_EPC_CMD_RELOAD_)); 215 216 do { 217 udelay(10); 218 if (!(smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_)) 219 return 0; 220 } while (timeout--); 221 222 netif_warn(pd, drv, pd->dev, "%s: Eeprom timed out\n", __func__); 223 return -EIO; 224 } 225 226 /* Standard ioctls for mii-tool */ 227 static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 228 { 229 struct smsc9420_pdata *pd = netdev_priv(dev); 230 231 if (!netif_running(dev) || !pd->phy_dev) 232 return -EINVAL; 233 234 return phy_mii_ioctl(pd->phy_dev, ifr, cmd); 235 } 236 237 static int smsc9420_ethtool_get_settings(struct net_device *dev, 238 struct ethtool_cmd *cmd) 239 { 240 struct smsc9420_pdata *pd = netdev_priv(dev); 241 242 if (!pd->phy_dev) 243 return -ENODEV; 244 245 cmd->maxtxpkt = 1; 246 cmd->maxrxpkt = 1; 247 return phy_ethtool_gset(pd->phy_dev, cmd); 248 } 249 250 static int smsc9420_ethtool_set_settings(struct net_device *dev, 251 struct ethtool_cmd *cmd) 252 { 253 struct smsc9420_pdata *pd = netdev_priv(dev); 254 255 if (!pd->phy_dev) 256 return -ENODEV; 257 258 return phy_ethtool_sset(pd->phy_dev, cmd); 259 } 260 261 static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev, 262 struct ethtool_drvinfo *drvinfo) 263 { 264 struct smsc9420_pdata *pd = netdev_priv(netdev); 265 266 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); 267 strlcpy(drvinfo->bus_info, pci_name(pd->pdev), 268 sizeof(drvinfo->bus_info)); 269 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); 270 } 271 272 static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev) 273 { 274 struct smsc9420_pdata *pd = netdev_priv(netdev); 275 return pd->msg_enable; 276 } 277 278 static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data) 279 { 280 struct smsc9420_pdata *pd = netdev_priv(netdev); 281 pd->msg_enable = data; 282 } 283 284 static int smsc9420_ethtool_nway_reset(struct net_device *netdev) 285 { 286 struct smsc9420_pdata *pd = 
netdev_priv(netdev); 287 288 if (!pd->phy_dev) 289 return -ENODEV; 290 291 return phy_start_aneg(pd->phy_dev); 292 } 293 294 static int smsc9420_ethtool_getregslen(struct net_device *dev) 295 { 296 /* all smsc9420 registers plus all phy registers */ 297 return 0x100 + (32 * sizeof(u32)); 298 } 299 300 static void 301 smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, 302 void *buf) 303 { 304 struct smsc9420_pdata *pd = netdev_priv(dev); 305 struct phy_device *phy_dev = pd->phy_dev; 306 unsigned int i, j = 0; 307 u32 *data = buf; 308 309 regs->version = smsc9420_reg_read(pd, ID_REV); 310 for (i = 0; i < 0x100; i += (sizeof(u32))) 311 data[j++] = smsc9420_reg_read(pd, i); 312 313 // cannot read phy registers if the net device is down 314 if (!phy_dev) 315 return; 316 317 for (i = 0; i <= 31; i++) 318 data[j++] = smsc9420_mii_read(phy_dev->mdio.bus, 319 phy_dev->mdio.addr, i); 320 } 321 322 static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd) 323 { 324 unsigned int temp = smsc9420_reg_read(pd, GPIO_CFG); 325 temp &= ~GPIO_CFG_EEPR_EN_; 326 smsc9420_reg_write(pd, GPIO_CFG, temp); 327 msleep(1); 328 } 329 330 static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op) 331 { 332 int timeout = 100; 333 u32 e2cmd; 334 335 netif_dbg(pd, hw, pd->dev, "op 0x%08x\n", op); 336 if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { 337 netif_warn(pd, hw, pd->dev, "Busy at start\n"); 338 return -EBUSY; 339 } 340 341 e2cmd = op | E2P_CMD_EPC_BUSY_; 342 smsc9420_reg_write(pd, E2P_CMD, e2cmd); 343 344 do { 345 msleep(1); 346 e2cmd = smsc9420_reg_read(pd, E2P_CMD); 347 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); 348 349 if (!timeout) { 350 netif_info(pd, hw, pd->dev, "TIMED OUT\n"); 351 return -EAGAIN; 352 } 353 354 if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { 355 netif_info(pd, hw, pd->dev, 356 "Error occurred during eeprom operation\n"); 357 return -EINVAL; 358 } 359 360 return 0; 361 } 362 363 static int 
smsc9420_eeprom_read_location(struct smsc9420_pdata *pd, 364 u8 address, u8 *data) 365 { 366 u32 op = E2P_CMD_EPC_CMD_READ_ | address; 367 int ret; 368 369 netif_dbg(pd, hw, pd->dev, "address 0x%x\n", address); 370 ret = smsc9420_eeprom_send_cmd(pd, op); 371 372 if (!ret) 373 data[address] = smsc9420_reg_read(pd, E2P_DATA); 374 375 return ret; 376 } 377 378 static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd, 379 u8 address, u8 data) 380 { 381 u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; 382 int ret; 383 384 netif_dbg(pd, hw, pd->dev, "address 0x%x, data 0x%x\n", address, data); 385 ret = smsc9420_eeprom_send_cmd(pd, op); 386 387 if (!ret) { 388 op = E2P_CMD_EPC_CMD_WRITE_ | address; 389 smsc9420_reg_write(pd, E2P_DATA, (u32)data); 390 ret = smsc9420_eeprom_send_cmd(pd, op); 391 } 392 393 return ret; 394 } 395 396 static int smsc9420_ethtool_get_eeprom_len(struct net_device *dev) 397 { 398 return SMSC9420_EEPROM_SIZE; 399 } 400 401 static int smsc9420_ethtool_get_eeprom(struct net_device *dev, 402 struct ethtool_eeprom *eeprom, u8 *data) 403 { 404 struct smsc9420_pdata *pd = netdev_priv(dev); 405 u8 eeprom_data[SMSC9420_EEPROM_SIZE]; 406 int len, i; 407 408 smsc9420_eeprom_enable_access(pd); 409 410 len = min(eeprom->len, SMSC9420_EEPROM_SIZE); 411 for (i = 0; i < len; i++) { 412 int ret = smsc9420_eeprom_read_location(pd, i, eeprom_data); 413 if (ret < 0) { 414 eeprom->len = 0; 415 return ret; 416 } 417 } 418 419 memcpy(data, &eeprom_data[eeprom->offset], len); 420 eeprom->magic = SMSC9420_EEPROM_MAGIC; 421 eeprom->len = len; 422 return 0; 423 } 424 425 static int smsc9420_ethtool_set_eeprom(struct net_device *dev, 426 struct ethtool_eeprom *eeprom, u8 *data) 427 { 428 struct smsc9420_pdata *pd = netdev_priv(dev); 429 int ret; 430 431 if (eeprom->magic != SMSC9420_EEPROM_MAGIC) 432 return -EINVAL; 433 434 smsc9420_eeprom_enable_access(pd); 435 smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_); 436 ret = smsc9420_eeprom_write_location(pd, 
eeprom->offset, *data); 437 smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWDS_); 438 439 /* Single byte write, according to man page */ 440 eeprom->len = 1; 441 442 return ret; 443 } 444 445 static const struct ethtool_ops smsc9420_ethtool_ops = { 446 .get_settings = smsc9420_ethtool_get_settings, 447 .set_settings = smsc9420_ethtool_set_settings, 448 .get_drvinfo = smsc9420_ethtool_get_drvinfo, 449 .get_msglevel = smsc9420_ethtool_get_msglevel, 450 .set_msglevel = smsc9420_ethtool_set_msglevel, 451 .nway_reset = smsc9420_ethtool_nway_reset, 452 .get_link = ethtool_op_get_link, 453 .get_eeprom_len = smsc9420_ethtool_get_eeprom_len, 454 .get_eeprom = smsc9420_ethtool_get_eeprom, 455 .set_eeprom = smsc9420_ethtool_set_eeprom, 456 .get_regs_len = smsc9420_ethtool_getregslen, 457 .get_regs = smsc9420_ethtool_getregs, 458 .get_ts_info = ethtool_op_get_ts_info, 459 }; 460 461 /* Sets the device MAC address to dev_addr */ 462 static void smsc9420_set_mac_address(struct net_device *dev) 463 { 464 struct smsc9420_pdata *pd = netdev_priv(dev); 465 u8 *dev_addr = dev->dev_addr; 466 u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; 467 u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | 468 (dev_addr[1] << 8) | dev_addr[0]; 469 470 smsc9420_reg_write(pd, ADDRH, mac_high16); 471 smsc9420_reg_write(pd, ADDRL, mac_low32); 472 } 473 474 static void smsc9420_check_mac_address(struct net_device *dev) 475 { 476 struct smsc9420_pdata *pd = netdev_priv(dev); 477 478 /* Check if mac address has been specified when bringing interface up */ 479 if (is_valid_ether_addr(dev->dev_addr)) { 480 smsc9420_set_mac_address(dev); 481 netif_dbg(pd, probe, pd->dev, 482 "MAC Address is specified by configuration\n"); 483 } else { 484 /* Try reading mac address from device. 
if EEPROM is present 485 * it will already have been set */ 486 u32 mac_high16 = smsc9420_reg_read(pd, ADDRH); 487 u32 mac_low32 = smsc9420_reg_read(pd, ADDRL); 488 dev->dev_addr[0] = (u8)(mac_low32); 489 dev->dev_addr[1] = (u8)(mac_low32 >> 8); 490 dev->dev_addr[2] = (u8)(mac_low32 >> 16); 491 dev->dev_addr[3] = (u8)(mac_low32 >> 24); 492 dev->dev_addr[4] = (u8)(mac_high16); 493 dev->dev_addr[5] = (u8)(mac_high16 >> 8); 494 495 if (is_valid_ether_addr(dev->dev_addr)) { 496 /* eeprom values are valid so use them */ 497 netif_dbg(pd, probe, pd->dev, 498 "Mac Address is read from EEPROM\n"); 499 } else { 500 /* eeprom values are invalid, generate random MAC */ 501 eth_hw_addr_random(dev); 502 smsc9420_set_mac_address(dev); 503 netif_dbg(pd, probe, pd->dev, 504 "MAC Address is set to random\n"); 505 } 506 } 507 } 508 509 static void smsc9420_stop_tx(struct smsc9420_pdata *pd) 510 { 511 u32 dmac_control, mac_cr, dma_intr_ena; 512 int timeout = 1000; 513 514 /* disable TX DMAC */ 515 dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); 516 dmac_control &= (~DMAC_CONTROL_ST_); 517 smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); 518 519 /* Wait max 10ms for transmit process to stop */ 520 while (--timeout) { 521 if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_TS_) 522 break; 523 udelay(10); 524 } 525 526 if (!timeout) 527 netif_warn(pd, ifdown, pd->dev, "TX DMAC failed to stop\n"); 528 529 /* ACK Tx DMAC stop bit */ 530 smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_); 531 532 /* mask TX DMAC interrupts */ 533 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); 534 dma_intr_ena &= ~(DMAC_INTR_ENA_TX_); 535 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); 536 smsc9420_pci_flush_write(pd); 537 538 /* stop MAC TX */ 539 mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_TXEN_); 540 smsc9420_reg_write(pd, MAC_CR, mac_cr); 541 smsc9420_pci_flush_write(pd); 542 } 543 544 static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd) 545 { 546 int i; 547 548 
BUG_ON(!pd->tx_ring); 549 550 if (!pd->tx_buffers) 551 return; 552 553 for (i = 0; i < TX_RING_SIZE; i++) { 554 struct sk_buff *skb = pd->tx_buffers[i].skb; 555 556 if (skb) { 557 BUG_ON(!pd->tx_buffers[i].mapping); 558 pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping, 559 skb->len, PCI_DMA_TODEVICE); 560 dev_kfree_skb_any(skb); 561 } 562 563 pd->tx_ring[i].status = 0; 564 pd->tx_ring[i].length = 0; 565 pd->tx_ring[i].buffer1 = 0; 566 pd->tx_ring[i].buffer2 = 0; 567 } 568 wmb(); 569 570 kfree(pd->tx_buffers); 571 pd->tx_buffers = NULL; 572 573 pd->tx_ring_head = 0; 574 pd->tx_ring_tail = 0; 575 } 576 577 static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd) 578 { 579 int i; 580 581 BUG_ON(!pd->rx_ring); 582 583 if (!pd->rx_buffers) 584 return; 585 586 for (i = 0; i < RX_RING_SIZE; i++) { 587 if (pd->rx_buffers[i].skb) 588 dev_kfree_skb_any(pd->rx_buffers[i].skb); 589 590 if (pd->rx_buffers[i].mapping) 591 pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping, 592 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 593 594 pd->rx_ring[i].status = 0; 595 pd->rx_ring[i].length = 0; 596 pd->rx_ring[i].buffer1 = 0; 597 pd->rx_ring[i].buffer2 = 0; 598 } 599 wmb(); 600 601 kfree(pd->rx_buffers); 602 pd->rx_buffers = NULL; 603 604 pd->rx_ring_head = 0; 605 pd->rx_ring_tail = 0; 606 } 607 608 static void smsc9420_stop_rx(struct smsc9420_pdata *pd) 609 { 610 int timeout = 1000; 611 u32 mac_cr, dmac_control, dma_intr_ena; 612 613 /* mask RX DMAC interrupts */ 614 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); 615 dma_intr_ena &= (~DMAC_INTR_ENA_RX_); 616 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); 617 smsc9420_pci_flush_write(pd); 618 619 /* stop RX MAC prior to stoping DMA */ 620 mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_RXEN_); 621 smsc9420_reg_write(pd, MAC_CR, mac_cr); 622 smsc9420_pci_flush_write(pd); 623 624 /* stop RX DMAC */ 625 dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); 626 dmac_control &= (~DMAC_CONTROL_SR_); 627 smsc9420_reg_write(pd, 
DMAC_CONTROL, dmac_control); 628 smsc9420_pci_flush_write(pd); 629 630 /* wait up to 10ms for receive to stop */ 631 while (--timeout) { 632 if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_RS_) 633 break; 634 udelay(10); 635 } 636 637 if (!timeout) 638 netif_warn(pd, ifdown, pd->dev, 639 "RX DMAC did not stop! timeout\n"); 640 641 /* ACK the Rx DMAC stop bit */ 642 smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_); 643 } 644 645 static irqreturn_t smsc9420_isr(int irq, void *dev_id) 646 { 647 struct smsc9420_pdata *pd = dev_id; 648 u32 int_cfg, int_sts, int_ctl; 649 irqreturn_t ret = IRQ_NONE; 650 ulong flags; 651 652 BUG_ON(!pd); 653 BUG_ON(!pd->ioaddr); 654 655 int_cfg = smsc9420_reg_read(pd, INT_CFG); 656 657 /* check if it's our interrupt */ 658 if ((int_cfg & (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) != 659 (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) 660 return IRQ_NONE; 661 662 int_sts = smsc9420_reg_read(pd, INT_STAT); 663 664 if (likely(INT_STAT_DMAC_INT_ & int_sts)) { 665 u32 status = smsc9420_reg_read(pd, DMAC_STATUS); 666 u32 ints_to_clear = 0; 667 668 if (status & DMAC_STS_TX_) { 669 ints_to_clear |= (DMAC_STS_TX_ | DMAC_STS_NIS_); 670 netif_wake_queue(pd->dev); 671 } 672 673 if (status & DMAC_STS_RX_) { 674 /* mask RX DMAC interrupts */ 675 u32 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); 676 dma_intr_ena &= (~DMAC_INTR_ENA_RX_); 677 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); 678 smsc9420_pci_flush_write(pd); 679 680 ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_); 681 napi_schedule(&pd->napi); 682 } 683 684 if (ints_to_clear) 685 smsc9420_reg_write(pd, DMAC_STATUS, ints_to_clear); 686 687 ret = IRQ_HANDLED; 688 } 689 690 if (unlikely(INT_STAT_SW_INT_ & int_sts)) { 691 /* mask software interrupt */ 692 spin_lock_irqsave(&pd->int_lock, flags); 693 int_ctl = smsc9420_reg_read(pd, INT_CTL); 694 int_ctl &= (~INT_CTL_SW_INT_EN_); 695 smsc9420_reg_write(pd, INT_CTL, int_ctl); 696 spin_unlock_irqrestore(&pd->int_lock, flags); 697 698 
smsc9420_reg_write(pd, INT_STAT, INT_STAT_SW_INT_); 699 pd->software_irq_signal = true; 700 smp_wmb(); 701 702 ret = IRQ_HANDLED; 703 } 704 705 /* to ensure PCI write completion, we must perform a PCI read */ 706 smsc9420_pci_flush_write(pd); 707 708 return ret; 709 } 710 711 #ifdef CONFIG_NET_POLL_CONTROLLER 712 static void smsc9420_poll_controller(struct net_device *dev) 713 { 714 struct smsc9420_pdata *pd = netdev_priv(dev); 715 const int irq = pd->pdev->irq; 716 717 disable_irq(irq); 718 smsc9420_isr(0, dev); 719 enable_irq(irq); 720 } 721 #endif /* CONFIG_NET_POLL_CONTROLLER */ 722 723 static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd) 724 { 725 smsc9420_reg_write(pd, BUS_MODE, BUS_MODE_SWR_); 726 smsc9420_reg_read(pd, BUS_MODE); 727 udelay(2); 728 if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_) 729 netif_warn(pd, drv, pd->dev, "Software reset not cleared\n"); 730 } 731 732 static int smsc9420_stop(struct net_device *dev) 733 { 734 struct smsc9420_pdata *pd = netdev_priv(dev); 735 u32 int_cfg; 736 ulong flags; 737 738 BUG_ON(!pd); 739 BUG_ON(!pd->phy_dev); 740 741 /* disable master interrupt */ 742 spin_lock_irqsave(&pd->int_lock, flags); 743 int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); 744 smsc9420_reg_write(pd, INT_CFG, int_cfg); 745 spin_unlock_irqrestore(&pd->int_lock, flags); 746 747 netif_tx_disable(dev); 748 napi_disable(&pd->napi); 749 750 smsc9420_stop_tx(pd); 751 smsc9420_free_tx_ring(pd); 752 753 smsc9420_stop_rx(pd); 754 smsc9420_free_rx_ring(pd); 755 756 free_irq(pd->pdev->irq, pd); 757 758 smsc9420_dmac_soft_reset(pd); 759 760 phy_stop(pd->phy_dev); 761 762 phy_disconnect(pd->phy_dev); 763 pd->phy_dev = NULL; 764 mdiobus_unregister(pd->mii_bus); 765 mdiobus_free(pd->mii_bus); 766 767 return 0; 768 } 769 770 static void smsc9420_rx_count_stats(struct net_device *dev, u32 desc_status) 771 { 772 if (unlikely(desc_status & RDES0_ERROR_SUMMARY_)) { 773 dev->stats.rx_errors++; 774 if (desc_status & 
RDES0_DESCRIPTOR_ERROR_) 775 dev->stats.rx_over_errors++; 776 else if (desc_status & (RDES0_FRAME_TOO_LONG_ | 777 RDES0_RUNT_FRAME_ | RDES0_COLLISION_SEEN_)) 778 dev->stats.rx_frame_errors++; 779 else if (desc_status & RDES0_CRC_ERROR_) 780 dev->stats.rx_crc_errors++; 781 } 782 783 if (unlikely(desc_status & RDES0_LENGTH_ERROR_)) 784 dev->stats.rx_length_errors++; 785 786 if (unlikely(!((desc_status & RDES0_LAST_DESCRIPTOR_) && 787 (desc_status & RDES0_FIRST_DESCRIPTOR_)))) 788 dev->stats.rx_length_errors++; 789 790 if (desc_status & RDES0_MULTICAST_FRAME_) 791 dev->stats.multicast++; 792 } 793 794 static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index, 795 const u32 status) 796 { 797 struct net_device *dev = pd->dev; 798 struct sk_buff *skb; 799 u16 packet_length = (status & RDES0_FRAME_LENGTH_MASK_) 800 >> RDES0_FRAME_LENGTH_SHFT_; 801 802 /* remove crc from packet lendth */ 803 packet_length -= 4; 804 805 if (pd->rx_csum) 806 packet_length -= 2; 807 808 dev->stats.rx_packets++; 809 dev->stats.rx_bytes += packet_length; 810 811 pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping, 812 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 813 pd->rx_buffers[index].mapping = 0; 814 815 skb = pd->rx_buffers[index].skb; 816 pd->rx_buffers[index].skb = NULL; 817 818 if (pd->rx_csum) { 819 u16 hw_csum = get_unaligned_le16(skb_tail_pointer(skb) + 820 NET_IP_ALIGN + packet_length + 4); 821 put_unaligned_le16(hw_csum, &skb->csum); 822 skb->ip_summed = CHECKSUM_COMPLETE; 823 } 824 825 skb_reserve(skb, NET_IP_ALIGN); 826 skb_put(skb, packet_length); 827 828 skb->protocol = eth_type_trans(skb, dev); 829 830 netif_receive_skb(skb); 831 } 832 833 static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) 834 { 835 struct sk_buff *skb = netdev_alloc_skb(pd->dev, PKT_BUF_SZ); 836 dma_addr_t mapping; 837 838 BUG_ON(pd->rx_buffers[index].skb); 839 BUG_ON(pd->rx_buffers[index].mapping); 840 841 if (unlikely(!skb)) 842 return -ENOMEM; 843 844 mapping = 
pci_map_single(pd->pdev, skb_tail_pointer(skb), 845 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 846 if (pci_dma_mapping_error(pd->pdev, mapping)) { 847 dev_kfree_skb_any(skb); 848 netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n"); 849 return -ENOMEM; 850 } 851 852 pd->rx_buffers[index].skb = skb; 853 pd->rx_buffers[index].mapping = mapping; 854 pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN; 855 pd->rx_ring[index].status = RDES0_OWN_; 856 wmb(); 857 858 return 0; 859 } 860 861 static void smsc9420_alloc_new_rx_buffers(struct smsc9420_pdata *pd) 862 { 863 while (pd->rx_ring_tail != pd->rx_ring_head) { 864 if (smsc9420_alloc_rx_buffer(pd, pd->rx_ring_tail)) 865 break; 866 867 pd->rx_ring_tail = (pd->rx_ring_tail + 1) % RX_RING_SIZE; 868 } 869 } 870 871 static int smsc9420_rx_poll(struct napi_struct *napi, int budget) 872 { 873 struct smsc9420_pdata *pd = 874 container_of(napi, struct smsc9420_pdata, napi); 875 struct net_device *dev = pd->dev; 876 u32 drop_frame_cnt, dma_intr_ena, status; 877 int work_done; 878 879 for (work_done = 0; work_done < budget; work_done++) { 880 rmb(); 881 status = pd->rx_ring[pd->rx_ring_head].status; 882 883 /* stop if DMAC owns this dma descriptor */ 884 if (status & RDES0_OWN_) 885 break; 886 887 smsc9420_rx_count_stats(dev, status); 888 smsc9420_rx_handoff(pd, pd->rx_ring_head, status); 889 pd->rx_ring_head = (pd->rx_ring_head + 1) % RX_RING_SIZE; 890 smsc9420_alloc_new_rx_buffers(pd); 891 } 892 893 drop_frame_cnt = smsc9420_reg_read(pd, MISS_FRAME_CNTR); 894 dev->stats.rx_dropped += 895 (drop_frame_cnt & 0xFFFF) + ((drop_frame_cnt >> 17) & 0x3FF); 896 897 /* Kick RXDMA */ 898 smsc9420_reg_write(pd, RX_POLL_DEMAND, 1); 899 smsc9420_pci_flush_write(pd); 900 901 if (work_done < budget) { 902 napi_complete(&pd->napi); 903 904 /* re-enable RX DMA interrupts */ 905 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); 906 dma_intr_ena |= (DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_); 907 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); 
908 smsc9420_pci_flush_write(pd); 909 } 910 return work_done; 911 } 912 913 static void 914 smsc9420_tx_update_stats(struct net_device *dev, u32 status, u32 length) 915 { 916 if (unlikely(status & TDES0_ERROR_SUMMARY_)) { 917 dev->stats.tx_errors++; 918 if (status & (TDES0_EXCESSIVE_DEFERRAL_ | 919 TDES0_EXCESSIVE_COLLISIONS_)) 920 dev->stats.tx_aborted_errors++; 921 922 if (status & (TDES0_LOSS_OF_CARRIER_ | TDES0_NO_CARRIER_)) 923 dev->stats.tx_carrier_errors++; 924 } else { 925 dev->stats.tx_packets++; 926 dev->stats.tx_bytes += (length & 0x7FF); 927 } 928 929 if (unlikely(status & TDES0_EXCESSIVE_COLLISIONS_)) { 930 dev->stats.collisions += 16; 931 } else { 932 dev->stats.collisions += 933 (status & TDES0_COLLISION_COUNT_MASK_) >> 934 TDES0_COLLISION_COUNT_SHFT_; 935 } 936 937 if (unlikely(status & TDES0_HEARTBEAT_FAIL_)) 938 dev->stats.tx_heartbeat_errors++; 939 } 940 941 /* Check for completed dma transfers, update stats and free skbs */ 942 static void smsc9420_complete_tx(struct net_device *dev) 943 { 944 struct smsc9420_pdata *pd = netdev_priv(dev); 945 946 while (pd->tx_ring_tail != pd->tx_ring_head) { 947 int index = pd->tx_ring_tail; 948 u32 status, length; 949 950 rmb(); 951 status = pd->tx_ring[index].status; 952 length = pd->tx_ring[index].length; 953 954 /* Check if DMA still owns this descriptor */ 955 if (unlikely(TDES0_OWN_ & status)) 956 break; 957 958 smsc9420_tx_update_stats(dev, status, length); 959 960 BUG_ON(!pd->tx_buffers[index].skb); 961 BUG_ON(!pd->tx_buffers[index].mapping); 962 963 pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping, 964 pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE); 965 pd->tx_buffers[index].mapping = 0; 966 967 dev_kfree_skb_any(pd->tx_buffers[index].skb); 968 pd->tx_buffers[index].skb = NULL; 969 970 pd->tx_ring[index].buffer1 = 0; 971 wmb(); 972 973 pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE; 974 } 975 } 976 977 static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb, 978 struct 
net_device *dev) 979 { 980 struct smsc9420_pdata *pd = netdev_priv(dev); 981 dma_addr_t mapping; 982 int index = pd->tx_ring_head; 983 u32 tmp_desc1; 984 bool about_to_take_last_desc = 985 (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail); 986 987 smsc9420_complete_tx(dev); 988 989 rmb(); 990 BUG_ON(pd->tx_ring[index].status & TDES0_OWN_); 991 BUG_ON(pd->tx_buffers[index].skb); 992 BUG_ON(pd->tx_buffers[index].mapping); 993 994 mapping = pci_map_single(pd->pdev, skb->data, 995 skb->len, PCI_DMA_TODEVICE); 996 if (pci_dma_mapping_error(pd->pdev, mapping)) { 997 netif_warn(pd, tx_err, pd->dev, 998 "pci_map_single failed, dropping packet\n"); 999 return NETDEV_TX_BUSY; 1000 } 1001 1002 pd->tx_buffers[index].skb = skb; 1003 pd->tx_buffers[index].mapping = mapping; 1004 1005 tmp_desc1 = (TDES1_LS_ | ((u32)skb->len & 0x7FF)); 1006 if (unlikely(about_to_take_last_desc)) { 1007 tmp_desc1 |= TDES1_IC_; 1008 netif_stop_queue(pd->dev); 1009 } 1010 1011 /* check if we are at the last descriptor and need to set EOR */ 1012 if (unlikely(index == (TX_RING_SIZE - 1))) 1013 tmp_desc1 |= TDES1_TER_; 1014 1015 pd->tx_ring[index].buffer1 = mapping; 1016 pd->tx_ring[index].length = tmp_desc1; 1017 wmb(); 1018 1019 /* increment head */ 1020 pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE; 1021 1022 /* assign ownership to DMAC */ 1023 pd->tx_ring[index].status = TDES0_OWN_; 1024 wmb(); 1025 1026 skb_tx_timestamp(skb); 1027 1028 /* kick the DMA */ 1029 smsc9420_reg_write(pd, TX_POLL_DEMAND, 1); 1030 smsc9420_pci_flush_write(pd); 1031 1032 return NETDEV_TX_OK; 1033 } 1034 1035 static struct net_device_stats *smsc9420_get_stats(struct net_device *dev) 1036 { 1037 struct smsc9420_pdata *pd = netdev_priv(dev); 1038 u32 counter = smsc9420_reg_read(pd, MISS_FRAME_CNTR); 1039 dev->stats.rx_dropped += 1040 (counter & 0x0000FFFF) + ((counter >> 17) & 0x000003FF); 1041 return &dev->stats; 1042 } 1043 1044 static void smsc9420_set_multicast_list(struct net_device *dev) 1045 { 
	/* NOTE(review): this is the tail of smsc9420_set_multicast_list();
	 * the function signature and opening brace are above this chunk.
	 * It programs the MAC receive filter from dev->flags / the mc list:
	 * promiscuous, all-multicast, hash-filtered multicast, or unicast-only.
	 */
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous: accept everything, disable the other filters. */
		netif_dbg(pd, hw, pd->dev, "Promiscuous Mode Enabled\n");
		mac_cr |= MAC_CR_PRMS_;
		mac_cr &= (~MAC_CR_MCPAS_);
		mac_cr &= (~MAC_CR_HPFILT_);
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Pass all multicast frames without hash filtering. */
		netif_dbg(pd, hw, pd->dev, "Receive all Multicast Enabled\n");
		mac_cr &= (~MAC_CR_PRMS_);
		mac_cr |= MAC_CR_MCPAS_;
		mac_cr &= (~MAC_CR_HPFILT_);
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		u32 hash_lo = 0, hash_hi = 0;

		/* Build the 64-bit multicast hash table: smsc9420_hash()
		 * yields a 6-bit bucket; bit 5 selects HASHH vs HASHL and
		 * bits 4:0 select the bit within that 32-bit register.
		 */
		netif_dbg(pd, hw, pd->dev, "Multicast filter enabled\n");
		netdev_for_each_mc_addr(ha, dev) {
			u32 bit_num = smsc9420_hash(ha->addr);
			u32 mask = 1 << (bit_num & 0x1F);

			if (bit_num & 0x20)
				hash_hi |= mask;
			else
				hash_lo |= mask;

		}
		smsc9420_reg_write(pd, HASHH, hash_hi);
		smsc9420_reg_write(pd, HASHL, hash_lo);

		mac_cr &= (~MAC_CR_PRMS_);
		mac_cr &= (~MAC_CR_MCPAS_);
		mac_cr |= MAC_CR_HPFILT_;
	} else {
		/* No multicast addresses: clear the hash table and all
		 * pass-through modes so only directed frames are accepted. */
		netif_dbg(pd, hw, pd->dev, "Receive own packets only\n");
		smsc9420_reg_write(pd, HASHH, 0);
		smsc9420_reg_write(pd, HASHL, 0);

		mac_cr &= (~MAC_CR_PRMS_);
		mac_cr &= (~MAC_CR_MCPAS_);
		mac_cr &= (~MAC_CR_HPFILT_);
	}

	smsc9420_reg_write(pd, MAC_CR, mac_cr);
	/* Read back over PCI to make sure the write has posted. */
	smsc9420_pci_flush_write(pd);
}

/* Program the FLOW register from the pause capabilities resolved between
 * our advertisement and the link partner's. Flow control is only
 * meaningful in full duplex; in half duplex it is forced off.
 */
static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
{
	struct phy_device *phy_dev = pd->phy_dev;
	u32 flow;

	if (phy_dev->duplex == DUPLEX_FULL) {
		u16 lcladv = phy_read(phy_dev, MII_ADVERTISE);
		u16 rmtadv = phy_read(phy_dev, MII_LPA);
		u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

		/* 0xFFFF0002: presumably max pause time in the upper half
		 * plus a flow-control enable bit — TODO confirm against the
		 * LAN9420 FLOW register layout in smsc9420.h/datasheet. */
		if (cap & FLOW_CTRL_RX)
			flow = 0xFFFF0002;
		else
			flow = 0;

		netif_info(pd, link, pd->dev, "rx pause %s, tx pause %s\n",
			   cap & FLOW_CTRL_RX ? "enabled" : "disabled",
			   cap & FLOW_CTRL_TX ? "enabled" : "disabled");
	} else {
		netif_info(pd, link, pd->dev, "half duplex\n");
		flow = 0;
	}

	smsc9420_reg_write(pd, FLOW, flow);
}

/* Update link mode if anything has changed. Called periodically when the
 * PHY is in polling mode, even if nothing has changed. */
static void smsc9420_phy_adjust_link(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	struct phy_device *phy_dev = pd->phy_dev;
	int carrier;

	/* Resync the MAC duplex setting with the PHY whenever it changes;
	 * pd->last_duplex starts at -1 so the first call always programs it. */
	if (phy_dev->duplex != pd->last_duplex) {
		u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
		if (phy_dev->duplex) {
			netif_dbg(pd, link, pd->dev, "full duplex mode\n");
			mac_cr |= MAC_CR_FDPX_;
		} else {
			netif_dbg(pd, link, pd->dev, "half duplex mode\n");
			mac_cr &= ~MAC_CR_FDPX_;
		}
		smsc9420_reg_write(pd, MAC_CR, mac_cr);

		/* Flow control validity depends on duplex, so refresh it. */
		smsc9420_phy_update_flowcontrol(pd);
		pd->last_duplex = phy_dev->duplex;
	}

	/* Log carrier transitions (debug only; no hardware side effects). */
	carrier = netif_carrier_ok(dev);
	if (carrier != pd->last_carrier) {
		if (carrier)
			netif_dbg(pd, link, pd->dev, "carrier OK\n");
		else
			netif_dbg(pd, link, pd->dev, "no carrier\n");
		pd->last_carrier = carrier;
	}
}

/* Locate and attach the internal PHY (fixed at MDIO address 1), restrict
 * its advertised features to what the MAC supports, and record it in pd.
 * Returns 0 on success or a negative errno.
 */
static int smsc9420_mii_probe(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	BUG_ON(pd->phy_dev);

	/* Device only supports internal PHY at address 1 */
	phydev = mdiobus_get_phy(pd->mii_bus, 1);
	if (!phydev) {
		netdev_err(dev, "no PHY found at address 1\n");
		return -ENODEV;
	}

	phydev = phy_connect(dev, phydev_name(phydev),
			     smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);
	phydev->advertising = phydev->supported;

	phy_attached_info(phydev);

	pd->phy_dev = phydev;
	/* Force the first adjust_link callback to program duplex/carrier. */
	pd->last_duplex = -1;
	pd->last_carrier = -1;

	return 0;
}

/* Allocate and register the MDIO bus, then probe the internal PHY.
 * On any failure everything allocated here is torn down via the
 * goto-cleanup chain. Returns 0 on success or a negative errno.
 */
static int smsc9420_mii_init(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	int err = -ENXIO;

	pd->mii_bus = mdiobus_alloc();
	if (!pd->mii_bus) {
		err = -ENOMEM;
		goto err_out_1;
	}
	pd->mii_bus->name = DRV_MDIONAME;
	/* Bus id derived from PCI bus/devfn so multiple cards stay unique. */
	snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (pd->pdev->bus->number << 8) | pd->pdev->devfn);
	pd->mii_bus->priv = pd;
	pd->mii_bus->read = smsc9420_mii_read;
	pd->mii_bus->write = smsc9420_mii_write;

	/* Mask all PHYs except ID 1 (internal) */
	pd->mii_bus->phy_mask = ~(1 << 1);

	/* NOTE(review): on these failures err keeps its initial -ENXIO
	 * rather than the underlying error code. */
	if (mdiobus_register(pd->mii_bus)) {
		netif_warn(pd, probe, pd->dev, "Error registering mii bus\n");
		goto err_out_free_bus_2;
	}

	if (smsc9420_mii_probe(dev) < 0) {
		netif_warn(pd, probe, pd->dev, "Error probing mii bus\n");
		goto err_out_unregister_bus_3;
	}

	return 0;

err_out_unregister_bus_3:
	mdiobus_unregister(pd->mii_bus);
err_out_free_bus_2:
	mdiobus_free(pd->mii_bus);
err_out_1:
	return err;
}

/* Allocate the per-slot TX bookkeeping array, zero the descriptor ring
 * (descriptors themselves live in the DMA-coherent area allocated at
 * probe time), and tell the chip where the ring starts.
 * Returns 0 or -ENOMEM.
 */
static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd)
{
	int i;

	BUG_ON(!pd->tx_ring);

	pd->tx_buffers = kmalloc_array(TX_RING_SIZE,
				       sizeof(struct smsc9420_ring_info),
				       GFP_KERNEL);
	if (!pd->tx_buffers)
		return -ENOMEM;

	/* Initialize the TX Ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pd->tx_buffers[i].skb = NULL;
		pd->tx_buffers[i].mapping = 0;
		pd->tx_ring[i].status = 0;
		pd->tx_ring[i].length = 0;
		pd->tx_ring[i].buffer1 = 0;
		pd->tx_ring[i].buffer2 = 0;
	}
	/* Mark the last descriptor so the DMA engine wraps to the start. */
	pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_;
	/* Descriptors must be visible to the device before the base-address
	 * write below lets it fetch them. */
	wmb();

	pd->tx_ring_head = 0;
	pd->tx_ring_tail = 0;

	smsc9420_reg_write(pd, TX_BASE_ADDR, pd->tx_dma_addr);
	smsc9420_pci_flush_write(pd);

	return 0;
}

/* Allocate the per-slot RX bookkeeping array, initialize the descriptor
 * ring, populate every slot with an skb, program VLAN tag recognition and
 * (optionally) RX checksum offload, and give the chip the ring base.
 * Returns 0 or -ENOMEM (partially allocated skbs are freed on failure).
 */
static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
{
	int i;

	BUG_ON(!pd->rx_ring);

	pd->rx_buffers = kmalloc_array(RX_RING_SIZE,
				       sizeof(struct smsc9420_ring_info),
				       GFP_KERNEL);
	if (pd->rx_buffers == NULL)
		goto out;

	/* initialize the rx ring */
	for (i = 0; i < RX_RING_SIZE; i++) {
		pd->rx_ring[i].status = 0;
		pd->rx_ring[i].length = PKT_BUF_SZ;
		pd->rx_ring[i].buffer2 = 0;
		pd->rx_buffers[i].skb = NULL;
		pd->rx_buffers[i].mapping = 0;
	}
	/* RDES1_RER_ marks end-of-ring so the DMA engine wraps. */
	pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_);

	/* now allocate the entire ring of skbs */
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (smsc9420_alloc_rx_buffer(pd, i)) {
			netif_warn(pd, ifup, pd->dev,
				   "failed to allocate rx skb %d\n", i);
			goto out_free_rx_skbs;
		}
	}

	pd->rx_ring_head = 0;
	pd->rx_ring_tail = 0;

	/* Let the chip recognize 802.1Q-tagged frames. */
	smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q);
	netif_dbg(pd, ifup, pd->dev, "VLAN1 = 0x%08x\n",
		  smsc9420_reg_read(pd, VLAN1));

	if (pd->rx_csum) {
		/* Enable RX COE */
		u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN;
		smsc9420_reg_write(pd, COE_CR, coe);
		netif_dbg(pd, ifup, pd->dev, "COE_CR = 0x%08x\n", coe);
	}

	smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr);
	smsc9420_pci_flush_write(pd);

	return 0;

out_free_rx_skbs:
	smsc9420_free_rx_ring(pd);
out:
	return -ENOMEM;
}

/* ndo_open: bring the interface up.
 *
 * Sequence: validate MAC address, quiesce all interrupts, grab the IRQ,
 * soft-reset the DMA engine, program MAC/bus/DMA config, self-test the
 * IRQ line with a software interrupt, allocate TX/RX rings, init the
 * MDIO bus + PHY, then enable TX/RX and interrupts. Failures unwind in
 * reverse order via the goto-cleanup chain.
 * Returns 0 or a negative errno.
 */
static int smsc9420_open(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
	const int irq = pd->pdev->irq;
	unsigned long flags;
	int result = 0, timeout;

	if (!is_valid_ether_addr(dev->dev_addr)) {
		netif_warn(pd, ifup, pd->dev,
			   "dev_addr is not a valid MAC address\n");
		result = -EADDRNOTAVAIL;
		goto out_0;
	}

	netif_carrier_off(dev);

	/* disable, mask and acknowledge all interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	smsc9420_reg_write(pd, INT_CTL, 0);
	spin_unlock_irqrestore(&pd->int_lock, flags);
	smsc9420_reg_write(pd, DMAC_INTR_ENA, 0);
	smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
	smsc9420_pci_flush_write(pd);

	/* NOTE(review): the real error from request_irq() is overwritten
	 * with -ENODEV here — consider propagating `result` instead. */
	result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
	if (result) {
		netif_warn(pd, ifup, pd->dev, "Unable to use IRQ = %d\n", irq);
		result = -ENODEV;
		goto out_0;
	}

	smsc9420_dmac_soft_reset(pd);

	/* make sure MAC_CR is sane */
	smsc9420_reg_write(pd, MAC_CR, 0);

	smsc9420_set_mac_address(dev);

	/* Configure GPIO pins to drive LEDs */
	smsc9420_reg_write(pd, GPIO_CFG,
		(GPIO_CFG_LED_3_ | GPIO_CFG_LED_2_ | GPIO_CFG_LED_1_));

	bus_mode = BUS_MODE_DMA_BURST_LENGTH_16;

#ifdef __BIG_ENDIAN
	/* Descriptor byte-order swap for big-endian hosts. */
	bus_mode |= BUS_MODE_DBO_;
#endif

	smsc9420_reg_write(pd, BUS_MODE, bus_mode);

	smsc9420_pci_flush_write(pd);

	/* set bus master bridge arbitration priority for Rx and TX DMA */
	smsc9420_reg_write(pd, BUS_CFG, BUS_CFG_RXTXWEIGHT_4_1);

	smsc9420_reg_write(pd, DMAC_CONTROL,
		(DMAC_CONTROL_SF_ | DMAC_CONTROL_OSF_));

	smsc9420_pci_flush_write(pd);

	/* test the IRQ connection to the ISR */
	netif_dbg(pd, ifup, pd->dev, "Testing ISR using IRQ %d\n", irq);
	pd->software_irq_signal = false;

	spin_lock_irqsave(&pd->int_lock, flags);
	/* configure interrupt deassertion timer and enable interrupts */
	int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_;
	int_cfg &= ~(INT_CFG_INT_DEAS_MASK);
	int_cfg |= (INT_DEAS_TIME & INT_CFG_INT_DEAS_MASK);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);

	/* unmask software interrupt */
	int_ctl = smsc9420_reg_read(pd, INT_CTL) | INT_CTL_SW_INT_EN_;
	smsc9420_reg_write(pd, INT_CTL, int_ctl);
	spin_unlock_irqrestore(&pd->int_lock, flags);
	smsc9420_pci_flush_write(pd);

	/* Wait up to ~1s for the ISR to observe the software interrupt. */
	timeout = 1000;
	while (timeout--) {
		if (pd->software_irq_signal)
			break;
		msleep(1);
	}

	/* disable interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	if (!pd->software_irq_signal) {
		netif_warn(pd, ifup, pd->dev, "ISR failed signaling test\n");
		result = -ENODEV;
		goto out_free_irq_1;
	}

	netif_dbg(pd, ifup, pd->dev, "ISR passed test using IRQ %d\n", irq);

	result = smsc9420_alloc_tx_ring(pd);
	if (result) {
		netif_warn(pd, ifup, pd->dev,
			   "Failed to Initialize tx dma ring\n");
		result = -ENOMEM;
		goto out_free_irq_1;
	}

	result = smsc9420_alloc_rx_ring(pd);
	if (result) {
		netif_warn(pd, ifup, pd->dev,
			   "Failed to Initialize rx dma ring\n");
		result = -ENOMEM;
		goto out_free_tx_ring_2;
	}

	result = smsc9420_mii_init(dev);
	if (result) {
		netif_warn(pd, ifup, pd->dev, "Failed to initialize Phy\n");
		result = -ENODEV;
		goto out_free_rx_ring_3;
	}

	/* Bring the PHY up */
	phy_start(pd->phy_dev);

	napi_enable(&pd->napi);

	/* start tx and rx */
	mac_cr = smsc9420_reg_read(pd, MAC_CR) | MAC_CR_TXEN_ | MAC_CR_RXEN_;
	smsc9420_reg_write(pd, MAC_CR, mac_cr);

	dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
	dmac_control |= DMAC_CONTROL_ST_ | DMAC_CONTROL_SR_;
	smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
	smsc9420_pci_flush_write(pd);

	dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
	dma_intr_ena |=
		(DMAC_INTR_ENA_TX_ | DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_);
	smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
	smsc9420_pci_flush_write(pd);

	netif_wake_queue(dev);

	/* Kick the RX DMA engine so it starts fetching descriptors. */
	smsc9420_reg_write(pd, RX_POLL_DEMAND, 1);

	/* enable interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_;
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	return 0;

out_free_rx_ring_3:
	smsc9420_free_rx_ring(pd);
out_free_tx_ring_2:
	smsc9420_free_tx_ring(pd);
out_free_irq_1:
	free_irq(irq, pd);
out_0:
	return result;
}

#ifdef CONFIG_PM

/* Legacy PCI suspend hook: mask interrupts, tear down the data path if
 * the interface is running, then save PCI state and power the device
 * down to the requested state (wake-on-LAN is explicitly disabled).
 */
static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 int_cfg;
	ulong flags;

	/* disable interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	if (netif_running(dev)) {
		netif_tx_disable(dev);
		smsc9420_stop_tx(pd);
		smsc9420_free_tx_ring(pd);

		napi_disable(&pd->napi);
		smsc9420_stop_rx(pd);
		smsc9420_free_rx_ring(pd);

		free_irq(pd->pdev->irq, pd);

		netif_device_detach(dev);
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

/* Legacy PCI resume hook: restore power/state and, if the interface was
 * running, rebuild the whole data path by re-running ndo_open.
 */
static int smsc9420_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct smsc9420_pdata *pd = netdev_priv(dev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = pci_enable_wake(pdev, PCI_D0, 0);
	if (err)
		netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
			   err);

	if (netif_running(dev)) {
		/* FIXME: gross. It looks like ancient PM relic.*/
		err = smsc9420_open(dev);
		netif_device_attach(dev);
	}
	return err;
}

#endif /* CONFIG_PM */

/* net_device callback table; rx_mode/ioctl/xmit/stats handlers are
 * defined earlier in this file. */
static const struct net_device_ops smsc9420_netdev_ops = {
	.ndo_open		= smsc9420_open,
	.ndo_stop		= smsc9420_stop,
	.ndo_start_xmit		= smsc9420_hard_start_xmit,
	.ndo_get_stats		= smsc9420_get_stats,
	.ndo_set_rx_mode	= smsc9420_set_multicast_list,
	.ndo_do_ioctl		= smsc9420_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= smsc9420_poll_controller,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};

/* PCI probe: enable the device, map BAR registers, allocate the shared
 * RX+TX descriptor area in DMA-coherent memory, verify the chip ID,
 * load the MAC address from EEPROM, and register the netdev.
 *
 * NOTE(review): two issues worth confirming/fixing upstream-style:
 *  - spin_lock_init() on int_lock/phy_lock runs AFTER register_netdev(),
 *    so an immediate ndo_open could use uninitialized spinlocks.
 *  - every failure path returns -ENODEV even when `result` (e.g. from
 *    register_netdev or pci_enable_device) holds a more specific errno.
 */
static int
smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct smsc9420_pdata *pd;
	void __iomem *virt_addr;
	int result = 0;
	u32 id_rev;

	pr_info("%s version %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* First do the PCI initialisation */
	result = pci_enable_device(pdev);
	if (unlikely(result)) {
		pr_err("Cannot enable smsc9420\n");
		goto out_0;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(*pd));
	if (!dev)
		goto out_disable_pci_device_1;

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) {
		netdev_err(dev, "Cannot find PCI device base address\n");
		goto out_free_netdev_2;
	}

	if ((pci_request_regions(pdev, DRV_NAME))) {
		netdev_err(dev, "Cannot obtain PCI resources, aborting\n");
		goto out_free_netdev_2;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		netdev_err(dev, "No usable DMA configuration, aborting\n");
		goto out_free_regions_3;
	}

	virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR),
			    pci_resource_len(pdev, SMSC_BAR));
	if (!virt_addr) {
		netdev_err(dev, "Cannot map device registers, aborting\n");
		goto out_free_regions_3;
	}

	/* registers are double mapped with 0 offset for LE and 0x200 for BE */
	virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;

	pd = netdev_priv(dev);

	/* pci descriptors are created in the PCI consistent area */
	pd->rx_ring = pci_alloc_consistent(pdev,
		sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE +
		sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE,
		&pd->rx_dma_addr);

	if (!pd->rx_ring)
		goto out_free_io_4;

	/* descriptors are aligned due to the nature of pci_alloc_consistent */
	pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
	pd->tx_dma_addr = pd->rx_dma_addr +
		sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;

	pd->pdev = pdev;
	pd->dev = dev;
	pd->ioaddr = virt_addr;
	pd->msg_enable = smsc_debug;
	pd->rx_csum = true;

	netif_dbg(pd, probe, pd->dev, "lan_base=0x%08lx\n", (ulong)virt_addr);

	id_rev = smsc9420_reg_read(pd, ID_REV);
	switch (id_rev & 0xFFFF0000) {
	case 0x94200000:
		netif_info(pd, probe, pd->dev,
			   "LAN9420 identified, ID_REV=0x%08X\n", id_rev);
		break;
	default:
		netif_warn(pd, probe, pd->dev, "LAN9420 NOT identified\n");
		netif_warn(pd, probe, pd->dev, "ID_REV=0x%08X\n", id_rev);
		goto out_free_dmadesc_5;
	}

	smsc9420_dmac_soft_reset(pd);
	smsc9420_eeprom_reload(pd);
	smsc9420_check_mac_address(dev);

	dev->netdev_ops = &smsc9420_netdev_ops;
	dev->ethtool_ops = &smsc9420_ethtool_ops;

	netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);

	result = register_netdev(dev);
	if (result) {
		netif_warn(pd, probe, pd->dev, "error %i registering device\n",
			   result);
		goto out_free_dmadesc_5;
	}

	pci_set_drvdata(pdev, dev);

	spin_lock_init(&pd->int_lock);
	spin_lock_init(&pd->phy_lock);

	dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr);

	return 0;

out_free_dmadesc_5:
	pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
		(RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
out_free_io_4:
	/* Undo the endian-offset adjustment before unmapping. */
	iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET);
out_free_regions_3:
	pci_release_regions(pdev);
out_free_netdev_2:
	free_netdev(dev);
out_disable_pci_device_1:
	pci_disable_device(pdev);
out_0:
	return -ENODEV;
}

/* PCI remove: unregister the netdev, then release the DMA descriptor
 * area, register mapping, PCI regions and the device itself. The ring
 * buffer arrays are expected to have been freed by ndo_stop.
 */
static void smsc9420_remove(struct pci_dev *pdev)
{
	struct net_device *dev;
	struct smsc9420_pdata *pd;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	pd = netdev_priv(dev);
	unregister_netdev(dev);

	/* tx_buffers and rx_buffers are freed in stop */
	BUG_ON(pd->tx_buffers);
	BUG_ON(pd->rx_buffers);

	BUG_ON(!pd->tx_ring);
	BUG_ON(!pd->rx_ring);

	pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
		(RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);

	/* ioaddr was advanced by the endian offset at probe time. */
	iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}

static struct pci_driver smsc9420_driver = {
	.name = DRV_NAME,
	.id_table = smsc9420_id_table,
	.probe = smsc9420_probe,
	.remove = smsc9420_remove,
#ifdef CONFIG_PM
	.suspend = smsc9420_suspend,
	.resume = smsc9420_resume,
#endif /* CONFIG_PM */
};

/* Module entry: seed the message-level mask, then register with PCI. */
static int __init smsc9420_init_module(void)
{
	smsc_debug = netif_msg_init(debug, SMSC_MSG_DEFAULT);

	return pci_register_driver(&smsc9420_driver);
}

/* Module exit: unregister; remove() runs for each bound device. */
static void __exit smsc9420_exit_module(void)
{
	pci_unregister_driver(&smsc9420_driver);
}

module_init(smsc9420_init_module);
module_exit(smsc9420_exit_module);