1 // SPDX-License-Identifier: GPL-2.0-or-later 2 3 /* Advanced Micro Devices Inc. AMD8111E Linux Network Driver 4 * Copyright (C) 2004 Advanced Micro Devices 5 * 6 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ] 7 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c] 8 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ] 9 * Derived from the lance driver written 1993,1994,1995 by Donald Becker. 10 * Copyright 1993 United States Government as represented by the 11 * Director, National Security Agency.[ pcnet32.c ] 12 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ] 13 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 14 * 15 16 Module Name: 17 18 amd8111e.c 19 20 Abstract: 21 22 AMD8111 based 10/100 Ethernet Controller Driver. 23 24 Environment: 25 26 Kernel Mode 27 28 Revision History: 29 3.0.0 30 Initial Revision. 31 3.0.1 32 1. Dynamic interrupt coalescing. 33 2. Removed prev_stats. 34 3. MII support. 35 4. Dynamic IPG support 36 3.0.2 05/29/2003 37 1. Bug fix: Fixed failure to send jumbo packets larger than 4k. 38 2. Bug fix: Fixed VLAN support failure. 39 3. Bug fix: Fixed receive interrupt coalescing bug. 40 4. Dynamic IPG support is disabled by default. 41 3.0.3 06/05/2003 42 1. Bug fix: Fixed failure to close the interface if SMP is enabled. 43 3.0.4 12/09/2003 44 1. Added set_mac_address routine for bonding driver support. 45 2. Tested the driver for bonding support 46 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth 47 indicated to the h/w. 48 4. Modified amd8111e_rx() routine to receive all the received packets 49 in the first interrupt. 50 5. Bug fix: Corrected rx_errors reported in get_stats() function. 51 3.0.5 03/22/2004 52 1. 
Added NAPI support 53 54 */ 55 56 57 #include <linux/module.h> 58 #include <linux/kernel.h> 59 #include <linux/types.h> 60 #include <linux/compiler.h> 61 #include <linux/delay.h> 62 #include <linux/interrupt.h> 63 #include <linux/ioport.h> 64 #include <linux/pci.h> 65 #include <linux/netdevice.h> 66 #include <linux/etherdevice.h> 67 #include <linux/skbuff.h> 68 #include <linux/ethtool.h> 69 #include <linux/mii.h> 70 #include <linux/if_vlan.h> 71 #include <linux/ctype.h> 72 #include <linux/crc32.h> 73 #include <linux/dma-mapping.h> 74 75 #include <asm/io.h> 76 #include <asm/byteorder.h> 77 #include <linux/uaccess.h> 78 79 #if IS_ENABLED(CONFIG_VLAN_8021Q) 80 #define AMD8111E_VLAN_TAG_USED 1 81 #else 82 #define AMD8111E_VLAN_TAG_USED 0 83 #endif 84 85 #include "amd8111e.h" 86 #define MODULE_NAME "amd8111e" 87 MODULE_AUTHOR("Advanced Micro Devices, Inc."); 88 MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller."); 89 MODULE_LICENSE("GPL"); 90 module_param_array(speed_duplex, int, NULL, 0); 91 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex"); 92 module_param_array(coalesce, bool, NULL, 0); 93 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable"); 94 module_param_array(dynamic_ipg, bool, NULL, 0); 95 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable"); 96 97 /* This function will read the PHY registers. 
*/ 98 static int amd8111e_read_phy(struct amd8111e_priv *lp, 99 int phy_id, int reg, u32 *val) 100 { 101 void __iomem *mmio = lp->mmio; 102 unsigned int reg_val; 103 unsigned int repeat= REPEAT_CNT; 104 105 reg_val = readl(mmio + PHY_ACCESS); 106 while (reg_val & PHY_CMD_ACTIVE) 107 reg_val = readl( mmio + PHY_ACCESS ); 108 109 writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) | 110 ((reg & 0x1f) << 16), mmio +PHY_ACCESS); 111 do{ 112 reg_val = readl(mmio + PHY_ACCESS); 113 udelay(30); /* It takes 30 us to read/write data */ 114 } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); 115 if(reg_val & PHY_RD_ERR) 116 goto err_phy_read; 117 118 *val = reg_val & 0xffff; 119 return 0; 120 err_phy_read: 121 *val = 0; 122 return -EINVAL; 123 124 } 125 126 /* This function will write into PHY registers. */ 127 static int amd8111e_write_phy(struct amd8111e_priv *lp, 128 int phy_id, int reg, u32 val) 129 { 130 unsigned int repeat = REPEAT_CNT; 131 void __iomem *mmio = lp->mmio; 132 unsigned int reg_val; 133 134 reg_val = readl(mmio + PHY_ACCESS); 135 while (reg_val & PHY_CMD_ACTIVE) 136 reg_val = readl( mmio + PHY_ACCESS ); 137 138 writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) | 139 ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS); 140 141 do{ 142 reg_val = readl(mmio + PHY_ACCESS); 143 udelay(30); /* It takes 30 us to read/write the data */ 144 } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); 145 146 if(reg_val & PHY_RD_ERR) 147 goto err_phy_write; 148 149 return 0; 150 151 err_phy_write: 152 return -EINVAL; 153 154 } 155 156 /* This is the mii register read function provided to the mii interface. */ 157 static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num) 158 { 159 struct amd8111e_priv *lp = netdev_priv(dev); 160 unsigned int reg_val; 161 162 amd8111e_read_phy(lp,phy_id,reg_num,®_val); 163 return reg_val; 164 165 } 166 167 /* This is the mii register write function provided to the mii interface. 
*/ 168 static void amd8111e_mdio_write(struct net_device *dev, 169 int phy_id, int reg_num, int val) 170 { 171 struct amd8111e_priv *lp = netdev_priv(dev); 172 173 amd8111e_write_phy(lp, phy_id, reg_num, val); 174 } 175 176 /* This function will set PHY speed. During initialization sets 177 * the original speed to 100 full 178 */ 179 static void amd8111e_set_ext_phy(struct net_device *dev) 180 { 181 struct amd8111e_priv *lp = netdev_priv(dev); 182 u32 bmcr,advert,tmp; 183 184 /* Determine mii register values to set the speed */ 185 advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE); 186 tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); 187 switch (lp->ext_phy_option){ 188 189 default: 190 case SPEED_AUTONEG: /* advertise all values */ 191 tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL| 192 ADVERTISE_100HALF|ADVERTISE_100FULL) ; 193 break; 194 case SPEED10_HALF: 195 tmp |= ADVERTISE_10HALF; 196 break; 197 case SPEED10_FULL: 198 tmp |= ADVERTISE_10FULL; 199 break; 200 case SPEED100_HALF: 201 tmp |= ADVERTISE_100HALF; 202 break; 203 case SPEED100_FULL: 204 tmp |= ADVERTISE_100FULL; 205 break; 206 } 207 208 if(advert != tmp) 209 amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp); 210 /* Restart auto negotiation */ 211 bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR); 212 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 213 amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr); 214 215 } 216 217 /* This function will unmap skb->data space and will free 218 * all transmit and receive skbuffs. 
219 */ 220 static int amd8111e_free_skbs(struct net_device *dev) 221 { 222 struct amd8111e_priv *lp = netdev_priv(dev); 223 struct sk_buff *rx_skbuff; 224 int i; 225 226 /* Freeing transmit skbs */ 227 for(i = 0; i < NUM_TX_BUFFERS; i++){ 228 if(lp->tx_skbuff[i]){ 229 pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE); 230 dev_kfree_skb (lp->tx_skbuff[i]); 231 lp->tx_skbuff[i] = NULL; 232 lp->tx_dma_addr[i] = 0; 233 } 234 } 235 /* Freeing previously allocated receive buffers */ 236 for (i = 0; i < NUM_RX_BUFFERS; i++){ 237 rx_skbuff = lp->rx_skbuff[i]; 238 if(rx_skbuff != NULL){ 239 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i], 240 lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE); 241 dev_kfree_skb(lp->rx_skbuff[i]); 242 lp->rx_skbuff[i] = NULL; 243 lp->rx_dma_addr[i] = 0; 244 } 245 } 246 247 return 0; 248 } 249 250 /* This will set the receive buffer length corresponding 251 * to the mtu size of networkinterface. 252 */ 253 static inline void amd8111e_set_rx_buff_len(struct net_device *dev) 254 { 255 struct amd8111e_priv *lp = netdev_priv(dev); 256 unsigned int mtu = dev->mtu; 257 258 if (mtu > ETH_DATA_LEN){ 259 /* MTU + ethernet header + FCS 260 * + optional VLAN tag + skb reserve space 2 261 */ 262 lp->rx_buff_len = mtu + ETH_HLEN + 10; 263 lp->options |= OPTION_JUMBO_ENABLE; 264 } else{ 265 lp->rx_buff_len = PKT_BUFF_SZ; 266 lp->options &= ~OPTION_JUMBO_ENABLE; 267 } 268 } 269 270 /* This function will free all the previously allocated buffers, 271 * determine new receive buffer length and will allocate new receive buffers. 272 * This function also allocates and initializes both the transmitter 273 * and receive hardware descriptors. 
274 */ 275 static int amd8111e_init_ring(struct net_device *dev) 276 { 277 struct amd8111e_priv *lp = netdev_priv(dev); 278 int i; 279 280 lp->rx_idx = lp->tx_idx = 0; 281 lp->tx_complete_idx = 0; 282 lp->tx_ring_idx = 0; 283 284 285 if(lp->opened) 286 /* Free previously allocated transmit and receive skbs */ 287 amd8111e_free_skbs(dev); 288 289 else{ 290 /* allocate the tx and rx descriptors */ 291 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, 292 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR, 293 &lp->tx_ring_dma_addr)) == NULL) 294 295 goto err_no_mem; 296 297 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, 298 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR, 299 &lp->rx_ring_dma_addr)) == NULL) 300 301 goto err_free_tx_ring; 302 303 } 304 /* Set new receive buff size */ 305 amd8111e_set_rx_buff_len(dev); 306 307 /* Allocating receive skbs */ 308 for (i = 0; i < NUM_RX_BUFFERS; i++) { 309 310 lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len); 311 if (!lp->rx_skbuff[i]) { 312 /* Release previos allocated skbs */ 313 for(--i; i >= 0 ;i--) 314 dev_kfree_skb(lp->rx_skbuff[i]); 315 goto err_free_rx_ring; 316 } 317 skb_reserve(lp->rx_skbuff[i],2); 318 } 319 /* Initilaizing receive descriptors */ 320 for (i = 0; i < NUM_RX_BUFFERS; i++) { 321 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, 322 lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); 323 324 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]); 325 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2); 326 wmb(); 327 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT); 328 } 329 330 /* Initializing transmit descriptors */ 331 for (i = 0; i < NUM_TX_RING_DR; i++) { 332 lp->tx_ring[i].buff_phy_addr = 0; 333 lp->tx_ring[i].tx_flags = 0; 334 lp->tx_ring[i].buff_count = 0; 335 } 336 337 return 0; 338 339 err_free_rx_ring: 340 341 pci_free_consistent(lp->pci_dev, 342 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring, 343 lp->rx_ring_dma_addr); 344 345 err_free_tx_ring: 
346 347 pci_free_consistent(lp->pci_dev, 348 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring, 349 lp->tx_ring_dma_addr); 350 351 err_no_mem: 352 return -ENOMEM; 353 } 354 355 /* This function will set the interrupt coalescing according 356 * to the input arguments 357 */ 358 static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod) 359 { 360 unsigned int timeout; 361 unsigned int event_count; 362 363 struct amd8111e_priv *lp = netdev_priv(dev); 364 void __iomem *mmio = lp->mmio; 365 struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf; 366 367 368 switch(cmod) 369 { 370 case RX_INTR_COAL : 371 timeout = coal_conf->rx_timeout; 372 event_count = coal_conf->rx_event_count; 373 if( timeout > MAX_TIMEOUT || 374 event_count > MAX_EVENT_COUNT ) 375 return -EINVAL; 376 377 timeout = timeout * DELAY_TIMER_CONV; 378 writel(VAL0|STINTEN, mmio+INTEN0); 379 writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout, 380 mmio+DLY_INT_A); 381 break; 382 383 case TX_INTR_COAL : 384 timeout = coal_conf->tx_timeout; 385 event_count = coal_conf->tx_event_count; 386 if( timeout > MAX_TIMEOUT || 387 event_count > MAX_EVENT_COUNT ) 388 return -EINVAL; 389 390 391 timeout = timeout * DELAY_TIMER_CONV; 392 writel(VAL0|STINTEN,mmio+INTEN0); 393 writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout, 394 mmio+DLY_INT_B); 395 break; 396 397 case DISABLE_COAL: 398 writel(0,mmio+STVAL); 399 writel(STINTEN, mmio+INTEN0); 400 writel(0, mmio +DLY_INT_B); 401 writel(0, mmio+DLY_INT_A); 402 break; 403 case ENABLE_COAL: 404 /* Start the timer */ 405 writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */ 406 writel(VAL0|STINTEN, mmio+INTEN0); 407 break; 408 default: 409 break; 410 411 } 412 return 0; 413 414 } 415 416 /* This function initializes the device registers and starts the device. 
*/ 417 static int amd8111e_restart(struct net_device *dev) 418 { 419 struct amd8111e_priv *lp = netdev_priv(dev); 420 void __iomem *mmio = lp->mmio; 421 int i,reg_val; 422 423 /* stop the chip */ 424 writel(RUN, mmio + CMD0); 425 426 if(amd8111e_init_ring(dev)) 427 return -ENOMEM; 428 429 /* enable the port manager and set auto negotiation always */ 430 writel((u32) VAL1|EN_PMGR, mmio + CMD3 ); 431 writel((u32)XPHYANE|XPHYRST , mmio + CTRL2); 432 433 amd8111e_set_ext_phy(dev); 434 435 /* set control registers */ 436 reg_val = readl(mmio + CTRL1); 437 reg_val &= ~XMTSP_MASK; 438 writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 ); 439 440 /* enable interrupt */ 441 writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN | 442 APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN | 443 SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0); 444 445 writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0); 446 447 /* initialize tx and rx ring base addresses */ 448 writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0); 449 writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0); 450 451 writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0); 452 writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0); 453 454 /* set default IPG to 96 */ 455 writew((u32)DEFAULT_IPG,mmio+IPG); 456 writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1); 457 458 if(lp->options & OPTION_JUMBO_ENABLE){ 459 writel((u32)VAL2|JUMBO, mmio + CMD3); 460 /* Reset REX_UFLO */ 461 writel( REX_UFLO, mmio + CMD2); 462 /* Should not set REX_UFLO for jumbo frames */ 463 writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2); 464 }else{ 465 writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2); 466 writel((u32)JUMBO, mmio + CMD3); 467 } 468 469 #if AMD8111E_VLAN_TAG_USED 470 writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3); 471 #endif 472 writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 ); 473 474 /* Setting the MAC address to the device */ 475 for (i = 0; i < ETH_ALEN; i++) 476 
writeb( dev->dev_addr[i], mmio + PADR + i ); 477 478 /* Enable interrupt coalesce */ 479 if(lp->options & OPTION_INTR_COAL_ENABLE){ 480 netdev_info(dev, "Interrupt Coalescing Enabled.\n"); 481 amd8111e_set_coalesce(dev,ENABLE_COAL); 482 } 483 484 /* set RUN bit to start the chip */ 485 writel(VAL2 | RDMD0, mmio + CMD0); 486 writel(VAL0 | INTREN | RUN, mmio + CMD0); 487 488 /* To avoid PCI posting bug */ 489 readl(mmio+CMD0); 490 return 0; 491 } 492 493 /* This function clears necessary the device registers. */ 494 static void amd8111e_init_hw_default(struct amd8111e_priv *lp) 495 { 496 unsigned int reg_val; 497 unsigned int logic_filter[2] ={0,}; 498 void __iomem *mmio = lp->mmio; 499 500 501 /* stop the chip */ 502 writel(RUN, mmio + CMD0); 503 504 /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */ 505 writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0); 506 507 /* Clear RCV_RING_BASE_ADDR */ 508 writel(0, mmio + RCV_RING_BASE_ADDR0); 509 510 /* Clear XMT_RING_BASE_ADDR */ 511 writel(0, mmio + XMT_RING_BASE_ADDR0); 512 writel(0, mmio + XMT_RING_BASE_ADDR1); 513 writel(0, mmio + XMT_RING_BASE_ADDR2); 514 writel(0, mmio + XMT_RING_BASE_ADDR3); 515 516 /* Clear CMD0 */ 517 writel(CMD0_CLEAR,mmio + CMD0); 518 519 /* Clear CMD2 */ 520 writel(CMD2_CLEAR, mmio +CMD2); 521 522 /* Clear CMD7 */ 523 writel(CMD7_CLEAR , mmio + CMD7); 524 525 /* Clear DLY_INT_A and DLY_INT_B */ 526 writel(0x0, mmio + DLY_INT_A); 527 writel(0x0, mmio + DLY_INT_B); 528 529 /* Clear FLOW_CONTROL */ 530 writel(0x0, mmio + FLOW_CONTROL); 531 532 /* Clear INT0 write 1 to clear register */ 533 reg_val = readl(mmio + INT0); 534 writel(reg_val, mmio + INT0); 535 536 /* Clear STVAL */ 537 writel(0x0, mmio + STVAL); 538 539 /* Clear INTEN0 */ 540 writel( INTEN0_CLEAR, mmio + INTEN0); 541 542 /* Clear LADRF */ 543 writel(0x0 , mmio + LADRF); 544 545 /* Set SRAM_SIZE & SRAM_BOUNDARY registers */ 546 writel( 0x80010,mmio + SRAM_SIZE); 547 548 /* Clear RCV_RING0_LEN */ 549 writel(0x0, mmio + 
RCV_RING_LEN0); 550 551 /* Clear XMT_RING0/1/2/3_LEN */ 552 writel(0x0, mmio + XMT_RING_LEN0); 553 writel(0x0, mmio + XMT_RING_LEN1); 554 writel(0x0, mmio + XMT_RING_LEN2); 555 writel(0x0, mmio + XMT_RING_LEN3); 556 557 /* Clear XMT_RING_LIMIT */ 558 writel(0x0, mmio + XMT_RING_LIMIT); 559 560 /* Clear MIB */ 561 writew(MIB_CLEAR, mmio + MIB_ADDR); 562 563 /* Clear LARF */ 564 amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF); 565 566 /* SRAM_SIZE register */ 567 reg_val = readl(mmio + SRAM_SIZE); 568 569 if(lp->options & OPTION_JUMBO_ENABLE) 570 writel( VAL2|JUMBO, mmio + CMD3); 571 #if AMD8111E_VLAN_TAG_USED 572 writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 ); 573 #endif 574 /* Set default value to CTRL1 Register */ 575 writel(CTRL1_DEFAULT, mmio + CTRL1); 576 577 /* To avoid PCI posting bug */ 578 readl(mmio + CMD2); 579 580 } 581 582 /* This function disables the interrupt and clears all the pending 583 * interrupts in INT0 584 */ 585 static void amd8111e_disable_interrupt(struct amd8111e_priv *lp) 586 { 587 u32 intr0; 588 589 /* Disable interrupt */ 590 writel(INTREN, lp->mmio + CMD0); 591 592 /* Clear INT0 */ 593 intr0 = readl(lp->mmio + INT0); 594 writel(intr0, lp->mmio + INT0); 595 596 /* To avoid PCI posting bug */ 597 readl(lp->mmio + INT0); 598 599 } 600 601 /* This function stops the chip. */ 602 static void amd8111e_stop_chip(struct amd8111e_priv *lp) 603 { 604 writel(RUN, lp->mmio + CMD0); 605 606 /* To avoid PCI posting bug */ 607 readl(lp->mmio + CMD0); 608 } 609 610 /* This function frees the transmiter and receiver descriptor rings. 
*/ 611 static void amd8111e_free_ring(struct amd8111e_priv *lp) 612 { 613 /* Free transmit and receive descriptor rings */ 614 if(lp->rx_ring){ 615 pci_free_consistent(lp->pci_dev, 616 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR, 617 lp->rx_ring, lp->rx_ring_dma_addr); 618 lp->rx_ring = NULL; 619 } 620 621 if(lp->tx_ring){ 622 pci_free_consistent(lp->pci_dev, 623 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR, 624 lp->tx_ring, lp->tx_ring_dma_addr); 625 626 lp->tx_ring = NULL; 627 } 628 629 } 630 631 /* This function will free all the transmit skbs that are actually 632 * transmitted by the device. It will check the ownership of the 633 * skb before freeing the skb. 634 */ 635 static int amd8111e_tx(struct net_device *dev) 636 { 637 struct amd8111e_priv *lp = netdev_priv(dev); 638 int tx_index; 639 int status; 640 /* Complete all the transmit packet */ 641 while (lp->tx_complete_idx != lp->tx_idx){ 642 tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; 643 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags); 644 645 if(status & OWN_BIT) 646 break; /* It still hasn't been Txed */ 647 648 lp->tx_ring[tx_index].buff_phy_addr = 0; 649 650 /* We must free the original skb */ 651 if (lp->tx_skbuff[tx_index]) { 652 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], 653 lp->tx_skbuff[tx_index]->len, 654 PCI_DMA_TODEVICE); 655 dev_consume_skb_irq(lp->tx_skbuff[tx_index]); 656 lp->tx_skbuff[tx_index] = NULL; 657 lp->tx_dma_addr[tx_index] = 0; 658 } 659 lp->tx_complete_idx++; 660 /*COAL update tx coalescing parameters */ 661 lp->coal_conf.tx_packets++; 662 lp->coal_conf.tx_bytes += 663 le16_to_cpu(lp->tx_ring[tx_index].buff_count); 664 665 if (netif_queue_stopped(dev) && 666 lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){ 667 /* The ring is no longer full, clear tbusy. 
*/ 668 /* lp->tx_full = 0; */ 669 netif_wake_queue (dev); 670 } 671 } 672 return 0; 673 } 674 675 /* This function handles the driver receive operation in polling mode */ 676 static int amd8111e_rx_poll(struct napi_struct *napi, int budget) 677 { 678 struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi); 679 struct net_device *dev = lp->amd8111e_net_dev; 680 int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; 681 void __iomem *mmio = lp->mmio; 682 struct sk_buff *skb,*new_skb; 683 int min_pkt_len, status; 684 int num_rx_pkt = 0; 685 short pkt_len; 686 #if AMD8111E_VLAN_TAG_USED 687 short vtag; 688 #endif 689 690 while (num_rx_pkt < budget) { 691 status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); 692 if (status & OWN_BIT) 693 break; 694 695 /* There is a tricky error noted by John Murphy, 696 * <murf@perftech.com> to Russ Nelson: Even with 697 * full-sized * buffers it's possible for a 698 * jabber packet to use two buffers, with only 699 * the last correctly noting the error. 
700 */ 701 if (status & ERR_BIT) { 702 /* resetting flags */ 703 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 704 goto err_next_pkt; 705 } 706 /* check for STP and ENP */ 707 if (!((status & STP_BIT) && (status & ENP_BIT))){ 708 /* resetting flags */ 709 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 710 goto err_next_pkt; 711 } 712 pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; 713 714 #if AMD8111E_VLAN_TAG_USED 715 vtag = status & TT_MASK; 716 /* MAC will strip vlan tag */ 717 if (vtag != 0) 718 min_pkt_len = MIN_PKT_LEN - 4; 719 else 720 #endif 721 min_pkt_len = MIN_PKT_LEN; 722 723 if (pkt_len < min_pkt_len) { 724 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 725 lp->drv_rx_errors++; 726 goto err_next_pkt; 727 } 728 new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); 729 if (!new_skb) { 730 /* if allocation fail, 731 * ignore that pkt and go to next one 732 */ 733 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 734 lp->drv_rx_errors++; 735 goto err_next_pkt; 736 } 737 738 skb_reserve(new_skb, 2); 739 skb = lp->rx_skbuff[rx_index]; 740 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], 741 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); 742 skb_put(skb, pkt_len); 743 lp->rx_skbuff[rx_index] = new_skb; 744 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, 745 new_skb->data, 746 lp->rx_buff_len-2, 747 PCI_DMA_FROMDEVICE); 748 749 skb->protocol = eth_type_trans(skb, dev); 750 751 #if AMD8111E_VLAN_TAG_USED 752 if (vtag == TT_VLAN_TAGGED){ 753 u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); 754 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 755 } 756 #endif 757 napi_gro_receive(napi, skb); 758 /* COAL update rx coalescing parameters */ 759 lp->coal_conf.rx_packets++; 760 lp->coal_conf.rx_bytes += pkt_len; 761 num_rx_pkt++; 762 763 err_next_pkt: 764 lp->rx_ring[rx_index].buff_phy_addr 765 = cpu_to_le32(lp->rx_dma_addr[rx_index]); 766 lp->rx_ring[rx_index].buff_count = 767 cpu_to_le16(lp->rx_buff_len-2); 768 
wmb(); 769 lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); 770 rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; 771 } 772 773 if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) { 774 unsigned long flags; 775 776 /* Receive descriptor is empty now */ 777 spin_lock_irqsave(&lp->lock, flags); 778 writel(VAL0|RINTEN0, mmio + INTEN0); 779 writel(VAL2 | RDMD0, mmio + CMD0); 780 spin_unlock_irqrestore(&lp->lock, flags); 781 } 782 783 return num_rx_pkt; 784 } 785 786 /* This function will indicate the link status to the kernel. */ 787 static int amd8111e_link_change(struct net_device *dev) 788 { 789 struct amd8111e_priv *lp = netdev_priv(dev); 790 int status0,speed; 791 792 /* read the link change */ 793 status0 = readl(lp->mmio + STAT0); 794 795 if(status0 & LINK_STATS){ 796 if(status0 & AUTONEG_COMPLETE) 797 lp->link_config.autoneg = AUTONEG_ENABLE; 798 else 799 lp->link_config.autoneg = AUTONEG_DISABLE; 800 801 if(status0 & FULL_DPLX) 802 lp->link_config.duplex = DUPLEX_FULL; 803 else 804 lp->link_config.duplex = DUPLEX_HALF; 805 speed = (status0 & SPEED_MASK) >> 7; 806 if(speed == PHY_SPEED_10) 807 lp->link_config.speed = SPEED_10; 808 else if(speed == PHY_SPEED_100) 809 lp->link_config.speed = SPEED_100; 810 811 netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n", 812 (lp->link_config.speed == SPEED_100) ? 813 "100" : "10", 814 (lp->link_config.duplex == DUPLEX_FULL) ? 815 "Full" : "Half"); 816 817 netif_carrier_on(dev); 818 } 819 else{ 820 lp->link_config.speed = SPEED_INVALID; 821 lp->link_config.duplex = DUPLEX_INVALID; 822 lp->link_config.autoneg = AUTONEG_INVALID; 823 netdev_info(dev, "Link is Down.\n"); 824 netif_carrier_off(dev); 825 } 826 827 return 0; 828 } 829 830 /* This function reads the mib counters. 
*/ 831 static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER) 832 { 833 unsigned int status; 834 unsigned int data; 835 unsigned int repeat = REPEAT_CNT; 836 837 writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR); 838 do { 839 status = readw(mmio + MIB_ADDR); 840 udelay(2); /* controller takes MAX 2 us to get mib data */ 841 } 842 while (--repeat && (status & MIB_CMD_ACTIVE)); 843 844 data = readl(mmio + MIB_DATA); 845 return data; 846 } 847 848 /* This function reads the mib registers and returns the hardware statistics. 849 * It updates previous internal driver statistics with new values. 850 */ 851 static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) 852 { 853 struct amd8111e_priv *lp = netdev_priv(dev); 854 void __iomem *mmio = lp->mmio; 855 unsigned long flags; 856 struct net_device_stats *new_stats = &dev->stats; 857 858 if (!lp->opened) 859 return new_stats; 860 spin_lock_irqsave (&lp->lock, flags); 861 862 /* stats.rx_packets */ 863 new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+ 864 amd8111e_read_mib(mmio, rcv_multicast_pkts)+ 865 amd8111e_read_mib(mmio, rcv_unicast_pkts); 866 867 /* stats.tx_packets */ 868 new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets); 869 870 /*stats.rx_bytes */ 871 new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets); 872 873 /* stats.tx_bytes */ 874 new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets); 875 876 /* stats.rx_errors */ 877 /* hw errors + errors driver reported */ 878 new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+ 879 amd8111e_read_mib(mmio, rcv_fragments)+ 880 amd8111e_read_mib(mmio, rcv_jabbers)+ 881 amd8111e_read_mib(mmio, rcv_alignment_errors)+ 882 amd8111e_read_mib(mmio, rcv_fcs_errors)+ 883 amd8111e_read_mib(mmio, rcv_miss_pkts)+ 884 lp->drv_rx_errors; 885 886 /* stats.tx_errors */ 887 new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts); 888 889 /* stats.rx_dropped*/ 890 new_stats->rx_dropped = 
amd8111e_read_mib(mmio, rcv_miss_pkts); 891 892 /* stats.tx_dropped*/ 893 new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts); 894 895 /* stats.multicast*/ 896 new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts); 897 898 /* stats.collisions*/ 899 new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions); 900 901 /* stats.rx_length_errors*/ 902 new_stats->rx_length_errors = 903 amd8111e_read_mib(mmio, rcv_undersize_pkts)+ 904 amd8111e_read_mib(mmio, rcv_oversize_pkts); 905 906 /* stats.rx_over_errors*/ 907 new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); 908 909 /* stats.rx_crc_errors*/ 910 new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors); 911 912 /* stats.rx_frame_errors*/ 913 new_stats->rx_frame_errors = 914 amd8111e_read_mib(mmio, rcv_alignment_errors); 915 916 /* stats.rx_fifo_errors */ 917 new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); 918 919 /* stats.rx_missed_errors */ 920 new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); 921 922 /* stats.tx_aborted_errors*/ 923 new_stats->tx_aborted_errors = 924 amd8111e_read_mib(mmio, xmt_excessive_collision); 925 926 /* stats.tx_carrier_errors*/ 927 new_stats->tx_carrier_errors = 928 amd8111e_read_mib(mmio, xmt_loss_carrier); 929 930 /* stats.tx_fifo_errors*/ 931 new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts); 932 933 /* stats.tx_window_errors*/ 934 new_stats->tx_window_errors = 935 amd8111e_read_mib(mmio, xmt_late_collision); 936 937 /* Reset the mibs for collecting new statistics */ 938 /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/ 939 940 spin_unlock_irqrestore (&lp->lock, flags); 941 942 return new_stats; 943 } 944 945 /* This function recalculate the interrupt coalescing mode on every interrupt 946 * according to the datarate and the packet rate. 
947 */ 948 static int amd8111e_calc_coalesce(struct net_device *dev) 949 { 950 struct amd8111e_priv *lp = netdev_priv(dev); 951 struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf; 952 int tx_pkt_rate; 953 int rx_pkt_rate; 954 int tx_data_rate; 955 int rx_data_rate; 956 int rx_pkt_size; 957 int tx_pkt_size; 958 959 tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets; 960 coal_conf->tx_prev_packets = coal_conf->tx_packets; 961 962 tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes; 963 coal_conf->tx_prev_bytes = coal_conf->tx_bytes; 964 965 rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets; 966 coal_conf->rx_prev_packets = coal_conf->rx_packets; 967 968 rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes; 969 coal_conf->rx_prev_bytes = coal_conf->rx_bytes; 970 971 if(rx_pkt_rate < 800){ 972 if(coal_conf->rx_coal_type != NO_COALESCE){ 973 974 coal_conf->rx_timeout = 0x0; 975 coal_conf->rx_event_count = 0; 976 amd8111e_set_coalesce(dev,RX_INTR_COAL); 977 coal_conf->rx_coal_type = NO_COALESCE; 978 } 979 } 980 else{ 981 982 rx_pkt_size = rx_data_rate/rx_pkt_rate; 983 if (rx_pkt_size < 128){ 984 if(coal_conf->rx_coal_type != NO_COALESCE){ 985 986 coal_conf->rx_timeout = 0; 987 coal_conf->rx_event_count = 0; 988 amd8111e_set_coalesce(dev,RX_INTR_COAL); 989 coal_conf->rx_coal_type = NO_COALESCE; 990 } 991 992 } 993 else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){ 994 995 if(coal_conf->rx_coal_type != LOW_COALESCE){ 996 coal_conf->rx_timeout = 1; 997 coal_conf->rx_event_count = 4; 998 amd8111e_set_coalesce(dev,RX_INTR_COAL); 999 coal_conf->rx_coal_type = LOW_COALESCE; 1000 } 1001 } 1002 else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){ 1003 1004 if(coal_conf->rx_coal_type != MEDIUM_COALESCE){ 1005 coal_conf->rx_timeout = 1; 1006 coal_conf->rx_event_count = 4; 1007 amd8111e_set_coalesce(dev,RX_INTR_COAL); 1008 coal_conf->rx_coal_type = MEDIUM_COALESCE; 1009 } 1010 1011 } 1012 else if(rx_pkt_size >= 1024){ 
1013 if(coal_conf->rx_coal_type != HIGH_COALESCE){ 1014 coal_conf->rx_timeout = 2; 1015 coal_conf->rx_event_count = 3; 1016 amd8111e_set_coalesce(dev,RX_INTR_COAL); 1017 coal_conf->rx_coal_type = HIGH_COALESCE; 1018 } 1019 } 1020 } 1021 /* NOW FOR TX INTR COALESC */ 1022 if(tx_pkt_rate < 800){ 1023 if(coal_conf->tx_coal_type != NO_COALESCE){ 1024 1025 coal_conf->tx_timeout = 0x0; 1026 coal_conf->tx_event_count = 0; 1027 amd8111e_set_coalesce(dev,TX_INTR_COAL); 1028 coal_conf->tx_coal_type = NO_COALESCE; 1029 } 1030 } 1031 else{ 1032 1033 tx_pkt_size = tx_data_rate/tx_pkt_rate; 1034 if (tx_pkt_size < 128){ 1035 1036 if(coal_conf->tx_coal_type != NO_COALESCE){ 1037 1038 coal_conf->tx_timeout = 0; 1039 coal_conf->tx_event_count = 0; 1040 amd8111e_set_coalesce(dev,TX_INTR_COAL); 1041 coal_conf->tx_coal_type = NO_COALESCE; 1042 } 1043 1044 } 1045 else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){ 1046 1047 if(coal_conf->tx_coal_type != LOW_COALESCE){ 1048 coal_conf->tx_timeout = 1; 1049 coal_conf->tx_event_count = 2; 1050 amd8111e_set_coalesce(dev,TX_INTR_COAL); 1051 coal_conf->tx_coal_type = LOW_COALESCE; 1052 1053 } 1054 } 1055 else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){ 1056 1057 if(coal_conf->tx_coal_type != MEDIUM_COALESCE){ 1058 coal_conf->tx_timeout = 2; 1059 coal_conf->tx_event_count = 5; 1060 amd8111e_set_coalesce(dev,TX_INTR_COAL); 1061 coal_conf->tx_coal_type = MEDIUM_COALESCE; 1062 } 1063 } else if (tx_pkt_size >= 1024) { 1064 if (coal_conf->tx_coal_type != HIGH_COALESCE) { 1065 coal_conf->tx_timeout = 4; 1066 coal_conf->tx_event_count = 8; 1067 amd8111e_set_coalesce(dev, TX_INTR_COAL); 1068 coal_conf->tx_coal_type = HIGH_COALESCE; 1069 } 1070 } 1071 } 1072 return 0; 1073 1074 } 1075 1076 /* This is device interrupt function. It handles transmit, 1077 * receive,link change and hardware timer interrupts. 
1078 */ 1079 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) 1080 { 1081 1082 struct net_device *dev = (struct net_device *)dev_id; 1083 struct amd8111e_priv *lp = netdev_priv(dev); 1084 void __iomem *mmio = lp->mmio; 1085 unsigned int intr0, intren0; 1086 unsigned int handled = 1; 1087 1088 if(unlikely(dev == NULL)) 1089 return IRQ_NONE; 1090 1091 spin_lock(&lp->lock); 1092 1093 /* disabling interrupt */ 1094 writel(INTREN, mmio + CMD0); 1095 1096 /* Read interrupt status */ 1097 intr0 = readl(mmio + INT0); 1098 intren0 = readl(mmio + INTEN0); 1099 1100 /* Process all the INT event until INTR bit is clear. */ 1101 1102 if (!(intr0 & INTR)){ 1103 handled = 0; 1104 goto err_no_interrupt; 1105 } 1106 1107 /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */ 1108 writel(intr0, mmio + INT0); 1109 1110 /* Check if Receive Interrupt has occurred. */ 1111 if (intr0 & RINT0) { 1112 if (napi_schedule_prep(&lp->napi)) { 1113 /* Disable receive interupts */ 1114 writel(RINTEN0, mmio + INTEN0); 1115 /* Schedule a polling routine */ 1116 __napi_schedule(&lp->napi); 1117 } else if (intren0 & RINTEN0) { 1118 netdev_dbg(dev, "************Driver bug! interrupt while in poll\n"); 1119 /* Fix by disable receive interrupts */ 1120 writel(RINTEN0, mmio + INTEN0); 1121 } 1122 } 1123 1124 /* Check if Transmit Interrupt has occurred. */ 1125 if (intr0 & TINT0) 1126 amd8111e_tx(dev); 1127 1128 /* Check if Link Change Interrupt has occurred. */ 1129 if (intr0 & LCINT) 1130 amd8111e_link_change(dev); 1131 1132 /* Check if Hardware Timer Interrupt has occurred. 
*/ 1133 if (intr0 & STINT) 1134 amd8111e_calc_coalesce(dev); 1135 1136 err_no_interrupt: 1137 writel( VAL0 | INTREN,mmio + CMD0); 1138 1139 spin_unlock(&lp->lock); 1140 1141 return IRQ_RETVAL(handled); 1142 } 1143 1144 #ifdef CONFIG_NET_POLL_CONTROLLER 1145 static void amd8111e_poll(struct net_device *dev) 1146 { 1147 unsigned long flags; 1148 local_irq_save(flags); 1149 amd8111e_interrupt(0, dev); 1150 local_irq_restore(flags); 1151 } 1152 #endif 1153 1154 1155 /* This function closes the network interface and updates 1156 * the statistics so that most recent statistics will be 1157 * available after the interface is down. 1158 */ 1159 static int amd8111e_close(struct net_device *dev) 1160 { 1161 struct amd8111e_priv *lp = netdev_priv(dev); 1162 netif_stop_queue(dev); 1163 1164 napi_disable(&lp->napi); 1165 1166 spin_lock_irq(&lp->lock); 1167 1168 amd8111e_disable_interrupt(lp); 1169 amd8111e_stop_chip(lp); 1170 1171 /* Free transmit and receive skbs */ 1172 amd8111e_free_skbs(lp->amd8111e_net_dev); 1173 1174 netif_carrier_off(lp->amd8111e_net_dev); 1175 1176 /* Delete ipg timer */ 1177 if(lp->options & OPTION_DYN_IPG_ENABLE) 1178 del_timer_sync(&lp->ipg_data.ipg_timer); 1179 1180 spin_unlock_irq(&lp->lock); 1181 free_irq(dev->irq, dev); 1182 amd8111e_free_ring(lp); 1183 1184 /* Update the statistics before closing */ 1185 amd8111e_get_stats(dev); 1186 lp->opened = 0; 1187 return 0; 1188 } 1189 1190 /* This function opens new interface.It requests irq for the device, 1191 * initializes the device,buffers and descriptors, and starts the device. 
1192 */ 1193 static int amd8111e_open(struct net_device *dev) 1194 { 1195 struct amd8111e_priv *lp = netdev_priv(dev); 1196 1197 if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED, 1198 dev->name, dev)) 1199 return -EAGAIN; 1200 1201 napi_enable(&lp->napi); 1202 1203 spin_lock_irq(&lp->lock); 1204 1205 amd8111e_init_hw_default(lp); 1206 1207 if(amd8111e_restart(dev)){ 1208 spin_unlock_irq(&lp->lock); 1209 napi_disable(&lp->napi); 1210 if (dev->irq) 1211 free_irq(dev->irq, dev); 1212 return -ENOMEM; 1213 } 1214 /* Start ipg timer */ 1215 if(lp->options & OPTION_DYN_IPG_ENABLE){ 1216 add_timer(&lp->ipg_data.ipg_timer); 1217 netdev_info(dev, "Dynamic IPG Enabled\n"); 1218 } 1219 1220 lp->opened = 1; 1221 1222 spin_unlock_irq(&lp->lock); 1223 1224 netif_start_queue(dev); 1225 1226 return 0; 1227 } 1228 1229 /* This function checks if there is any transmit descriptors 1230 * available to queue more packet. 1231 */ 1232 static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp) 1233 { 1234 int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK; 1235 if (lp->tx_skbuff[tx_index]) 1236 return -1; 1237 else 1238 return 0; 1239 1240 } 1241 1242 /* This function will queue the transmit packets to the 1243 * descriptors and will trigger the send operation. It also 1244 * initializes the transmit descriptors with buffer physical address, 1245 * byte count, ownership to hardware etc. 
 */
static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	/* Next slot in the transmit descriptor ring. */
	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

	lp->tx_skbuff[tx_index] = skb;
	lp->tx_ring[tx_index].tx_flags = 0;

#if AMD8111E_VLAN_TAG_USED
	/* Ask the hardware to insert the VLAN tag carried by the skb. */
	if (skb_vlan_tag_present(skb)) {
		lp->tx_ring[tx_index].tag_ctrl_cmd |=
			cpu_to_le16(TCC_VLAN_INSERT);
		lp->tx_ring[tx_index].tag_ctrl_info =
			cpu_to_le16(skb_vlan_tag_get(skb));
	}
#endif
	/* Map the packet for device DMA.
	 * NOTE(review): the result is not checked with
	 * pci_dma_mapping_error() — confirm whether a mapping failure is
	 * possible on the platforms this chip ships on.
	 */
	lp->tx_dma_addr[tx_index] =
		pci_map_single(lp->pci_dev, skb->data, skb->len,
			       PCI_DMA_TODEVICE);
	lp->tx_ring[tx_index].buff_phy_addr =
		cpu_to_le32(lp->tx_dma_addr[tx_index]);

	/* Set FCS and LTINT bits */
	/* The write barrier ensures the descriptor fields above are
	 * visible before ownership is handed to the NIC via OWN_BIT.
	 */
	wmb();
	lp->tx_ring[tx_index].tx_flags |=
		cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT | ADD_FCS_BIT | LTINT_BIT);

	lp->tx_idx++;

	/* Trigger an immediate send poll. */
	writel(VAL1 | TDMD0, lp->mmio + CMD0);
	writel(VAL2 | RDMD0, lp->mmio + CMD0);

	/* Stop the queue while the next slot is still occupied. */
	if (amd8111e_tx_queue_avail(lp) < 0) {
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}
/* This function returns all the memory mapped registers of the device.
 */
static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
{
	void __iomem *mmio = lp->mmio;
	/* Read only necessary registers */
	buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
	buf[1] = readl(mmio + XMT_RING_LEN0);
	buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
	buf[3] = readl(mmio + RCV_RING_LEN0);
	buf[4] = readl(mmio + CMD0);
	buf[5] = readl(mmio + CMD2);
	buf[6] = readl(mmio + CMD3);
	buf[7] = readl(mmio + CMD7);
	buf[8] = readl(mmio + INT0);
	buf[9] = readl(mmio + INTEN0);
	buf[10] = readl(mmio + LADRF);
	buf[11] = readl(mmio + LADRF+4);
	buf[12] = readl(mmio + STAT0);
}


/* This function sets promiscuos mode, all-multi mode or the multicast address
 * list to the device.
 */
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 mc_filter[2] ;
	int bit_num;

	if (dev->flags & IFF_PROMISC) {
		/* With VAL2 set the PROM bit is written as 1 (promiscuous
		 * on); without VAL2 below it is written as 0 — this matches
		 * the CMD-register write convention used elsewhere in this
		 * driver (e.g. VAL0|MPEN_SW in amd8111e_enable_magicpkt).
		 */
		writel(VAL2 | PROM, lp->mmio + CMD2);
		return;
	}
	else
		/* Leave promiscuous mode. */
		writel(PROM, lp->mmio + CMD2);
	if (dev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
		/* get all multicast packet */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->options |= OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		return;
	}
	if (netdev_mc_empty(dev)) {
		/* get only own packets */
		mc_filter[1] = mc_filter[0] = 0;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		/* disable promiscuous mode */
		writel(PROM, lp->mmio + CMD2);
		return;
	}
	/* load all the multicast addresses in the logic filter */
	lp->options |= OPTION_MULTICAST_ENABLE;
	mc_filter[1] = mc_filter[0] = 0;
	netdev_for_each_mc_addr(ha, dev) {
		/* Hash: top 6 bits of the little-endian CRC select one of
		 * 64 bits in the logical address filter.
		 */
		bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD2);

}

/* ethtool: report driver name, chip revision (as fw_version) and the
 * PCI bus address of the adapter.
 */
static void amd8111e_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct pci_dev *pci_dev = lp->pci_dev;
	strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
	/* chip_version is latched at probe time from the CHIPID register. */
	snprintf(info->fw_version, sizeof(info->fw_version),
		 "%u", chip_version);
	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}

/* ethtool: size of the register dump produced by amd8111e_get_regs(). */
static int amd8111e_get_regs_len(struct net_device *dev)
{
	return AMD8111E_REG_DUMP_LEN;
}

/* ethtool: dump the subset of MMIO registers read by amd8111e_read_regs(). */
static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	regs->version = 0;
	amd8111e_read_regs(lp, buf);
}

/* ethtool: read link settings from the MII layer under the driver lock. */
static int amd8111e_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	spin_lock_irq(&lp->lock);
	mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	return 0;
}

/* ethtool: write link settings through the MII layer under the driver lock. */
static int amd8111e_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int res;
	spin_lock_irq(&lp->lock);
	res = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	return res;
}

/* ethtool: restart autonegotiation on the PHY. */
static int amd8111e_nway_reset(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_nway_restart(&lp->mii_if);
}

/* ethtool: report current link state from the MII layer. */
static u32 amd8111e_get_link(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_link_ok(&lp->mii_if);
}

/* ethtool: report Wake-on-LAN capabilities and current setting.
 * NOTE(review): only WAKE_MAGIC is ever reported in wolopts even when
 * the PHY wake option was selected via amd8111e_set_wol() — confirm
 * whether OPTION_WAKE_PHY_ENABLE should be reflected here too.
 */
static void
amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	wol_info->supported = WAKE_MAGIC|WAKE_PHY;
	if (lp->options & OPTION_WOL_ENABLE)
		wol_info->wolopts = WAKE_MAGIC;
}

/* ethtool: select which wake events arm the adapter at suspend time.
 * WAKE_MAGIC takes precedence over WAKE_PHY when both are requested.
 * NOTE(review): switching from one wake type to the other does not
 * clear the previously set OPTION_WAKE_* flag — confirm intended.
 */
static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
		return -EINVAL;
	spin_lock_irq(&lp->lock);
	if (wol_info->wolopts & WAKE_MAGIC)
		lp->options |=
			(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
	else if (wol_info->wolopts & WAKE_PHY)
		lp->options |=
			(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
	else
		lp->options &= ~OPTION_WOL_ENABLE;
	spin_unlock_irq(&lp->lock);
	return 0;
}

static const struct ethtool_ops ops = {
	.get_drvinfo = amd8111e_get_drvinfo,
	.get_regs_len = amd8111e_get_regs_len,
	.get_regs = amd8111e_get_regs,
	.nway_reset = amd8111e_nway_reset,
	.get_link = amd8111e_get_link,
	.get_wol = amd8111e_get_wol,
	.set_wol = amd8111e_set_wol,
	.get_link_ksettings = amd8111e_get_link_ksettings,
	.set_link_ksettings = amd8111e_set_link_ksettings,
};

/* This function handles all the ethtool ioctls. It gives driver info,
 * gets/sets driver speed, gets memory mapped register values, forces
 * auto negotiation, sets/gets WOL options for ethtool application.
1459 */ 1460 static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd) 1461 { 1462 struct mii_ioctl_data *data = if_mii(ifr); 1463 struct amd8111e_priv *lp = netdev_priv(dev); 1464 int err; 1465 u32 mii_regval; 1466 1467 switch(cmd) { 1468 case SIOCGMIIPHY: 1469 data->phy_id = lp->ext_phy_addr; 1470 1471 /* fallthru */ 1472 case SIOCGMIIREG: 1473 1474 spin_lock_irq(&lp->lock); 1475 err = amd8111e_read_phy(lp, data->phy_id, 1476 data->reg_num & PHY_REG_ADDR_MASK, &mii_regval); 1477 spin_unlock_irq(&lp->lock); 1478 1479 data->val_out = mii_regval; 1480 return err; 1481 1482 case SIOCSMIIREG: 1483 1484 spin_lock_irq(&lp->lock); 1485 err = amd8111e_write_phy(lp, data->phy_id, 1486 data->reg_num & PHY_REG_ADDR_MASK, data->val_in); 1487 spin_unlock_irq(&lp->lock); 1488 1489 return err; 1490 1491 default: 1492 /* do nothing */ 1493 break; 1494 } 1495 return -EOPNOTSUPP; 1496 } 1497 static int amd8111e_set_mac_address(struct net_device *dev, void *p) 1498 { 1499 struct amd8111e_priv *lp = netdev_priv(dev); 1500 int i; 1501 struct sockaddr *addr = p; 1502 1503 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1504 spin_lock_irq(&lp->lock); 1505 /* Setting the MAC address to the device */ 1506 for (i = 0; i < ETH_ALEN; i++) 1507 writeb( dev->dev_addr[i], lp->mmio + PADR + i ); 1508 1509 spin_unlock_irq(&lp->lock); 1510 1511 return 0; 1512 } 1513 1514 /* This function changes the mtu of the device. It restarts the device to 1515 * initialize the descriptor with new receive buffers. 
 */
static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;

	if (!netif_running(dev)) {
		/* new_mtu will be used
		 * when device starts next time
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&lp->lock);

	/* stop the chip */
	writel(RUN, lp->mmio + CMD0);

	dev->mtu = new_mtu;

	/* Re-initialize rings/buffers sized for the new MTU. */
	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	if (!err)
		netif_start_queue(dev);
	return err;
}

/* Arm magic-packet wake-up in the chip (CMD3/CMD7).  Always returns 0. */
static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
{
	writel(VAL1 | MPPLBA, lp->mmio + CMD3);
	writel(VAL0 | MPEN_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}

/* Arm link-change wake-up in the chip (CMD7).  Always returns 0. */
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{

	/* Adapter is already stopped/suspended/interrupt-disabled */
	writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}

/* This function is called when a packet transmission fails to complete
 * within a reasonable period, on the assumption that an interrupt have
 * failed or the interface is locked up. This function will reinitialize
 * the hardware.
 */
static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&lp->lock);
	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	if (!err)
		netif_wake_queue(dev);
}

/* Legacy PCI suspend hook: detach the interface, stop the chip, and
 * arm the requested Wake-on-LAN events before entering D3hot.
 */
static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	/* stop chip */
	spin_lock_irq(&lp->lock);
	/* The IPG tuning timer must not fire once the chip is stopped. */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if (lp->options & OPTION_WOL_ENABLE) {
		/* enable wol */
		if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);
		if (lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);

		pci_enable_wake(pci_dev, PCI_D3hot, 1);
		pci_enable_wake(pci_dev, PCI_D3cold, 1);

	}
	else {
		pci_enable_wake(pci_dev, PCI_D3hot, 0);
		pci_enable_wake(pci_dev, PCI_D3cold, 0);
	}

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, PCI_D3hot);

	return 0;
}

/* Legacy PCI resume hook: restore PCI state, reattach the interface and
 * restart the chip (and the IPG timer when dynamic IPG is enabled).
 */
static int amd8111e_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	pci_enable_wake(pci_dev, PCI_D3hot, 0);
	pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);
	/* Restart ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		mod_timer(&lp->ipg_data.ipg_timer,
			  jiffies + IPG_CONVERGE_JIFFIES);
	spin_unlock_irq(&lp->lock);

	return 0;
}

/* Timer callback implementing the dynamic inter-packet-gap search for
 * half-duplex links: sweep the IPG upward (CSTATE), keep the value with
 * the fewest collisions, then hold it stable (SSTATE) before searching
 * again.  NOTE(review): on a full-duplex link this returns without
 * rearming the timer, so tuning never resumes if the link later goes
 * half duplex — confirm intended.
 */
static void amd8111e_config_ipg(struct timer_list *t)
{
	struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
	struct ipg_info *ipg_data = &lp->ipg_data;
	void __iomem *mmio = lp->mmio;
	unsigned int prev_col_cnt = ipg_data->col_cnt;
	unsigned int total_col_cnt;
	unsigned int tmp_ipg;

	if (lp->link_config.duplex == DUPLEX_FULL) {
		/* IPG tuning only matters when collisions can occur. */
		ipg_data->ipg = DEFAULT_IPG;
		return;
	}

	if (ipg_data->ipg_state == SSTATE) {
		if (ipg_data->timer_tick == IPG_STABLE_TIME) {
			/* Stable period over: restart the sweep. */
			ipg_data->timer_tick = 0;
			ipg_data->ipg = MIN_IPG - IPG_STEP;
			ipg_data->current_ipg = MIN_IPG;
			ipg_data->diff_col_cnt = 0xFFFFFFFF;
			ipg_data->ipg_state = CSTATE;
		}
		else
			ipg_data->timer_tick++;
	}

	if (ipg_data->ipg_state == CSTATE) {

		/* Get the current collision count */

		total_col_cnt = ipg_data->col_cnt =
			amd8111e_read_mib(mmio, xmt_collisions);

		/* Remember the IPG with the fewest collisions per tick. */
		if ((total_col_cnt - prev_col_cnt) <
		    (ipg_data->diff_col_cnt)) {

			ipg_data->diff_col_cnt =
				total_col_cnt - prev_col_cnt;

			ipg_data->ipg = ipg_data->current_ipg;
		}

		ipg_data->current_ipg += IPG_STEP;

		if (ipg_data->current_ipg <= MAX_IPG)
			tmp_ipg = ipg_data->current_ipg;
		else {
			/* Sweep finished: lock in the best IPG found. */
			tmp_ipg = ipg_data->ipg;
			ipg_data->ipg_state = SSTATE;
		}
		writew((u32)tmp_ipg, mmio + IPG);
		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
	}
	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
	return;

}

/* Scan MII addresses 0x1e down to 0 for an external PHY; record its ID
 * and address, or fall back to address 1 with a zero ID if none answers.
 */
static void amd8111e_probe_ext_phy(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	for (i = 0x1e; i >= 0; i--) {
		u32 id1, id2;

		if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
			continue;
		if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
			continue;
		lp->ext_phy_id = (id1 << 16) | id2;
		lp->ext_phy_addr = i;
		return;
	}
	lp->ext_phy_id = 0;
	lp->ext_phy_addr = 1;
}

static const struct net_device_ops amd8111e_netdev_ops = {
	.ndo_open		= amd8111e_open,
	.ndo_stop		= amd8111e_close,
	.ndo_start_xmit		= amd8111e_start_xmit,
	.ndo_tx_timeout		= amd8111e_tx_timeout,
	.ndo_get_stats		= amd8111e_get_stats,
	.ndo_set_rx_mode	= amd8111e_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= amd8111e_set_mac_address,
	.ndo_do_ioctl		= amd8111e_ioctl,
	.ndo_change_mtu		= amd8111e_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = amd8111e_poll,
#endif
};

/* PCI probe: enable the device, map its MMIO BAR, read the MAC address
 * from the PADR registers, apply the per-card module parameters, wire
 * up netdev/ethtool/NAPI entry points, detect the external PHY and
 * register the interface.  Unwinds in reverse order on failure.
 */
static int amd8111e_probe_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err, i;
	unsigned long reg_addr, reg_len;
	struct amd8111e_priv *lp;
	struct net_device *dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find PCI base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	if (!pdev->pm_cap) {
		dev_err(&pdev->dev, "No Power Management capability\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	/* Initialize DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
		dev_err(&pdev->dev, "DMA not supported\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	reg_addr = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
	if (!dev) {
		err = -ENOMEM;
		goto err_free_reg;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if AMD8111E_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif

	lp = netdev_priv(dev);
	lp->pci_dev = pdev;
	lp->amd8111e_net_dev = dev;
	lp->pm_cap = pdev->pm_cap;

	spin_lock_init(&lp->lock);

	lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
	if (!lp->mmio) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	/* Initializing MAC address */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = readb(lp->mmio + PADR + i);

	/* Setting user defined parametrs */
	lp->ext_phy_option = speed_duplex[card_idx];
	if (coalesce[card_idx])
		lp->options |= OPTION_INTR_COAL_ENABLE;
	if (dynamic_ipg[card_idx++])
		lp->options |= OPTION_DYN_IPG_ENABLE;


	/* Initialize driver entry points */
	dev->netdev_ops = &amd8111e_netdev_ops;
	dev->ethtool_ops = &ops;
	dev->irq = pdev->irq;
	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
	dev->min_mtu = AMD8111E_MIN_MTU;
	dev->max_mtu = AMD8111E_MAX_MTU;
	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);

#if AMD8111E_VLAN_TAG_USED
	/* NOTE(review): duplicates the feature flags already set above. */
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif
	/* Probe the external PHY */
	amd8111e_probe_ext_phy(dev);

	/* setting mii default values */
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = amd8111e_mdio_read;
	lp->mii_if.mdio_write = amd8111e_mdio_write;
	lp->mii_if.phy_id = lp->ext_phy_addr;

	/* Set receive buffer length and set jumbo option*/
	amd8111e_set_rx_buff_len(dev);


	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_free_dev;
	}

	pci_set_drvdata(pdev, dev);

	/* Initialize software ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE) {
		timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
		lp->ipg_data.ipg_timer.expires = jiffies +
						 IPG_CONVERGE_JIFFIES;
		lp->ipg_data.ipg = DEFAULT_IPG;
		lp->ipg_data.ipg_state = CSTATE;
	}

	/* display driver and device information */
	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
	dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
		 chip_version, dev->dev_addr);
	if (lp->ext_phy_id)
		dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
			 lp->ext_phy_id, lp->ext_phy_addr);
	else
		dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");

	return 0;

err_free_dev:
	free_netdev(dev);

err_free_reg:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	return err;

}

/* PCI remove: unregister and free the netdev, release PCI resources. */
static void amd8111e_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		unregister_netdev(dev);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

/* Devices served by this driver: the AMD8111E (7462) function only. */
static const struct pci_device_id amd8111e_pci_tbl[] = {
	{
	 .vendor = PCI_VENDOR_ID_AMD,
	 .device = PCI_DEVICE_ID_AMD8111E_7462,
	},
	{
	 .vendor = 0,
	}
};
MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);

static struct pci_driver amd8111e_driver = {
	.name		= MODULE_NAME,
	.id_table	= amd8111e_pci_tbl,
	.probe		= amd8111e_probe_one,
	.remove		= amd8111e_remove_one,
	.suspend	= amd8111e_suspend,
	.resume		= amd8111e_resume
};

module_pci_driver(amd8111e_driver);