1 
2 /* Advanced  Micro Devices Inc. AMD8111E Linux Network Driver
3  * Copyright (C) 2004 Advanced Micro Devices
4  *
5  *
6  * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7  * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8  * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9  * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10  * Copyright 1993 United States Government as represented by the
11  *	Director, National Security Agency.[ pcnet32.c ]
12  * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
14  *
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  *
21  * This program is distributed in the hope that it will be useful,
22  * but WITHOUT ANY WARRANTY; without even the implied warranty of
23  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24  * GNU General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with this program; if not, see <http://www.gnu.org/licenses/>.
28 
29 Module Name:
30 
31 	amd8111e.c
32 
33 Abstract:
34 
35  	 AMD8111 based 10/100 Ethernet Controller Driver.
36 
37 Environment:
38 
39 	Kernel Mode
40 
41 Revision History:
42  	3.0.0
43 	   Initial Revision.
44 	3.0.1
45 	 1. Dynamic interrupt coalescing.
46 	 2. Removed prev_stats.
47 	 3. MII support.
48 	 4. Dynamic IPG support
49 	3.0.2  05/29/2003
50 	 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
51 	 2. Bug fix: Fixed VLAN support failure.
52 	 3. Bug fix: Fixed receive interrupt coalescing bug.
53 	 4. Dynamic IPG support is disabled by default.
54 	3.0.3 06/05/2003
55 	 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
56 	3.0.4 12/09/2003
57 	 1. Added set_mac_address routine for bonding driver support.
58 	 2. Tested the driver for bonding support
	 3. Bug fix: Fixed mismatch in actual receive buffer length and length
	    indicated to the h/w.
61 	 4. Modified amd8111e_rx() routine to receive all the received packets
62 	    in the first interrupt.
63 	 5. Bug fix: Corrected  rx_errors  reported in get_stats() function.
64 	3.0.5 03/22/2004
65 	 1. Added NAPI support
66 
67 */
68 
69 
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/types.h>
73 #include <linux/compiler.h>
74 #include <linux/delay.h>
75 #include <linux/interrupt.h>
76 #include <linux/ioport.h>
77 #include <linux/pci.h>
78 #include <linux/netdevice.h>
79 #include <linux/etherdevice.h>
80 #include <linux/skbuff.h>
81 #include <linux/ethtool.h>
82 #include <linux/mii.h>
83 #include <linux/if_vlan.h>
84 #include <linux/ctype.h>
85 #include <linux/crc32.h>
86 #include <linux/dma-mapping.h>
87 
88 #include <asm/io.h>
89 #include <asm/byteorder.h>
90 #include <asm/uaccess.h>
91 
92 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
93 #define AMD8111E_VLAN_TAG_USED 1
94 #else
95 #define AMD8111E_VLAN_TAG_USED 0
96 #endif
97 
98 #include "amd8111e.h"
99 #define MODULE_NAME	"amd8111e"
100 #define MODULE_VERS	"3.0.7"
101 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
102 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
103 MODULE_LICENSE("GPL");
104 MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
105 module_param_array(speed_duplex, int, NULL, 0);
106 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
107 module_param_array(coalesce, bool, NULL, 0);
108 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
109 module_param_array(dynamic_ipg, bool, NULL, 0);
110 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
111 
112 static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
113 
114 	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
115 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
116 	{ 0, }
117 
118 };
119 /*
120 This function will read the PHY registers.
121 */
122 static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
123 {
124 	void __iomem *mmio = lp->mmio;
125 	unsigned int reg_val;
126 	unsigned int repeat= REPEAT_CNT;
127 
128 	reg_val = readl(mmio + PHY_ACCESS);
129 	while (reg_val & PHY_CMD_ACTIVE)
130 		reg_val = readl( mmio + PHY_ACCESS );
131 
132 	writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
133 			   ((reg & 0x1f) << 16),  mmio +PHY_ACCESS);
134 	do{
135 		reg_val = readl(mmio + PHY_ACCESS);
136 		udelay(30);  /* It takes 30 us to read/write data */
137 	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
138 	if(reg_val & PHY_RD_ERR)
139 		goto err_phy_read;
140 
141 	*val = reg_val & 0xffff;
142 	return 0;
143 err_phy_read:
144 	*val = 0;
145 	return -EINVAL;
146 
147 }
148 
149 /*
150 This function will write into PHY registers.
151 */
152 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
153 {
154 	unsigned int repeat = REPEAT_CNT;
155 	void __iomem *mmio = lp->mmio;
156 	unsigned int reg_val;
157 
158 	reg_val = readl(mmio + PHY_ACCESS);
159 	while (reg_val & PHY_CMD_ACTIVE)
160 		reg_val = readl( mmio + PHY_ACCESS );
161 
162 	writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
163 			   ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
164 
165 	do{
166 		reg_val = readl(mmio + PHY_ACCESS);
167 		udelay(30);  /* It takes 30 us to read/write the data */
168 	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
169 
170 	if(reg_val & PHY_RD_ERR)
171 		goto err_phy_write;
172 
173 	return 0;
174 
175 err_phy_write:
176 	return -EINVAL;
177 
178 }
179 /*
180 This is the mii register read function provided to the mii interface.
181 */
182 static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
183 {
184 	struct amd8111e_priv* lp = netdev_priv(dev);
185 	unsigned int reg_val;
186 
187 	amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
188 	return reg_val;
189 
190 }
191 
192 /*
193 This is the mii register write function provided to the mii interface.
194 */
195 static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
196 {
197 	struct amd8111e_priv* lp = netdev_priv(dev);
198 
199 	amd8111e_write_phy(lp, phy_id, reg_num, val);
200 }
201 
202 /*
203 This function will set PHY speed. During initialization sets the original speed to 100 full.
204 */
205 static void amd8111e_set_ext_phy(struct net_device *dev)
206 {
207 	struct amd8111e_priv *lp = netdev_priv(dev);
208 	u32 bmcr,advert,tmp;
209 
210 	/* Determine mii register values to set the speed */
211 	advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
212 	tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
213 	switch (lp->ext_phy_option){
214 
215 		default:
216 		case SPEED_AUTONEG: /* advertise all values */
217 			tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
218 				ADVERTISE_100HALF|ADVERTISE_100FULL) ;
219 			break;
220 		case SPEED10_HALF:
221 			tmp |= ADVERTISE_10HALF;
222 			break;
223 		case SPEED10_FULL:
224 			tmp |= ADVERTISE_10FULL;
225 			break;
226 		case SPEED100_HALF:
227 			tmp |= ADVERTISE_100HALF;
228 			break;
229 		case SPEED100_FULL:
230 			tmp |= ADVERTISE_100FULL;
231 			break;
232 	}
233 
234 	if(advert != tmp)
235 		amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
236 	/* Restart auto negotiation */
237 	bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
238 	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
239 	amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
240 
241 }
242 
243 /*
244 This function will unmap skb->data space and will free
245 all transmit and receive skbuffs.
246 */
247 static int amd8111e_free_skbs(struct net_device *dev)
248 {
249 	struct amd8111e_priv *lp = netdev_priv(dev);
250 	struct sk_buff* rx_skbuff;
251 	int i;
252 
253 	/* Freeing transmit skbs */
254 	for(i = 0; i < NUM_TX_BUFFERS; i++){
255 		if(lp->tx_skbuff[i]){
256 			pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i],					lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
257 			dev_kfree_skb (lp->tx_skbuff[i]);
258 			lp->tx_skbuff[i] = NULL;
259 			lp->tx_dma_addr[i] = 0;
260 		}
261 	}
262 	/* Freeing previously allocated receive buffers */
263 	for (i = 0; i < NUM_RX_BUFFERS; i++){
264 		rx_skbuff = lp->rx_skbuff[i];
265 		if(rx_skbuff != NULL){
266 			pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
267 				  lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
268 			dev_kfree_skb(lp->rx_skbuff[i]);
269 			lp->rx_skbuff[i] = NULL;
270 			lp->rx_dma_addr[i] = 0;
271 		}
272 	}
273 
274 	return 0;
275 }
276 
277 /*
278 This will set the receive buffer length corresponding to the mtu size of networkinterface.
279 */
280 static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
281 {
282 	struct amd8111e_priv* lp = netdev_priv(dev);
283 	unsigned int mtu = dev->mtu;
284 
285 	if (mtu > ETH_DATA_LEN){
286 		/* MTU + ethernet header + FCS
287 		+ optional VLAN tag + skb reserve space 2 */
288 
289 		lp->rx_buff_len = mtu + ETH_HLEN + 10;
290 		lp->options |= OPTION_JUMBO_ENABLE;
291 	} else{
292 		lp->rx_buff_len = PKT_BUFF_SZ;
293 		lp->options &= ~OPTION_JUMBO_ENABLE;
294 	}
295 }
296 
297 /*
298 This function will free all the previously allocated buffers, determine new receive buffer length  and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors.
299  */
300 static int amd8111e_init_ring(struct net_device *dev)
301 {
302 	struct amd8111e_priv *lp = netdev_priv(dev);
303 	int i;
304 
305 	lp->rx_idx = lp->tx_idx = 0;
306 	lp->tx_complete_idx = 0;
307 	lp->tx_ring_idx = 0;
308 
309 
310 	if(lp->opened)
311 		/* Free previously allocated transmit and receive skbs */
312 		amd8111e_free_skbs(dev);
313 
314 	else{
315 		 /* allocate the tx and rx descriptors */
316 	     	if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
317 			sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
318 			&lp->tx_ring_dma_addr)) == NULL)
319 
320 			goto err_no_mem;
321 
322 	     	if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
323 			sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
324 			&lp->rx_ring_dma_addr)) == NULL)
325 
326 			goto err_free_tx_ring;
327 
328 	}
329 	/* Set new receive buff size */
330 	amd8111e_set_rx_buff_len(dev);
331 
332 	/* Allocating receive  skbs */
333 	for (i = 0; i < NUM_RX_BUFFERS; i++) {
334 
335 		lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
336 		if (!lp->rx_skbuff[i]) {
337 				/* Release previos allocated skbs */
338 				for(--i; i >= 0 ;i--)
339 					dev_kfree_skb(lp->rx_skbuff[i]);
340 				goto err_free_rx_ring;
341 		}
342 		skb_reserve(lp->rx_skbuff[i],2);
343 	}
344         /* Initilaizing receive descriptors */
345 	for (i = 0; i < NUM_RX_BUFFERS; i++) {
346 		lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
347 			lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
348 
349 		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
350 		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
351 		wmb();
352 		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
353 	}
354 
355 	/* Initializing transmit descriptors */
356 	for (i = 0; i < NUM_TX_RING_DR; i++) {
357 		lp->tx_ring[i].buff_phy_addr = 0;
358 		lp->tx_ring[i].tx_flags = 0;
359 		lp->tx_ring[i].buff_count = 0;
360 	}
361 
362 	return 0;
363 
364 err_free_rx_ring:
365 
366 	pci_free_consistent(lp->pci_dev,
367 		sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
368 		lp->rx_ring_dma_addr);
369 
370 err_free_tx_ring:
371 
372 	pci_free_consistent(lp->pci_dev,
373 		 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
374 		 lp->tx_ring_dma_addr);
375 
376 err_no_mem:
377 	return -ENOMEM;
378 }
/*
 * amd8111e_set_coalesce - program the interrupt-coalescing registers.
 * @dev:  network device
 * @cmod: which side to (re)program: RX_INTR_COAL / TX_INTR_COAL apply the
 *        values currently stored in lp->coal_conf; ENABLE_COAL starts the
 *        soft timer; DISABLE_COAL stops it and clears both delay registers.
 *
 * Returns 0 on success, -EINVAL if the stored timeout/event-count exceeds
 * the hardware limits.
 */
static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
{
	unsigned int timeout;
	unsigned int event_count;

	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;


	switch(cmod)
	{
		case RX_INTR_COAL :
			timeout = coal_conf->rx_timeout;
			event_count = coal_conf->rx_event_count;
			if( timeout > MAX_TIMEOUT ||
					event_count > MAX_EVENT_COUNT )
				return -EINVAL;

			/* convert to hardware timer ticks */
			timeout = timeout * DELAY_TIMER_CONV;
			writel(VAL0|STINTEN, mmio+INTEN0);
			/* event count in bits 31:16, timeout in the low bits */
			writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
							mmio+DLY_INT_A);
			break;

		case TX_INTR_COAL :
			timeout = coal_conf->tx_timeout;
			event_count = coal_conf->tx_event_count;
			if( timeout > MAX_TIMEOUT ||
					event_count > MAX_EVENT_COUNT )
				return -EINVAL;


			/* convert to hardware timer ticks */
			timeout = timeout * DELAY_TIMER_CONV;
			writel(VAL0|STINTEN,mmio+INTEN0);
			/* event count in bits 31:16, timeout in the low bits */
			writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
							 mmio+DLY_INT_B);
			break;

		case DISABLE_COAL:
			/* stop the soft timer and clear both delay registers */
			writel(0,mmio+STVAL);
			writel(STINTEN, mmio+INTEN0);
			writel(0, mmio +DLY_INT_B);
			writel(0, mmio+DLY_INT_A);
			break;
		 case ENABLE_COAL:
		       /* Start the timer */
			writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /*  0.5 sec */
			writel(VAL0|STINTEN, mmio+INTEN0);
			break;
		default:
			break;

   }
	return 0;

}
437 
438 /*
439 This function initializes the device registers  and starts the device.
440 */
441 static int amd8111e_restart(struct net_device *dev)
442 {
443 	struct amd8111e_priv *lp = netdev_priv(dev);
444 	void __iomem *mmio = lp->mmio;
445 	int i,reg_val;
446 
447 	/* stop the chip */
448 	 writel(RUN, mmio + CMD0);
449 
450 	if(amd8111e_init_ring(dev))
451 		return -ENOMEM;
452 
453 	/* enable the port manager and set auto negotiation always */
454 	writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
455 	writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
456 
457 	amd8111e_set_ext_phy(dev);
458 
459 	/* set control registers */
460 	reg_val = readl(mmio + CTRL1);
461 	reg_val &= ~XMTSP_MASK;
462 	writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
463 
464 	/* enable interrupt */
465 	writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
466 		APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
467 		SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
468 
469 	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
470 
471 	/* initialize tx and rx ring base addresses */
472 	writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
473 	writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
474 
475 	writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
476 	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
477 
478 	/* set default IPG to 96 */
479 	writew((u32)DEFAULT_IPG,mmio+IPG);
480 	writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
481 
482 	if(lp->options & OPTION_JUMBO_ENABLE){
483 		writel((u32)VAL2|JUMBO, mmio + CMD3);
484 		/* Reset REX_UFLO */
485 		writel( REX_UFLO, mmio + CMD2);
486 		/* Should not set REX_UFLO for jumbo frames */
487 		writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
488 	}else{
489 		writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
490 		writel((u32)JUMBO, mmio + CMD3);
491 	}
492 
493 #if AMD8111E_VLAN_TAG_USED
494 	writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
495 #endif
496 	writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
497 
498 	/* Setting the MAC address to the device */
499 	for (i = 0; i < ETH_ALEN; i++)
500 		writeb( dev->dev_addr[i], mmio + PADR + i );
501 
502 	/* Enable interrupt coalesce */
503 	if(lp->options & OPTION_INTR_COAL_ENABLE){
504 		printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
505 								dev->name);
506 		amd8111e_set_coalesce(dev,ENABLE_COAL);
507 	}
508 
509 	/* set RUN bit to start the chip */
510 	writel(VAL2 | RDMD0, mmio + CMD0);
511 	writel(VAL0 | INTREN | RUN, mmio + CMD0);
512 
513 	/* To avoid PCI posting bug */
514 	readl(mmio+CMD0);
515 	return 0;
516 }
517 /*
518 This function clears necessary the device registers.
519 */
520 static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
521 {
522 	unsigned int reg_val;
523 	unsigned int logic_filter[2] ={0,};
524 	void __iomem *mmio = lp->mmio;
525 
526 
527         /* stop the chip */
528 	writel(RUN, mmio + CMD0);
529 
530 	/* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
531 	writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
532 
533 	/* Clear RCV_RING_BASE_ADDR */
534 	writel(0, mmio + RCV_RING_BASE_ADDR0);
535 
536 	/* Clear XMT_RING_BASE_ADDR */
537 	writel(0, mmio + XMT_RING_BASE_ADDR0);
538 	writel(0, mmio + XMT_RING_BASE_ADDR1);
539 	writel(0, mmio + XMT_RING_BASE_ADDR2);
540 	writel(0, mmio + XMT_RING_BASE_ADDR3);
541 
542 	/* Clear CMD0  */
543 	writel(CMD0_CLEAR,mmio + CMD0);
544 
545 	/* Clear CMD2 */
546 	writel(CMD2_CLEAR, mmio +CMD2);
547 
548 	/* Clear CMD7 */
549 	writel(CMD7_CLEAR , mmio + CMD7);
550 
551 	/* Clear DLY_INT_A and DLY_INT_B */
552 	writel(0x0, mmio + DLY_INT_A);
553 	writel(0x0, mmio + DLY_INT_B);
554 
555 	/* Clear FLOW_CONTROL */
556 	writel(0x0, mmio + FLOW_CONTROL);
557 
558 	/* Clear INT0  write 1 to clear register */
559 	reg_val = readl(mmio + INT0);
560 	writel(reg_val, mmio + INT0);
561 
562 	/* Clear STVAL */
563 	writel(0x0, mmio + STVAL);
564 
565 	/* Clear INTEN0 */
566 	writel( INTEN0_CLEAR, mmio + INTEN0);
567 
568 	/* Clear LADRF */
569 	writel(0x0 , mmio + LADRF);
570 
571 	/* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
572 	writel( 0x80010,mmio + SRAM_SIZE);
573 
574 	/* Clear RCV_RING0_LEN */
575 	writel(0x0, mmio +  RCV_RING_LEN0);
576 
577 	/* Clear XMT_RING0/1/2/3_LEN */
578 	writel(0x0, mmio +  XMT_RING_LEN0);
579 	writel(0x0, mmio +  XMT_RING_LEN1);
580 	writel(0x0, mmio +  XMT_RING_LEN2);
581 	writel(0x0, mmio +  XMT_RING_LEN3);
582 
583 	/* Clear XMT_RING_LIMIT */
584 	writel(0x0, mmio + XMT_RING_LIMIT);
585 
586 	/* Clear MIB */
587 	writew(MIB_CLEAR, mmio + MIB_ADDR);
588 
589 	/* Clear LARF */
590 	amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
591 
592 	/* SRAM_SIZE register */
593 	reg_val = readl(mmio + SRAM_SIZE);
594 
595 	if(lp->options & OPTION_JUMBO_ENABLE)
596 		writel( VAL2|JUMBO, mmio + CMD3);
597 #if AMD8111E_VLAN_TAG_USED
598 	writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
599 #endif
600 	/* Set default value to CTRL1 Register */
601 	writel(CTRL1_DEFAULT, mmio + CTRL1);
602 
603 	/* To avoid PCI posting bug */
604 	readl(mmio + CMD2);
605 
606 }
607 
608 /*
609 This function disables the interrupt and clears all the pending
610 interrupts in INT0
611  */
612 static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
613 {
614 	u32 intr0;
615 
616 	/* Disable interrupt */
617 	writel(INTREN, lp->mmio + CMD0);
618 
619 	/* Clear INT0 */
620 	intr0 = readl(lp->mmio + INT0);
621 	writel(intr0, lp->mmio + INT0);
622 
623 	/* To avoid PCI posting bug */
624 	readl(lp->mmio + INT0);
625 
626 }
627 
628 /*
629 This function stops the chip.
630 */
631 static void amd8111e_stop_chip(struct amd8111e_priv* lp)
632 {
633 	writel(RUN, lp->mmio + CMD0);
634 
635 	/* To avoid PCI posting bug */
636 	readl(lp->mmio + CMD0);
637 }
638 
639 /*
640 This function frees the  transmiter and receiver descriptor rings.
641 */
642 static void amd8111e_free_ring(struct amd8111e_priv* lp)
643 {
644 	/* Free transmit and receive descriptor rings */
645 	if(lp->rx_ring){
646 		pci_free_consistent(lp->pci_dev,
647 			sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
648 			lp->rx_ring, lp->rx_ring_dma_addr);
649 		lp->rx_ring = NULL;
650 	}
651 
652 	if(lp->tx_ring){
653 		pci_free_consistent(lp->pci_dev,
654 			sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
655 			lp->tx_ring, lp->tx_ring_dma_addr);
656 
657 		lp->tx_ring = NULL;
658 	}
659 
660 }
661 
662 /*
663 This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb.
664 */
665 static int amd8111e_tx(struct net_device *dev)
666 {
667 	struct amd8111e_priv* lp = netdev_priv(dev);
668 	int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
669 	int status;
670 	/* Complete all the transmit packet */
671 	while (lp->tx_complete_idx != lp->tx_idx){
672 		tx_index =  lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
673 		status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
674 
675 		if(status & OWN_BIT)
676 			break;	/* It still hasn't been Txed */
677 
678 		lp->tx_ring[tx_index].buff_phy_addr = 0;
679 
680 		/* We must free the original skb */
681 		if (lp->tx_skbuff[tx_index]) {
682 			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
683 				  	lp->tx_skbuff[tx_index]->len,
684 					PCI_DMA_TODEVICE);
685 			dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
686 			lp->tx_skbuff[tx_index] = NULL;
687 			lp->tx_dma_addr[tx_index] = 0;
688 		}
689 		lp->tx_complete_idx++;
690 		/*COAL update tx coalescing parameters */
691 		lp->coal_conf.tx_packets++;
692 		lp->coal_conf.tx_bytes +=
693 			le16_to_cpu(lp->tx_ring[tx_index].buff_count);
694 
695 		if (netif_queue_stopped(dev) &&
696 			lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
697 			/* The ring is no longer full, clear tbusy. */
698 			/* lp->tx_full = 0; */
699 			netif_wake_queue (dev);
700 		}
701 	}
702 	return 0;
703 }
704 
/*
 * amd8111e_rx_poll - NAPI poll handler: harvest received packets.
 * @napi:   NAPI context embedded in the driver private data
 * @budget: maximum number of packets we may pass up this poll
 *
 * Consumes descriptors the hardware has released (OWN_BIT clear),
 * replaces each consumed skb with a freshly allocated one, refills the
 * descriptor, and hands the packet to the stack.  Errored, fragmented or
 * undersized frames are dropped and their descriptor recycled.  When the
 * ring empties within budget, RX interrupts are re-enabled and NAPI is
 * completed.  Returns the number of packets delivered.
 */
static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
{
	struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
	struct net_device *dev = lp->amd8111e_net_dev;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb,*new_skb;
	int min_pkt_len, status;
	unsigned int intr0;
	int num_rx_pkt = 0;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif
	int rx_pkt_limit = budget;
	unsigned long flags;

	do{
		/* process receive packets until we use the quota*/
		/* If we own the next entry, it's a new packet. Send it up. */
		while(1) {
			status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
			if (status & OWN_BIT)
				break;

			/*
			 * There is a tricky error noted by John Murphy,
			 * <murf@perftech.com> to Russ Nelson: Even with
			 * full-sized * buffers it's possible for a
			 * jabber packet to use two buffers, with only
			 * the last correctly noting the error.
			 */

			if(status & ERR_BIT) {
				/* resetting flags */
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				goto err_next_pkt;
			}
			/* check for STP and ENP: drop multi-descriptor frames */
			if(!((status & STP_BIT) && (status & ENP_BIT))){
				/* resetting flags */
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				goto err_next_pkt;
			}
			/* msg_count includes the 4-byte FCS; strip it */
			pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
			vtag = status & TT_MASK;
			/*MAC will strip vlan tag*/
			if (vtag != 0)
				min_pkt_len =MIN_PKT_LEN - 4;
			else
#endif
				min_pkt_len =MIN_PKT_LEN;

			if (pkt_len < min_pkt_len) {
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				lp->drv_rx_errors++;
				goto err_next_pkt;
			}
			/* budget exhausted: leave the rest for the next poll */
			if(--rx_pkt_limit < 0)
				goto rx_not_empty;
			new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
			if (!new_skb) {
				/* if allocation fail,
				   ignore that pkt and go to next one */
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				lp->drv_rx_errors++;
				goto err_next_pkt;
			}

			/* keep the 2-byte IP-header alignment reserve */
			skb_reserve(new_skb, 2);
			skb = lp->rx_skbuff[rx_index];
			pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
					 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
			lp->rx_skbuff[rx_index] = new_skb;
			lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
								   new_skb->data,
								   lp->rx_buff_len-2,
								   PCI_DMA_FROMDEVICE);

			skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED
			if (vtag == TT_VLAN_TAGGED){
				u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
			}
#endif
			netif_receive_skb(skb);
			/*COAL update rx coalescing parameters*/
			lp->coal_conf.rx_packets++;
			lp->coal_conf.rx_bytes += pkt_len;
			num_rx_pkt++;

		err_next_pkt:
			/* refill the descriptor and give it back to the
			 * hardware; wmb() orders the field writes before
			 * OWN_BIT is set */
			lp->rx_ring[rx_index].buff_phy_addr
				= cpu_to_le32(lp->rx_dma_addr[rx_index]);
			lp->rx_ring[rx_index].buff_count =
				cpu_to_le16(lp->rx_buff_len-2);
			wmb();
			lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
			rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
		}
		/* Check the interrupt status register for more packets in the
		   mean time. Process them since we have not used up our quota.*/

		intr0 = readl(mmio + INT0);
		/*Ack receive packets */
		writel(intr0 & RINT0,mmio + INT0);

	} while(intr0 & RINT0);

	if (rx_pkt_limit > 0) {
		/* Receive descriptor is empty now */
		/* NOTE(review): __napi_complete under lp->lock with a manual
		 * RINTEN0 re-enable — presumably safe against the ISR here,
		 * but verify against the interrupt handler's locking. */
		spin_lock_irqsave(&lp->lock, flags);
		__napi_complete(napi);
		writel(VAL0|RINTEN0, mmio + INTEN0);
		writel(VAL2 | RDMD0, mmio + CMD0);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

rx_not_empty:
	return num_rx_pkt;
}
832 
833 /*
834 This function will indicate the link status to the kernel.
835 */
836 static int amd8111e_link_change(struct net_device* dev)
837 {
838 	struct amd8111e_priv *lp = netdev_priv(dev);
839 	int status0,speed;
840 
841 	/* read the link change */
842      	status0 = readl(lp->mmio + STAT0);
843 
844 	if(status0 & LINK_STATS){
845 		if(status0 & AUTONEG_COMPLETE)
846 			lp->link_config.autoneg = AUTONEG_ENABLE;
847 		else
848 			lp->link_config.autoneg = AUTONEG_DISABLE;
849 
850 		if(status0 & FULL_DPLX)
851 			lp->link_config.duplex = DUPLEX_FULL;
852 		else
853 			lp->link_config.duplex = DUPLEX_HALF;
854 		speed = (status0 & SPEED_MASK) >> 7;
855 		if(speed == PHY_SPEED_10)
856 			lp->link_config.speed = SPEED_10;
857 		else if(speed == PHY_SPEED_100)
858 			lp->link_config.speed = SPEED_100;
859 
860 		printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n",			dev->name,
861 		       (lp->link_config.speed == SPEED_100) ? "100": "10",
862 		       (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
863 		netif_carrier_on(dev);
864 	}
865 	else{
866 		lp->link_config.speed = SPEED_INVALID;
867 		lp->link_config.duplex = DUPLEX_INVALID;
868 		lp->link_config.autoneg = AUTONEG_INVALID;
869 		printk(KERN_INFO "%s: Link is Down.\n",dev->name);
870 		netif_carrier_off(dev);
871 	}
872 
873 	return 0;
874 }
875 /*
876 This function reads the mib counters.
877 */
878 static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
879 {
880 	unsigned int  status;
881 	unsigned  int data;
882 	unsigned int repeat = REPEAT_CNT;
883 
884 	writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
885 	do {
886 		status = readw(mmio + MIB_ADDR);
887 		udelay(2);	/* controller takes MAX 2 us to get mib data */
888 	}
889 	while (--repeat && (status & MIB_CMD_ACTIVE));
890 
891 	data = readl(mmio + MIB_DATA);
892 	return data;
893 }
894 
895 /*
896  * This function reads the mib registers and returns the hardware statistics.
897  * It updates previous internal driver statistics with new values.
898  */
899 static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
900 {
901 	struct amd8111e_priv *lp = netdev_priv(dev);
902 	void __iomem *mmio = lp->mmio;
903 	unsigned long flags;
904 	struct net_device_stats *new_stats = &dev->stats;
905 
906 	if (!lp->opened)
907 		return new_stats;
908 	spin_lock_irqsave (&lp->lock, flags);
909 
910 	/* stats.rx_packets */
911 	new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
912 				amd8111e_read_mib(mmio, rcv_multicast_pkts)+
913 				amd8111e_read_mib(mmio, rcv_unicast_pkts);
914 
915 	/* stats.tx_packets */
916 	new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
917 
918 	/*stats.rx_bytes */
919 	new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
920 
921 	/* stats.tx_bytes */
922 	new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
923 
924 	/* stats.rx_errors */
925 	/* hw errors + errors driver reported */
926 	new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
927 				amd8111e_read_mib(mmio, rcv_fragments)+
928 				amd8111e_read_mib(mmio, rcv_jabbers)+
929 				amd8111e_read_mib(mmio, rcv_alignment_errors)+
930 				amd8111e_read_mib(mmio, rcv_fcs_errors)+
931 				amd8111e_read_mib(mmio, rcv_miss_pkts)+
932 				lp->drv_rx_errors;
933 
934 	/* stats.tx_errors */
935 	new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
936 
937 	/* stats.rx_dropped*/
938 	new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
939 
940 	/* stats.tx_dropped*/
941 	new_stats->tx_dropped = amd8111e_read_mib(mmio,  xmt_underrun_pkts);
942 
943 	/* stats.multicast*/
944 	new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
945 
946 	/* stats.collisions*/
947 	new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
948 
949 	/* stats.rx_length_errors*/
950 	new_stats->rx_length_errors =
951 		amd8111e_read_mib(mmio, rcv_undersize_pkts)+
952 		amd8111e_read_mib(mmio, rcv_oversize_pkts);
953 
954 	/* stats.rx_over_errors*/
955 	new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
956 
957 	/* stats.rx_crc_errors*/
958 	new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
959 
960 	/* stats.rx_frame_errors*/
961 	new_stats->rx_frame_errors =
962 		amd8111e_read_mib(mmio, rcv_alignment_errors);
963 
964 	/* stats.rx_fifo_errors */
965 	new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
966 
967 	/* stats.rx_missed_errors */
968 	new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
969 
970 	/* stats.tx_aborted_errors*/
971 	new_stats->tx_aborted_errors =
972 		amd8111e_read_mib(mmio, xmt_excessive_collision);
973 
974 	/* stats.tx_carrier_errors*/
975 	new_stats->tx_carrier_errors =
976 		amd8111e_read_mib(mmio, xmt_loss_carrier);
977 
978 	/* stats.tx_fifo_errors*/
979 	new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
980 
981 	/* stats.tx_window_errors*/
982 	new_stats->tx_window_errors =
983 		amd8111e_read_mib(mmio, xmt_late_collision);
984 
985 	/* Reset the mibs for collecting new statistics */
986 	/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
987 
988 	spin_unlock_irqrestore (&lp->lock, flags);
989 
990 	return new_stats;
991 }
992 /* This function recalculate the interrupt coalescing  mode on every interrupt
993 according to the datarate and the packet rate.
994 */
995 static int amd8111e_calc_coalesce(struct net_device *dev)
996 {
997 	struct amd8111e_priv *lp = netdev_priv(dev);
998 	struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
999 	int tx_pkt_rate;
1000 	int rx_pkt_rate;
1001 	int tx_data_rate;
1002 	int rx_data_rate;
1003 	int rx_pkt_size;
1004 	int tx_pkt_size;
1005 
1006 	tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
1007 	coal_conf->tx_prev_packets =  coal_conf->tx_packets;
1008 
1009 	tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
1010 	coal_conf->tx_prev_bytes =  coal_conf->tx_bytes;
1011 
1012 	rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
1013 	coal_conf->rx_prev_packets =  coal_conf->rx_packets;
1014 
1015 	rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1016 	coal_conf->rx_prev_bytes =  coal_conf->rx_bytes;
1017 
1018 	if(rx_pkt_rate < 800){
1019 		if(coal_conf->rx_coal_type != NO_COALESCE){
1020 
1021 			coal_conf->rx_timeout = 0x0;
1022 			coal_conf->rx_event_count = 0;
1023 			amd8111e_set_coalesce(dev,RX_INTR_COAL);
1024 			coal_conf->rx_coal_type = NO_COALESCE;
1025 		}
1026 	}
1027 	else{
1028 
1029 		rx_pkt_size = rx_data_rate/rx_pkt_rate;
1030 		if (rx_pkt_size < 128){
1031 			if(coal_conf->rx_coal_type != NO_COALESCE){
1032 
1033 				coal_conf->rx_timeout = 0;
1034 				coal_conf->rx_event_count = 0;
1035 				amd8111e_set_coalesce(dev,RX_INTR_COAL);
1036 				coal_conf->rx_coal_type = NO_COALESCE;
1037 			}
1038 
1039 		}
1040 		else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1041 
1042 			if(coal_conf->rx_coal_type !=  LOW_COALESCE){
1043 				coal_conf->rx_timeout = 1;
1044 				coal_conf->rx_event_count = 4;
1045 				amd8111e_set_coalesce(dev,RX_INTR_COAL);
1046 				coal_conf->rx_coal_type = LOW_COALESCE;
1047 			}
1048 		}
1049 		else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1050 
1051 			if(coal_conf->rx_coal_type !=  MEDIUM_COALESCE){
1052 				coal_conf->rx_timeout = 1;
1053 				coal_conf->rx_event_count = 4;
1054 				amd8111e_set_coalesce(dev,RX_INTR_COAL);
1055 				coal_conf->rx_coal_type = MEDIUM_COALESCE;
1056 			}
1057 
1058 		}
1059 		else if(rx_pkt_size >= 1024){
1060 			if(coal_conf->rx_coal_type !=  HIGH_COALESCE){
1061 				coal_conf->rx_timeout = 2;
1062 				coal_conf->rx_event_count = 3;
1063 				amd8111e_set_coalesce(dev,RX_INTR_COAL);
1064 				coal_conf->rx_coal_type = HIGH_COALESCE;
1065 			}
1066 		}
1067 	}
1068     	/* NOW FOR TX INTR COALESC */
1069 	if(tx_pkt_rate < 800){
1070 		if(coal_conf->tx_coal_type != NO_COALESCE){
1071 
1072 			coal_conf->tx_timeout = 0x0;
1073 			coal_conf->tx_event_count = 0;
1074 			amd8111e_set_coalesce(dev,TX_INTR_COAL);
1075 			coal_conf->tx_coal_type = NO_COALESCE;
1076 		}
1077 	}
1078 	else{
1079 
1080 		tx_pkt_size = tx_data_rate/tx_pkt_rate;
1081 		if (tx_pkt_size < 128){
1082 
1083 			if(coal_conf->tx_coal_type != NO_COALESCE){
1084 
1085 				coal_conf->tx_timeout = 0;
1086 				coal_conf->tx_event_count = 0;
1087 				amd8111e_set_coalesce(dev,TX_INTR_COAL);
1088 				coal_conf->tx_coal_type = NO_COALESCE;
1089 			}
1090 
1091 		}
1092 		else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1093 
1094 			if(coal_conf->tx_coal_type !=  LOW_COALESCE){
1095 				coal_conf->tx_timeout = 1;
1096 				coal_conf->tx_event_count = 2;
1097 				amd8111e_set_coalesce(dev,TX_INTR_COAL);
1098 				coal_conf->tx_coal_type = LOW_COALESCE;
1099 
1100 			}
1101 		}
1102 		else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1103 
1104 			if(coal_conf->tx_coal_type !=  MEDIUM_COALESCE){
1105 				coal_conf->tx_timeout = 2;
1106 				coal_conf->tx_event_count = 5;
1107 				amd8111e_set_coalesce(dev,TX_INTR_COAL);
1108 				coal_conf->tx_coal_type = MEDIUM_COALESCE;
1109 			}
1110 
1111 		}
1112 		else if(tx_pkt_size >= 1024){
1113 			if (tx_pkt_size >= 1024){
1114 				if(coal_conf->tx_coal_type !=  HIGH_COALESCE){
1115 					coal_conf->tx_timeout = 4;
1116 					coal_conf->tx_event_count = 8;
1117 					amd8111e_set_coalesce(dev,TX_INTR_COAL);
1118 					coal_conf->tx_coal_type = HIGH_COALESCE;
1119 				}
1120 			}
1121 		}
1122 	}
1123 	return 0;
1124 
1125 }
1126 /*
1127 This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts.
1128 */
1129 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1130 {
1131 
1132 	struct net_device * dev = (struct net_device *) dev_id;
1133 	struct amd8111e_priv *lp = netdev_priv(dev);
1134 	void __iomem *mmio = lp->mmio;
1135 	unsigned int intr0, intren0;
1136 	unsigned int handled = 1;
1137 
1138 	if(unlikely(dev == NULL))
1139 		return IRQ_NONE;
1140 
1141 	spin_lock(&lp->lock);
1142 
1143 	/* disabling interrupt */
1144 	writel(INTREN, mmio + CMD0);
1145 
1146 	/* Read interrupt status */
1147 	intr0 = readl(mmio + INT0);
1148 	intren0 = readl(mmio + INTEN0);
1149 
1150 	/* Process all the INT event until INTR bit is clear. */
1151 
1152 	if (!(intr0 & INTR)){
1153 		handled = 0;
1154 		goto err_no_interrupt;
1155 	}
1156 
1157 	/* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
1158 	writel(intr0, mmio + INT0);
1159 
1160 	/* Check if Receive Interrupt has occurred. */
1161 	if (intr0 & RINT0) {
1162 		if (napi_schedule_prep(&lp->napi)) {
1163 			/* Disable receive interupts */
1164 			writel(RINTEN0, mmio + INTEN0);
1165 			/* Schedule a polling routine */
1166 			__napi_schedule(&lp->napi);
1167 		} else if (intren0 & RINTEN0) {
1168 			printk("************Driver bug! interrupt while in poll\n");
1169 			/* Fix by disable receive interrupts */
1170 			writel(RINTEN0, mmio + INTEN0);
1171 		}
1172 	}
1173 
1174 	/* Check if  Transmit Interrupt has occurred. */
1175 	if (intr0 & TINT0)
1176 		amd8111e_tx(dev);
1177 
1178 	/* Check if  Link Change Interrupt has occurred. */
1179 	if (intr0 & LCINT)
1180 		amd8111e_link_change(dev);
1181 
1182 	/* Check if Hardware Timer Interrupt has occurred. */
1183 	if (intr0 & STINT)
1184 		amd8111e_calc_coalesce(dev);
1185 
1186 err_no_interrupt:
1187 	writel( VAL0 | INTREN,mmio + CMD0);
1188 
1189 	spin_unlock(&lp->lock);
1190 
1191 	return IRQ_RETVAL(handled);
1192 }
1193 
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point (netconsole, kgdb-over-ethernet): invoke the
 * interrupt handler directly with local interrupts disabled, as if a
 * hardware IRQ had fired. */
static void amd8111e_poll(struct net_device *dev)
{
	unsigned long flags;
	local_irq_save(flags);
	amd8111e_interrupt(0, dev);
	local_irq_restore(flags);
}
#endif
1203 
1204 
1205 /*
1206 This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down.
1207 */
1208 static int amd8111e_close(struct net_device * dev)
1209 {
1210 	struct amd8111e_priv *lp = netdev_priv(dev);
1211 	netif_stop_queue(dev);
1212 
1213 	napi_disable(&lp->napi);
1214 
1215 	spin_lock_irq(&lp->lock);
1216 
1217 	amd8111e_disable_interrupt(lp);
1218 	amd8111e_stop_chip(lp);
1219 
1220 	/* Free transmit and receive skbs */
1221 	amd8111e_free_skbs(lp->amd8111e_net_dev);
1222 
1223 	netif_carrier_off(lp->amd8111e_net_dev);
1224 
1225 	/* Delete ipg timer */
1226 	if(lp->options & OPTION_DYN_IPG_ENABLE)
1227 		del_timer_sync(&lp->ipg_data.ipg_timer);
1228 
1229 	spin_unlock_irq(&lp->lock);
1230 	free_irq(dev->irq, dev);
1231 	amd8111e_free_ring(lp);
1232 
1233 	/* Update the statistics before closing */
1234 	amd8111e_get_stats(dev);
1235 	lp->opened = 0;
1236 	return 0;
1237 }
/* This function opens a new interface. It requests an IRQ for the
 * device, initializes the device, buffers and descriptors, and starts
 * the device.
 *
 * Returns 0 on success, -EAGAIN if the IRQ cannot be obtained, or
 * -ENOMEM if restarting the hardware fails.
 */
static int amd8111e_open(struct net_device * dev )
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	/* A zero IRQ is treated as invalid; the line is shared. */
	if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
					 dev->name, dev))
		return -EAGAIN;

	napi_enable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_init_hw_default(lp);

	if(amd8111e_restart(dev)){
		/* Undo everything acquired above on failure. */
		spin_unlock_irq(&lp->lock);
		napi_disable(&lp->napi);
		if (dev->irq)
			free_irq(dev->irq, dev);
		return -ENOMEM;
	}
	/* Start ipg timer */
	if(lp->options & OPTION_DYN_IPG_ENABLE){
		add_timer(&lp->ipg_data.ipg_timer);
		printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
	}

	lp->opened = 1;

	spin_unlock_irq(&lp->lock);

	netif_start_queue(dev);

	return 0;
}
1275 /*
1276 This function checks if there is any transmit  descriptors available to queue more packet.
1277 */
1278 static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1279 {
1280 	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1281 	if (lp->tx_skbuff[tx_index])
1282 		return -1;
1283 	else
1284 		return 0;
1285 
1286 }
1287 /*
1288 This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc.
1289 */
1290 
1291 static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1292 				       struct net_device * dev)
1293 {
1294 	struct amd8111e_priv *lp = netdev_priv(dev);
1295 	int tx_index;
1296 	unsigned long flags;
1297 
1298 	spin_lock_irqsave(&lp->lock, flags);
1299 
1300 	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1301 
1302 	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1303 
1304 	lp->tx_skbuff[tx_index] = skb;
1305 	lp->tx_ring[tx_index].tx_flags = 0;
1306 
1307 #if AMD8111E_VLAN_TAG_USED
1308 	if (vlan_tx_tag_present(skb)) {
1309 		lp->tx_ring[tx_index].tag_ctrl_cmd |=
1310 				cpu_to_le16(TCC_VLAN_INSERT);
1311 		lp->tx_ring[tx_index].tag_ctrl_info =
1312 				cpu_to_le16(vlan_tx_tag_get(skb));
1313 
1314 	}
1315 #endif
1316 	lp->tx_dma_addr[tx_index] =
1317 	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1318 	lp->tx_ring[tx_index].buff_phy_addr =
1319 	    cpu_to_le32(lp->tx_dma_addr[tx_index]);
1320 
1321 	/*  Set FCS and LTINT bits */
1322 	wmb();
1323 	lp->tx_ring[tx_index].tx_flags |=
1324 	    cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
1325 
1326 	lp->tx_idx++;
1327 
1328 	/* Trigger an immediate send poll. */
1329 	writel( VAL1 | TDMD0, lp->mmio + CMD0);
1330 	writel( VAL2 | RDMD0,lp->mmio + CMD0);
1331 
1332 	if(amd8111e_tx_queue_avail(lp) < 0){
1333 		netif_stop_queue(dev);
1334 	}
1335 	spin_unlock_irqrestore(&lp->lock, flags);
1336 	return NETDEV_TX_OK;
1337 }
1338 /*
1339 This function returns all the memory mapped registers of the device.
1340 */
1341 static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1342 {
1343 	void __iomem *mmio = lp->mmio;
1344 	/* Read only necessary registers */
1345 	buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1346 	buf[1] = readl(mmio + XMT_RING_LEN0);
1347 	buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1348 	buf[3] = readl(mmio + RCV_RING_LEN0);
1349 	buf[4] = readl(mmio + CMD0);
1350 	buf[5] = readl(mmio + CMD2);
1351 	buf[6] = readl(mmio + CMD3);
1352 	buf[7] = readl(mmio + CMD7);
1353 	buf[8] = readl(mmio + INT0);
1354 	buf[9] = readl(mmio + INTEN0);
1355 	buf[10] = readl(mmio + LADRF);
1356 	buf[11] = readl(mmio + LADRF+4);
1357 	buf[12] = readl(mmio + STAT0);
1358 }
1359 
1360 
1361 /*
1362 This function sets promiscuos mode, all-multi mode or the multicast address
1363 list to the device.
1364 */
1365 static void amd8111e_set_multicast_list(struct net_device *dev)
1366 {
1367 	struct netdev_hw_addr *ha;
1368 	struct amd8111e_priv *lp = netdev_priv(dev);
1369 	u32 mc_filter[2] ;
1370 	int bit_num;
1371 
1372 	if(dev->flags & IFF_PROMISC){
1373 		writel( VAL2 | PROM, lp->mmio + CMD2);
1374 		return;
1375 	}
1376 	else
1377 		writel( PROM, lp->mmio + CMD2);
1378 	if (dev->flags & IFF_ALLMULTI ||
1379 	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
1380 		/* get all multicast packet */
1381 		mc_filter[1] = mc_filter[0] = 0xffffffff;
1382 		lp->options |= OPTION_MULTICAST_ENABLE;
1383 		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1384 		return;
1385 	}
1386 	if (netdev_mc_empty(dev)) {
1387 		/* get only own packets */
1388 		mc_filter[1] = mc_filter[0] = 0;
1389 		lp->options &= ~OPTION_MULTICAST_ENABLE;
1390 		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1391 		/* disable promiscuous mode */
1392 		writel(PROM, lp->mmio + CMD2);
1393 		return;
1394 	}
1395 	/* load all the multicast addresses in the logic filter */
1396 	lp->options |= OPTION_MULTICAST_ENABLE;
1397 	mc_filter[1] = mc_filter[0] = 0;
1398 	netdev_for_each_mc_addr(ha, dev) {
1399 		bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
1400 		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1401 	}
1402 	amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
1403 
1404 	/* To eliminate PCI posting bug */
1405 	readl(lp->mmio + CMD2);
1406 
1407 }
1408 
1409 static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
1410 {
1411 	struct amd8111e_priv *lp = netdev_priv(dev);
1412 	struct pci_dev *pci_dev = lp->pci_dev;
1413 	strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
1414 	strlcpy(info->version, MODULE_VERS, sizeof(info->version));
1415 	snprintf(info->fw_version, sizeof(info->fw_version),
1416 		"%u", chip_version);
1417 	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
1418 }
1419 
/* ethtool get_regs_len: size in bytes of the register dump produced by
 * amd8111e_get_regs(). */
static int amd8111e_get_regs_len(struct net_device *dev)
{
	return AMD8111E_REG_DUMP_LEN;
}
1424 
/* ethtool get_regs: fill buf with the device register dump. */
static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	regs->version = 0;	/* no versioned register layout */
	amd8111e_read_regs(lp, buf);
}
1431 
/* ethtool get_settings: read link settings from the MII PHY, under the
 * device lock. Always returns 0. */
static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	spin_lock_irq(&lp->lock);
	mii_ethtool_gset(&lp->mii_if, ecmd);
	spin_unlock_irq(&lp->lock);
	return 0;
}
1440 
/* ethtool set_settings: apply link settings to the MII PHY, under the
 * device lock. Returns the mii helper's result. */
static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int res;
	spin_lock_irq(&lp->lock);
	res = mii_ethtool_sset(&lp->mii_if, ecmd);
	spin_unlock_irq(&lp->lock);
	return res;
}
1450 
/* ethtool nway_reset: restart PHY autonegotiation. */
static int amd8111e_nway_reset(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_nway_restart(&lp->mii_if);
}
1456 
/* ethtool get_link: report PHY link status (non-zero when link is up). */
static u32 amd8111e_get_link(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_link_ok(&lp->mii_if);
}
1462 
1463 static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1464 {
1465 	struct amd8111e_priv *lp = netdev_priv(dev);
1466 	wol_info->supported = WAKE_MAGIC|WAKE_PHY;
1467 	if (lp->options & OPTION_WOL_ENABLE)
1468 		wol_info->wolopts = WAKE_MAGIC;
1469 }
1470 
1471 static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1472 {
1473 	struct amd8111e_priv *lp = netdev_priv(dev);
1474 	if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
1475 		return -EINVAL;
1476 	spin_lock_irq(&lp->lock);
1477 	if (wol_info->wolopts & WAKE_MAGIC)
1478 		lp->options |=
1479 			(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1480 	else if(wol_info->wolopts & WAKE_PHY)
1481 		lp->options |=
1482 			(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1483 	else
1484 		lp->options &= ~OPTION_WOL_ENABLE;
1485 	spin_unlock_irq(&lp->lock);
1486 	return 0;
1487 }
1488 
/* ethtool operations supported by this driver. */
static const struct ethtool_ops ops = {
	.get_drvinfo = amd8111e_get_drvinfo,
	.get_regs_len = amd8111e_get_regs_len,
	.get_regs = amd8111e_get_regs,
	.get_settings = amd8111e_get_settings,
	.set_settings = amd8111e_set_settings,
	.nway_reset = amd8111e_nway_reset,
	.get_link = amd8111e_get_link,
	.get_wol = amd8111e_get_wol,
	.set_wol = amd8111e_set_wol,
};
1500 
1501 /*
1502 This function handles all the  ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application.
1503 */
1504 
1505 static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
1506 {
1507 	struct mii_ioctl_data *data = if_mii(ifr);
1508 	struct amd8111e_priv *lp = netdev_priv(dev);
1509 	int err;
1510 	u32 mii_regval;
1511 
1512 	switch(cmd) {
1513 	case SIOCGMIIPHY:
1514 		data->phy_id = lp->ext_phy_addr;
1515 
1516 	/* fallthru */
1517 	case SIOCGMIIREG:
1518 
1519 		spin_lock_irq(&lp->lock);
1520 		err = amd8111e_read_phy(lp, data->phy_id,
1521 			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1522 		spin_unlock_irq(&lp->lock);
1523 
1524 		data->val_out = mii_regval;
1525 		return err;
1526 
1527 	case SIOCSMIIREG:
1528 
1529 		spin_lock_irq(&lp->lock);
1530 		err = amd8111e_write_phy(lp, data->phy_id,
1531 			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1532 		spin_unlock_irq(&lp->lock);
1533 
1534 		return err;
1535 
1536 	default:
1537 		/* do nothing */
1538 		break;
1539 	}
1540 	return -EOPNOTSUPP;
1541 }
1542 static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1543 {
1544 	struct amd8111e_priv *lp = netdev_priv(dev);
1545 	int i;
1546 	struct sockaddr *addr = p;
1547 
1548 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1549 	spin_lock_irq(&lp->lock);
1550 	/* Setting the MAC address to the device */
1551 	for (i = 0; i < ETH_ALEN; i++)
1552 		writeb( dev->dev_addr[i], lp->mmio + PADR + i );
1553 
1554 	spin_unlock_irq(&lp->lock);
1555 
1556 	return 0;
1557 }
1558 
1559 /*
1560 This function changes the mtu of the device. It restarts the device  to initialize the descriptor with new receive buffers.
1561 */
1562 static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1563 {
1564 	struct amd8111e_priv *lp = netdev_priv(dev);
1565 	int err;
1566 
1567 	if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1568 		return -EINVAL;
1569 
1570 	if (!netif_running(dev)) {
1571 		/* new_mtu will be used
1572 		   when device starts netxt time */
1573 		dev->mtu = new_mtu;
1574 		return 0;
1575 	}
1576 
1577 	spin_lock_irq(&lp->lock);
1578 
1579         /* stop the chip */
1580 	writel(RUN, lp->mmio + CMD0);
1581 
1582 	dev->mtu = new_mtu;
1583 
1584 	err = amd8111e_restart(dev);
1585 	spin_unlock_irq(&lp->lock);
1586 	if(!err)
1587 		netif_start_queue(dev);
1588 	return err;
1589 }
1590 
/* Enable magic-packet Wake-on-LAN in the chip (CMD3/CMD7 registers).
 * Always returns 0. */
static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
{
	writel( VAL1|MPPLBA, lp->mmio + CMD3);
	writel( VAL0|MPEN_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}
1600 
/* Enable wake-on-link-change in the chip (CMD7 register).
 * Always returns 0. */
static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
{

	/* Adapter is already stopped/suspended/interrupt-disabled */
	writel(VAL0|LCMODE_SW,lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}
1611 
1612 /*
1613  * This function is called when a packet transmission fails to complete
1614  * within a reasonable period, on the assumption that an interrupt have
1615  * failed or the interface is locked up. This function will reinitialize
1616  * the hardware.
1617  */
1618 static void amd8111e_tx_timeout(struct net_device *dev)
1619 {
1620 	struct amd8111e_priv* lp = netdev_priv(dev);
1621 	int err;
1622 
1623 	printk(KERN_ERR "%s: transmit timed out, resetting\n",
1624 	 					      dev->name);
1625 	spin_lock_irq(&lp->lock);
1626 	err = amd8111e_restart(dev);
1627 	spin_unlock_irq(&lp->lock);
1628 	if(!err)
1629 		netif_wake_queue(dev);
1630 }
/* PCI suspend handler: quiesce the device (mask interrupts, detach the
 * net device, stop the chip and IPG timer), arm the configured
 * Wake-on-LAN method if any, then put the device into D3hot. */
static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	/* stop chip */
	spin_lock_irq(&lp->lock);
	if(lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if(lp->options & OPTION_WOL_ENABLE){
		 /* enable wol */
		if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);
		if(lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);

		pci_enable_wake(pci_dev, PCI_D3hot, 1);
		pci_enable_wake(pci_dev, PCI_D3cold, 1);

	}
	else{
		pci_enable_wake(pci_dev, PCI_D3hot, 0);
		pci_enable_wake(pci_dev, PCI_D3cold, 0);
	}

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, PCI_D3hot);

	return 0;
}
/* PCI resume handler: restore power state and PCI config, disable wake
 * sources, reattach the net device and restart the hardware (and the
 * dynamic-IPG timer if enabled). */
static int amd8111e_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	pci_enable_wake(pci_dev, PCI_D3hot, 0);
	pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);
	/* Restart ipg timer */
	if(lp->options & OPTION_DYN_IPG_ENABLE)
		mod_timer(&lp->ipg_data.ipg_timer,
				jiffies + IPG_CONVERGE_JIFFIES);
	spin_unlock_irq(&lp->lock);

	return 0;
}
1700 
1701 
1702 static void amd8111e_remove_one(struct pci_dev *pdev)
1703 {
1704 	struct net_device *dev = pci_get_drvdata(pdev);
1705 	if (dev) {
1706 		unregister_netdev(dev);
1707 		iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
1708 		free_netdev(dev);
1709 		pci_release_regions(pdev);
1710 		pci_disable_device(pdev);
1711 	}
1712 }
1713 static void amd8111e_config_ipg(struct net_device* dev)
1714 {
1715 	struct amd8111e_priv *lp = netdev_priv(dev);
1716 	struct ipg_info* ipg_data = &lp->ipg_data;
1717 	void __iomem *mmio = lp->mmio;
1718 	unsigned int prev_col_cnt = ipg_data->col_cnt;
1719 	unsigned int total_col_cnt;
1720 	unsigned int tmp_ipg;
1721 
1722 	if(lp->link_config.duplex == DUPLEX_FULL){
1723 		ipg_data->ipg = DEFAULT_IPG;
1724 		return;
1725 	}
1726 
1727 	if(ipg_data->ipg_state == SSTATE){
1728 
1729 		if(ipg_data->timer_tick == IPG_STABLE_TIME){
1730 
1731 			ipg_data->timer_tick = 0;
1732 			ipg_data->ipg = MIN_IPG - IPG_STEP;
1733 			ipg_data->current_ipg = MIN_IPG;
1734 			ipg_data->diff_col_cnt = 0xFFFFFFFF;
1735 			ipg_data->ipg_state = CSTATE;
1736 		}
1737 		else
1738 			ipg_data->timer_tick++;
1739 	}
1740 
1741 	if(ipg_data->ipg_state == CSTATE){
1742 
1743 		/* Get the current collision count */
1744 
1745 		total_col_cnt = ipg_data->col_cnt =
1746 				amd8111e_read_mib(mmio, xmt_collisions);
1747 
1748 		if ((total_col_cnt - prev_col_cnt) <
1749 				(ipg_data->diff_col_cnt)){
1750 
1751 			ipg_data->diff_col_cnt =
1752 				total_col_cnt - prev_col_cnt ;
1753 
1754 			ipg_data->ipg = ipg_data->current_ipg;
1755 		}
1756 
1757 		ipg_data->current_ipg += IPG_STEP;
1758 
1759 		if (ipg_data->current_ipg <= MAX_IPG)
1760 			tmp_ipg = ipg_data->current_ipg;
1761 		else{
1762 			tmp_ipg = ipg_data->ipg;
1763 			ipg_data->ipg_state = SSTATE;
1764 		}
1765 		writew((u32)tmp_ipg, mmio + IPG);
1766 		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
1767 	}
1768 	 mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
1769 	return;
1770 
1771 }
1772 
1773 static void amd8111e_probe_ext_phy(struct net_device *dev)
1774 {
1775 	struct amd8111e_priv *lp = netdev_priv(dev);
1776 	int i;
1777 
1778 	for (i = 0x1e; i >= 0; i--) {
1779 		u32 id1, id2;
1780 
1781 		if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1782 			continue;
1783 		if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1784 			continue;
1785 		lp->ext_phy_id = (id1 << 16) | id2;
1786 		lp->ext_phy_addr = i;
1787 		return;
1788 	}
1789 	lp->ext_phy_id = 0;
1790 	lp->ext_phy_addr = 1;
1791 }
1792 
/* net_device operations implemented by this driver. */
static const struct net_device_ops amd8111e_netdev_ops = {
	.ndo_open		= amd8111e_open,
	.ndo_stop		= amd8111e_close,
	.ndo_start_xmit		= amd8111e_start_xmit,
	.ndo_tx_timeout		= amd8111e_tx_timeout,
	.ndo_get_stats		= amd8111e_get_stats,
	.ndo_set_rx_mode	= amd8111e_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= amd8111e_set_mac_address,
	.ndo_do_ioctl		= amd8111e_ioctl,
	.ndo_change_mtu		= amd8111e_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = amd8111e_poll,
#endif
};
1808 
1809 static int amd8111e_probe_one(struct pci_dev *pdev,
1810 				  const struct pci_device_id *ent)
1811 {
1812 	int err, i;
1813 	unsigned long reg_addr,reg_len;
1814 	struct amd8111e_priv* lp;
1815 	struct net_device* dev;
1816 
1817 	err = pci_enable_device(pdev);
1818 	if(err){
1819 		printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
1820 			"exiting.\n");
1821 		return err;
1822 	}
1823 
1824 	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1825 		printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
1826 		       "exiting.\n");
1827 		err = -ENODEV;
1828 		goto err_disable_pdev;
1829 	}
1830 
1831 	err = pci_request_regions(pdev, MODULE_NAME);
1832 	if(err){
1833 		printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
1834 		       "exiting.\n");
1835 		goto err_disable_pdev;
1836 	}
1837 
1838 	pci_set_master(pdev);
1839 
1840 	/* Find power-management capability. */
1841 	if (!pdev->pm_cap) {
1842 		printk(KERN_ERR "amd8111e: No Power Management capability, "
1843 		       "exiting.\n");
1844 		err = -ENODEV;
1845 		goto err_free_reg;
1846 	}
1847 
1848 	/* Initialize DMA */
1849 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
1850 		printk(KERN_ERR "amd8111e: DMA not supported,"
1851 			"exiting.\n");
1852 		err = -ENODEV;
1853 		goto err_free_reg;
1854 	}
1855 
1856 	reg_addr = pci_resource_start(pdev, 0);
1857 	reg_len = pci_resource_len(pdev, 0);
1858 
1859 	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1860 	if (!dev) {
1861 		err = -ENOMEM;
1862 		goto err_free_reg;
1863 	}
1864 
1865 	SET_NETDEV_DEV(dev, &pdev->dev);
1866 
1867 #if AMD8111E_VLAN_TAG_USED
1868 	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ;
1869 #endif
1870 
1871 	lp = netdev_priv(dev);
1872 	lp->pci_dev = pdev;
1873 	lp->amd8111e_net_dev = dev;
1874 	lp->pm_cap = pdev->pm_cap;
1875 
1876 	spin_lock_init(&lp->lock);
1877 
1878 	lp->mmio = ioremap(reg_addr, reg_len);
1879 	if (!lp->mmio) {
1880 		printk(KERN_ERR "amd8111e: Cannot map device registers, "
1881 		       "exiting\n");
1882 		err = -ENOMEM;
1883 		goto err_free_dev;
1884 	}
1885 
1886 	/* Initializing MAC address */
1887 	for (i = 0; i < ETH_ALEN; i++)
1888 		dev->dev_addr[i] = readb(lp->mmio + PADR + i);
1889 
1890 	/* Setting user defined parametrs */
1891 	lp->ext_phy_option = speed_duplex[card_idx];
1892 	if(coalesce[card_idx])
1893 		lp->options |= OPTION_INTR_COAL_ENABLE;
1894 	if(dynamic_ipg[card_idx++])
1895 		lp->options |= OPTION_DYN_IPG_ENABLE;
1896 
1897 
1898 	/* Initialize driver entry points */
1899 	dev->netdev_ops = &amd8111e_netdev_ops;
1900 	SET_ETHTOOL_OPS(dev, &ops);
1901 	dev->irq =pdev->irq;
1902 	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1903 	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
1904 
1905 #if AMD8111E_VLAN_TAG_USED
1906 	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1907 #endif
1908 	/* Probe the external PHY */
1909 	amd8111e_probe_ext_phy(dev);
1910 
1911 	/* setting mii default values */
1912 	lp->mii_if.dev = dev;
1913 	lp->mii_if.mdio_read = amd8111e_mdio_read;
1914 	lp->mii_if.mdio_write = amd8111e_mdio_write;
1915 	lp->mii_if.phy_id = lp->ext_phy_addr;
1916 
1917 	/* Set receive buffer length and set jumbo option*/
1918 	amd8111e_set_rx_buff_len(dev);
1919 
1920 
1921 	err = register_netdev(dev);
1922 	if (err) {
1923 		printk(KERN_ERR "amd8111e: Cannot register net device, "
1924 		       "exiting.\n");
1925 		goto err_iounmap;
1926 	}
1927 
1928 	pci_set_drvdata(pdev, dev);
1929 
1930 	/* Initialize software ipg timer */
1931 	if(lp->options & OPTION_DYN_IPG_ENABLE){
1932 		init_timer(&lp->ipg_data.ipg_timer);
1933 		lp->ipg_data.ipg_timer.data = (unsigned long) dev;
1934 		lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
1935 		lp->ipg_data.ipg_timer.expires = jiffies +
1936 						 IPG_CONVERGE_JIFFIES;
1937 		lp->ipg_data.ipg = DEFAULT_IPG;
1938 		lp->ipg_data.ipg_state = CSTATE;
1939 	}
1940 
1941 	/*  display driver and device information */
1942 
1943     	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1944 	printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
1945 	       dev->name,MODULE_VERS);
1946 	printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1947 	       dev->name, chip_version, dev->dev_addr);
1948 	if (lp->ext_phy_id)
1949 		printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
1950 		       dev->name, lp->ext_phy_id, lp->ext_phy_addr);
1951 	else
1952 		printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
1953 		       dev->name);
1954     	return 0;
1955 err_iounmap:
1956 	iounmap(lp->mmio);
1957 
1958 err_free_dev:
1959 	free_netdev(dev);
1960 
1961 err_free_reg:
1962 	pci_release_regions(pdev);
1963 
1964 err_disable_pdev:
1965 	pci_disable_device(pdev);
1966 	return err;
1967 
1968 }
1969 
/* PCI driver glue: probe/remove and legacy suspend/resume callbacks. */
static struct pci_driver amd8111e_driver = {
	.name   	= MODULE_NAME,
	.id_table	= amd8111e_pci_tbl,
	.probe		= amd8111e_probe_one,
	.remove		= amd8111e_remove_one,
	.suspend	= amd8111e_suspend,
	.resume		= amd8111e_resume
};

/* Registers the driver; generates module init/exit boilerplate. */
module_pci_driver(amd8111e_driver);
1980