11ccea77eSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2b955f6caSJeff Kirsher 
3b955f6caSJeff Kirsher /* Advanced  Micro Devices Inc. AMD8111E Linux Network Driver
4b955f6caSJeff Kirsher  * Copyright (C) 2004 Advanced Micro Devices
5b955f6caSJeff Kirsher  *
6b955f6caSJeff Kirsher  * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7b955f6caSJeff Kirsher  * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8b955f6caSJeff Kirsher  * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9b955f6caSJeff Kirsher  * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10b955f6caSJeff Kirsher  * Copyright 1993 United States Government as represented by the
11b955f6caSJeff Kirsher  *	Director, National Security Agency.[ pcnet32.c ]
12b955f6caSJeff Kirsher  * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13b955f6caSJeff Kirsher  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
14b955f6caSJeff Kirsher  *
15b955f6caSJeff Kirsher 
16b955f6caSJeff Kirsher Module Name:
17b955f6caSJeff Kirsher 
18b955f6caSJeff Kirsher 	amd8111e.c
19b955f6caSJeff Kirsher 
20b955f6caSJeff Kirsher Abstract:
21b955f6caSJeff Kirsher 
22b955f6caSJeff Kirsher 	 AMD8111 based 10/100 Ethernet Controller Driver.
23b955f6caSJeff Kirsher 
24b955f6caSJeff Kirsher Environment:
25b955f6caSJeff Kirsher 
26b955f6caSJeff Kirsher 	Kernel Mode
27b955f6caSJeff Kirsher 
28b955f6caSJeff Kirsher Revision History:
29b955f6caSJeff Kirsher 	3.0.0
30b955f6caSJeff Kirsher 	   Initial Revision.
31b955f6caSJeff Kirsher 	3.0.1
32b955f6caSJeff Kirsher 	 1. Dynamic interrupt coalescing.
33b955f6caSJeff Kirsher 	 2. Removed prev_stats.
34b955f6caSJeff Kirsher 	 3. MII support.
35b955f6caSJeff Kirsher 	 4. Dynamic IPG support
36b955f6caSJeff Kirsher 	3.0.2  05/29/2003
37b955f6caSJeff Kirsher 	 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
38b955f6caSJeff Kirsher 	 2. Bug fix: Fixed VLAN support failure.
39b955f6caSJeff Kirsher 	 3. Bug fix: Fixed receive interrupt coalescing bug.
40b955f6caSJeff Kirsher 	 4. Dynamic IPG support is disabled by default.
41b955f6caSJeff Kirsher 	3.0.3 06/05/2003
42b955f6caSJeff Kirsher 	 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
43b955f6caSJeff Kirsher 	3.0.4 12/09/2003
44b955f6caSJeff Kirsher 	 1. Added set_mac_address routine for bonding driver support.
45b955f6caSJeff Kirsher 	 2. Tested the driver for bonding support
	 3. Bug fix: Fixed mismatch in actual receive buffer length and length
	    indicated to the h/w.
48b955f6caSJeff Kirsher 	 4. Modified amd8111e_rx() routine to receive all the received packets
49b955f6caSJeff Kirsher 	    in the first interrupt.
50b955f6caSJeff Kirsher 	 5. Bug fix: Corrected  rx_errors  reported in get_stats() function.
51b955f6caSJeff Kirsher 	3.0.5 03/22/2004
52b955f6caSJeff Kirsher 	 1. Added NAPI support
53b955f6caSJeff Kirsher 
54b955f6caSJeff Kirsher */
55b955f6caSJeff Kirsher 
56b955f6caSJeff Kirsher 
57b955f6caSJeff Kirsher #include <linux/module.h>
58b955f6caSJeff Kirsher #include <linux/kernel.h>
59b955f6caSJeff Kirsher #include <linux/types.h>
60b955f6caSJeff Kirsher #include <linux/compiler.h>
61b955f6caSJeff Kirsher #include <linux/delay.h>
62b955f6caSJeff Kirsher #include <linux/interrupt.h>
63b955f6caSJeff Kirsher #include <linux/ioport.h>
64b955f6caSJeff Kirsher #include <linux/pci.h>
65b955f6caSJeff Kirsher #include <linux/netdevice.h>
66b955f6caSJeff Kirsher #include <linux/etherdevice.h>
67b955f6caSJeff Kirsher #include <linux/skbuff.h>
68b955f6caSJeff Kirsher #include <linux/ethtool.h>
69b955f6caSJeff Kirsher #include <linux/mii.h>
70b955f6caSJeff Kirsher #include <linux/if_vlan.h>
71b955f6caSJeff Kirsher #include <linux/ctype.h>
72b955f6caSJeff Kirsher #include <linux/crc32.h>
73b955f6caSJeff Kirsher #include <linux/dma-mapping.h>
74b955f6caSJeff Kirsher 
75b955f6caSJeff Kirsher #include <asm/io.h>
76b955f6caSJeff Kirsher #include <asm/byteorder.h>
777c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
78b955f6caSJeff Kirsher 
79941992d2SJavier Martinez Canillas #if IS_ENABLED(CONFIG_VLAN_8021Q)
80b955f6caSJeff Kirsher #define AMD8111E_VLAN_TAG_USED 1
81b955f6caSJeff Kirsher #else
82b955f6caSJeff Kirsher #define AMD8111E_VLAN_TAG_USED 0
83b955f6caSJeff Kirsher #endif
84b955f6caSJeff Kirsher 
85b955f6caSJeff Kirsher #include "amd8111e.h"
#define MODULE_NAME	"amd8111e"
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller.");
MODULE_LICENSE("GPL");
/* Per-adapter tuning parameters.  The backing arrays (speed_duplex,
 * coalesce, dynamic_ipg) are not declared in this chunk — presumably
 * they come from amd8111e.h, one entry per device instance; verify.
 */
module_param_array(speed_duplex, int, NULL, 0);
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
module_param_array(coalesce, bool, NULL, 0);
MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
96b955f6caSJeff Kirsher 
9713a4fa43SVarka Bhadram /* This function will read the PHY registers. */
9846c73eccSVarka Bhadram static int amd8111e_read_phy(struct amd8111e_priv *lp,
9946c73eccSVarka Bhadram 			     int phy_id, int reg, u32 *val)
100b955f6caSJeff Kirsher {
101b955f6caSJeff Kirsher 	void __iomem *mmio = lp->mmio;
102b955f6caSJeff Kirsher 	unsigned int reg_val;
103b955f6caSJeff Kirsher 	unsigned int repeat = REPEAT_CNT;
104b955f6caSJeff Kirsher 
105b955f6caSJeff Kirsher 	reg_val = readl(mmio + PHY_ACCESS);
106b955f6caSJeff Kirsher 	while (reg_val & PHY_CMD_ACTIVE)
107b955f6caSJeff Kirsher 		reg_val = readl(mmio + PHY_ACCESS);
108b955f6caSJeff Kirsher 
109b955f6caSJeff Kirsher 	writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
110b955f6caSJeff Kirsher 			   ((reg & 0x1f) << 16), mmio + PHY_ACCESS);
111b955f6caSJeff Kirsher 	do {
112b955f6caSJeff Kirsher 		reg_val = readl(mmio + PHY_ACCESS);
113b955f6caSJeff Kirsher 		udelay(30);  /* It takes 30 us to read/write data */
114b955f6caSJeff Kirsher 	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
115b955f6caSJeff Kirsher 	if (reg_val & PHY_RD_ERR)
116b955f6caSJeff Kirsher 		goto err_phy_read;
117b955f6caSJeff Kirsher 
118b955f6caSJeff Kirsher 	*val = reg_val & 0xffff;
119b955f6caSJeff Kirsher 	return 0;
120b955f6caSJeff Kirsher err_phy_read:
121b955f6caSJeff Kirsher 	*val = 0;
122b955f6caSJeff Kirsher 	return -EINVAL;
123b955f6caSJeff Kirsher 
124b955f6caSJeff Kirsher }
125b955f6caSJeff Kirsher 
12613a4fa43SVarka Bhadram /* This function will write into PHY registers. */
12746c73eccSVarka Bhadram static int amd8111e_write_phy(struct amd8111e_priv *lp,
12846c73eccSVarka Bhadram 			      int phy_id, int reg, u32 val)
129b955f6caSJeff Kirsher {
130b955f6caSJeff Kirsher 	unsigned int repeat = REPEAT_CNT;
131b955f6caSJeff Kirsher 	void __iomem *mmio = lp->mmio;
132b955f6caSJeff Kirsher 	unsigned int reg_val;
133b955f6caSJeff Kirsher 
134b955f6caSJeff Kirsher 	reg_val = readl(mmio + PHY_ACCESS);
135b955f6caSJeff Kirsher 	while (reg_val & PHY_CMD_ACTIVE)
136b955f6caSJeff Kirsher 		reg_val = readl(mmio + PHY_ACCESS);
137b955f6caSJeff Kirsher 
138b955f6caSJeff Kirsher 	writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
139b955f6caSJeff Kirsher 			   ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
140b955f6caSJeff Kirsher 
141b955f6caSJeff Kirsher 	do {
142b955f6caSJeff Kirsher 		reg_val = readl(mmio + PHY_ACCESS);
143b955f6caSJeff Kirsher 		udelay(30);  /* It takes 30 us to read/write the data */
144b955f6caSJeff Kirsher 	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
145b955f6caSJeff Kirsher 
146b955f6caSJeff Kirsher 	if (reg_val & PHY_RD_ERR)
147b955f6caSJeff Kirsher 		goto err_phy_write;
148b955f6caSJeff Kirsher 
149b955f6caSJeff Kirsher 	return 0;
150b955f6caSJeff Kirsher 
151b955f6caSJeff Kirsher err_phy_write:
152b955f6caSJeff Kirsher 	return -EINVAL;
153b955f6caSJeff Kirsher 
154b955f6caSJeff Kirsher }
15513a4fa43SVarka Bhadram 
/* This is the mii register read function provided to the mii interface. */
static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int reg_val;

	/* On failure amd8111e_read_phy() stores 0 in reg_val, so the
	 * mii layer just sees an all-zero register.
	 */
	amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);

	return reg_val;
}
166b955f6caSJeff Kirsher 
/* This is the mii register write function provided to the mii interface. */
static void amd8111e_mdio_write(struct net_device *dev,
				int phy_id, int reg_num, int val)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	/* Errors from the low-level PHY write are intentionally ignored;
	 * the mii write callback has no way to report them.
	 */
	amd8111e_write_phy(lp, phy_id, reg_num, val);
}
175b955f6caSJeff Kirsher 
17613a4fa43SVarka Bhadram /* This function will set PHY speed. During initialization sets
17713a4fa43SVarka Bhadram  * the original speed to 100 full
178b955f6caSJeff Kirsher  */
179b955f6caSJeff Kirsher static void amd8111e_set_ext_phy(struct net_device *dev)
180b955f6caSJeff Kirsher {
181b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
182b955f6caSJeff Kirsher 	u32 bmcr, advert, tmp;
183b955f6caSJeff Kirsher 
184b955f6caSJeff Kirsher 	/* Determine mii register values to set the speed */
185b955f6caSJeff Kirsher 	advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
186b955f6caSJeff Kirsher 	tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
187b955f6caSJeff Kirsher 	switch (lp->ext_phy_option) {
188b955f6caSJeff Kirsher 
189b955f6caSJeff Kirsher 		default:
190b955f6caSJeff Kirsher 		case SPEED_AUTONEG: /* advertise all values */
191b955f6caSJeff Kirsher 			tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
192b955f6caSJeff Kirsher 				ADVERTISE_100HALF | ADVERTISE_100FULL);
193b955f6caSJeff Kirsher 			break;
194b955f6caSJeff Kirsher 		case SPEED10_HALF:
195b955f6caSJeff Kirsher 			tmp |= ADVERTISE_10HALF;
196b955f6caSJeff Kirsher 			break;
197b955f6caSJeff Kirsher 		case SPEED10_FULL:
198b955f6caSJeff Kirsher 			tmp |= ADVERTISE_10FULL;
199b955f6caSJeff Kirsher 			break;
200b955f6caSJeff Kirsher 		case SPEED100_HALF:
201b955f6caSJeff Kirsher 			tmp |= ADVERTISE_100HALF;
202b955f6caSJeff Kirsher 			break;
203b955f6caSJeff Kirsher 		case SPEED100_FULL:
204b955f6caSJeff Kirsher 			tmp |= ADVERTISE_100FULL;
205b955f6caSJeff Kirsher 			break;
206b955f6caSJeff Kirsher 	}
207b955f6caSJeff Kirsher 
208b955f6caSJeff Kirsher 	if(advert != tmp)
209b955f6caSJeff Kirsher 		amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
210b955f6caSJeff Kirsher 	/* Restart auto negotiation */
211b955f6caSJeff Kirsher 	bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
212b955f6caSJeff Kirsher 	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
213b955f6caSJeff Kirsher 	amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
214b955f6caSJeff Kirsher 
215b955f6caSJeff Kirsher }
216b955f6caSJeff Kirsher 
21713a4fa43SVarka Bhadram /* This function will unmap skb->data space and will free
21813a4fa43SVarka Bhadram  * all transmit and receive skbuffs.
219b955f6caSJeff Kirsher  */
220b955f6caSJeff Kirsher static int amd8111e_free_skbs(struct net_device *dev)
221b955f6caSJeff Kirsher {
222b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
223b955f6caSJeff Kirsher 	struct sk_buff *rx_skbuff;
224b955f6caSJeff Kirsher 	int i;
225b955f6caSJeff Kirsher 
226b955f6caSJeff Kirsher 	/* Freeing transmit skbs */
227b955f6caSJeff Kirsher 	for (i = 0; i < NUM_TX_BUFFERS; i++) {
228b955f6caSJeff Kirsher 		if (lp->tx_skbuff[i]) {
229428f09c2SChristophe JAILLET 			dma_unmap_single(&lp->pci_dev->dev,
230428f09c2SChristophe JAILLET 					 lp->tx_dma_addr[i],
231428f09c2SChristophe JAILLET 					 lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
232b955f6caSJeff Kirsher 			dev_kfree_skb(lp->tx_skbuff[i]);
233b955f6caSJeff Kirsher 			lp->tx_skbuff[i] = NULL;
234b955f6caSJeff Kirsher 			lp->tx_dma_addr[i] = 0;
235b955f6caSJeff Kirsher 		}
236b955f6caSJeff Kirsher 	}
237b955f6caSJeff Kirsher 	/* Freeing previously allocated receive buffers */
238b955f6caSJeff Kirsher 	for (i = 0; i < NUM_RX_BUFFERS; i++) {
239b955f6caSJeff Kirsher 		rx_skbuff = lp->rx_skbuff[i];
240b955f6caSJeff Kirsher 		if (rx_skbuff != NULL) {
241428f09c2SChristophe JAILLET 			dma_unmap_single(&lp->pci_dev->dev,
242428f09c2SChristophe JAILLET 					 lp->rx_dma_addr[i],
243428f09c2SChristophe JAILLET 					 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
244b955f6caSJeff Kirsher 			dev_kfree_skb(lp->rx_skbuff[i]);
245b955f6caSJeff Kirsher 			lp->rx_skbuff[i] = NULL;
246b955f6caSJeff Kirsher 			lp->rx_dma_addr[i] = 0;
247b955f6caSJeff Kirsher 		}
248b955f6caSJeff Kirsher 	}
249b955f6caSJeff Kirsher 
250b955f6caSJeff Kirsher 	return 0;
251b955f6caSJeff Kirsher }
252b955f6caSJeff Kirsher 
25313a4fa43SVarka Bhadram /* This will set the receive buffer length corresponding
25413a4fa43SVarka Bhadram  * to the mtu size of networkinterface.
255b955f6caSJeff Kirsher  */
256b955f6caSJeff Kirsher static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
257b955f6caSJeff Kirsher {
258b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
259b955f6caSJeff Kirsher 	unsigned int mtu = dev->mtu;
260b955f6caSJeff Kirsher 
261b955f6caSJeff Kirsher 	if (mtu > ETH_DATA_LEN) {
262b955f6caSJeff Kirsher 		/* MTU + ethernet header + FCS
26313a4fa43SVarka Bhadram 		 * + optional VLAN tag + skb reserve space 2
26413a4fa43SVarka Bhadram 		 */
265b955f6caSJeff Kirsher 		lp->rx_buff_len = mtu + ETH_HLEN + 10;
266b955f6caSJeff Kirsher 		lp->options |= OPTION_JUMBO_ENABLE;
267b955f6caSJeff Kirsher 	} else {
268b955f6caSJeff Kirsher 		lp->rx_buff_len = PKT_BUFF_SZ;
269b955f6caSJeff Kirsher 		lp->options &= ~OPTION_JUMBO_ENABLE;
270b955f6caSJeff Kirsher 	}
271b955f6caSJeff Kirsher }
272b955f6caSJeff Kirsher 
27313a4fa43SVarka Bhadram /* This function will free all the previously allocated buffers,
27413a4fa43SVarka Bhadram  * determine new receive buffer length  and will allocate new receive buffers.
27513a4fa43SVarka Bhadram  * This function also allocates and initializes both the transmitter
27613a4fa43SVarka Bhadram  * and receive hardware descriptors.
277b955f6caSJeff Kirsher  */
278b955f6caSJeff Kirsher static int amd8111e_init_ring(struct net_device *dev)
279b955f6caSJeff Kirsher {
280b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
281b955f6caSJeff Kirsher 	int i;
282b955f6caSJeff Kirsher 
283b955f6caSJeff Kirsher 	lp->rx_idx = lp->tx_idx = 0;
284b955f6caSJeff Kirsher 	lp->tx_complete_idx = 0;
285b955f6caSJeff Kirsher 	lp->tx_ring_idx = 0;
286b955f6caSJeff Kirsher 
287b955f6caSJeff Kirsher 
288b955f6caSJeff Kirsher 	if (lp->opened)
289b955f6caSJeff Kirsher 		/* Free previously allocated transmit and receive skbs */
290b955f6caSJeff Kirsher 		amd8111e_free_skbs(dev);
291b955f6caSJeff Kirsher 
292b955f6caSJeff Kirsher 	else {
293b955f6caSJeff Kirsher 		/* allocate the tx and rx descriptors */
294428f09c2SChristophe JAILLET 		lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
295b955f6caSJeff Kirsher 			sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
296428f09c2SChristophe JAILLET 			&lp->tx_ring_dma_addr, GFP_ATOMIC);
297428f09c2SChristophe JAILLET 		if (!lp->tx_ring)
298b955f6caSJeff Kirsher 			goto err_no_mem;
299b955f6caSJeff Kirsher 
300428f09c2SChristophe JAILLET 		lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
301b955f6caSJeff Kirsher 			sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
302428f09c2SChristophe JAILLET 			&lp->rx_ring_dma_addr, GFP_ATOMIC);
303428f09c2SChristophe JAILLET 		if (!lp->rx_ring)
304b955f6caSJeff Kirsher 			goto err_free_tx_ring;
305b955f6caSJeff Kirsher 	}
306428f09c2SChristophe JAILLET 
307b955f6caSJeff Kirsher 	/* Set new receive buff size */
308b955f6caSJeff Kirsher 	amd8111e_set_rx_buff_len(dev);
309b955f6caSJeff Kirsher 
310b955f6caSJeff Kirsher 	/* Allocating receive  skbs */
311b955f6caSJeff Kirsher 	for (i = 0; i < NUM_RX_BUFFERS; i++) {
312b955f6caSJeff Kirsher 
3131d266430SPradeep A Dalvi 		lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
3141d266430SPradeep A Dalvi 		if (!lp->rx_skbuff[i]) {
315b955f6caSJeff Kirsher 			/* Release previos allocated skbs */
316b955f6caSJeff Kirsher 			for (--i; i >= 0; i--)
317b955f6caSJeff Kirsher 				dev_kfree_skb(lp->rx_skbuff[i]);
318b955f6caSJeff Kirsher 			goto err_free_rx_ring;
319b955f6caSJeff Kirsher 		}
320b955f6caSJeff Kirsher 		skb_reserve(lp->rx_skbuff[i], 2);
321b955f6caSJeff Kirsher 	}
322b955f6caSJeff Kirsher         /* Initilaizing receive descriptors */
323b955f6caSJeff Kirsher 	for (i = 0; i < NUM_RX_BUFFERS; i++) {
324428f09c2SChristophe JAILLET 		lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev,
325428f09c2SChristophe JAILLET 						    lp->rx_skbuff[i]->data,
326428f09c2SChristophe JAILLET 						    lp->rx_buff_len - 2,
327428f09c2SChristophe JAILLET 						    DMA_FROM_DEVICE);
328b955f6caSJeff Kirsher 
329b955f6caSJeff Kirsher 		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
330b955f6caSJeff Kirsher 		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
331b955f6caSJeff Kirsher 		wmb();
332b955f6caSJeff Kirsher 		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
333b955f6caSJeff Kirsher 	}
334b955f6caSJeff Kirsher 
335b955f6caSJeff Kirsher 	/* Initializing transmit descriptors */
336b955f6caSJeff Kirsher 	for (i = 0; i < NUM_TX_RING_DR; i++) {
337b955f6caSJeff Kirsher 		lp->tx_ring[i].buff_phy_addr = 0;
338b955f6caSJeff Kirsher 		lp->tx_ring[i].tx_flags = 0;
339b955f6caSJeff Kirsher 		lp->tx_ring[i].buff_count = 0;
340b955f6caSJeff Kirsher 	}
341b955f6caSJeff Kirsher 
342b955f6caSJeff Kirsher 	return 0;
343b955f6caSJeff Kirsher 
344b955f6caSJeff Kirsher err_free_rx_ring:
345b955f6caSJeff Kirsher 
346428f09c2SChristophe JAILLET 	dma_free_coherent(&lp->pci_dev->dev,
347428f09c2SChristophe JAILLET 			  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
348428f09c2SChristophe JAILLET 			  lp->rx_ring, lp->rx_ring_dma_addr);
349b955f6caSJeff Kirsher 
350b955f6caSJeff Kirsher err_free_tx_ring:
351b955f6caSJeff Kirsher 
352428f09c2SChristophe JAILLET 	dma_free_coherent(&lp->pci_dev->dev,
353428f09c2SChristophe JAILLET 			  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
354428f09c2SChristophe JAILLET 			  lp->tx_ring, lp->tx_ring_dma_addr);
355b955f6caSJeff Kirsher 
356b955f6caSJeff Kirsher err_no_mem:
357b955f6caSJeff Kirsher 	return -ENOMEM;
358b955f6caSJeff Kirsher }
35913a4fa43SVarka Bhadram 
36013a4fa43SVarka Bhadram /* This function will set the interrupt coalescing according
36113a4fa43SVarka Bhadram  * to the input arguments
36213a4fa43SVarka Bhadram  */
363b955f6caSJeff Kirsher static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
364b955f6caSJeff Kirsher {
365b955f6caSJeff Kirsher 	unsigned int timeout;
366b955f6caSJeff Kirsher 	unsigned int event_count;
367b955f6caSJeff Kirsher 
368b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
369b955f6caSJeff Kirsher 	void __iomem *mmio = lp->mmio;
370b955f6caSJeff Kirsher 	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
371b955f6caSJeff Kirsher 
372b955f6caSJeff Kirsher 
373b955f6caSJeff Kirsher 	switch(cmod)
374b955f6caSJeff Kirsher 	{
375b955f6caSJeff Kirsher 		case RX_INTR_COAL :
376b955f6caSJeff Kirsher 			timeout = coal_conf->rx_timeout;
377b955f6caSJeff Kirsher 			event_count = coal_conf->rx_event_count;
378b955f6caSJeff Kirsher 			if (timeout > MAX_TIMEOUT ||
379b955f6caSJeff Kirsher 			    event_count > MAX_EVENT_COUNT)
380b955f6caSJeff Kirsher 				return -EINVAL;
381b955f6caSJeff Kirsher 
382b955f6caSJeff Kirsher 			timeout = timeout * DELAY_TIMER_CONV;
383b955f6caSJeff Kirsher 			writel(VAL0|STINTEN, mmio+INTEN0);
384ca3fc0aaSYixing Liu 			writel((u32)DLY_INT_A_R0 | (event_count << 16) |
385ca3fc0aaSYixing Liu 				timeout, mmio + DLY_INT_A);
386b955f6caSJeff Kirsher 			break;
387b955f6caSJeff Kirsher 
388b955f6caSJeff Kirsher 		case TX_INTR_COAL:
389b955f6caSJeff Kirsher 			timeout = coal_conf->tx_timeout;
390b955f6caSJeff Kirsher 			event_count = coal_conf->tx_event_count;
391b955f6caSJeff Kirsher 			if (timeout > MAX_TIMEOUT ||
392b955f6caSJeff Kirsher 			    event_count > MAX_EVENT_COUNT)
393b955f6caSJeff Kirsher 				return -EINVAL;
394b955f6caSJeff Kirsher 
395b955f6caSJeff Kirsher 
396b955f6caSJeff Kirsher 			timeout = timeout * DELAY_TIMER_CONV;
397b955f6caSJeff Kirsher 			writel(VAL0 | STINTEN, mmio + INTEN0);
398ca3fc0aaSYixing Liu 			writel((u32)DLY_INT_B_T0 | (event_count << 16) |
399ca3fc0aaSYixing Liu 				timeout, mmio + DLY_INT_B);
400b955f6caSJeff Kirsher 			break;
401b955f6caSJeff Kirsher 
402b955f6caSJeff Kirsher 		case DISABLE_COAL:
403b955f6caSJeff Kirsher 			writel(0, mmio + STVAL);
404b955f6caSJeff Kirsher 			writel(STINTEN, mmio + INTEN0);
405b955f6caSJeff Kirsher 			writel(0, mmio + DLY_INT_B);
406b955f6caSJeff Kirsher 			writel(0, mmio + DLY_INT_A);
407b955f6caSJeff Kirsher 			break;
408b955f6caSJeff Kirsher 		 case ENABLE_COAL:
409b955f6caSJeff Kirsher 		       /* Start the timer */
410b955f6caSJeff Kirsher 			writel((u32)SOFT_TIMER_FREQ, mmio + STVAL); /* 0.5 sec */
411b955f6caSJeff Kirsher 			writel(VAL0 | STINTEN, mmio + INTEN0);
412b955f6caSJeff Kirsher 			break;
413b955f6caSJeff Kirsher 		default:
414b955f6caSJeff Kirsher 			break;
415b955f6caSJeff Kirsher 
416b955f6caSJeff Kirsher    }
417b955f6caSJeff Kirsher 	return 0;
418b955f6caSJeff Kirsher 
419b955f6caSJeff Kirsher }
420b955f6caSJeff Kirsher 
/* This function initializes the device registers and starts the device.
 *
 * Stops the MAC, (re)builds the descriptor rings, programs the PHY and
 * MAC control registers, interrupt masks, ring base addresses, IPG,
 * jumbo/VLAN options and the station address, then sets RUN to start
 * the controller.  Returns 0 on success, -ENOMEM if ring setup failed.
 */
static int amd8111e_restart(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	int i, reg_val;

	/* stop the chip */
	writel(RUN, mmio + CMD0);

	if (amd8111e_init_ring(dev))
		return -ENOMEM;

	/* enable the port manager and set auto negotiation always */
	writel((u32)VAL1 | EN_PMGR, mmio + CMD3);
	writel((u32)XPHYANE | XPHYRST, mmio + CTRL2);

	amd8111e_set_ext_phy(dev);

	/* set control registers: transmit start point 128, cache-aligned
	 * DMA bursts
	 */
	reg_val = readl(mmio + CTRL1);
	reg_val &= ~XMTSP_MASK;
	writel(reg_val | XMTSP_128 | CACHE_ALIGN, mmio + CTRL1);

	/* enable interrupt */
	writel(APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
		APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
		SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);

	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);

	/* initialize tx and rx ring base addresses */
	writel((u32)lp->tx_ring_dma_addr, mmio + XMT_RING_BASE_ADDR0);
	writel((u32)lp->rx_ring_dma_addr, mmio + RCV_RING_BASE_ADDR0);

	/* NOTE(review): the cast is (u32) here but (u16) on the next line;
	 * writew() truncates to 16 bits either way — harmless, but confirm
	 * and make consistent.
	 */
	writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);

	/* set default IPG to 96 */
	writew((u32)DEFAULT_IPG, mmio + IPG);
	writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);

	if (lp->options & OPTION_JUMBO_ENABLE) {
		writel((u32)VAL2|JUMBO, mmio + CMD3);
		/* Reset REX_UFLO */
		writel(REX_UFLO, mmio + CMD2);
		/* Should not set REX_UFLO for jumbo frames */
		writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
	} else {
		writel(VAL0 | APAD_XMT | REX_RTRY | REX_UFLO, mmio + CMD2);
		writel((u32)JUMBO, mmio + CMD3);
	}

#if AMD8111E_VLAN_TAG_USED
	writel((u32)VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);

	/* Setting the MAC address to the device */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(dev->dev_addr[i], mmio + PADR + i);

	/* Enable interrupt coalesce */
	if (lp->options & OPTION_INTR_COAL_ENABLE) {
		netdev_info(dev, "Interrupt Coalescing Enabled.\n");
		amd8111e_set_coalesce(dev, ENABLE_COAL);
	}

	/* set RUN bit to start the chip */
	writel(VAL2 | RDMD0, mmio + CMD0);
	writel(VAL0 | INTREN | RUN, mmio + CMD0);

	/* To avoid PCI posting bug */
	readl(mmio+CMD0);
	return 0;
}
49713a4fa43SVarka Bhadram 
/* This function clears the necessary device registers, putting the
 * controller into a known default state: chip stopped, ring base
 * addresses and lengths zeroed, delay/flow-control registers cleared,
 * pending interrupts acknowledged, MIB counters and logical address
 * filter cleared, and CTRL1 restored to its default.
 */
static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
{
	unsigned int reg_val;
	unsigned int logic_filter[2] = {0,};
	void __iomem *mmio = lp->mmio;


	/* stop the chip */
	writel(RUN, mmio + CMD0);

	/* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
	writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);

	/* Clear RCV_RING_BASE_ADDR */
	writel(0, mmio + RCV_RING_BASE_ADDR0);

	/* Clear XMT_RING_BASE_ADDR */
	writel(0, mmio + XMT_RING_BASE_ADDR0);
	writel(0, mmio + XMT_RING_BASE_ADDR1);
	writel(0, mmio + XMT_RING_BASE_ADDR2);
	writel(0, mmio + XMT_RING_BASE_ADDR3);

	/* Clear CMD0  */
	writel(CMD0_CLEAR, mmio + CMD0);

	/* Clear CMD2 */
	writel(CMD2_CLEAR, mmio + CMD2);

	/* Clear CMD7 */
	writel(CMD7_CLEAR, mmio + CMD7);

	/* Clear DLY_INT_A and DLY_INT_B */
	writel(0x0, mmio + DLY_INT_A);
	writel(0x0, mmio + DLY_INT_B);

	/* Clear FLOW_CONTROL */
	writel(0x0, mmio + FLOW_CONTROL);

	/* Clear INT0  write 1 to clear register */
	reg_val = readl(mmio + INT0);
	writel(reg_val, mmio + INT0);

	/* Clear STVAL */
	writel(0x0, mmio + STVAL);

	/* Clear INTEN0 */
	writel(INTEN0_CLEAR, mmio + INTEN0);

	/* Clear LADRF */
	writel(0x0, mmio + LADRF);

	/* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
	writel(0x80010, mmio + SRAM_SIZE);

	/* Clear RCV_RING0_LEN */
	writel(0x0, mmio + RCV_RING_LEN0);

	/* Clear XMT_RING0/1/2/3_LEN */
	writel(0x0, mmio +  XMT_RING_LEN0);
	writel(0x0, mmio +  XMT_RING_LEN1);
	writel(0x0, mmio +  XMT_RING_LEN2);
	writel(0x0, mmio +  XMT_RING_LEN3);

	/* Clear XMT_RING_LIMIT */
	writel(0x0, mmio + XMT_RING_LIMIT);

	/* Clear MIB */
	writew(MIB_CLEAR, mmio + MIB_ADDR);

	/* Clear LARF (64-bit logical address filter, all zeros) */
	amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);

	/* SRAM_SIZE register */
	/* NOTE(review): reg_val is not used after this read — possibly
	 * intended as a readback/flush of the SRAM_SIZE write above;
	 * confirm before removing.
	 */
	reg_val = readl(mmio + SRAM_SIZE);

	if (lp->options & OPTION_JUMBO_ENABLE)
		writel(VAL2 | JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
	writel(VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	/* Set default value to CTRL1 Register */
	writel(CTRL1_DEFAULT, mmio + CTRL1);

	/* To avoid PCI posting bug */
	readl(mmio + CMD2);

}
586b955f6caSJeff Kirsher 
58713a4fa43SVarka Bhadram /* This function disables the interrupt and clears all the pending
58813a4fa43SVarka Bhadram  * interrupts in INT0
589b955f6caSJeff Kirsher  */
590b955f6caSJeff Kirsher static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
591b955f6caSJeff Kirsher {
592b955f6caSJeff Kirsher 	u32 intr0;
593b955f6caSJeff Kirsher 
594b955f6caSJeff Kirsher 	/* Disable interrupt */
595b955f6caSJeff Kirsher 	writel(INTREN, lp->mmio + CMD0);
596b955f6caSJeff Kirsher 
597b955f6caSJeff Kirsher 	/* Clear INT0 */
598b955f6caSJeff Kirsher 	intr0 = readl(lp->mmio + INT0);
599b955f6caSJeff Kirsher 	writel(intr0, lp->mmio + INT0);
600b955f6caSJeff Kirsher 
601b955f6caSJeff Kirsher 	/* To avoid PCI posting bug */
602b955f6caSJeff Kirsher 	readl(lp->mmio + INT0);
603b955f6caSJeff Kirsher 
604b955f6caSJeff Kirsher }
605b955f6caSJeff Kirsher 
60613a4fa43SVarka Bhadram /* This function stops the chip. */
607b955f6caSJeff Kirsher static void amd8111e_stop_chip(struct amd8111e_priv *lp)
608b955f6caSJeff Kirsher {
609b955f6caSJeff Kirsher 	writel(RUN, lp->mmio + CMD0);
610b955f6caSJeff Kirsher 
611b955f6caSJeff Kirsher 	/* To avoid PCI posting bug */
612b955f6caSJeff Kirsher 	readl(lp->mmio + CMD0);
613b955f6caSJeff Kirsher }
614b955f6caSJeff Kirsher 
61513a4fa43SVarka Bhadram /* This function frees the  transmiter and receiver descriptor rings. */
616b955f6caSJeff Kirsher static void amd8111e_free_ring(struct amd8111e_priv *lp)
617b955f6caSJeff Kirsher {
618b955f6caSJeff Kirsher 	/* Free transmit and receive descriptor rings */
619b955f6caSJeff Kirsher 	if (lp->rx_ring) {
620428f09c2SChristophe JAILLET 		dma_free_coherent(&lp->pci_dev->dev,
621b955f6caSJeff Kirsher 				  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
622b955f6caSJeff Kirsher 				  lp->rx_ring, lp->rx_ring_dma_addr);
623b955f6caSJeff Kirsher 		lp->rx_ring = NULL;
624b955f6caSJeff Kirsher 	}
625b955f6caSJeff Kirsher 
626b955f6caSJeff Kirsher 	if (lp->tx_ring) {
627428f09c2SChristophe JAILLET 		dma_free_coherent(&lp->pci_dev->dev,
628b955f6caSJeff Kirsher 				  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
629b955f6caSJeff Kirsher 				  lp->tx_ring, lp->tx_ring_dma_addr);
630b955f6caSJeff Kirsher 
631b955f6caSJeff Kirsher 		lp->tx_ring = NULL;
632b955f6caSJeff Kirsher 	}
633b955f6caSJeff Kirsher 
634b955f6caSJeff Kirsher }
635b955f6caSJeff Kirsher 
/* Reclaim completed transmit descriptors.
 *
 * Walks the tx ring from tx_complete_idx toward tx_idx, stopping at the
 * first descriptor the hardware still owns (OWN_BIT set).  For each
 * completed slot it unmaps the DMA buffer, frees the skb, updates the
 * interrupt-coalescing counters, and wakes the queue once enough room
 * is available again.  Called from the interrupt handler; always
 * returns 0.
 */
static int amd8111e_tx(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	int status;
	/* Complete all the transmit packet */
	while (lp->tx_complete_idx != lp->tx_idx) {
		tx_index =  lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
		status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);

		if (status & OWN_BIT)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[tx_index].buff_phy_addr = 0;

		/* We must free the original skb */
		if (lp->tx_skbuff[tx_index]) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_dma_addr[tx_index],
					 lp->tx_skbuff[tx_index]->len,
					 DMA_TO_DEVICE);
			dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
			lp->tx_skbuff[tx_index] = NULL;
			lp->tx_dma_addr[tx_index] = 0;
		}
		lp->tx_complete_idx++;
		/* COAL: account this packet for tx interrupt coalescing */
		lp->coal_conf.tx_packets++;
		lp->coal_conf.tx_bytes +=
			le16_to_cpu(lp->tx_ring[tx_index].buff_count);

		/* Wake the queue once the ring has drained enough. */
		if (netif_queue_stopped(dev) &&
			lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS + 2) {
			/* The ring is no longer full, clear tbusy. */
			/* lp->tx_full = 0; */
			netif_wake_queue(dev);
		}
	}
	return 0;
}
680b955f6caSJeff Kirsher 
/* NAPI poll handler: harvest up to @budget received frames.
 *
 * For each descriptor the host owns, the filled skb is handed to the
 * stack via GRO and a freshly mapped receive buffer replaces it in the
 * slot.  Errored, fragmented, or undersized frames are dropped and
 * their descriptor is simply recycled.  When fewer than @budget
 * packets were processed, the poll completes and receive interrupts
 * are re-enabled.  Returns the number of packets delivered.
 */
static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
{
	struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
	struct net_device *dev = lp->amd8111e_net_dev;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb, *new_skb;
	int min_pkt_len, status;
	int num_rx_pkt = 0;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif

	while (num_rx_pkt < budget) {
		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
		if (status & OWN_BIT)
			break;	/* hardware still owns this descriptor */

		/* There is a tricky error noted by John Murphy,
		 * <murf@perftech.com> to Russ Nelson: Even with
		 * full-sized * buffers it's possible for a
		 * jabber packet to use two buffers, with only
		 * the last correctly noting the error.
		 */
		if (status & ERR_BIT) {
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		/* Accept only complete single-buffer frames: both start-of-
		 * packet (STP) and end-of-packet (ENP) must be set.
		 */
		if (!((status & STP_BIT) && (status & ENP_BIT))) {
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		/* msg_count minus 4 -- presumably stripping the FCS; verify
		 * against the hardware descriptor layout.
		 */
		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
		vtag = status & TT_MASK;
		/* MAC will strip vlan tag */
		if (vtag != 0)
			min_pkt_len = MIN_PKT_LEN - 4;
			else
#endif
			min_pkt_len = MIN_PKT_LEN;

		if (pkt_len < min_pkt_len) {
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}
		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!new_skb) {
			/* if allocation fail,
			 * ignore that pkt and go to next one
			 */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}

		/* 2-byte offset so the IP header lands on a 4-byte boundary
		 * (the same -2 appears in the DMA mapping sizes below).
		 */
		skb_reserve(new_skb, 2);
		skb = lp->rx_skbuff[rx_index];
		dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[rx_index],
				 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
		skb_put(skb, pkt_len);
		lp->rx_skbuff[rx_index] = new_skb;
		lp->rx_dma_addr[rx_index] = dma_map_single(&lp->pci_dev->dev,
							   new_skb->data,
							   lp->rx_buff_len - 2,
							   DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED
		if (vtag == TT_VLAN_TAGGED) {
			u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		}
#endif
		napi_gro_receive(napi, skb);
		/* COAL: account this packet for rx interrupt coalescing */
		lp->coal_conf.rx_packets++;
		lp->coal_conf.rx_bytes += pkt_len;
		num_rx_pkt++;

err_next_pkt:
		/* Recycle the descriptor: restore buffer address and size,
		 * then hand ownership back to the MAC.  The wmb() ensures
		 * the fields are visible before OWN_BIT is set.
		 */
		lp->rx_ring[rx_index].buff_phy_addr
			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
		lp->rx_ring[rx_index].buff_count =
			cpu_to_le16(lp->rx_buff_len-2);
		wmb();
		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
	}

	if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
		unsigned long flags;

		/* Ring drained: re-enable receive interrupts and set the
		 * receive-demand bit so the MAC resumes filling buffers.
		 */
		spin_lock_irqsave(&lp->lock, flags);
		writel(VAL0|RINTEN0, mmio + INTEN0);
		writel(VAL2 | RDMD0, mmio + CMD0);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

	return num_rx_pkt;
}
791b955f6caSJeff Kirsher 
79213a4fa43SVarka Bhadram /* This function will indicate the link status to the kernel. */
793b955f6caSJeff Kirsher static int amd8111e_link_change(struct net_device *dev)
794b955f6caSJeff Kirsher {
795b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
796b955f6caSJeff Kirsher 	int status0, speed;
797b955f6caSJeff Kirsher 
798b955f6caSJeff Kirsher 	/* read the link change */
799b955f6caSJeff Kirsher 	status0 = readl(lp->mmio + STAT0);
800b955f6caSJeff Kirsher 
801b955f6caSJeff Kirsher 	if (status0 & LINK_STATS) {
802b955f6caSJeff Kirsher 		if (status0 & AUTONEG_COMPLETE)
803b955f6caSJeff Kirsher 			lp->link_config.autoneg = AUTONEG_ENABLE;
804b955f6caSJeff Kirsher 		else
805b955f6caSJeff Kirsher 			lp->link_config.autoneg = AUTONEG_DISABLE;
806b955f6caSJeff Kirsher 
807b955f6caSJeff Kirsher 		if (status0 & FULL_DPLX)
808b955f6caSJeff Kirsher 			lp->link_config.duplex = DUPLEX_FULL;
809b955f6caSJeff Kirsher 		else
810b955f6caSJeff Kirsher 			lp->link_config.duplex = DUPLEX_HALF;
811b955f6caSJeff Kirsher 		speed = (status0 & SPEED_MASK) >> 7;
812b955f6caSJeff Kirsher 		if (speed == PHY_SPEED_10)
813b955f6caSJeff Kirsher 			lp->link_config.speed = SPEED_10;
814b955f6caSJeff Kirsher 		else if (speed == PHY_SPEED_100)
815b955f6caSJeff Kirsher 			lp->link_config.speed = SPEED_100;
816b955f6caSJeff Kirsher 
817f7afbaa5SVarka Bhadram 		netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
818f7afbaa5SVarka Bhadram 			    (lp->link_config.speed == SPEED_100) ?
819f7afbaa5SVarka Bhadram 							"100" : "10",
820f7afbaa5SVarka Bhadram 			    (lp->link_config.duplex == DUPLEX_FULL) ?
821f7afbaa5SVarka Bhadram 							"Full" : "Half");
822f7afbaa5SVarka Bhadram 
823b955f6caSJeff Kirsher 		netif_carrier_on(dev);
824ca3fc0aaSYixing Liu 	} else {
825b955f6caSJeff Kirsher 		lp->link_config.speed = SPEED_INVALID;
826b955f6caSJeff Kirsher 		lp->link_config.duplex = DUPLEX_INVALID;
827b955f6caSJeff Kirsher 		lp->link_config.autoneg = AUTONEG_INVALID;
828f7afbaa5SVarka Bhadram 		netdev_info(dev, "Link is Down.\n");
829b955f6caSJeff Kirsher 		netif_carrier_off(dev);
830b955f6caSJeff Kirsher 	}
831b955f6caSJeff Kirsher 
832b955f6caSJeff Kirsher 	return 0;
833b955f6caSJeff Kirsher }
83413a4fa43SVarka Bhadram 
/* Read one hardware MIB counter.
 *
 * Writes a read command for @MIB_COUNTER to MIB_ADDR, polls up to
 * REPEAT_CNT times (2 us apart) for the command-active bit to clear,
 * then returns the value latched in MIB_DATA.
 *
 * NOTE(review): a poll timeout is not detected; MIB_DATA is returned
 * regardless, so the value could be stale in that case.
 */
static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
{
	unsigned int  status;
	unsigned  int data;
	unsigned int repeat = REPEAT_CNT;

	writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
	do {
		status = readw(mmio + MIB_ADDR);
		udelay(2);	/* controller takes MAX 2 us to get mib data */
	}
	while (--repeat && (status & MIB_CMD_ACTIVE));

	data = readl(mmio + MIB_DATA);
	return data;
}
852b955f6caSJeff Kirsher 
/* Read the hardware MIB counters and fold them into dev->stats.
 *
 * Returns &dev->stats.  If the interface has never been opened
 * (!lp->opened) the existing stats are returned untouched.  All the
 * MIB reads run under lp->lock with interrupts disabled, serializing
 * them against the interrupt path.
 */
static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned long flags;
	struct net_device_stats *new_stats = &dev->stats;

	if (!lp->opened)
		return new_stats;
	spin_lock_irqsave(&lp->lock, flags);

	/* stats.rx_packets: sum of broadcast, multicast and unicast */
	new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
				amd8111e_read_mib(mmio, rcv_multicast_pkts)+
				amd8111e_read_mib(mmio, rcv_unicast_pkts);

	/* stats.tx_packets */
	new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);

	/*stats.rx_bytes */
	new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);

	/* stats.tx_bytes */
	new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);

	/* stats.rx_errors */
	/* hw errors + errors driver reported */
	new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
				amd8111e_read_mib(mmio, rcv_fragments)+
				amd8111e_read_mib(mmio, rcv_jabbers)+
				amd8111e_read_mib(mmio, rcv_alignment_errors)+
				amd8111e_read_mib(mmio, rcv_fcs_errors)+
				amd8111e_read_mib(mmio, rcv_miss_pkts)+
				lp->drv_rx_errors;

	/* stats.tx_errors */
	new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.rx_dropped*/
	new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_dropped*/
	new_stats->tx_dropped = amd8111e_read_mib(mmio,  xmt_underrun_pkts);

	/* stats.multicast*/
	new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);

	/* stats.collisions*/
	new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);

	/* stats.rx_length_errors*/
	new_stats->rx_length_errors =
		amd8111e_read_mib(mmio, rcv_undersize_pkts)+
		amd8111e_read_mib(mmio, rcv_oversize_pkts);

	/* stats.rx_over_errors*/
	new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_crc_errors*/
	new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);

	/* stats.rx_frame_errors*/
	new_stats->rx_frame_errors =
		amd8111e_read_mib(mmio, rcv_alignment_errors);

	/* stats.rx_fifo_errors */
	new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_missed_errors */
	new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_aborted_errors*/
	new_stats->tx_aborted_errors =
		amd8111e_read_mib(mmio, xmt_excessive_collision);

	/* stats.tx_carrier_errors*/
	new_stats->tx_carrier_errors =
		amd8111e_read_mib(mmio, xmt_loss_carrier);

	/* stats.tx_fifo_errors*/
	new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.tx_window_errors*/
	new_stats->tx_window_errors =
		amd8111e_read_mib(mmio, xmt_late_collision);

	/* Reset the mibs for collecting new statistics */
	/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/

	spin_unlock_irqrestore(&lp->lock, flags);

	return new_stats;
}
94913a4fa43SVarka Bhadram 
950b955f6caSJeff Kirsher /* This function recalculate the interrupt coalescing  mode on every interrupt
95113a4fa43SVarka Bhadram  * according to the datarate and the packet rate.
952b955f6caSJeff Kirsher  */
953b955f6caSJeff Kirsher static int amd8111e_calc_coalesce(struct net_device *dev)
954b955f6caSJeff Kirsher {
955b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
956b955f6caSJeff Kirsher 	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
957b955f6caSJeff Kirsher 	int tx_pkt_rate;
958b955f6caSJeff Kirsher 	int rx_pkt_rate;
959b955f6caSJeff Kirsher 	int tx_data_rate;
960b955f6caSJeff Kirsher 	int rx_data_rate;
961b955f6caSJeff Kirsher 	int rx_pkt_size;
962b955f6caSJeff Kirsher 	int tx_pkt_size;
963b955f6caSJeff Kirsher 
964b955f6caSJeff Kirsher 	tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
965b955f6caSJeff Kirsher 	coal_conf->tx_prev_packets =  coal_conf->tx_packets;
966b955f6caSJeff Kirsher 
967b955f6caSJeff Kirsher 	tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
968b955f6caSJeff Kirsher 	coal_conf->tx_prev_bytes =  coal_conf->tx_bytes;
969b955f6caSJeff Kirsher 
970b955f6caSJeff Kirsher 	rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
971b955f6caSJeff Kirsher 	coal_conf->rx_prev_packets =  coal_conf->rx_packets;
972b955f6caSJeff Kirsher 
973b955f6caSJeff Kirsher 	rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
974b955f6caSJeff Kirsher 	coal_conf->rx_prev_bytes =  coal_conf->rx_bytes;
975b955f6caSJeff Kirsher 
976b955f6caSJeff Kirsher 	if (rx_pkt_rate < 800) {
977b955f6caSJeff Kirsher 		if (coal_conf->rx_coal_type != NO_COALESCE) {
978b955f6caSJeff Kirsher 
979b955f6caSJeff Kirsher 			coal_conf->rx_timeout = 0x0;
980b955f6caSJeff Kirsher 			coal_conf->rx_event_count = 0;
981b955f6caSJeff Kirsher 			amd8111e_set_coalesce(dev, RX_INTR_COAL);
982b955f6caSJeff Kirsher 			coal_conf->rx_coal_type = NO_COALESCE;
983b955f6caSJeff Kirsher 		}
984ca3fc0aaSYixing Liu 	} else {
985b955f6caSJeff Kirsher 
986b955f6caSJeff Kirsher 		rx_pkt_size = rx_data_rate/rx_pkt_rate;
987b955f6caSJeff Kirsher 		if (rx_pkt_size < 128) {
988b955f6caSJeff Kirsher 			if (coal_conf->rx_coal_type != NO_COALESCE) {
989b955f6caSJeff Kirsher 
990b955f6caSJeff Kirsher 				coal_conf->rx_timeout = 0;
991b955f6caSJeff Kirsher 				coal_conf->rx_event_count = 0;
992b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, RX_INTR_COAL);
993b955f6caSJeff Kirsher 				coal_conf->rx_coal_type = NO_COALESCE;
994b955f6caSJeff Kirsher 			}
995b955f6caSJeff Kirsher 
996ca3fc0aaSYixing Liu 		} else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) {
997b955f6caSJeff Kirsher 
998b955f6caSJeff Kirsher 			if (coal_conf->rx_coal_type !=  LOW_COALESCE) {
999b955f6caSJeff Kirsher 				coal_conf->rx_timeout = 1;
1000b955f6caSJeff Kirsher 				coal_conf->rx_event_count = 4;
1001b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, RX_INTR_COAL);
1002b955f6caSJeff Kirsher 				coal_conf->rx_coal_type = LOW_COALESCE;
1003b955f6caSJeff Kirsher 			}
1004ca3fc0aaSYixing Liu 		} else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) {
1005b955f6caSJeff Kirsher 
1006b955f6caSJeff Kirsher 			if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
1007b955f6caSJeff Kirsher 				coal_conf->rx_timeout = 1;
1008b955f6caSJeff Kirsher 				coal_conf->rx_event_count = 4;
1009b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, RX_INTR_COAL);
1010b955f6caSJeff Kirsher 				coal_conf->rx_coal_type = MEDIUM_COALESCE;
1011b955f6caSJeff Kirsher 			}
1012b955f6caSJeff Kirsher 
1013ca3fc0aaSYixing Liu 		} else if (rx_pkt_size >= 1024) {
1014ca3fc0aaSYixing Liu 
1015b955f6caSJeff Kirsher 			if (coal_conf->rx_coal_type !=  HIGH_COALESCE) {
1016b955f6caSJeff Kirsher 				coal_conf->rx_timeout = 2;
1017b955f6caSJeff Kirsher 				coal_conf->rx_event_count = 3;
1018b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, RX_INTR_COAL);
1019b955f6caSJeff Kirsher 				coal_conf->rx_coal_type = HIGH_COALESCE;
1020b955f6caSJeff Kirsher 			}
1021b955f6caSJeff Kirsher 		}
1022b955f6caSJeff Kirsher 	}
1023b955f6caSJeff Kirsher 	/* NOW FOR TX INTR COALESC */
1024b955f6caSJeff Kirsher 	if (tx_pkt_rate < 800) {
1025b955f6caSJeff Kirsher 		if (coal_conf->tx_coal_type != NO_COALESCE) {
1026b955f6caSJeff Kirsher 
1027b955f6caSJeff Kirsher 			coal_conf->tx_timeout = 0x0;
1028b955f6caSJeff Kirsher 			coal_conf->tx_event_count = 0;
1029b955f6caSJeff Kirsher 			amd8111e_set_coalesce(dev, TX_INTR_COAL);
1030b955f6caSJeff Kirsher 			coal_conf->tx_coal_type = NO_COALESCE;
1031b955f6caSJeff Kirsher 		}
1032ca3fc0aaSYixing Liu 	} else {
1033b955f6caSJeff Kirsher 
1034b955f6caSJeff Kirsher 		tx_pkt_size = tx_data_rate/tx_pkt_rate;
1035b955f6caSJeff Kirsher 		if (tx_pkt_size < 128) {
1036b955f6caSJeff Kirsher 
1037b955f6caSJeff Kirsher 			if (coal_conf->tx_coal_type != NO_COALESCE) {
1038b955f6caSJeff Kirsher 
1039b955f6caSJeff Kirsher 				coal_conf->tx_timeout = 0;
1040b955f6caSJeff Kirsher 				coal_conf->tx_event_count = 0;
1041b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, TX_INTR_COAL);
1042b955f6caSJeff Kirsher 				coal_conf->tx_coal_type = NO_COALESCE;
1043b955f6caSJeff Kirsher 			}
1044b955f6caSJeff Kirsher 
1045ca3fc0aaSYixing Liu 		} else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) {
1046b955f6caSJeff Kirsher 
1047b955f6caSJeff Kirsher 			if (coal_conf->tx_coal_type != LOW_COALESCE) {
1048b955f6caSJeff Kirsher 				coal_conf->tx_timeout = 1;
1049b955f6caSJeff Kirsher 				coal_conf->tx_event_count = 2;
1050b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, TX_INTR_COAL);
1051b955f6caSJeff Kirsher 				coal_conf->tx_coal_type = LOW_COALESCE;
1052b955f6caSJeff Kirsher 
1053b955f6caSJeff Kirsher 			}
1054ca3fc0aaSYixing Liu 		} else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) {
1055b955f6caSJeff Kirsher 
1056b955f6caSJeff Kirsher 			if (coal_conf->tx_coal_type != MEDIUM_COALESCE) {
1057b955f6caSJeff Kirsher 				coal_conf->tx_timeout = 2;
1058b955f6caSJeff Kirsher 				coal_conf->tx_event_count = 5;
1059b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, TX_INTR_COAL);
1060b955f6caSJeff Kirsher 				coal_conf->tx_coal_type = MEDIUM_COALESCE;
1061b955f6caSJeff Kirsher 			}
10624d7b4483SColin Ian King 		} else if (tx_pkt_size >= 1024) {
1063b955f6caSJeff Kirsher 			if (coal_conf->tx_coal_type != HIGH_COALESCE) {
1064b955f6caSJeff Kirsher 				coal_conf->tx_timeout = 4;
1065b955f6caSJeff Kirsher 				coal_conf->tx_event_count = 8;
1066b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, TX_INTR_COAL);
1067b955f6caSJeff Kirsher 				coal_conf->tx_coal_type = HIGH_COALESCE;
1068b955f6caSJeff Kirsher 			}
1069b955f6caSJeff Kirsher 		}
1070b955f6caSJeff Kirsher 	}
1071b955f6caSJeff Kirsher 	return 0;
1072b955f6caSJeff Kirsher 
1073b955f6caSJeff Kirsher }
107413a4fa43SVarka Bhadram 
/* Device interrupt handler.
 *
 * Masks interrupts, reads and acknowledges the events latched in INT0,
 * then dispatches them: RINT0 schedules NAPI rx polling (with rx
 * interrupts disabled), TINT0 reclaims completed tx descriptors,
 * LCINT reports a link change, and STINT (hardware timer) re-tunes
 * interrupt coalescing.  Interrupts are re-enabled on exit.
 */
static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
{

	struct net_device *dev = (struct net_device *)dev_id;
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned int intr0, intren0;
	unsigned int handled = 1;

	if (unlikely(dev == NULL))
		return IRQ_NONE;

	spin_lock(&lp->lock);

	/* Mask all interrupts while this one is serviced. */
	writel(INTREN, mmio + CMD0);

	/* Read interrupt status and the current enable mask. */
	intr0 = readl(mmio + INT0);
	intren0 = readl(mmio + INTEN0);

	/* Process all the INT event until INTR bit is clear. */

	if (!(intr0 & INTR)) {
		handled = 0;	/* not our interrupt (shared line) */
		goto err_no_interrupt;
	}

	/* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
	/* Acknowledge everything we saw by writing the status back. */
	writel(intr0, mmio + INT0);

	/* Check if Receive Interrupt has occurred. */
	if (intr0 & RINT0) {
		if (napi_schedule_prep(&lp->napi)) {
			/* Disable receive interrupts */
			writel(RINTEN0, mmio + INTEN0);
			/* Schedule a polling routine */
			__napi_schedule(&lp->napi);
		} else if (intren0 & RINTEN0) {
			netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
			/* Fix by disable receive interrupts */
			writel(RINTEN0, mmio + INTEN0);
		}
	}

	/* Check if  Transmit Interrupt has occurred. */
	if (intr0 & TINT0)
		amd8111e_tx(dev);

	/* Check if  Link Change Interrupt has occurred. */
	if (intr0 & LCINT)
		amd8111e_link_change(dev);

	/* Check if Hardware Timer Interrupt has occurred. */
	if (intr0 & STINT)
		amd8111e_calc_coalesce(dev);

err_no_interrupt:
	/* Re-enable interrupts. */
	writel(VAL0 | INTREN, mmio + CMD0);

	spin_unlock(&lp->lock);

	return IRQ_RETVAL(handled);
}
1142b955f6caSJeff Kirsher 
1143b955f6caSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the interrupt handler with local interrupts
 * masked so it cannot race the real interrupt on this CPU.
 */
static void amd8111e_poll(struct net_device *dev)
{
	unsigned long flags;
	local_irq_save(flags);
	amd8111e_interrupt(0, dev);
	local_irq_restore(flags);
}
1151b955f6caSJeff Kirsher #endif
1152b955f6caSJeff Kirsher 
1153b955f6caSJeff Kirsher 
/* Close the network interface.
 *
 * Stops the tx queue and NAPI, quiesces the chip, releases buffers,
 * irq and descriptor rings, and takes a final statistics snapshot so
 * the most recent counters remain available while the interface is
 * down.  Always returns 0.
 */
static int amd8111e_close(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	netif_stop_queue(dev);

	napi_disable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_disable_interrupt(lp);
	amd8111e_stop_chip(lp);

	/* Free transmit and receive skbs */
	amd8111e_free_skbs(lp->amd8111e_net_dev);

	netif_carrier_off(lp->amd8111e_net_dev);

	/* Delete ipg timer.
	 * NOTE(review): del_timer_sync() while holding lp->lock can
	 * deadlock if the timer callback also takes lp->lock -- verify
	 * the ipg timer handler's locking.
	 */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);

	spin_unlock_irq(&lp->lock);
	free_irq(dev->irq, dev);
	amd8111e_free_ring(lp);

	/* Update the statistics before closing */
	amd8111e_get_stats(dev);
	lp->opened = 0;
	return 0;
}
118813a4fa43SVarka Bhadram 
/* Open the network interface.
 *
 * Requests a shared irq, resets the hardware to defaults, restarts
 * the device (amd8111e_restart() presumably allocates the rings --
 * its failure is mapped to -ENOMEM), arms the dynamic-IPG timer when
 * that option is enabled, and finally starts the tx queue.
 * Returns 0 on success, -EAGAIN when no irq can be obtained.
 */
static int amd8111e_open(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt,
					 IRQF_SHARED, dev->name, dev))
		return -EAGAIN;

	napi_enable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_init_hw_default(lp);

	if (amd8111e_restart(dev)) {
		/* Unwind: stop NAPI and release the irq on failure. */
		spin_unlock_irq(&lp->lock);
		napi_disable(&lp->napi);
		if (dev->irq)
			free_irq(dev->irq, dev);
		return -ENOMEM;
	}
	/* Start ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE) {
		add_timer(&lp->ipg_data.ipg_timer);
		netdev_info(dev, "Dynamic IPG Enabled\n");
	}

	lp->opened = 1;

	spin_unlock_irq(&lp->lock);

	netif_start_queue(dev);

	return 0;
}
122713a4fa43SVarka Bhadram 
122813a4fa43SVarka Bhadram /* This function checks if there is any transmit  descriptors
122913a4fa43SVarka Bhadram  * available to queue more packet.
1230b955f6caSJeff Kirsher  */
1231b955f6caSJeff Kirsher static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
1232b955f6caSJeff Kirsher {
1233b955f6caSJeff Kirsher 	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1234b955f6caSJeff Kirsher 	if (lp->tx_skbuff[tx_index])
1235b955f6caSJeff Kirsher 		return -1;
1236b955f6caSJeff Kirsher 	else
1237b955f6caSJeff Kirsher 		return 0;
1238b955f6caSJeff Kirsher 
1239b955f6caSJeff Kirsher }
1240b955f6caSJeff Kirsher 
124113a4fa43SVarka Bhadram /* This function will queue the transmit packets to the
124213a4fa43SVarka Bhadram  * descriptors and will trigger the send operation. It also
124313a4fa43SVarka Bhadram  * initializes the transmit descriptors with buffer physical address,
124413a4fa43SVarka Bhadram  * byte count, ownership to hardware etc.
124513a4fa43SVarka Bhadram  */
1246b955f6caSJeff Kirsher static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1247b955f6caSJeff Kirsher 				       struct net_device *dev)
1248b955f6caSJeff Kirsher {
1249b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1250b955f6caSJeff Kirsher 	int tx_index;
1251b955f6caSJeff Kirsher 	unsigned long flags;
1252b955f6caSJeff Kirsher 
1253b955f6caSJeff Kirsher 	spin_lock_irqsave(&lp->lock, flags);
1254b955f6caSJeff Kirsher 
1255b955f6caSJeff Kirsher 	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1256b955f6caSJeff Kirsher 
1257b955f6caSJeff Kirsher 	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1258b955f6caSJeff Kirsher 
1259b955f6caSJeff Kirsher 	lp->tx_skbuff[tx_index] = skb;
1260b955f6caSJeff Kirsher 	lp->tx_ring[tx_index].tx_flags = 0;
1261b955f6caSJeff Kirsher 
1262b955f6caSJeff Kirsher #if AMD8111E_VLAN_TAG_USED
1263df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
1264b955f6caSJeff Kirsher 		lp->tx_ring[tx_index].tag_ctrl_cmd |=
1265b955f6caSJeff Kirsher 				cpu_to_le16(TCC_VLAN_INSERT);
1266b955f6caSJeff Kirsher 		lp->tx_ring[tx_index].tag_ctrl_info =
1267df8a39deSJiri Pirko 				cpu_to_le16(skb_vlan_tag_get(skb));
1268b955f6caSJeff Kirsher 
1269b955f6caSJeff Kirsher 	}
1270b955f6caSJeff Kirsher #endif
1271b955f6caSJeff Kirsher 	lp->tx_dma_addr[tx_index] =
1272428f09c2SChristophe JAILLET 	    dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
1273428f09c2SChristophe JAILLET 			   DMA_TO_DEVICE);
1274b955f6caSJeff Kirsher 	lp->tx_ring[tx_index].buff_phy_addr =
1275b955f6caSJeff Kirsher 	    cpu_to_le32(lp->tx_dma_addr[tx_index]);
1276b955f6caSJeff Kirsher 
1277b955f6caSJeff Kirsher 	/*  Set FCS and LTINT bits */
1278b955f6caSJeff Kirsher 	wmb();
1279b955f6caSJeff Kirsher 	lp->tx_ring[tx_index].tx_flags |=
1280b955f6caSJeff Kirsher 	    cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
1281b955f6caSJeff Kirsher 
1282b955f6caSJeff Kirsher 	lp->tx_idx++;
1283b955f6caSJeff Kirsher 
1284b955f6caSJeff Kirsher 	/* Trigger an immediate send poll. */
1285b955f6caSJeff Kirsher 	writel(VAL1 | TDMD0, lp->mmio + CMD0);
1286b955f6caSJeff Kirsher 	writel(VAL2 | RDMD0, lp->mmio + CMD0);
1287b955f6caSJeff Kirsher 
1288b955f6caSJeff Kirsher 	if (amd8111e_tx_queue_avail(lp) < 0) {
1289b955f6caSJeff Kirsher 		netif_stop_queue(dev);
1290b955f6caSJeff Kirsher 	}
1291b955f6caSJeff Kirsher 	spin_unlock_irqrestore(&lp->lock, flags);
1292b955f6caSJeff Kirsher 	return NETDEV_TX_OK;
1293b955f6caSJeff Kirsher }
129413a4fa43SVarka Bhadram /* This function returns all the memory mapped registers of the device. */
1295b955f6caSJeff Kirsher static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1296b955f6caSJeff Kirsher {
1297b955f6caSJeff Kirsher 	void __iomem *mmio = lp->mmio;
1298b955f6caSJeff Kirsher 	/* Read only necessary registers */
1299b955f6caSJeff Kirsher 	buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1300b955f6caSJeff Kirsher 	buf[1] = readl(mmio + XMT_RING_LEN0);
1301b955f6caSJeff Kirsher 	buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1302b955f6caSJeff Kirsher 	buf[3] = readl(mmio + RCV_RING_LEN0);
1303b955f6caSJeff Kirsher 	buf[4] = readl(mmio + CMD0);
1304b955f6caSJeff Kirsher 	buf[5] = readl(mmio + CMD2);
1305b955f6caSJeff Kirsher 	buf[6] = readl(mmio + CMD3);
1306b955f6caSJeff Kirsher 	buf[7] = readl(mmio + CMD7);
1307b955f6caSJeff Kirsher 	buf[8] = readl(mmio + INT0);
1308b955f6caSJeff Kirsher 	buf[9] = readl(mmio + INTEN0);
1309b955f6caSJeff Kirsher 	buf[10] = readl(mmio + LADRF);
1310b955f6caSJeff Kirsher 	buf[11] = readl(mmio + LADRF+4);
1311b955f6caSJeff Kirsher 	buf[12] = readl(mmio + STAT0);
1312b955f6caSJeff Kirsher }
1313b955f6caSJeff Kirsher 
1314b955f6caSJeff Kirsher 
/* This function sets promiscuos mode, all-multi mode or the multicast address
 * list to the device.
 */
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 mc_filter[2];
	int bit_num;

	/* NOTE(review): CMD2 writes appear to use the VAL2 companion bit to
	 * set PROM and a bare PROM write to clear it — confirm against the
	 * AMD8111E register specification.
	 */
	if (dev->flags & IFF_PROMISC) {
		writel(VAL2 | PROM, lp->mmio + CMD2);
		return;
	}
	else
		writel(PROM, lp->mmio + CMD2);
	if (dev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
		/* get all multicast packet */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->options |= OPTION_MULTICAST_ENABLE;
		/* NOTE(review): the u32[2] -> u64 type-pun assumes a
		 * little-endian word layout for LADRF — TODO confirm.
		 */
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		return;
	}
	if (netdev_mc_empty(dev)) {
		/* get only own packets */
		mc_filter[1] = mc_filter[0] = 0;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		/* disable promiscuous mode */
		writel(PROM, lp->mmio + CMD2);
		return;
	}
	/* load all the multicast addresses in the logic filter */
	lp->options |= OPTION_MULTICAST_ENABLE;
	mc_filter[1] = mc_filter[0] = 0;
	netdev_for_each_mc_addr(ha, dev) {
		/* Hash each address with little-endian CRC-32; the top six
		 * bits of the CRC select one of the 64 logical filter bits.
		 */
		bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD2);

}
1361b955f6caSJeff Kirsher 
136246c73eccSVarka Bhadram static void amd8111e_get_drvinfo(struct net_device *dev,
136346c73eccSVarka Bhadram 				 struct ethtool_drvinfo *info)
1364b955f6caSJeff Kirsher {
1365b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1366b955f6caSJeff Kirsher 	struct pci_dev *pci_dev = lp->pci_dev;
1367*f029c781SWolfram Sang 	strscpy(info->driver, MODULE_NAME, sizeof(info->driver));
136823020ab3SRick Jones 	snprintf(info->fw_version, sizeof(info->fw_version),
136923020ab3SRick Jones 		"%u", chip_version);
1370*f029c781SWolfram Sang 	strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
1371b955f6caSJeff Kirsher }
1372b955f6caSJeff Kirsher 
1373b955f6caSJeff Kirsher static int amd8111e_get_regs_len(struct net_device *dev)
1374b955f6caSJeff Kirsher {
1375b955f6caSJeff Kirsher 	return AMD8111E_REG_DUMP_LEN;
1376b955f6caSJeff Kirsher }
1377b955f6caSJeff Kirsher 
1378b955f6caSJeff Kirsher static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
1379b955f6caSJeff Kirsher {
1380b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1381b955f6caSJeff Kirsher 	regs->version = 0;
1382b955f6caSJeff Kirsher 	amd8111e_read_regs(lp, buf);
1383b955f6caSJeff Kirsher }
1384b955f6caSJeff Kirsher 
13851435003cSPhilippe Reynes static int amd8111e_get_link_ksettings(struct net_device *dev,
13861435003cSPhilippe Reynes 				       struct ethtool_link_ksettings *cmd)
1387b955f6caSJeff Kirsher {
1388b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1389b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
13901435003cSPhilippe Reynes 	mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
1391b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1392b955f6caSJeff Kirsher 	return 0;
1393b955f6caSJeff Kirsher }
1394b955f6caSJeff Kirsher 
13951435003cSPhilippe Reynes static int amd8111e_set_link_ksettings(struct net_device *dev,
13961435003cSPhilippe Reynes 				       const struct ethtool_link_ksettings *cmd)
1397b955f6caSJeff Kirsher {
1398b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1399b955f6caSJeff Kirsher 	int res;
1400b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
14011435003cSPhilippe Reynes 	res = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
1402b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1403b955f6caSJeff Kirsher 	return res;
1404b955f6caSJeff Kirsher }
1405b955f6caSJeff Kirsher 
1406b955f6caSJeff Kirsher static int amd8111e_nway_reset(struct net_device *dev)
1407b955f6caSJeff Kirsher {
1408b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1409b955f6caSJeff Kirsher 	return mii_nway_restart(&lp->mii_if);
1410b955f6caSJeff Kirsher }
1411b955f6caSJeff Kirsher 
1412b955f6caSJeff Kirsher static u32 amd8111e_get_link(struct net_device *dev)
1413b955f6caSJeff Kirsher {
1414b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1415b955f6caSJeff Kirsher 	return mii_link_ok(&lp->mii_if);
1416b955f6caSJeff Kirsher }
1417b955f6caSJeff Kirsher 
1418b955f6caSJeff Kirsher static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1419b955f6caSJeff Kirsher {
1420b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1421b955f6caSJeff Kirsher 	wol_info->supported = WAKE_MAGIC|WAKE_PHY;
1422b955f6caSJeff Kirsher 	if (lp->options & OPTION_WOL_ENABLE)
1423b955f6caSJeff Kirsher 		wol_info->wolopts = WAKE_MAGIC;
1424b955f6caSJeff Kirsher }
1425b955f6caSJeff Kirsher 
1426b955f6caSJeff Kirsher static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1427b955f6caSJeff Kirsher {
1428b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1429b955f6caSJeff Kirsher 	if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
1430b955f6caSJeff Kirsher 		return -EINVAL;
1431b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
1432b955f6caSJeff Kirsher 	if (wol_info->wolopts & WAKE_MAGIC)
1433b955f6caSJeff Kirsher 		lp->options |=
1434b955f6caSJeff Kirsher 			(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1435b955f6caSJeff Kirsher 	else if (wol_info->wolopts & WAKE_PHY)
1436b955f6caSJeff Kirsher 		lp->options |=
1437b955f6caSJeff Kirsher 			(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1438b955f6caSJeff Kirsher 	else
1439b955f6caSJeff Kirsher 		lp->options &= ~OPTION_WOL_ENABLE;
1440b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1441b955f6caSJeff Kirsher 	return 0;
1442b955f6caSJeff Kirsher }
1443b955f6caSJeff Kirsher 
/* ethtool operations supported by this driver. */
static const struct ethtool_ops ops = {
	.get_drvinfo = amd8111e_get_drvinfo,
	.get_regs_len = amd8111e_get_regs_len,
	.get_regs = amd8111e_get_regs,
	.nway_reset = amd8111e_nway_reset,
	.get_link = amd8111e_get_link,
	.get_wol = amd8111e_get_wol,
	.set_wol = amd8111e_set_wol,
	.get_link_ksettings = amd8111e_get_link_ksettings,
	.set_link_ksettings = amd8111e_set_link_ksettings,
};
1455b955f6caSJeff Kirsher 
/* This function handles all the  ethtool ioctls. It gives driver info,
 * gets/sets driver speed, gets memory mapped register values, forces
 * auto negotiation, sets/gets WOL options for ethtool application.
 */
static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;
	u32 mii_regval;

	switch (cmd) {
	case SIOCGMIIPHY:
		/* Report the external PHY address, then fall through so the
		 * caller also gets the requested register value.
		 */
		data->phy_id = lp->ext_phy_addr;

		fallthrough;
	case SIOCGMIIREG:

		/* PHY accesses share the MDIO interface with the rest of
		 * the driver; serialize with lp->lock.
		 */
		spin_lock_irq(&lp->lock);
		err = amd8111e_read_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
		spin_unlock_irq(&lp->lock);

		data->val_out = mii_regval;
		return err;

	case SIOCSMIIREG:

		spin_lock_irq(&lp->lock);
		err = amd8111e_write_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
		spin_unlock_irq(&lp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
1497b955f6caSJeff Kirsher static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1498b955f6caSJeff Kirsher {
1499b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1500b955f6caSJeff Kirsher 	int i;
1501b955f6caSJeff Kirsher 	struct sockaddr *addr = p;
1502b955f6caSJeff Kirsher 
1503a05e4c0aSJakub Kicinski 	eth_hw_addr_set(dev, addr->sa_data);
1504b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
1505b955f6caSJeff Kirsher 	/* Setting the MAC address to the device */
1506c857ff6eSJoe Perches 	for (i = 0; i < ETH_ALEN; i++)
1507b955f6caSJeff Kirsher 		writeb(dev->dev_addr[i], lp->mmio + PADR + i);
1508b955f6caSJeff Kirsher 
1509b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1510b955f6caSJeff Kirsher 
1511b955f6caSJeff Kirsher 	return 0;
1512b955f6caSJeff Kirsher }
1513b955f6caSJeff Kirsher 
151413a4fa43SVarka Bhadram /* This function changes the mtu of the device. It restarts the device  to
151513a4fa43SVarka Bhadram  * initialize the descriptor with new receive buffers.
1516b955f6caSJeff Kirsher  */
1517b955f6caSJeff Kirsher static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1518b955f6caSJeff Kirsher {
1519b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1520b955f6caSJeff Kirsher 	int err;
1521b955f6caSJeff Kirsher 
1522b955f6caSJeff Kirsher 	if (!netif_running(dev)) {
1523b955f6caSJeff Kirsher 		/* new_mtu will be used
152413a4fa43SVarka Bhadram 		 * when device starts netxt time
152513a4fa43SVarka Bhadram 		 */
1526b955f6caSJeff Kirsher 		dev->mtu = new_mtu;
1527b955f6caSJeff Kirsher 		return 0;
1528b955f6caSJeff Kirsher 	}
1529b955f6caSJeff Kirsher 
1530b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
1531b955f6caSJeff Kirsher 
1532b955f6caSJeff Kirsher 	/* stop the chip */
1533b955f6caSJeff Kirsher 	writel(RUN, lp->mmio + CMD0);
1534b955f6caSJeff Kirsher 
1535b955f6caSJeff Kirsher 	dev->mtu = new_mtu;
1536b955f6caSJeff Kirsher 
1537b955f6caSJeff Kirsher 	err = amd8111e_restart(dev);
1538b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1539b955f6caSJeff Kirsher 	if (!err)
1540b955f6caSJeff Kirsher 		netif_start_queue(dev);
1541b955f6caSJeff Kirsher 	return err;
1542b955f6caSJeff Kirsher }
1543b955f6caSJeff Kirsher 
/* Arm magic-packet (Wake-on-LAN) detection via CMD3/CMD7. Called from the
 * suspend path after the chip has been stopped. Always returns 0.
 */
static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
{
	writel(VAL1 | MPPLBA, lp->mmio + CMD3);
	writel(VAL0 | MPEN_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}
1553b955f6caSJeff Kirsher 
/* Arm link-change (PHY) wakeup via CMD7. Called from the suspend path
 * after the chip has been stopped. Always returns 0.
 */
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{

	/* Adapter is already stoped/suspended/interrupt-disabled */
	writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}
1564b955f6caSJeff Kirsher 
156513a4fa43SVarka Bhadram /* This function is called when a packet transmission fails to complete
1566b955f6caSJeff Kirsher  * within a reasonable period, on the assumption that an interrupt have
1567b955f6caSJeff Kirsher  * failed or the interface is locked up. This function will reinitialize
1568b955f6caSJeff Kirsher  * the hardware.
1569b955f6caSJeff Kirsher  */
15700290bd29SMichael S. Tsirkin static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
1571b955f6caSJeff Kirsher {
1572b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1573b955f6caSJeff Kirsher 	int err;
1574b955f6caSJeff Kirsher 
1575f7afbaa5SVarka Bhadram 	netdev_err(dev, "transmit timed out, resetting\n");
1576f7afbaa5SVarka Bhadram 
1577b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
1578b955f6caSJeff Kirsher 	err = amd8111e_restart(dev);
1579b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1580b955f6caSJeff Kirsher 	if (!err)
1581b955f6caSJeff Kirsher 		netif_wake_queue(dev);
1582b955f6caSJeff Kirsher }
15832caf751fSVaibhav Gupta 
/* Power-management suspend hook: quiesce interrupts, detach the interface,
 * stop the chip, then arm the configured Wake-on-LAN events.
 */
static int __maybe_unused amd8111e_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	/* stop chip */
	spin_lock_irq(&lp->lock);
	/* Kill the dynamic-IPG timer before stopping the MAC so it cannot
	 * touch hardware registers afterwards.
	 */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if (lp->options & OPTION_WOL_ENABLE) {
		 /* enable wol */
		if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);
		if (lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);

		device_set_wakeup_enable(dev_d, 1);

	} else {
		device_set_wakeup_enable(dev_d, 0);
	}

	return 0;
}
16212caf751fSVaibhav Gupta 
/* Power-management resume hook: reattach the interface, restart the chip
 * and re-arm the dynamic-IPG timer when that option is enabled.
 */
static int __maybe_unused amd8111e_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);
	/* Restart ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		mod_timer(&lp->ipg_data.ipg_timer,
				jiffies + IPG_CONVERGE_JIFFIES);
	spin_unlock_irq(&lp->lock);

	return 0;
}
1642b955f6caSJeff Kirsher 
/* Timer callback implementing dynamic inter-packet-gap (IPG) tuning for
 * half-duplex links: in CSTATE it sweeps candidate IPG values and keeps
 * the one that produced the fewest new collisions; in SSTATE it holds
 * that value for IPG_STABLE_TIME ticks before starting a new sweep.
 * Re-arms itself every IPG_CONVERGE_JIFFIES.
 */
static void amd8111e_config_ipg(struct timer_list *t)
{
	struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
	struct ipg_info *ipg_data = &lp->ipg_data;
	void __iomem *mmio = lp->mmio;
	unsigned int prev_col_cnt = ipg_data->col_cnt;
	unsigned int total_col_cnt;
	unsigned int tmp_ipg;

	/* No collisions on full duplex: use the default IPG. Note this
	 * early return also skips re-arming the timer below.
	 */
	if (lp->link_config.duplex == DUPLEX_FULL) {
		ipg_data->ipg = DEFAULT_IPG;
		return;
	}

	if (ipg_data->ipg_state == SSTATE) {

		/* Stable period elapsed: start a fresh sweep from MIN_IPG. */
		if (ipg_data->timer_tick == IPG_STABLE_TIME) {

			ipg_data->timer_tick = 0;
			ipg_data->ipg = MIN_IPG - IPG_STEP;
			ipg_data->current_ipg = MIN_IPG;
			ipg_data->diff_col_cnt = 0xFFFFFFFF;
			ipg_data->ipg_state = CSTATE;
		}
		else
			ipg_data->timer_tick++;
	}

	if (ipg_data->ipg_state == CSTATE) {

		/* Get the current collision count */

		total_col_cnt = ipg_data->col_cnt =
				amd8111e_read_mib(mmio, xmt_collisions);

		/* Remember the IPG that produced the fewest collisions in
		 * a single interval so far.
		 */
		if ((total_col_cnt - prev_col_cnt) <
				(ipg_data->diff_col_cnt)) {

			ipg_data->diff_col_cnt =
				total_col_cnt - prev_col_cnt;

			ipg_data->ipg = ipg_data->current_ipg;
		}

		ipg_data->current_ipg += IPG_STEP;

		if (ipg_data->current_ipg <= MAX_IPG)
			tmp_ipg = ipg_data->current_ipg;
		else {
			/* Sweep finished: apply the best IPG found and go
			 * back to the stable state.
			 */
			tmp_ipg = ipg_data->ipg;
			ipg_data->ipg_state = SSTATE;
		}
		writew((u32)tmp_ipg, mmio + IPG);
		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
	}
	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
	return;

}
1702b955f6caSJeff Kirsher 
17030cb0568dSBill Pemberton static void amd8111e_probe_ext_phy(struct net_device *dev)
1704b955f6caSJeff Kirsher {
1705b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1706b955f6caSJeff Kirsher 	int i;
1707b955f6caSJeff Kirsher 
1708b955f6caSJeff Kirsher 	for (i = 0x1e; i >= 0; i--) {
1709b955f6caSJeff Kirsher 		u32 id1, id2;
1710b955f6caSJeff Kirsher 
1711b955f6caSJeff Kirsher 		if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1712b955f6caSJeff Kirsher 			continue;
1713b955f6caSJeff Kirsher 		if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1714b955f6caSJeff Kirsher 			continue;
1715b955f6caSJeff Kirsher 		lp->ext_phy_id = (id1 << 16) | id2;
1716b955f6caSJeff Kirsher 		lp->ext_phy_addr = i;
1717b955f6caSJeff Kirsher 		return;
1718b955f6caSJeff Kirsher 	}
1719b955f6caSJeff Kirsher 	lp->ext_phy_id = 0;
1720b955f6caSJeff Kirsher 	lp->ext_phy_addr = 1;
1721b955f6caSJeff Kirsher }
1722b955f6caSJeff Kirsher 
/* net_device operations for the AMD8111E interface. */
static const struct net_device_ops amd8111e_netdev_ops = {
	.ndo_open		= amd8111e_open,
	.ndo_stop		= amd8111e_close,
	.ndo_start_xmit		= amd8111e_start_xmit,
	.ndo_tx_timeout		= amd8111e_tx_timeout,
	.ndo_get_stats		= amd8111e_get_stats,
	.ndo_set_rx_mode	= amd8111e_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= amd8111e_set_mac_address,
	.ndo_eth_ioctl		= amd8111e_ioctl,
	.ndo_change_mtu		= amd8111e_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = amd8111e_poll,
#endif
};
1738b955f6caSJeff Kirsher 
17390cb0568dSBill Pemberton static int amd8111e_probe_one(struct pci_dev *pdev,
1740b955f6caSJeff Kirsher 				  const struct pci_device_id *ent)
1741b955f6caSJeff Kirsher {
1742f9c7da5eSYijing Wang 	int err, i;
1743b955f6caSJeff Kirsher 	unsigned long reg_addr, reg_len;
1744b955f6caSJeff Kirsher 	struct amd8111e_priv *lp;
1745b955f6caSJeff Kirsher 	struct net_device *dev;
1746f98c5050SJakub Kicinski 	u8 addr[ETH_ALEN];
1747b955f6caSJeff Kirsher 
1748b955f6caSJeff Kirsher 	err = pci_enable_device(pdev);
1749b955f6caSJeff Kirsher 	if (err) {
1750f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
1751b955f6caSJeff Kirsher 		return err;
1752b955f6caSJeff Kirsher 	}
1753b955f6caSJeff Kirsher 
1754b955f6caSJeff Kirsher 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1755f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot find PCI base address\n");
1756b955f6caSJeff Kirsher 		err = -ENODEV;
1757b955f6caSJeff Kirsher 		goto err_disable_pdev;
1758b955f6caSJeff Kirsher 	}
1759b955f6caSJeff Kirsher 
1760b955f6caSJeff Kirsher 	err = pci_request_regions(pdev, MODULE_NAME);
1761b955f6caSJeff Kirsher 	if (err) {
1762f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
1763b955f6caSJeff Kirsher 		goto err_disable_pdev;
1764b955f6caSJeff Kirsher 	}
1765b955f6caSJeff Kirsher 
1766b955f6caSJeff Kirsher 	pci_set_master(pdev);
1767b955f6caSJeff Kirsher 
1768b955f6caSJeff Kirsher 	/* Find power-management capability. */
1769f9c7da5eSYijing Wang 	if (!pdev->pm_cap) {
1770f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "No Power Management capability\n");
177186e506e3SPeter Senna Tschudin 		err = -ENODEV;
1772b955f6caSJeff Kirsher 		goto err_free_reg;
1773b955f6caSJeff Kirsher 	}
1774b955f6caSJeff Kirsher 
1775b955f6caSJeff Kirsher 	/* Initialize DMA */
1776428f09c2SChristophe JAILLET 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) < 0) {
1777f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "DMA not supported\n");
177886e506e3SPeter Senna Tschudin 		err = -ENODEV;
1779b955f6caSJeff Kirsher 		goto err_free_reg;
1780b955f6caSJeff Kirsher 	}
1781b955f6caSJeff Kirsher 
1782b955f6caSJeff Kirsher 	reg_addr = pci_resource_start(pdev, 0);
1783b955f6caSJeff Kirsher 	reg_len = pci_resource_len(pdev, 0);
1784b955f6caSJeff Kirsher 
1785b955f6caSJeff Kirsher 	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1786b955f6caSJeff Kirsher 	if (!dev) {
1787b955f6caSJeff Kirsher 		err = -ENOMEM;
1788b955f6caSJeff Kirsher 		goto err_free_reg;
1789b955f6caSJeff Kirsher 	}
1790b955f6caSJeff Kirsher 
1791b955f6caSJeff Kirsher 	SET_NETDEV_DEV(dev, &pdev->dev);
1792b955f6caSJeff Kirsher 
1793b955f6caSJeff Kirsher #if AMD8111E_VLAN_TAG_USED
1794f646968fSPatrick McHardy 	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1795b955f6caSJeff Kirsher #endif
1796b955f6caSJeff Kirsher 
1797b955f6caSJeff Kirsher 	lp = netdev_priv(dev);
1798b955f6caSJeff Kirsher 	lp->pci_dev = pdev;
1799b955f6caSJeff Kirsher 	lp->amd8111e_net_dev = dev;
1800f9c7da5eSYijing Wang 	lp->pm_cap = pdev->pm_cap;
1801b955f6caSJeff Kirsher 
1802b955f6caSJeff Kirsher 	spin_lock_init(&lp->lock);
1803b955f6caSJeff Kirsher 
1804711fec5dSVarka Bhadram 	lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
1805b955f6caSJeff Kirsher 	if (!lp->mmio) {
1806f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot map device registers\n");
1807b955f6caSJeff Kirsher 		err = -ENOMEM;
1808b955f6caSJeff Kirsher 		goto err_free_dev;
1809b955f6caSJeff Kirsher 	}
1810b955f6caSJeff Kirsher 
1811b955f6caSJeff Kirsher 	/* Initializing MAC address */
1812c857ff6eSJoe Perches 	for (i = 0; i < ETH_ALEN; i++)
1813f98c5050SJakub Kicinski 		addr[i] = readb(lp->mmio + PADR + i);
1814f98c5050SJakub Kicinski 	eth_hw_addr_set(dev, addr);
1815b955f6caSJeff Kirsher 
1816b955f6caSJeff Kirsher 	/* Setting user defined parametrs */
1817b955f6caSJeff Kirsher 	lp->ext_phy_option = speed_duplex[card_idx];
1818b955f6caSJeff Kirsher 	if (coalesce[card_idx])
1819b955f6caSJeff Kirsher 		lp->options |= OPTION_INTR_COAL_ENABLE;
1820b955f6caSJeff Kirsher 	if (dynamic_ipg[card_idx++])
1821b955f6caSJeff Kirsher 		lp->options |= OPTION_DYN_IPG_ENABLE;
1822b955f6caSJeff Kirsher 
1823b955f6caSJeff Kirsher 
1824b955f6caSJeff Kirsher 	/* Initialize driver entry points */
1825b955f6caSJeff Kirsher 	dev->netdev_ops = &amd8111e_netdev_ops;
18267ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &ops;
1827b955f6caSJeff Kirsher 	dev->irq = pdev->irq;
1828b955f6caSJeff Kirsher 	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
182944770e11SJarod Wilson 	dev->min_mtu = AMD8111E_MIN_MTU;
183044770e11SJarod Wilson 	dev->max_mtu = AMD8111E_MAX_MTU;
1831b707b89fSJakub Kicinski 	netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32);
1832b955f6caSJeff Kirsher 
1833b955f6caSJeff Kirsher 	/* Probe the external PHY */
1834b955f6caSJeff Kirsher 	amd8111e_probe_ext_phy(dev);
1835b955f6caSJeff Kirsher 
1836b955f6caSJeff Kirsher 	/* setting mii default values */
1837b955f6caSJeff Kirsher 	lp->mii_if.dev = dev;
1838b955f6caSJeff Kirsher 	lp->mii_if.mdio_read = amd8111e_mdio_read;
1839b955f6caSJeff Kirsher 	lp->mii_if.mdio_write = amd8111e_mdio_write;
1840b955f6caSJeff Kirsher 	lp->mii_if.phy_id = lp->ext_phy_addr;
1841b955f6caSJeff Kirsher 
1842b955f6caSJeff Kirsher 	/* Set receive buffer length and set jumbo option*/
1843b955f6caSJeff Kirsher 	amd8111e_set_rx_buff_len(dev);
1844b955f6caSJeff Kirsher 
1845b955f6caSJeff Kirsher 
1846b955f6caSJeff Kirsher 	err = register_netdev(dev);
1847b955f6caSJeff Kirsher 	if (err) {
1848f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot register net device\n");
1849711fec5dSVarka Bhadram 		goto err_free_dev;
1850b955f6caSJeff Kirsher 	}
1851b955f6caSJeff Kirsher 
1852b955f6caSJeff Kirsher 	pci_set_drvdata(pdev, dev);
1853b955f6caSJeff Kirsher 
1854b955f6caSJeff Kirsher 	/* Initialize software ipg timer */
1855b955f6caSJeff Kirsher 	if (lp->options & OPTION_DYN_IPG_ENABLE) {
1856495ad986SKees Cook 		timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
1857b955f6caSJeff Kirsher 		lp->ipg_data.ipg_timer.expires = jiffies +
1858b955f6caSJeff Kirsher 						 IPG_CONVERGE_JIFFIES;
1859b955f6caSJeff Kirsher 		lp->ipg_data.ipg = DEFAULT_IPG;
1860b955f6caSJeff Kirsher 		lp->ipg_data.ipg_state = CSTATE;
1861b955f6caSJeff Kirsher 	}
1862b955f6caSJeff Kirsher 
1863b955f6caSJeff Kirsher 	/*  display driver and device information */
1864b955f6caSJeff Kirsher 	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
1865f7afbaa5SVarka Bhadram 	dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1866f7afbaa5SVarka Bhadram 		 chip_version, dev->dev_addr);
1867b955f6caSJeff Kirsher 	if (lp->ext_phy_id)
1868f7afbaa5SVarka Bhadram 		dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
1869f7afbaa5SVarka Bhadram 			 lp->ext_phy_id, lp->ext_phy_addr);
1870b955f6caSJeff Kirsher 	else
1871f7afbaa5SVarka Bhadram 		dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
1872f7afbaa5SVarka Bhadram 
1873b955f6caSJeff Kirsher 	return 0;
1874b955f6caSJeff Kirsher 
1875b955f6caSJeff Kirsher err_free_dev:
1876b955f6caSJeff Kirsher 	free_netdev(dev);
1877b955f6caSJeff Kirsher 
1878b955f6caSJeff Kirsher err_free_reg:
1879b955f6caSJeff Kirsher 	pci_release_regions(pdev);
1880b955f6caSJeff Kirsher 
1881b955f6caSJeff Kirsher err_disable_pdev:
1882b955f6caSJeff Kirsher 	pci_disable_device(pdev);
1883b955f6caSJeff Kirsher 	return err;
1884b955f6caSJeff Kirsher 
1885b955f6caSJeff Kirsher }
1886b955f6caSJeff Kirsher 
188743519e60SVarka Bhadram static void amd8111e_remove_one(struct pci_dev *pdev)
188843519e60SVarka Bhadram {
188943519e60SVarka Bhadram 	struct net_device *dev = pci_get_drvdata(pdev);
189043519e60SVarka Bhadram 
189143519e60SVarka Bhadram 	if (dev) {
189243519e60SVarka Bhadram 		unregister_netdev(dev);
189343519e60SVarka Bhadram 		free_netdev(dev);
189443519e60SVarka Bhadram 		pci_release_regions(pdev);
189543519e60SVarka Bhadram 		pci_disable_device(pdev);
189643519e60SVarka Bhadram 	}
189743519e60SVarka Bhadram }
189843519e60SVarka Bhadram 
1899ba69a3d7SVarka Bhadram static const struct pci_device_id amd8111e_pci_tbl[] = {
1900ba69a3d7SVarka Bhadram 	{
1901ba69a3d7SVarka Bhadram 	 .vendor = PCI_VENDOR_ID_AMD,
1902ba69a3d7SVarka Bhadram 	 .device = PCI_DEVICE_ID_AMD8111E_7462,
1903ba69a3d7SVarka Bhadram 	},
1904ba69a3d7SVarka Bhadram 	{
1905ba69a3d7SVarka Bhadram 	 .vendor = 0,
1906ba69a3d7SVarka Bhadram 	}
1907ba69a3d7SVarka Bhadram };
1908ba69a3d7SVarka Bhadram MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
1909ba69a3d7SVarka Bhadram 
/* System sleep PM callbacks; the suspend/resume hooks are wired in by
 * this macro only when CONFIG_PM_SLEEP is enabled.
 */
static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume);

1912b955f6caSJeff Kirsher static struct pci_driver amd8111e_driver = {
1913b955f6caSJeff Kirsher 	.name		= MODULE_NAME,
1914b955f6caSJeff Kirsher 	.id_table	= amd8111e_pci_tbl,
1915b955f6caSJeff Kirsher 	.probe		= amd8111e_probe_one,
19160cb0568dSBill Pemberton 	.remove		= amd8111e_remove_one,
19172caf751fSVaibhav Gupta 	.driver.pm	= &amd8111e_pm_ops
1918b955f6caSJeff Kirsher };
1919b955f6caSJeff Kirsher 
/* Generates the module init/exit boilerplate that registers and
 * unregisters amd8111e_driver with the PCI core.
 */
module_pci_driver(amd8111e_driver);
1921