11ccea77eSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2b955f6caSJeff Kirsher 
3b955f6caSJeff Kirsher /* Advanced  Micro Devices Inc. AMD8111E Linux Network Driver
4b955f6caSJeff Kirsher  * Copyright (C) 2004 Advanced Micro Devices
5b955f6caSJeff Kirsher  *
6b955f6caSJeff Kirsher  * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7b955f6caSJeff Kirsher  * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8b955f6caSJeff Kirsher  * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9b955f6caSJeff Kirsher  * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10b955f6caSJeff Kirsher  * Copyright 1993 United States Government as represented by the
11b955f6caSJeff Kirsher  *	Director, National Security Agency.[ pcnet32.c ]
12b955f6caSJeff Kirsher  * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13b955f6caSJeff Kirsher  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
14b955f6caSJeff Kirsher  *
15b955f6caSJeff Kirsher 
16b955f6caSJeff Kirsher Module Name:
17b955f6caSJeff Kirsher 
18b955f6caSJeff Kirsher 	amd8111e.c
19b955f6caSJeff Kirsher 
20b955f6caSJeff Kirsher Abstract:
21b955f6caSJeff Kirsher 
22b955f6caSJeff Kirsher 	 AMD8111 based 10/100 Ethernet Controller Driver.
23b955f6caSJeff Kirsher 
24b955f6caSJeff Kirsher Environment:
25b955f6caSJeff Kirsher 
26b955f6caSJeff Kirsher 	Kernel Mode
27b955f6caSJeff Kirsher 
28b955f6caSJeff Kirsher Revision History:
29b955f6caSJeff Kirsher 	3.0.0
30b955f6caSJeff Kirsher 	   Initial Revision.
31b955f6caSJeff Kirsher 	3.0.1
32b955f6caSJeff Kirsher 	 1. Dynamic interrupt coalescing.
33b955f6caSJeff Kirsher 	 2. Removed prev_stats.
34b955f6caSJeff Kirsher 	 3. MII support.
35b955f6caSJeff Kirsher 	 4. Dynamic IPG support
36b955f6caSJeff Kirsher 	3.0.2  05/29/2003
37b955f6caSJeff Kirsher 	 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
38b955f6caSJeff Kirsher 	 2. Bug fix: Fixed VLAN support failure.
39b955f6caSJeff Kirsher 	 3. Bug fix: Fixed receive interrupt coalescing bug.
40b955f6caSJeff Kirsher 	 4. Dynamic IPG support is disabled by default.
41b955f6caSJeff Kirsher 	3.0.3 06/05/2003
42b955f6caSJeff Kirsher 	 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
43b955f6caSJeff Kirsher 	3.0.4 12/09/2003
44b955f6caSJeff Kirsher 	 1. Added set_mac_address routine for bonding driver support.
45b955f6caSJeff Kirsher 	 2. Tested the driver for bonding support
46*7c13f442SGuofeng Yue 	 3. Bug fix: Fixed mismatch in actual receive buffer length and length
47b955f6caSJeff Kirsher 	    indicated to the h/w.
48b955f6caSJeff Kirsher 	 4. Modified amd8111e_rx() routine to receive all the received packets
49b955f6caSJeff Kirsher 	    in the first interrupt.
50b955f6caSJeff Kirsher 	 5. Bug fix: Corrected  rx_errors  reported in get_stats() function.
51b955f6caSJeff Kirsher 	3.0.5 03/22/2004
52b955f6caSJeff Kirsher 	 1. Added NAPI support
53b955f6caSJeff Kirsher 
54b955f6caSJeff Kirsher */
55b955f6caSJeff Kirsher 
56b955f6caSJeff Kirsher 
57b955f6caSJeff Kirsher #include <linux/module.h>
58b955f6caSJeff Kirsher #include <linux/kernel.h>
59b955f6caSJeff Kirsher #include <linux/types.h>
60b955f6caSJeff Kirsher #include <linux/compiler.h>
61b955f6caSJeff Kirsher #include <linux/delay.h>
62b955f6caSJeff Kirsher #include <linux/interrupt.h>
63b955f6caSJeff Kirsher #include <linux/ioport.h>
64b955f6caSJeff Kirsher #include <linux/pci.h>
65b955f6caSJeff Kirsher #include <linux/netdevice.h>
66b955f6caSJeff Kirsher #include <linux/etherdevice.h>
67b955f6caSJeff Kirsher #include <linux/skbuff.h>
68b955f6caSJeff Kirsher #include <linux/ethtool.h>
69b955f6caSJeff Kirsher #include <linux/mii.h>
70b955f6caSJeff Kirsher #include <linux/if_vlan.h>
71b955f6caSJeff Kirsher #include <linux/ctype.h>
72b955f6caSJeff Kirsher #include <linux/crc32.h>
73b955f6caSJeff Kirsher #include <linux/dma-mapping.h>
74b955f6caSJeff Kirsher 
75b955f6caSJeff Kirsher #include <asm/io.h>
76b955f6caSJeff Kirsher #include <asm/byteorder.h>
777c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
78b955f6caSJeff Kirsher 
79941992d2SJavier Martinez Canillas #if IS_ENABLED(CONFIG_VLAN_8021Q)
80b955f6caSJeff Kirsher #define AMD8111E_VLAN_TAG_USED 1
81b955f6caSJeff Kirsher #else
82b955f6caSJeff Kirsher #define AMD8111E_VLAN_TAG_USED 0
83b955f6caSJeff Kirsher #endif
84b955f6caSJeff Kirsher 
85b955f6caSJeff Kirsher #include "amd8111e.h"
86b955f6caSJeff Kirsher #define MODULE_NAME	"amd8111e"
87b955f6caSJeff Kirsher MODULE_AUTHOR("Advanced Micro Devices, Inc.");
887f4d2537SLeon Romanovsky MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller.");
89b955f6caSJeff Kirsher MODULE_LICENSE("GPL");
90b955f6caSJeff Kirsher module_param_array(speed_duplex, int, NULL, 0);
91b955f6caSJeff Kirsher MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
92b955f6caSJeff Kirsher module_param_array(coalesce, bool, NULL, 0);
93b955f6caSJeff Kirsher MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
94b955f6caSJeff Kirsher module_param_array(dynamic_ipg, bool, NULL, 0);
95b955f6caSJeff Kirsher MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
96b955f6caSJeff Kirsher 
9713a4fa43SVarka Bhadram /* This function will read the PHY registers. */
amd8111e_read_phy(struct amd8111e_priv * lp,int phy_id,int reg,u32 * val)9846c73eccSVarka Bhadram static int amd8111e_read_phy(struct amd8111e_priv *lp,
9946c73eccSVarka Bhadram 			     int phy_id, int reg, u32 *val)
100b955f6caSJeff Kirsher {
101b955f6caSJeff Kirsher 	void __iomem *mmio = lp->mmio;
102b955f6caSJeff Kirsher 	unsigned int reg_val;
103b955f6caSJeff Kirsher 	unsigned int repeat = REPEAT_CNT;
104b955f6caSJeff Kirsher 
105b955f6caSJeff Kirsher 	reg_val = readl(mmio + PHY_ACCESS);
106b955f6caSJeff Kirsher 	while (reg_val & PHY_CMD_ACTIVE)
107b955f6caSJeff Kirsher 		reg_val = readl(mmio + PHY_ACCESS);
108b955f6caSJeff Kirsher 
109b955f6caSJeff Kirsher 	writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
110b955f6caSJeff Kirsher 			   ((reg & 0x1f) << 16), mmio + PHY_ACCESS);
111b955f6caSJeff Kirsher 	do {
112b955f6caSJeff Kirsher 		reg_val = readl(mmio + PHY_ACCESS);
113b955f6caSJeff Kirsher 		udelay(30);  /* It takes 30 us to read/write data */
114b955f6caSJeff Kirsher 	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
115b955f6caSJeff Kirsher 	if (reg_val & PHY_RD_ERR)
116b955f6caSJeff Kirsher 		goto err_phy_read;
117b955f6caSJeff Kirsher 
118b955f6caSJeff Kirsher 	*val = reg_val & 0xffff;
119b955f6caSJeff Kirsher 	return 0;
120b955f6caSJeff Kirsher err_phy_read:
121b955f6caSJeff Kirsher 	*val = 0;
122b955f6caSJeff Kirsher 	return -EINVAL;
123b955f6caSJeff Kirsher 
124b955f6caSJeff Kirsher }
125b955f6caSJeff Kirsher 
12613a4fa43SVarka Bhadram /* This function will write into PHY registers. */
amd8111e_write_phy(struct amd8111e_priv * lp,int phy_id,int reg,u32 val)12746c73eccSVarka Bhadram static int amd8111e_write_phy(struct amd8111e_priv *lp,
12846c73eccSVarka Bhadram 			      int phy_id, int reg, u32 val)
129b955f6caSJeff Kirsher {
130b955f6caSJeff Kirsher 	unsigned int repeat = REPEAT_CNT;
131b955f6caSJeff Kirsher 	void __iomem *mmio = lp->mmio;
132b955f6caSJeff Kirsher 	unsigned int reg_val;
133b955f6caSJeff Kirsher 
134b955f6caSJeff Kirsher 	reg_val = readl(mmio + PHY_ACCESS);
135b955f6caSJeff Kirsher 	while (reg_val & PHY_CMD_ACTIVE)
136b955f6caSJeff Kirsher 		reg_val = readl(mmio + PHY_ACCESS);
137b955f6caSJeff Kirsher 
138b955f6caSJeff Kirsher 	writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
139b955f6caSJeff Kirsher 			   ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
140b955f6caSJeff Kirsher 
141b955f6caSJeff Kirsher 	do {
142b955f6caSJeff Kirsher 		reg_val = readl(mmio + PHY_ACCESS);
143b955f6caSJeff Kirsher 		udelay(30);  /* It takes 30 us to read/write the data */
144b955f6caSJeff Kirsher 	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
145b955f6caSJeff Kirsher 
146b955f6caSJeff Kirsher 	if (reg_val & PHY_RD_ERR)
147b955f6caSJeff Kirsher 		goto err_phy_write;
148b955f6caSJeff Kirsher 
149b955f6caSJeff Kirsher 	return 0;
150b955f6caSJeff Kirsher 
151b955f6caSJeff Kirsher err_phy_write:
152b955f6caSJeff Kirsher 	return -EINVAL;
153b955f6caSJeff Kirsher 
154b955f6caSJeff Kirsher }
15513a4fa43SVarka Bhadram 
/* mii-library hook: read a register from the external PHY. */
static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int val;

	/* On failure amd8111e_read_phy() leaves val set to 0. */
	amd8111e_read_phy(lp, phy_id, reg_num, &val);

	return val;
}
166b955f6caSJeff Kirsher 
/* mii-library hook: write a register of the external PHY. */
static void amd8111e_mdio_write(struct net_device *dev,
				int phy_id, int reg_num, int val)
{
	amd8111e_write_phy(netdev_priv(dev), phy_id, reg_num, val);
}
175b955f6caSJeff Kirsher 
17613a4fa43SVarka Bhadram /* This function will set PHY speed. During initialization sets
17713a4fa43SVarka Bhadram  * the original speed to 100 full
178b955f6caSJeff Kirsher  */
amd8111e_set_ext_phy(struct net_device * dev)179b955f6caSJeff Kirsher static void amd8111e_set_ext_phy(struct net_device *dev)
180b955f6caSJeff Kirsher {
181b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
182b955f6caSJeff Kirsher 	u32 bmcr, advert, tmp;
183b955f6caSJeff Kirsher 
184b955f6caSJeff Kirsher 	/* Determine mii register values to set the speed */
185b955f6caSJeff Kirsher 	advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
186b955f6caSJeff Kirsher 	tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
187b955f6caSJeff Kirsher 	switch (lp->ext_phy_option) {
188b955f6caSJeff Kirsher 	default:
189b955f6caSJeff Kirsher 	case SPEED_AUTONEG: /* advertise all values */
190b955f6caSJeff Kirsher 		tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
191b955f6caSJeff Kirsher 			ADVERTISE_100HALF | ADVERTISE_100FULL);
192b955f6caSJeff Kirsher 		break;
193b955f6caSJeff Kirsher 	case SPEED10_HALF:
194b955f6caSJeff Kirsher 		tmp |= ADVERTISE_10HALF;
195b955f6caSJeff Kirsher 		break;
196b955f6caSJeff Kirsher 	case SPEED10_FULL:
197b955f6caSJeff Kirsher 		tmp |= ADVERTISE_10FULL;
198b955f6caSJeff Kirsher 		break;
199b955f6caSJeff Kirsher 	case SPEED100_HALF:
200b955f6caSJeff Kirsher 		tmp |= ADVERTISE_100HALF;
201b955f6caSJeff Kirsher 		break;
202b955f6caSJeff Kirsher 	case SPEED100_FULL:
203b955f6caSJeff Kirsher 		tmp |= ADVERTISE_100FULL;
204b955f6caSJeff Kirsher 		break;
205b955f6caSJeff Kirsher 	}
206b955f6caSJeff Kirsher 
207b955f6caSJeff Kirsher 	if(advert != tmp)
208b955f6caSJeff Kirsher 		amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
209b955f6caSJeff Kirsher 	/* Restart auto negotiation */
210b955f6caSJeff Kirsher 	bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
211b955f6caSJeff Kirsher 	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
212b955f6caSJeff Kirsher 	amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
213b955f6caSJeff Kirsher 
214b955f6caSJeff Kirsher }
215b955f6caSJeff Kirsher 
21613a4fa43SVarka Bhadram /* This function will unmap skb->data space and will free
21713a4fa43SVarka Bhadram  * all transmit and receive skbuffs.
218b955f6caSJeff Kirsher  */
amd8111e_free_skbs(struct net_device * dev)219b955f6caSJeff Kirsher static int amd8111e_free_skbs(struct net_device *dev)
220b955f6caSJeff Kirsher {
221b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
222b955f6caSJeff Kirsher 	struct sk_buff *rx_skbuff;
223b955f6caSJeff Kirsher 	int i;
224b955f6caSJeff Kirsher 
225b955f6caSJeff Kirsher 	/* Freeing transmit skbs */
226b955f6caSJeff Kirsher 	for (i = 0; i < NUM_TX_BUFFERS; i++) {
227b955f6caSJeff Kirsher 		if (lp->tx_skbuff[i]) {
228428f09c2SChristophe JAILLET 			dma_unmap_single(&lp->pci_dev->dev,
229428f09c2SChristophe JAILLET 					 lp->tx_dma_addr[i],
230428f09c2SChristophe JAILLET 					 lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
231b955f6caSJeff Kirsher 			dev_kfree_skb(lp->tx_skbuff[i]);
232b955f6caSJeff Kirsher 			lp->tx_skbuff[i] = NULL;
233b955f6caSJeff Kirsher 			lp->tx_dma_addr[i] = 0;
234b955f6caSJeff Kirsher 		}
235b955f6caSJeff Kirsher 	}
236b955f6caSJeff Kirsher 	/* Freeing previously allocated receive buffers */
237b955f6caSJeff Kirsher 	for (i = 0; i < NUM_RX_BUFFERS; i++) {
238b955f6caSJeff Kirsher 		rx_skbuff = lp->rx_skbuff[i];
239b0b815a3SGuofeng Yue 		if (rx_skbuff) {
240428f09c2SChristophe JAILLET 			dma_unmap_single(&lp->pci_dev->dev,
241428f09c2SChristophe JAILLET 					 lp->rx_dma_addr[i],
242428f09c2SChristophe JAILLET 					 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
243b955f6caSJeff Kirsher 			dev_kfree_skb(lp->rx_skbuff[i]);
244b955f6caSJeff Kirsher 			lp->rx_skbuff[i] = NULL;
245b955f6caSJeff Kirsher 			lp->rx_dma_addr[i] = 0;
246b955f6caSJeff Kirsher 		}
247b955f6caSJeff Kirsher 	}
248b955f6caSJeff Kirsher 
249b955f6caSJeff Kirsher 	return 0;
250b955f6caSJeff Kirsher }
251b955f6caSJeff Kirsher 
25213a4fa43SVarka Bhadram /* This will set the receive buffer length corresponding
25313a4fa43SVarka Bhadram  * to the mtu size of networkinterface.
254b955f6caSJeff Kirsher  */
amd8111e_set_rx_buff_len(struct net_device * dev)255b955f6caSJeff Kirsher static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
256b955f6caSJeff Kirsher {
257b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
258b955f6caSJeff Kirsher 	unsigned int mtu = dev->mtu;
259b955f6caSJeff Kirsher 
260b955f6caSJeff Kirsher 	if (mtu > ETH_DATA_LEN) {
261b955f6caSJeff Kirsher 		/* MTU + ethernet header + FCS
26213a4fa43SVarka Bhadram 		 * + optional VLAN tag + skb reserve space 2
26313a4fa43SVarka Bhadram 		 */
264b955f6caSJeff Kirsher 		lp->rx_buff_len = mtu + ETH_HLEN + 10;
265b955f6caSJeff Kirsher 		lp->options |= OPTION_JUMBO_ENABLE;
266b955f6caSJeff Kirsher 	} else {
267b955f6caSJeff Kirsher 		lp->rx_buff_len = PKT_BUFF_SZ;
268b955f6caSJeff Kirsher 		lp->options &= ~OPTION_JUMBO_ENABLE;
269b955f6caSJeff Kirsher 	}
270b955f6caSJeff Kirsher }
271b955f6caSJeff Kirsher 
/* This function will free all the previously allocated buffers,
 * determine new receive buffer length and will allocate new receive buffers.
 * This function also allocates and initializes both the transmitter
 * and receive hardware descriptors.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 *
 * Ownership note: when lp->opened is set, the descriptor rings already
 * exist from a previous call and only the skbs are recycled; otherwise
 * the rings themselves are allocated here.
 */
static int amd8111e_init_ring(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	/* Reset the software ring indices. */
	lp->rx_idx = lp->tx_idx = 0;
	lp->tx_complete_idx = 0;
	lp->tx_ring_idx = 0;


	if (lp->opened)
		/* Free previously allocated transmit and receive skbs */
		amd8111e_free_skbs(dev);

	else {
		/* allocate the tx and rx descriptors */
		lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			&lp->tx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->tx_ring)
			goto err_no_mem;

		lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			&lp->rx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->rx_ring)
			goto err_free_tx_ring;
	}

	/* Set new receive buff size */
	amd8111e_set_rx_buff_len(dev);

	/* Allocating receive skbs */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {

		lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!lp->rx_skbuff[i]) {
			/* Release previously allocated skbs */
			for (--i; i >= 0; i--)
				dev_kfree_skb(lp->rx_skbuff[i]);
			/* NOTE(review): this path frees the rings even when
			 * lp->opened is set (rings from a previous call) and
			 * leaves lp->rx_ring/lp->tx_ring non-NULL — confirm
			 * callers treat -ENOMEM as fatal for the interface.
			 */
			goto err_free_rx_ring;
		}
		/* 2-byte reserve so the IP header lands 4-byte aligned. */
		skb_reserve(lp->rx_skbuff[i], 2);
	}
        /* Initializing receive descriptors */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		/* NOTE(review): dma_map_single() result is not checked with
		 * dma_mapping_error() — verify on platforms where mapping
		 * can fail.
		 */
		lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev,
						    lp->rx_skbuff[i]->data,
						    lp->rx_buff_len - 2,
						    DMA_FROM_DEVICE);

		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
		/* Descriptor fields must be visible before ownership is
		 * handed to the hardware via OWN_BIT.
		 */
		wmb();
		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
	}

	/* Initializing transmit descriptors */
	for (i = 0; i < NUM_TX_RING_DR; i++) {
		lp->tx_ring[i].buff_phy_addr = 0;
		lp->tx_ring[i].tx_flags = 0;
		lp->tx_ring[i].buff_count = 0;
	}

	return 0;

err_free_rx_ring:

	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			  lp->rx_ring, lp->rx_ring_dma_addr);

err_free_tx_ring:

	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			  lp->tx_ring, lp->tx_ring_dma_addr);

err_no_mem:
	return -ENOMEM;
}
35813a4fa43SVarka Bhadram 
35913a4fa43SVarka Bhadram /* This function will set the interrupt coalescing according
36013a4fa43SVarka Bhadram  * to the input arguments
36113a4fa43SVarka Bhadram  */
amd8111e_set_coalesce(struct net_device * dev,enum coal_mode cmod)362b955f6caSJeff Kirsher static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
363b955f6caSJeff Kirsher {
364b955f6caSJeff Kirsher 	unsigned int timeout;
365b955f6caSJeff Kirsher 	unsigned int event_count;
366b955f6caSJeff Kirsher 
367b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
368b955f6caSJeff Kirsher 	void __iomem *mmio = lp->mmio;
369b955f6caSJeff Kirsher 	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
370b955f6caSJeff Kirsher 
371b955f6caSJeff Kirsher 
372b955f6caSJeff Kirsher 	switch(cmod)
373b955f6caSJeff Kirsher 	{
374b955f6caSJeff Kirsher 		case RX_INTR_COAL :
375b955f6caSJeff Kirsher 			timeout = coal_conf->rx_timeout;
376b955f6caSJeff Kirsher 			event_count = coal_conf->rx_event_count;
377b955f6caSJeff Kirsher 			if (timeout > MAX_TIMEOUT ||
378b955f6caSJeff Kirsher 			    event_count > MAX_EVENT_COUNT)
379b955f6caSJeff Kirsher 				return -EINVAL;
380b955f6caSJeff Kirsher 
381b955f6caSJeff Kirsher 			timeout = timeout * DELAY_TIMER_CONV;
382b955f6caSJeff Kirsher 			writel(VAL0|STINTEN, mmio+INTEN0);
383ca3fc0aaSYixing Liu 			writel((u32)DLY_INT_A_R0 | (event_count << 16) |
384ca3fc0aaSYixing Liu 				timeout, mmio + DLY_INT_A);
385b955f6caSJeff Kirsher 			break;
386b955f6caSJeff Kirsher 
387b955f6caSJeff Kirsher 		case TX_INTR_COAL:
388b955f6caSJeff Kirsher 			timeout = coal_conf->tx_timeout;
389b955f6caSJeff Kirsher 			event_count = coal_conf->tx_event_count;
390b955f6caSJeff Kirsher 			if (timeout > MAX_TIMEOUT ||
391b955f6caSJeff Kirsher 			    event_count > MAX_EVENT_COUNT)
392b955f6caSJeff Kirsher 				return -EINVAL;
393b955f6caSJeff Kirsher 
394b955f6caSJeff Kirsher 
395b955f6caSJeff Kirsher 			timeout = timeout * DELAY_TIMER_CONV;
396b955f6caSJeff Kirsher 			writel(VAL0 | STINTEN, mmio + INTEN0);
397ca3fc0aaSYixing Liu 			writel((u32)DLY_INT_B_T0 | (event_count << 16) |
398ca3fc0aaSYixing Liu 				timeout, mmio + DLY_INT_B);
399b955f6caSJeff Kirsher 			break;
400b955f6caSJeff Kirsher 
401b955f6caSJeff Kirsher 		case DISABLE_COAL:
402b955f6caSJeff Kirsher 			writel(0, mmio + STVAL);
403b955f6caSJeff Kirsher 			writel(STINTEN, mmio + INTEN0);
404b955f6caSJeff Kirsher 			writel(0, mmio + DLY_INT_B);
405b955f6caSJeff Kirsher 			writel(0, mmio + DLY_INT_A);
406b955f6caSJeff Kirsher 			break;
407b955f6caSJeff Kirsher 		 case ENABLE_COAL:
408b955f6caSJeff Kirsher 		       /* Start the timer */
409b955f6caSJeff Kirsher 			writel((u32)SOFT_TIMER_FREQ, mmio + STVAL); /* 0.5 sec */
410b955f6caSJeff Kirsher 			writel(VAL0 | STINTEN, mmio + INTEN0);
411b955f6caSJeff Kirsher 			break;
412b955f6caSJeff Kirsher 		default:
413b955f6caSJeff Kirsher 			break;
414b955f6caSJeff Kirsher 
415b955f6caSJeff Kirsher    }
416b955f6caSJeff Kirsher 	return 0;
417b955f6caSJeff Kirsher 
418b955f6caSJeff Kirsher }
419b955f6caSJeff Kirsher 
/* This function initializes the device registers and starts the device.
 *
 * Stops the chip, rebuilds the descriptor rings, programs the PHY,
 * interrupt, ring-base, IPG and MAC-address registers in order, then
 * sets RUN to start the chip.  Returns 0 on success, -ENOMEM if the
 * ring (re)initialization fails.
 */
static int amd8111e_restart(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	int i, reg_val;

	/* stop the chip */
	writel(RUN, mmio + CMD0);

	/* (Re)allocate descriptor rings and receive buffers. */
	if (amd8111e_init_ring(dev))
		return -ENOMEM;

	/* enable the port manager and set auto negotiation always */
	writel((u32)VAL1 | EN_PMGR, mmio + CMD3);
	writel((u32)XPHYANE | XPHYRST, mmio + CTRL2);

	/* Program the external PHY advertisement/autoneg. */
	amd8111e_set_ext_phy(dev);

	/* set control registers: read-modify-write the transmit
	 * start point field, then enable cache alignment.
	 */
	reg_val = readl(mmio + CTRL1);
	reg_val &= ~XMTSP_MASK;
	writel(reg_val | XMTSP_128 | CACHE_ALIGN, mmio + CTRL1);

	/* enable interrupt */
	writel(APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
		APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
		SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);

	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);

	/* initialize tx and rx ring base addresses */
	writel((u32)lp->tx_ring_dma_addr, mmio + XMT_RING_BASE_ADDR0);
	writel((u32)lp->rx_ring_dma_addr, mmio + RCV_RING_BASE_ADDR0);

	/* NOTE(review): (u32) cast on a writew() — value is truncated to
	 * 16 bits anyway; inconsistent with the (u16) cast on the next line.
	 */
	writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);

	/* set default IPG to 96 */
	writew((u32)DEFAULT_IPG, mmio + IPG);
	writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);

	if (lp->options & OPTION_JUMBO_ENABLE) {
		writel((u32)VAL2|JUMBO, mmio + CMD3);
		/* Reset REX_UFLO */
		writel(REX_UFLO, mmio + CMD2);
		/* Should not set REX_UFLO for jumbo frames */
		writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
	} else {
		writel(VAL0 | APAD_XMT | REX_RTRY | REX_UFLO, mmio + CMD2);
		writel((u32)JUMBO, mmio + CMD3);
	}

#if AMD8111E_VLAN_TAG_USED
	/* Enable hardware VLAN tag stripping. */
	writel((u32)VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);

	/* Setting the MAC address to the device */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(dev->dev_addr[i], mmio + PADR + i);

	/* Enable interrupt coalesce */
	if (lp->options & OPTION_INTR_COAL_ENABLE) {
		netdev_info(dev, "Interrupt Coalescing Enabled.\n");
		amd8111e_set_coalesce(dev, ENABLE_COAL);
	}

	/* set RUN bit to start the chip */
	writel(VAL2 | RDMD0, mmio + CMD0);
	writel(VAL0 | INTREN | RUN, mmio + CMD0);

	/* To avoid PCI posting bug */
	readl(mmio+CMD0);
	return 0;
}
49613a4fa43SVarka Bhadram 
/* This function clears necessary the device registers.
 *
 * Stops the chip and puts every ring/interrupt/delay/filter register
 * into a known default state.  Called to bring the hardware to a clean
 * baseline before (re)configuration.
 */
static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
{
	unsigned int reg_val;
	unsigned int logic_filter[2] = {0,};
	void __iomem *mmio = lp->mmio;


	/* stop the chip */
	writel(RUN, mmio + CMD0);

	/* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
	writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);

	/* Clear RCV_RING_BASE_ADDR */
	writel(0, mmio + RCV_RING_BASE_ADDR0);

	/* Clear XMT_RING_BASE_ADDR */
	writel(0, mmio + XMT_RING_BASE_ADDR0);
	writel(0, mmio + XMT_RING_BASE_ADDR1);
	writel(0, mmio + XMT_RING_BASE_ADDR2);
	writel(0, mmio + XMT_RING_BASE_ADDR3);

	/* Clear CMD0  */
	writel(CMD0_CLEAR, mmio + CMD0);

	/* Clear CMD2 */
	writel(CMD2_CLEAR, mmio + CMD2);

	/* Clear CMD7 */
	writel(CMD7_CLEAR, mmio + CMD7);

	/* Clear DLY_INT_A and DLY_INT_B */
	writel(0x0, mmio + DLY_INT_A);
	writel(0x0, mmio + DLY_INT_B);

	/* Clear FLOW_CONTROL */
	writel(0x0, mmio + FLOW_CONTROL);

	/* Clear INT0  write 1 to clear register */
	reg_val = readl(mmio + INT0);
	writel(reg_val, mmio + INT0);

	/* Clear STVAL */
	writel(0x0, mmio + STVAL);

	/* Clear INTEN0 */
	writel(INTEN0_CLEAR, mmio + INTEN0);

	/* Clear LADRF */
	writel(0x0, mmio + LADRF);

	/* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
	writel(0x80010, mmio + SRAM_SIZE);

	/* Clear RCV_RING0_LEN */
	writel(0x0, mmio + RCV_RING_LEN0);

	/* Clear XMT_RING0/1/2/3_LEN */
	writel(0x0, mmio +  XMT_RING_LEN0);
	writel(0x0, mmio +  XMT_RING_LEN1);
	writel(0x0, mmio +  XMT_RING_LEN2);
	writel(0x0, mmio +  XMT_RING_LEN3);

	/* Clear XMT_RING_LIMIT */
	writel(0x0, mmio + XMT_RING_LIMIT);

	/* Clear MIB */
	writew(MIB_CLEAR, mmio + MIB_ADDR);

	/* Clear LARF (64-bit logical address filter, zeroed above) */
	amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);

	/* SRAM_SIZE register */
	/* NOTE(review): the value read here is never used — presumably a
	 * read-back to flush the preceding posted writes; confirm against
	 * the chip errata before removing.
	 */
	reg_val = readl(mmio + SRAM_SIZE);

	if (lp->options & OPTION_JUMBO_ENABLE)
		writel(VAL2 | JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
	writel(VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif
	/* Set default value to CTRL1 Register */
	writel(CTRL1_DEFAULT, mmio + CTRL1);

	/* To avoid PCI posting bug */
	readl(mmio + CMD2);

}
585b955f6caSJeff Kirsher 
58613a4fa43SVarka Bhadram /* This function disables the interrupt and clears all the pending
58713a4fa43SVarka Bhadram  * interrupts in INT0
588b955f6caSJeff Kirsher  */
static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
{
	u32 intr0;

	/* Disable interrupt: writing INTREN without VAL0 clears the master
	 * interrupt-enable bit (enabling uses VAL0 | INTREN, see the
	 * interrupt handler).
	 */
	writel(INTREN, lp->mmio + CMD0);

	/* Clear INT0: read the pending set and write it back to
	 * acknowledge every outstanding event.
	 */
	intr0 = readl(lp->mmio + INT0);
	writel(intr0, lp->mmio + INT0);

	/* Dummy read to flush the posted writes (PCI posting bug) */
	readl(lp->mmio + INT0);

}
604b955f6caSJeff Kirsher 
60513a4fa43SVarka Bhadram /* This function stops the chip. */
/* Halt the controller: writing RUN without a VALx select bit clears the
 * RUN bit in CMD0 (same convention as the interrupt enable/disable
 * writes in this driver).
 */
static void amd8111e_stop_chip(struct amd8111e_priv *lp)
{
	writel(RUN, lp->mmio + CMD0);

	/* Dummy read to flush the posted write (PCI posting bug) */
	readl(lp->mmio + CMD0);
}
613b955f6caSJeff Kirsher 
/* This function frees the transmitter and receiver descriptor rings. */
amd8111e_free_ring(struct amd8111e_priv * lp)615b955f6caSJeff Kirsher static void amd8111e_free_ring(struct amd8111e_priv *lp)
616b955f6caSJeff Kirsher {
617b955f6caSJeff Kirsher 	/* Free transmit and receive descriptor rings */
618b955f6caSJeff Kirsher 	if (lp->rx_ring) {
619428f09c2SChristophe JAILLET 		dma_free_coherent(&lp->pci_dev->dev,
620b955f6caSJeff Kirsher 				  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
621b955f6caSJeff Kirsher 				  lp->rx_ring, lp->rx_ring_dma_addr);
622b955f6caSJeff Kirsher 		lp->rx_ring = NULL;
623b955f6caSJeff Kirsher 	}
624b955f6caSJeff Kirsher 
625b955f6caSJeff Kirsher 	if (lp->tx_ring) {
626428f09c2SChristophe JAILLET 		dma_free_coherent(&lp->pci_dev->dev,
627b955f6caSJeff Kirsher 				  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
628b955f6caSJeff Kirsher 				  lp->tx_ring, lp->tx_ring_dma_addr);
629b955f6caSJeff Kirsher 
630b955f6caSJeff Kirsher 		lp->tx_ring = NULL;
631b955f6caSJeff Kirsher 	}
632b955f6caSJeff Kirsher 
633b955f6caSJeff Kirsher }
634b955f6caSJeff Kirsher 
63513a4fa43SVarka Bhadram /* This function will free all the transmit skbs that are actually
63613a4fa43SVarka Bhadram  * transmitted by the device. It will check the ownership of the
63713a4fa43SVarka Bhadram  * skb before freeing the skb.
638b955f6caSJeff Kirsher  */
/* Reclaim transmit descriptors whose packets the hardware has sent:
 * walk the ring from tx_complete_idx towards tx_idx, stopping at the
 * first descriptor the chip still owns; unmap and free each completed
 * skb, update the tx coalescing counters, and wake the queue once
 * enough ring slots are free again.  Called from the interrupt
 * handler on TINT0 (hence dev_consume_skb_irq).
 */
static int amd8111e_tx(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	int status;
	/* Complete all the transmit packet */
	while (lp->tx_complete_idx != lp->tx_idx) {
		tx_index =  lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
		status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);

		if (status & OWN_BIT)
			break;	/* It still hasn't been Txed */

		lp->tx_ring[tx_index].buff_phy_addr = 0;

		/* We must free the original skb */
		if (lp->tx_skbuff[tx_index]) {
			dma_unmap_single(&lp->pci_dev->dev,
					 lp->tx_dma_addr[tx_index],
					 lp->tx_skbuff[tx_index]->len,
					 DMA_TO_DEVICE);
			dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
			lp->tx_skbuff[tx_index] = NULL;
			lp->tx_dma_addr[tx_index] = 0;
		}
		lp->tx_complete_idx++;
		/*COAL update tx coalescing parameters */
		lp->coal_conf.tx_packets++;
		lp->coal_conf.tx_bytes +=
			le16_to_cpu(lp->tx_ring[tx_index].buff_count);

		if (netif_queue_stopped(dev) &&
			lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS + 2) {
			/* The ring is no longer full, clear tbusy. */
			/* lp->tx_full = 0; */
			netif_wake_queue(dev);
		}
	}
	return 0;
}
679b955f6caSJeff Kirsher 
680b955f6caSJeff Kirsher /* This function handles the driver receive operation in polling mode */
/* NAPI poll callback: reap up to @budget completed receive
 * descriptors, hand each packet to the stack via napi_gro_receive(),
 * and give every processed descriptor (good or bad) back to the
 * hardware.  When the ring drains before the budget is exhausted,
 * complete NAPI and re-enable receive interrupts.
 */
static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
{
	struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
	struct net_device *dev = lp->amd8111e_net_dev;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb, *new_skb;
	int min_pkt_len, status;
	int num_rx_pkt = 0;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif

	while (num_rx_pkt < budget) {
		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
		/* Descriptor still owned by the hardware: ring is empty. */
		if (status & OWN_BIT)
			break;

		/* There is a tricky error noted by John Murphy,
		 * <murf@perftech.com> to Russ Nelson: Even with
		 * full-sized * buffers it's possible for a
		 * jabber packet to use two buffers, with only
		 * the last correctly noting the error.
		 */
		if (status & ERR_BIT) {
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		/* check for STP and ENP: only accept packets fully contained
		 * in a single descriptor (start and end bits both set).
		 */
		if (!((status & STP_BIT) && (status & ENP_BIT))) {
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		/* drop the trailing 4 bytes (presumably the FCS) */
		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
		vtag = status & TT_MASK;
		/* MAC will strip vlan tag */
		if (vtag != 0)
			min_pkt_len = MIN_PKT_LEN - 4;
			else
#endif
			min_pkt_len = MIN_PKT_LEN;

		if (pkt_len < min_pkt_len) {
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}
		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!new_skb) {
			/* if allocation fail,
			 * ignore that pkt and go to next one
			 */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}

		/* 2-byte reserve aligns the IP header (NET_IP_ALIGN style) */
		skb_reserve(new_skb, 2);
		skb = lp->rx_skbuff[rx_index];
		dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[rx_index],
				 lp->rx_buff_len - 2, DMA_FROM_DEVICE);
		skb_put(skb, pkt_len);
		/* swap the freshly allocated buffer into the ring slot */
		lp->rx_skbuff[rx_index] = new_skb;
		lp->rx_dma_addr[rx_index] = dma_map_single(&lp->pci_dev->dev,
							   new_skb->data,
							   lp->rx_buff_len - 2,
							   DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED
		if (vtag == TT_VLAN_TAGGED) {
			u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		}
#endif
		napi_gro_receive(napi, skb);
		/* COAL update rx coalescing parameters */
		lp->coal_conf.rx_packets++;
		lp->coal_conf.rx_bytes += pkt_len;
		num_rx_pkt++;

err_next_pkt:
		/* Re-arm this descriptor for the hardware; the write barrier
		 * orders buffer setup before the OWN bit hand-off.
		 */
		lp->rx_ring[rx_index].buff_phy_addr
			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
		lp->rx_ring[rx_index].buff_count =
			cpu_to_le16(lp->rx_buff_len-2);
		wmb();
		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
	}

	if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
		unsigned long flags;

		/* Receive descriptor is empty now: re-enable the receive
		 * interrupt and poke receive demand under the device lock.
		 */
		spin_lock_irqsave(&lp->lock, flags);
		writel(VAL0|RINTEN0, mmio + INTEN0);
		writel(VAL2 | RDMD0, mmio + CMD0);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

	return num_rx_pkt;
}
790b955f6caSJeff Kirsher 
79113a4fa43SVarka Bhadram /* This function will indicate the link status to the kernel. */
amd8111e_link_change(struct net_device * dev)792b955f6caSJeff Kirsher static int amd8111e_link_change(struct net_device *dev)
793b955f6caSJeff Kirsher {
794b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
795b955f6caSJeff Kirsher 	int status0, speed;
796b955f6caSJeff Kirsher 
797b955f6caSJeff Kirsher 	/* read the link change */
798b955f6caSJeff Kirsher 	status0 = readl(lp->mmio + STAT0);
799b955f6caSJeff Kirsher 
800b955f6caSJeff Kirsher 	if (status0 & LINK_STATS) {
801b955f6caSJeff Kirsher 		if (status0 & AUTONEG_COMPLETE)
802b955f6caSJeff Kirsher 			lp->link_config.autoneg = AUTONEG_ENABLE;
803b955f6caSJeff Kirsher 		else
804b955f6caSJeff Kirsher 			lp->link_config.autoneg = AUTONEG_DISABLE;
805b955f6caSJeff Kirsher 
806b955f6caSJeff Kirsher 		if (status0 & FULL_DPLX)
807b955f6caSJeff Kirsher 			lp->link_config.duplex = DUPLEX_FULL;
808b955f6caSJeff Kirsher 		else
809b955f6caSJeff Kirsher 			lp->link_config.duplex = DUPLEX_HALF;
810b955f6caSJeff Kirsher 		speed = (status0 & SPEED_MASK) >> 7;
811b955f6caSJeff Kirsher 		if (speed == PHY_SPEED_10)
812b955f6caSJeff Kirsher 			lp->link_config.speed = SPEED_10;
813b955f6caSJeff Kirsher 		else if (speed == PHY_SPEED_100)
814b955f6caSJeff Kirsher 			lp->link_config.speed = SPEED_100;
815b955f6caSJeff Kirsher 
816f7afbaa5SVarka Bhadram 		netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
817f7afbaa5SVarka Bhadram 			    (lp->link_config.speed == SPEED_100) ?
818f7afbaa5SVarka Bhadram 							"100" : "10",
819f7afbaa5SVarka Bhadram 			    (lp->link_config.duplex == DUPLEX_FULL) ?
820f7afbaa5SVarka Bhadram 							"Full" : "Half");
821f7afbaa5SVarka Bhadram 
822b955f6caSJeff Kirsher 		netif_carrier_on(dev);
823ca3fc0aaSYixing Liu 	} else {
824b955f6caSJeff Kirsher 		lp->link_config.speed = SPEED_INVALID;
825b955f6caSJeff Kirsher 		lp->link_config.duplex = DUPLEX_INVALID;
826b955f6caSJeff Kirsher 		lp->link_config.autoneg = AUTONEG_INVALID;
827f7afbaa5SVarka Bhadram 		netdev_info(dev, "Link is Down.\n");
828b955f6caSJeff Kirsher 		netif_carrier_off(dev);
829b955f6caSJeff Kirsher 	}
830b955f6caSJeff Kirsher 
831b955f6caSJeff Kirsher 	return 0;
832b955f6caSJeff Kirsher }
83313a4fa43SVarka Bhadram 
83413a4fa43SVarka Bhadram /* This function reads the mib counters. */
/* Read one hardware MIB counter: issue the read command, busy-wait
 * (bounded by REPEAT_CNT polls, ~2us apart) until the controller
 * deasserts MIB_CMD_ACTIVE, then fetch the value from MIB_DATA.
 */
static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
{
	unsigned int tries;

	/* Kick off the counter read. */
	writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);

	for (tries = REPEAT_CNT; tries; tries--) {
		unsigned int stat = readw(mmio + MIB_ADDR);

		udelay(2);	/* controller takes MAX 2 us to get mib data */
		if (!(stat & MIB_CMD_ACTIVE))
			break;
	}

	return readl(mmio + MIB_DATA);
}
851b955f6caSJeff Kirsher 
85213a4fa43SVarka Bhadram /* This function reads the mib registers and returns the hardware statistics.
853b955f6caSJeff Kirsher  * It updates previous internal driver statistics with new values.
854b955f6caSJeff Kirsher  */
/* Snapshot the hardware MIB counters into &dev->stats and return it.
 * Each amd8111e_read_mib() call is a separate hardware transaction, so
 * the whole snapshot is taken under lp->lock.  If the interface is not
 * open, the existing stats are returned untouched.
 */
static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned long flags;
	struct net_device_stats *new_stats = &dev->stats;

	if (!lp->opened)
		return new_stats;
	spin_lock_irqsave(&lp->lock, flags);

	/* stats.rx_packets */
	new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
				amd8111e_read_mib(mmio, rcv_multicast_pkts)+
				amd8111e_read_mib(mmio, rcv_unicast_pkts);

	/* stats.tx_packets */
	new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);

	/*stats.rx_bytes */
	new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);

	/* stats.tx_bytes */
	new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);

	/* stats.rx_errors */
	/* hw errors + errors driver reported */
	new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
				amd8111e_read_mib(mmio, rcv_fragments)+
				amd8111e_read_mib(mmio, rcv_jabbers)+
				amd8111e_read_mib(mmio, rcv_alignment_errors)+
				amd8111e_read_mib(mmio, rcv_fcs_errors)+
				amd8111e_read_mib(mmio, rcv_miss_pkts)+
				lp->drv_rx_errors;

	/* stats.tx_errors */
	new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.rx_dropped*/
	new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_dropped*/
	new_stats->tx_dropped = amd8111e_read_mib(mmio,  xmt_underrun_pkts);

	/* stats.multicast*/
	new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);

	/* stats.collisions*/
	new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);

	/* stats.rx_length_errors*/
	new_stats->rx_length_errors =
		amd8111e_read_mib(mmio, rcv_undersize_pkts)+
		amd8111e_read_mib(mmio, rcv_oversize_pkts);

	/* stats.rx_over_errors*/
	new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_crc_errors*/
	new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);

	/* stats.rx_frame_errors*/
	new_stats->rx_frame_errors =
		amd8111e_read_mib(mmio, rcv_alignment_errors);

	/* stats.rx_fifo_errors */
	new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.rx_missed_errors */
	new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);

	/* stats.tx_aborted_errors*/
	new_stats->tx_aborted_errors =
		amd8111e_read_mib(mmio, xmt_excessive_collision);

	/* stats.tx_carrier_errors*/
	new_stats->tx_carrier_errors =
		amd8111e_read_mib(mmio, xmt_loss_carrier);

	/* stats.tx_fifo_errors*/
	new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);

	/* stats.tx_window_errors*/
	new_stats->tx_window_errors =
		amd8111e_read_mib(mmio, xmt_late_collision);

	/* Reset the mibs for collecting new statistics */
	/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/

	spin_unlock_irqrestore(&lp->lock, flags);

	return new_stats;
}
94813a4fa43SVarka Bhadram 
/* This function recalculates the interrupt coalescing mode on every interrupt
 * according to the data rate and the packet rate.
 */
amd8111e_calc_coalesce(struct net_device * dev)952b955f6caSJeff Kirsher static int amd8111e_calc_coalesce(struct net_device *dev)
953b955f6caSJeff Kirsher {
954b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
955b955f6caSJeff Kirsher 	struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
956b955f6caSJeff Kirsher 	int tx_pkt_rate;
957b955f6caSJeff Kirsher 	int rx_pkt_rate;
958b955f6caSJeff Kirsher 	int tx_data_rate;
959b955f6caSJeff Kirsher 	int rx_data_rate;
960b955f6caSJeff Kirsher 	int rx_pkt_size;
961b955f6caSJeff Kirsher 	int tx_pkt_size;
962b955f6caSJeff Kirsher 
963b955f6caSJeff Kirsher 	tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
964b955f6caSJeff Kirsher 	coal_conf->tx_prev_packets =  coal_conf->tx_packets;
965b955f6caSJeff Kirsher 
966b955f6caSJeff Kirsher 	tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
967b955f6caSJeff Kirsher 	coal_conf->tx_prev_bytes =  coal_conf->tx_bytes;
968b955f6caSJeff Kirsher 
969b955f6caSJeff Kirsher 	rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
970b955f6caSJeff Kirsher 	coal_conf->rx_prev_packets =  coal_conf->rx_packets;
971b955f6caSJeff Kirsher 
972b955f6caSJeff Kirsher 	rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
973b955f6caSJeff Kirsher 	coal_conf->rx_prev_bytes =  coal_conf->rx_bytes;
974b955f6caSJeff Kirsher 
975b955f6caSJeff Kirsher 	if (rx_pkt_rate < 800) {
976b955f6caSJeff Kirsher 		if (coal_conf->rx_coal_type != NO_COALESCE) {
977b955f6caSJeff Kirsher 
978b955f6caSJeff Kirsher 			coal_conf->rx_timeout = 0x0;
979b955f6caSJeff Kirsher 			coal_conf->rx_event_count = 0;
980b955f6caSJeff Kirsher 			amd8111e_set_coalesce(dev, RX_INTR_COAL);
981b955f6caSJeff Kirsher 			coal_conf->rx_coal_type = NO_COALESCE;
982b955f6caSJeff Kirsher 		}
983ca3fc0aaSYixing Liu 	} else {
984b955f6caSJeff Kirsher 
985b955f6caSJeff Kirsher 		rx_pkt_size = rx_data_rate/rx_pkt_rate;
986b955f6caSJeff Kirsher 		if (rx_pkt_size < 128) {
987b955f6caSJeff Kirsher 			if (coal_conf->rx_coal_type != NO_COALESCE) {
988b955f6caSJeff Kirsher 
989b955f6caSJeff Kirsher 				coal_conf->rx_timeout = 0;
990b955f6caSJeff Kirsher 				coal_conf->rx_event_count = 0;
991b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, RX_INTR_COAL);
992b955f6caSJeff Kirsher 				coal_conf->rx_coal_type = NO_COALESCE;
993b955f6caSJeff Kirsher 			}
994b955f6caSJeff Kirsher 
995ca3fc0aaSYixing Liu 		} else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) {
996b955f6caSJeff Kirsher 
997b955f6caSJeff Kirsher 			if (coal_conf->rx_coal_type !=  LOW_COALESCE) {
998b955f6caSJeff Kirsher 				coal_conf->rx_timeout = 1;
999b955f6caSJeff Kirsher 				coal_conf->rx_event_count = 4;
1000b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, RX_INTR_COAL);
1001b955f6caSJeff Kirsher 				coal_conf->rx_coal_type = LOW_COALESCE;
1002b955f6caSJeff Kirsher 			}
1003ca3fc0aaSYixing Liu 		} else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) {
1004b955f6caSJeff Kirsher 
1005b955f6caSJeff Kirsher 			if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
1006b955f6caSJeff Kirsher 				coal_conf->rx_timeout = 1;
1007b955f6caSJeff Kirsher 				coal_conf->rx_event_count = 4;
1008b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, RX_INTR_COAL);
1009b955f6caSJeff Kirsher 				coal_conf->rx_coal_type = MEDIUM_COALESCE;
1010b955f6caSJeff Kirsher 			}
1011b955f6caSJeff Kirsher 
1012ca3fc0aaSYixing Liu 		} else if (rx_pkt_size >= 1024) {
1013ca3fc0aaSYixing Liu 
1014b955f6caSJeff Kirsher 			if (coal_conf->rx_coal_type !=  HIGH_COALESCE) {
1015b955f6caSJeff Kirsher 				coal_conf->rx_timeout = 2;
1016b955f6caSJeff Kirsher 				coal_conf->rx_event_count = 3;
1017b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, RX_INTR_COAL);
1018b955f6caSJeff Kirsher 				coal_conf->rx_coal_type = HIGH_COALESCE;
1019b955f6caSJeff Kirsher 			}
1020b955f6caSJeff Kirsher 		}
1021b955f6caSJeff Kirsher 	}
1022b955f6caSJeff Kirsher 	/* NOW FOR TX INTR COALESC */
1023b955f6caSJeff Kirsher 	if (tx_pkt_rate < 800) {
1024b955f6caSJeff Kirsher 		if (coal_conf->tx_coal_type != NO_COALESCE) {
1025b955f6caSJeff Kirsher 
1026b955f6caSJeff Kirsher 			coal_conf->tx_timeout = 0x0;
1027b955f6caSJeff Kirsher 			coal_conf->tx_event_count = 0;
1028b955f6caSJeff Kirsher 			amd8111e_set_coalesce(dev, TX_INTR_COAL);
1029b955f6caSJeff Kirsher 			coal_conf->tx_coal_type = NO_COALESCE;
1030b955f6caSJeff Kirsher 		}
1031ca3fc0aaSYixing Liu 	} else {
1032b955f6caSJeff Kirsher 
1033b955f6caSJeff Kirsher 		tx_pkt_size = tx_data_rate/tx_pkt_rate;
1034b955f6caSJeff Kirsher 		if (tx_pkt_size < 128) {
1035b955f6caSJeff Kirsher 
1036b955f6caSJeff Kirsher 			if (coal_conf->tx_coal_type != NO_COALESCE) {
1037b955f6caSJeff Kirsher 
1038b955f6caSJeff Kirsher 				coal_conf->tx_timeout = 0;
1039b955f6caSJeff Kirsher 				coal_conf->tx_event_count = 0;
1040b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, TX_INTR_COAL);
1041b955f6caSJeff Kirsher 				coal_conf->tx_coal_type = NO_COALESCE;
1042b955f6caSJeff Kirsher 			}
1043b955f6caSJeff Kirsher 
1044ca3fc0aaSYixing Liu 		} else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) {
1045b955f6caSJeff Kirsher 
1046b955f6caSJeff Kirsher 			if (coal_conf->tx_coal_type != LOW_COALESCE) {
1047b955f6caSJeff Kirsher 				coal_conf->tx_timeout = 1;
1048b955f6caSJeff Kirsher 				coal_conf->tx_event_count = 2;
1049b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, TX_INTR_COAL);
1050b955f6caSJeff Kirsher 				coal_conf->tx_coal_type = LOW_COALESCE;
1051b955f6caSJeff Kirsher 
1052b955f6caSJeff Kirsher 			}
1053ca3fc0aaSYixing Liu 		} else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) {
1054b955f6caSJeff Kirsher 
1055b955f6caSJeff Kirsher 			if (coal_conf->tx_coal_type != MEDIUM_COALESCE) {
1056b955f6caSJeff Kirsher 				coal_conf->tx_timeout = 2;
1057b955f6caSJeff Kirsher 				coal_conf->tx_event_count = 5;
1058b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, TX_INTR_COAL);
1059b955f6caSJeff Kirsher 				coal_conf->tx_coal_type = MEDIUM_COALESCE;
1060b955f6caSJeff Kirsher 			}
10614d7b4483SColin Ian King 		} else if (tx_pkt_size >= 1024) {
1062b955f6caSJeff Kirsher 			if (coal_conf->tx_coal_type != HIGH_COALESCE) {
1063b955f6caSJeff Kirsher 				coal_conf->tx_timeout = 4;
1064b955f6caSJeff Kirsher 				coal_conf->tx_event_count = 8;
1065b955f6caSJeff Kirsher 				amd8111e_set_coalesce(dev, TX_INTR_COAL);
1066b955f6caSJeff Kirsher 				coal_conf->tx_coal_type = HIGH_COALESCE;
1067b955f6caSJeff Kirsher 			}
1068b955f6caSJeff Kirsher 		}
1069b955f6caSJeff Kirsher 	}
1070b955f6caSJeff Kirsher 	return 0;
1071b955f6caSJeff Kirsher 
1072b955f6caSJeff Kirsher }
107313a4fa43SVarka Bhadram 
/* This is the device interrupt function. It handles transmit,
 * receive, link change and hardware timer interrupts.
 */
/* Device interrupt handler: masks interrupts, acknowledges INT0 and
 * dispatches receive (to NAPI), transmit completion, link change and
 * hardware timer (coalescing) events, then re-enables interrupts.
 * Returns IRQ_NONE when INT0 shows no event (shared interrupt line).
 */
static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
{

	struct net_device *dev = (struct net_device *)dev_id;
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned int intr0, intren0;
	unsigned int handled = 1;

	if (unlikely(!dev))
		return IRQ_NONE;

	spin_lock(&lp->lock);

	/* disabling interrupt */
	writel(INTREN, mmio + CMD0);

	/* Read interrupt status */
	intr0 = readl(mmio + INT0);
	intren0 = readl(mmio + INTEN0);

	/* Process all the INT event until INTR bit is clear. */

	if (!(intr0 & INTR)) {
		/* not our interrupt (shared line) */
		handled = 0;
		goto err_no_interrupt;
	}

	/* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
	/* acknowledge the events just read by writing them back */
	writel(intr0, mmio + INT0);

	/* Check if Receive Interrupt has occurred. */
	if (intr0 & RINT0) {
		if (napi_schedule_prep(&lp->napi)) {
			/* Disable receive interrupts */
			writel(RINTEN0, mmio + INTEN0);
			/* Schedule a polling routine */
			__napi_schedule(&lp->napi);
		} else if (intren0 & RINTEN0) {
			netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
			/* Fix by disable receive interrupts */
			writel(RINTEN0, mmio + INTEN0);
		}
	}

	/* Check if  Transmit Interrupt has occurred. */
	if (intr0 & TINT0)
		amd8111e_tx(dev);

	/* Check if  Link Change Interrupt has occurred. */
	if (intr0 & LCINT)
		amd8111e_link_change(dev);

	/* Check if Hardware Timer Interrupt has occurred. */
	if (intr0 & STINT)
		amd8111e_calc_coalesce(dev);

err_no_interrupt:
	/* re-enable interrupts (VAL0 selects the INTREN bit to set) */
	writel(VAL0 | INTREN, mmio + CMD0);

	spin_unlock(&lp->lock);

	return IRQ_RETVAL(handled);
}
1141b955f6caSJeff Kirsher 
1142b955f6caSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point (CONFIG_NET_POLL_CONTROLLER, e.g. netconsole):
 * run the interrupt handler with local interrupts disabled.
 */
static void amd8111e_poll(struct net_device *dev)
{
	unsigned long flags;
	local_irq_save(flags);
	amd8111e_interrupt(0, dev);
	local_irq_restore(flags);
}
1150b955f6caSJeff Kirsher #endif
1151b955f6caSJeff Kirsher 
1152b955f6caSJeff Kirsher 
115313a4fa43SVarka Bhadram /* This function closes the network interface and updates
115413a4fa43SVarka Bhadram  * the statistics so that most recent statistics will be
115513a4fa43SVarka Bhadram  * available after the interface is down.
1156b955f6caSJeff Kirsher  */
/* Close the interface: stop the queue and NAPI, quiesce the hardware
 * under the device lock, release buffers, irq and rings, and take a
 * final statistics snapshot so recent counters stay available while
 * the interface is down.
 */
static int amd8111e_close(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	netif_stop_queue(dev);

	/* must happen outside lp->lock: NAPI poll takes the lock too */
	napi_disable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_disable_interrupt(lp);
	amd8111e_stop_chip(lp);

	/* Free transmit and receive skbs */
	amd8111e_free_skbs(lp->amd8111e_net_dev);

	netif_carrier_off(lp->amd8111e_net_dev);

	/* Delete ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);

	spin_unlock_irq(&lp->lock);
	free_irq(dev->irq, dev);
	amd8111e_free_ring(lp);

	/* Update the statistics before closing */
	amd8111e_get_stats(dev);
	lp->opened = 0;
	return 0;
}
118713a4fa43SVarka Bhadram 
/* This function opens a new interface. It requests the irq for the device,
 * initializes the device, buffers and descriptors, and starts the device.
 */
/* Open the interface: request the (shared) irq, reset the hardware to
 * defaults, set up rings/buffers and start the chip via
 * amd8111e_restart(), optionally arm the dynamic IPG timer, then start
 * the transmit queue.  Returns -EAGAIN if the irq cannot be obtained,
 * -ENOMEM if the restart fails.
 */
static int amd8111e_open(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt,
					 IRQF_SHARED, dev->name, dev))
		return -EAGAIN;

	napi_enable(&lp->napi);

	spin_lock_irq(&lp->lock);

	amd8111e_init_hw_default(lp);

	if (amd8111e_restart(dev)) {
		/* roll back everything acquired above */
		spin_unlock_irq(&lp->lock);
		napi_disable(&lp->napi);
		if (dev->irq)
			free_irq(dev->irq, dev);
		return -ENOMEM;
	}
	/* Start ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE) {
		add_timer(&lp->ipg_data.ipg_timer);
		netdev_info(dev, "Dynamic IPG Enabled\n");
	}

	lp->opened = 1;

	spin_unlock_irq(&lp->lock);

	netif_start_queue(dev);

	return 0;
}
122613a4fa43SVarka Bhadram 
122713a4fa43SVarka Bhadram /* This function checks if there is any transmit  descriptors
122813a4fa43SVarka Bhadram  * available to queue more packet.
1229b955f6caSJeff Kirsher  */
amd8111e_tx_queue_avail(struct amd8111e_priv * lp)1230b955f6caSJeff Kirsher static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
1231b955f6caSJeff Kirsher {
1232b955f6caSJeff Kirsher 	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1233b955f6caSJeff Kirsher 	if (lp->tx_skbuff[tx_index])
1234b955f6caSJeff Kirsher 		return -1;
1235b955f6caSJeff Kirsher 	else
1236b955f6caSJeff Kirsher 		return 0;
1237b955f6caSJeff Kirsher 
1238b955f6caSJeff Kirsher }
1239b955f6caSJeff Kirsher 
124013a4fa43SVarka Bhadram /* This function will queue the transmit packets to the
124113a4fa43SVarka Bhadram  * descriptors and will trigger the send operation. It also
124213a4fa43SVarka Bhadram  * initializes the transmit descriptors with buffer physical address,
124313a4fa43SVarka Bhadram  * byte count, ownership to hardware etc.
124413a4fa43SVarka Bhadram  */
/* amd8111e_start_xmit - ndo_start_xmit handler.
 *
 * Queues @skb on the next TX descriptor: records length, optional VLAN
 * tag, maps the buffer for DMA, hands ownership to the hardware and
 * kicks a send poll.  Stops the queue when the ring becomes full.
 * Runs entirely under lp->lock with IRQs saved. Always returns
 * NETDEV_TX_OK.
 */
static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

	lp->tx_skbuff[tx_index] = skb;
	lp->tx_ring[tx_index].tx_flags = 0;

#if AMD8111E_VLAN_TAG_USED
	/* Ask the hardware to insert the VLAN tag carried in the skb. */
	if (skb_vlan_tag_present(skb)) {
		lp->tx_ring[tx_index].tag_ctrl_cmd |=
				cpu_to_le16(TCC_VLAN_INSERT);
		lp->tx_ring[tx_index].tag_ctrl_info =
				cpu_to_le16(skb_vlan_tag_get(skb));

	}
#endif
	/* NOTE(review): the dma_map_single() return value is not checked
	 * with dma_mapping_error(); a failed mapping would be handed to
	 * the hardware as-is — confirm whether this needs a drop path.
	 */
	lp->tx_dma_addr[tx_index] =
	    dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
			   DMA_TO_DEVICE);
	lp->tx_ring[tx_index].buff_phy_addr =
	    cpu_to_le32(lp->tx_dma_addr[tx_index]);

	/*  Set FCS and LTINT bits.
	 *  The barrier ensures all descriptor fields above are written
	 *  before ownership (OWN_BIT) is passed to the hardware.
	 */
	wmb();
	lp->tx_ring[tx_index].tx_flags |=
	    cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);

	lp->tx_idx++;

	/* Trigger an immediate send poll. */
	writel(VAL1 | TDMD0, lp->mmio + CMD0);
	writel(VAL2 | RDMD0, lp->mmio + CMD0);

	/* Ring full: stop the queue until TX completion frees a slot. */
	if (amd8111e_tx_queue_avail(lp) < 0) {
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return NETDEV_TX_OK;
}
129313a4fa43SVarka Bhadram /* This function returns all the memory mapped registers of the device. */
/* Dump the registers exported through ethtool -d into @buf,
 * in the same fixed order the tools expect.
 */
static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
{
	/* Only the registers of interest, in dump order. */
	static const int reg_offsets[] = {
		XMT_RING_BASE_ADDR0, XMT_RING_LEN0,
		RCV_RING_BASE_ADDR0, RCV_RING_LEN0,
		CMD0, CMD2, CMD3, CMD7,
		INT0, INTEN0,
		LADRF, LADRF + 4,
		STAT0,
	};
	void __iomem *mmio = lp->mmio;
	int i;

	for (i = 0; i < ARRAY_SIZE(reg_offsets); i++)
		buf[i] = readl(mmio + reg_offsets[i]);
}
1312b955f6caSJeff Kirsher 
1313b955f6caSJeff Kirsher 
131413a4fa43SVarka Bhadram /* This function sets promiscuos mode, all-multi mode or the multicast address
131513a4fa43SVarka Bhadram  * list to the device.
1316b955f6caSJeff Kirsher  */
/* amd8111e_set_multicast_list - ndo_set_rx_mode handler.
 *
 * Programs the receive filter: enables promiscuous mode, accepts all
 * multicast, accepts none, or builds the 64-bit logical address (CRC
 * hash) filter from the device's multicast list.
 */
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 mc_filter[2];
	int bit_num;

	if (dev->flags & IFF_PROMISC) {
		/* VAL2 qualifies the write so PROM is set, not cleared. */
		writel(VAL2 | PROM, lp->mmio + CMD2);
		return;
	}
	else
		/* Without VAL2 the same write clears the PROM bit. */
		writel(PROM, lp->mmio + CMD2);
	if (dev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
		/* get all multicast packet */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->options |= OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		return;
	}
	if (netdev_mc_empty(dev)) {
		/* get only own packets */
		mc_filter[1] = mc_filter[0] = 0;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
		/* disable promiscuous mode */
		writel(PROM, lp->mmio + CMD2);
		return;
	}
	/* load all the multicast addresses in the logic filter */
	lp->options |= OPTION_MULTICAST_ENABLE;
	mc_filter[1] = mc_filter[0] = 0;
	netdev_for_each_mc_addr(ha, dev) {
		/* Top 6 bits of the little-endian CRC select the filter bit. */
		bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD2);

}
1360b955f6caSJeff Kirsher 
amd8111e_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)136146c73eccSVarka Bhadram static void amd8111e_get_drvinfo(struct net_device *dev,
136246c73eccSVarka Bhadram 				 struct ethtool_drvinfo *info)
1363b955f6caSJeff Kirsher {
1364b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1365b955f6caSJeff Kirsher 	struct pci_dev *pci_dev = lp->pci_dev;
1366f029c781SWolfram Sang 	strscpy(info->driver, MODULE_NAME, sizeof(info->driver));
136723020ab3SRick Jones 	snprintf(info->fw_version, sizeof(info->fw_version),
136823020ab3SRick Jones 		"%u", chip_version);
1369f029c781SWolfram Sang 	strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
1370b955f6caSJeff Kirsher }
1371b955f6caSJeff Kirsher 
/* ethtool get_regs_len: size in bytes of the register dump produced
 * by amd8111e_get_regs().
 */
static int amd8111e_get_regs_len(struct net_device *dev)
{
	return AMD8111E_REG_DUMP_LEN;
}
1376b955f6caSJeff Kirsher 
amd8111e_get_regs(struct net_device * dev,struct ethtool_regs * regs,void * buf)1377b955f6caSJeff Kirsher static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
1378b955f6caSJeff Kirsher {
1379b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1380b955f6caSJeff Kirsher 	regs->version = 0;
1381b955f6caSJeff Kirsher 	amd8111e_read_regs(lp, buf);
1382b955f6caSJeff Kirsher }
1383b955f6caSJeff Kirsher 
amd8111e_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)13841435003cSPhilippe Reynes static int amd8111e_get_link_ksettings(struct net_device *dev,
13851435003cSPhilippe Reynes 				       struct ethtool_link_ksettings *cmd)
1386b955f6caSJeff Kirsher {
1387b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1388b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
13891435003cSPhilippe Reynes 	mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
1390b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1391b955f6caSJeff Kirsher 	return 0;
1392b955f6caSJeff Kirsher }
1393b955f6caSJeff Kirsher 
amd8111e_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)13941435003cSPhilippe Reynes static int amd8111e_set_link_ksettings(struct net_device *dev,
13951435003cSPhilippe Reynes 				       const struct ethtool_link_ksettings *cmd)
1396b955f6caSJeff Kirsher {
1397b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1398b955f6caSJeff Kirsher 	int res;
1399b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
14001435003cSPhilippe Reynes 	res = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
1401b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1402b955f6caSJeff Kirsher 	return res;
1403b955f6caSJeff Kirsher }
1404b955f6caSJeff Kirsher 
/* ethtool nway_reset: restart autonegotiation on the PHY via the
 * generic MII helper.
 */
static int amd8111e_nway_reset(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_nway_restart(&lp->mii_if);
}
1410b955f6caSJeff Kirsher 
/* ethtool get_link: report link-up status from the generic MII layer. */
static u32 amd8111e_get_link(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	return mii_link_ok(&lp->mii_if);
}
1416b955f6caSJeff Kirsher 
/* ethtool get_wol: report Wake-on-LAN capabilities and current state.
 * The hardware supports magic-packet and PHY link-change wakeup;
 * wolopts is only written when WOL is enabled (the ethtool core is
 * expected to have zeroed the structure beforehand).
 */
static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	wol_info->supported = WAKE_MAGIC|WAKE_PHY;
	if (lp->options & OPTION_WOL_ENABLE)
		wol_info->wolopts = WAKE_MAGIC;
}
1424b955f6caSJeff Kirsher 
amd8111e_set_wol(struct net_device * dev,struct ethtool_wolinfo * wol_info)1425b955f6caSJeff Kirsher static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1426b955f6caSJeff Kirsher {
1427b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1428b955f6caSJeff Kirsher 	if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
1429b955f6caSJeff Kirsher 		return -EINVAL;
1430b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
1431b955f6caSJeff Kirsher 	if (wol_info->wolopts & WAKE_MAGIC)
1432b955f6caSJeff Kirsher 		lp->options |=
1433b955f6caSJeff Kirsher 			(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1434b955f6caSJeff Kirsher 	else if (wol_info->wolopts & WAKE_PHY)
1435b955f6caSJeff Kirsher 		lp->options |=
1436b955f6caSJeff Kirsher 			(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1437b955f6caSJeff Kirsher 	else
1438b955f6caSJeff Kirsher 		lp->options &= ~OPTION_WOL_ENABLE;
1439b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1440b955f6caSJeff Kirsher 	return 0;
1441b955f6caSJeff Kirsher }
1442b955f6caSJeff Kirsher 
/* ethtool operations supported by this driver. */
static const struct ethtool_ops ops = {
	.get_drvinfo = amd8111e_get_drvinfo,
	.get_regs_len = amd8111e_get_regs_len,
	.get_regs = amd8111e_get_regs,
	.nway_reset = amd8111e_nway_reset,
	.get_link = amd8111e_get_link,
	.get_wol = amd8111e_get_wol,
	.set_wol = amd8111e_set_wol,
	.get_link_ksettings = amd8111e_get_link_ksettings,
	.set_link_ksettings = amd8111e_set_link_ksettings,
};
1454b955f6caSJeff Kirsher 
145513a4fa43SVarka Bhadram /* This function handles all the  ethtool ioctls. It gives driver info,
145613a4fa43SVarka Bhadram  * gets/sets driver speed, gets memory mapped register values, forces
145713a4fa43SVarka Bhadram  * auto negotiation, sets/gets WOL options for ethtool application.
1458b955f6caSJeff Kirsher  */
/* amd8111e_ioctl - ndo_eth_ioctl handler for MII register access.
 *
 * Supports SIOCGMIIPHY (report the external PHY address), SIOCGMIIREG
 * (read a PHY register) and SIOCSMIIREG (write a PHY register).  PHY
 * accesses are serialized by lp->lock.  Returns the PHY access result,
 * or -EOPNOTSUPP for any other command.
 */
static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;
	u32 mii_regval;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = lp->ext_phy_addr;

		/* SIOCGMIIPHY also reads the register, hence the fallthrough. */
		fallthrough;
	case SIOCGMIIREG:

		spin_lock_irq(&lp->lock);
		err = amd8111e_read_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
		spin_unlock_irq(&lp->lock);

		data->val_out = mii_regval;
		return err;

	case SIOCSMIIREG:

		spin_lock_irq(&lp->lock);
		err = amd8111e_write_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
		spin_unlock_irq(&lp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
amd8111e_set_mac_address(struct net_device * dev,void * p)1496b955f6caSJeff Kirsher static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1497b955f6caSJeff Kirsher {
1498b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1499b955f6caSJeff Kirsher 	int i;
1500b955f6caSJeff Kirsher 	struct sockaddr *addr = p;
1501b955f6caSJeff Kirsher 
1502a05e4c0aSJakub Kicinski 	eth_hw_addr_set(dev, addr->sa_data);
1503b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
1504b955f6caSJeff Kirsher 	/* Setting the MAC address to the device */
1505c857ff6eSJoe Perches 	for (i = 0; i < ETH_ALEN; i++)
1506b955f6caSJeff Kirsher 		writeb(dev->dev_addr[i], lp->mmio + PADR + i);
1507b955f6caSJeff Kirsher 
1508b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1509b955f6caSJeff Kirsher 
1510b955f6caSJeff Kirsher 	return 0;
1511b955f6caSJeff Kirsher }
1512b955f6caSJeff Kirsher 
151313a4fa43SVarka Bhadram /* This function changes the mtu of the device. It restarts the device  to
151413a4fa43SVarka Bhadram  * initialize the descriptor with new receive buffers.
1515b955f6caSJeff Kirsher  */
/* amd8111e_change_mtu - ndo_change_mtu handler.
 *
 * If the interface is down, just stores the new MTU.  Otherwise stops
 * the chip, applies the MTU and restarts the device so the receive
 * descriptors are re-initialized with buffers of the new size.
 * Returns 0 on success or the error from amd8111e_restart().
 */
static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int err;

	if (!netif_running(dev)) {
		/* new_mtu will be used
		 * when device starts next time
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&lp->lock);

	/* stop the chip */
	writel(RUN, lp->mmio + CMD0);

	dev->mtu = new_mtu;

	err = amd8111e_restart(dev);
	spin_unlock_irq(&lp->lock);
	if (!err)
		netif_start_queue(dev);
	return err;
}
1542b955f6caSJeff Kirsher 
/* Arm magic-packet Wake-on-LAN in the chip (used on suspend).
 * Always returns 0.
 */
static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
{
	writel(VAL1 | MPPLBA, lp->mmio + CMD3);
	writel(VAL0 | MPEN_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}
1552b955f6caSJeff Kirsher 
/* Arm wake-on-link-change in the chip (used on suspend).
 * Always returns 0.
 */
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{

	/* Adapter is already stopped/suspended/interrupt-disabled */
	writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}
1563b955f6caSJeff Kirsher 
156413a4fa43SVarka Bhadram /* This function is called when a packet transmission fails to complete
1565b955f6caSJeff Kirsher  * within a reasonable period, on the assumption that an interrupt have
1566b955f6caSJeff Kirsher  * failed or the interface is locked up. This function will reinitialize
1567b955f6caSJeff Kirsher  * the hardware.
1568b955f6caSJeff Kirsher  */
amd8111e_tx_timeout(struct net_device * dev,unsigned int txqueue)15690290bd29SMichael S. Tsirkin static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
1570b955f6caSJeff Kirsher {
1571b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1572b955f6caSJeff Kirsher 	int err;
1573b955f6caSJeff Kirsher 
1574f7afbaa5SVarka Bhadram 	netdev_err(dev, "transmit timed out, resetting\n");
1575f7afbaa5SVarka Bhadram 
1576b955f6caSJeff Kirsher 	spin_lock_irq(&lp->lock);
1577b955f6caSJeff Kirsher 	err = amd8111e_restart(dev);
1578b955f6caSJeff Kirsher 	spin_unlock_irq(&lp->lock);
1579b955f6caSJeff Kirsher 	if (!err)
1580b955f6caSJeff Kirsher 		netif_wake_queue(dev);
1581b955f6caSJeff Kirsher }
15822caf751fSVaibhav Gupta 
/* amd8111e_suspend - PM suspend callback.
 *
 * If the interface is running: disables interrupts, detaches the
 * net_device, cancels the dynamic-IPG timer and stops the chip, then
 * arms the configured Wake-on-LAN sources (magic packet and/or PHY
 * link change) and sets the device wakeup state accordingly.
 * Always returns 0.
 */
static int __maybe_unused amd8111e_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	/* stop chip */
	spin_lock_irq(&lp->lock);
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if (lp->options & OPTION_WOL_ENABLE) {
		 /* enable wol */
		if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);
		if (lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);

		device_set_wakeup_enable(dev_d, 1);

	} else {
		device_set_wakeup_enable(dev_d, 0);
	}

	return 0;
}
16202caf751fSVaibhav Gupta 
/* amd8111e_resume - PM resume callback.
 *
 * If the interface was running before suspend: re-attaches the
 * net_device, restarts the hardware under the driver lock and re-arms
 * the dynamic-IPG timer when that option is enabled.
 * Always returns 0.
 */
static int __maybe_unused amd8111e_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);
	/* Restart ipg timer */
	if (lp->options & OPTION_DYN_IPG_ENABLE)
		mod_timer(&lp->ipg_data.ipg_timer,
				jiffies + IPG_CONVERGE_JIFFIES);
	spin_unlock_irq(&lp->lock);

	return 0;
}
1641b955f6caSJeff Kirsher 
/* amd8111e_config_ipg - dynamic inter-packet-gap (IPG) tuning timer.
 *
 * Runs every IPG_CONVERGE_JIFFIES.  On full duplex the IPG is pinned
 * to DEFAULT_IPG and tuning stops.  On half duplex a two-state machine
 * runs:
 *   SSTATE: hold the current IPG for IPG_STABLE_TIME ticks, then reset
 *           the sweep and switch to CSTATE.
 *   CSTATE: sweep current_ipg from MIN_IPG to MAX_IPG in IPG_STEP
 *           increments, remembering the IPG that produced the fewest
 *           new transmit collisions since the previous tick; when the
 *           sweep passes MAX_IPG, adopt the best IPG and return to
 *           SSTATE.  Each tick writes the chosen IPG and the derived
 *           IFS1 value to the hardware.
 */
static void amd8111e_config_ipg(struct timer_list *t)
{
	struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
	struct ipg_info *ipg_data = &lp->ipg_data;
	void __iomem *mmio = lp->mmio;
	unsigned int prev_col_cnt = ipg_data->col_cnt;
	unsigned int total_col_cnt;
	unsigned int tmp_ipg;

	if (lp->link_config.duplex == DUPLEX_FULL) {
		/* No collisions on full duplex; nothing to tune. */
		ipg_data->ipg = DEFAULT_IPG;
		return;
	}

	if (ipg_data->ipg_state == SSTATE) {

		if (ipg_data->timer_tick == IPG_STABLE_TIME) {

			/* Stable period over: restart the sweep from MIN_IPG. */
			ipg_data->timer_tick = 0;
			ipg_data->ipg = MIN_IPG - IPG_STEP;
			ipg_data->current_ipg = MIN_IPG;
			ipg_data->diff_col_cnt = 0xFFFFFFFF;
			ipg_data->ipg_state = CSTATE;
		}
		else
			ipg_data->timer_tick++;
	}

	if (ipg_data->ipg_state == CSTATE) {

		/* Get the current collision count */

		total_col_cnt = ipg_data->col_cnt =
				amd8111e_read_mib(mmio, xmt_collisions);

		/* Fewer collisions than the best so far: remember this IPG. */
		if ((total_col_cnt - prev_col_cnt) <
				(ipg_data->diff_col_cnt)) {

			ipg_data->diff_col_cnt =
				total_col_cnt - prev_col_cnt;

			ipg_data->ipg = ipg_data->current_ipg;
		}

		ipg_data->current_ipg += IPG_STEP;

		if (ipg_data->current_ipg <= MAX_IPG)
			tmp_ipg = ipg_data->current_ipg;
		else {
			/* Sweep finished: adopt the best IPG and stabilize. */
			tmp_ipg = ipg_data->ipg;
			ipg_data->ipg_state = SSTATE;
		}
		writew((u32)tmp_ipg, mmio + IPG);
		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
	}
	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
	return;

}
1701b955f6caSJeff Kirsher 
amd8111e_probe_ext_phy(struct net_device * dev)17020cb0568dSBill Pemberton static void amd8111e_probe_ext_phy(struct net_device *dev)
1703b955f6caSJeff Kirsher {
1704b955f6caSJeff Kirsher 	struct amd8111e_priv *lp = netdev_priv(dev);
1705b955f6caSJeff Kirsher 	int i;
1706b955f6caSJeff Kirsher 
1707b955f6caSJeff Kirsher 	for (i = 0x1e; i >= 0; i--) {
1708b955f6caSJeff Kirsher 		u32 id1, id2;
1709b955f6caSJeff Kirsher 
1710b955f6caSJeff Kirsher 		if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1711b955f6caSJeff Kirsher 			continue;
1712b955f6caSJeff Kirsher 		if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1713b955f6caSJeff Kirsher 			continue;
1714b955f6caSJeff Kirsher 		lp->ext_phy_id = (id1 << 16) | id2;
1715b955f6caSJeff Kirsher 		lp->ext_phy_addr = i;
1716b955f6caSJeff Kirsher 		return;
1717b955f6caSJeff Kirsher 	}
1718b955f6caSJeff Kirsher 	lp->ext_phy_id = 0;
1719b955f6caSJeff Kirsher 	lp->ext_phy_addr = 1;
1720b955f6caSJeff Kirsher }
1721b955f6caSJeff Kirsher 
/* net_device operations implemented by this driver. */
static const struct net_device_ops amd8111e_netdev_ops = {
	.ndo_open		= amd8111e_open,
	.ndo_stop		= amd8111e_close,
	.ndo_start_xmit		= amd8111e_start_xmit,
	.ndo_tx_timeout		= amd8111e_tx_timeout,
	.ndo_get_stats		= amd8111e_get_stats,
	.ndo_set_rx_mode	= amd8111e_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= amd8111e_set_mac_address,
	.ndo_eth_ioctl		= amd8111e_ioctl,
	.ndo_change_mtu		= amd8111e_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = amd8111e_poll,
#endif
};
1737b955f6caSJeff Kirsher 
amd8111e_probe_one(struct pci_dev * pdev,const struct pci_device_id * ent)17380cb0568dSBill Pemberton static int amd8111e_probe_one(struct pci_dev *pdev,
1739b955f6caSJeff Kirsher 				  const struct pci_device_id *ent)
1740b955f6caSJeff Kirsher {
1741f9c7da5eSYijing Wang 	int err, i;
1742b955f6caSJeff Kirsher 	unsigned long reg_addr, reg_len;
1743b955f6caSJeff Kirsher 	struct amd8111e_priv *lp;
1744b955f6caSJeff Kirsher 	struct net_device *dev;
1745f98c5050SJakub Kicinski 	u8 addr[ETH_ALEN];
1746b955f6caSJeff Kirsher 
1747b955f6caSJeff Kirsher 	err = pci_enable_device(pdev);
1748b955f6caSJeff Kirsher 	if (err) {
1749f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
1750b955f6caSJeff Kirsher 		return err;
1751b955f6caSJeff Kirsher 	}
1752b955f6caSJeff Kirsher 
1753b955f6caSJeff Kirsher 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1754f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot find PCI base address\n");
1755b955f6caSJeff Kirsher 		err = -ENODEV;
1756b955f6caSJeff Kirsher 		goto err_disable_pdev;
1757b955f6caSJeff Kirsher 	}
1758b955f6caSJeff Kirsher 
1759b955f6caSJeff Kirsher 	err = pci_request_regions(pdev, MODULE_NAME);
1760b955f6caSJeff Kirsher 	if (err) {
1761f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
1762b955f6caSJeff Kirsher 		goto err_disable_pdev;
1763b955f6caSJeff Kirsher 	}
1764b955f6caSJeff Kirsher 
1765b955f6caSJeff Kirsher 	pci_set_master(pdev);
1766b955f6caSJeff Kirsher 
1767b955f6caSJeff Kirsher 	/* Find power-management capability. */
1768f9c7da5eSYijing Wang 	if (!pdev->pm_cap) {
1769f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "No Power Management capability\n");
177086e506e3SPeter Senna Tschudin 		err = -ENODEV;
1771b955f6caSJeff Kirsher 		goto err_free_reg;
1772b955f6caSJeff Kirsher 	}
1773b955f6caSJeff Kirsher 
1774b955f6caSJeff Kirsher 	/* Initialize DMA */
1775428f09c2SChristophe JAILLET 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) < 0) {
1776f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "DMA not supported\n");
177786e506e3SPeter Senna Tschudin 		err = -ENODEV;
1778b955f6caSJeff Kirsher 		goto err_free_reg;
1779b955f6caSJeff Kirsher 	}
1780b955f6caSJeff Kirsher 
1781b955f6caSJeff Kirsher 	reg_addr = pci_resource_start(pdev, 0);
1782b955f6caSJeff Kirsher 	reg_len = pci_resource_len(pdev, 0);
1783b955f6caSJeff Kirsher 
1784b955f6caSJeff Kirsher 	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1785b955f6caSJeff Kirsher 	if (!dev) {
1786b955f6caSJeff Kirsher 		err = -ENOMEM;
1787b955f6caSJeff Kirsher 		goto err_free_reg;
1788b955f6caSJeff Kirsher 	}
1789b955f6caSJeff Kirsher 
1790b955f6caSJeff Kirsher 	SET_NETDEV_DEV(dev, &pdev->dev);
1791b955f6caSJeff Kirsher 
1792b955f6caSJeff Kirsher #if AMD8111E_VLAN_TAG_USED
1793f646968fSPatrick McHardy 	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1794b955f6caSJeff Kirsher #endif
1795b955f6caSJeff Kirsher 
1796b955f6caSJeff Kirsher 	lp = netdev_priv(dev);
1797b955f6caSJeff Kirsher 	lp->pci_dev = pdev;
1798b955f6caSJeff Kirsher 	lp->amd8111e_net_dev = dev;
1799f9c7da5eSYijing Wang 	lp->pm_cap = pdev->pm_cap;
1800b955f6caSJeff Kirsher 
1801b955f6caSJeff Kirsher 	spin_lock_init(&lp->lock);
1802b955f6caSJeff Kirsher 
1803711fec5dSVarka Bhadram 	lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
1804b955f6caSJeff Kirsher 	if (!lp->mmio) {
1805f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot map device registers\n");
1806b955f6caSJeff Kirsher 		err = -ENOMEM;
1807b955f6caSJeff Kirsher 		goto err_free_dev;
1808b955f6caSJeff Kirsher 	}
1809b955f6caSJeff Kirsher 
1810b955f6caSJeff Kirsher 	/* Initializing MAC address */
1811c857ff6eSJoe Perches 	for (i = 0; i < ETH_ALEN; i++)
1812f98c5050SJakub Kicinski 		addr[i] = readb(lp->mmio + PADR + i);
1813f98c5050SJakub Kicinski 	eth_hw_addr_set(dev, addr);
1814b955f6caSJeff Kirsher 
1815b955f6caSJeff Kirsher 	/* Setting user defined parametrs */
1816b955f6caSJeff Kirsher 	lp->ext_phy_option = speed_duplex[card_idx];
1817b955f6caSJeff Kirsher 	if (coalesce[card_idx])
1818b955f6caSJeff Kirsher 		lp->options |= OPTION_INTR_COAL_ENABLE;
1819b955f6caSJeff Kirsher 	if (dynamic_ipg[card_idx++])
1820b955f6caSJeff Kirsher 		lp->options |= OPTION_DYN_IPG_ENABLE;
1821b955f6caSJeff Kirsher 
1822b955f6caSJeff Kirsher 
1823b955f6caSJeff Kirsher 	/* Initialize driver entry points */
1824b955f6caSJeff Kirsher 	dev->netdev_ops = &amd8111e_netdev_ops;
18257ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &ops;
1826b955f6caSJeff Kirsher 	dev->irq = pdev->irq;
1827b955f6caSJeff Kirsher 	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
182844770e11SJarod Wilson 	dev->min_mtu = AMD8111E_MIN_MTU;
182944770e11SJarod Wilson 	dev->max_mtu = AMD8111E_MAX_MTU;
1830b707b89fSJakub Kicinski 	netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32);
1831b955f6caSJeff Kirsher 
1832b955f6caSJeff Kirsher 	/* Probe the external PHY */
1833b955f6caSJeff Kirsher 	amd8111e_probe_ext_phy(dev);
1834b955f6caSJeff Kirsher 
1835b955f6caSJeff Kirsher 	/* setting mii default values */
1836b955f6caSJeff Kirsher 	lp->mii_if.dev = dev;
1837b955f6caSJeff Kirsher 	lp->mii_if.mdio_read = amd8111e_mdio_read;
1838b955f6caSJeff Kirsher 	lp->mii_if.mdio_write = amd8111e_mdio_write;
1839b955f6caSJeff Kirsher 	lp->mii_if.phy_id = lp->ext_phy_addr;
1840b955f6caSJeff Kirsher 
1841b955f6caSJeff Kirsher 	/* Set receive buffer length and set jumbo option*/
1842b955f6caSJeff Kirsher 	amd8111e_set_rx_buff_len(dev);
1843b955f6caSJeff Kirsher 
1844b955f6caSJeff Kirsher 
1845b955f6caSJeff Kirsher 	err = register_netdev(dev);
1846b955f6caSJeff Kirsher 	if (err) {
1847f7afbaa5SVarka Bhadram 		dev_err(&pdev->dev, "Cannot register net device\n");
1848711fec5dSVarka Bhadram 		goto err_free_dev;
1849b955f6caSJeff Kirsher 	}
1850b955f6caSJeff Kirsher 
1851b955f6caSJeff Kirsher 	pci_set_drvdata(pdev, dev);
1852b955f6caSJeff Kirsher 
1853b955f6caSJeff Kirsher 	/* Initialize software ipg timer */
1854b955f6caSJeff Kirsher 	if (lp->options & OPTION_DYN_IPG_ENABLE) {
1855495ad986SKees Cook 		timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
1856b955f6caSJeff Kirsher 		lp->ipg_data.ipg_timer.expires = jiffies +
1857b955f6caSJeff Kirsher 						 IPG_CONVERGE_JIFFIES;
1858b955f6caSJeff Kirsher 		lp->ipg_data.ipg = DEFAULT_IPG;
1859b955f6caSJeff Kirsher 		lp->ipg_data.ipg_state = CSTATE;
1860b955f6caSJeff Kirsher 	}
1861b955f6caSJeff Kirsher 
1862b955f6caSJeff Kirsher 	/*  display driver and device information */
1863b955f6caSJeff Kirsher 	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
1864f7afbaa5SVarka Bhadram 	dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1865f7afbaa5SVarka Bhadram 		 chip_version, dev->dev_addr);
1866b955f6caSJeff Kirsher 	if (lp->ext_phy_id)
1867f7afbaa5SVarka Bhadram 		dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
1868f7afbaa5SVarka Bhadram 			 lp->ext_phy_id, lp->ext_phy_addr);
1869b955f6caSJeff Kirsher 	else
1870f7afbaa5SVarka Bhadram 		dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
1871f7afbaa5SVarka Bhadram 
1872b955f6caSJeff Kirsher 	return 0;
1873b955f6caSJeff Kirsher 
1874b955f6caSJeff Kirsher err_free_dev:
1875b955f6caSJeff Kirsher 	free_netdev(dev);
1876b955f6caSJeff Kirsher 
1877b955f6caSJeff Kirsher err_free_reg:
1878b955f6caSJeff Kirsher 	pci_release_regions(pdev);
1879b955f6caSJeff Kirsher 
1880b955f6caSJeff Kirsher err_disable_pdev:
1881b955f6caSJeff Kirsher 	pci_disable_device(pdev);
1882b955f6caSJeff Kirsher 	return err;
1883b955f6caSJeff Kirsher 
1884b955f6caSJeff Kirsher }
1885b955f6caSJeff Kirsher 
/* Tear down a device set up by amd8111e_probe_one(): unregister the
 * netdev, free it, then give back the PCI BAR regions and disable the
 * device.  The MMIO mapping was created with devm_ioremap() and is
 * released automatically by the driver core, so it is not undone here.
 */
static void amd8111e_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	unregister_netdev(dev);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
189743519e60SVarka Bhadram 
/* PCI IDs this driver binds to: the AMD 8111 south bridge's integrated
 * 10/100 LAN controller (vendor 0x1022, device 0x7462).
 *
 * NOTE(review): subvendor/subdevice are implicitly zero here rather
 * than PCI_ANY_ID; binding therefore appears to rely on the subsystem
 * IDs also being zero on real hardware — confirm against
 * pci_match_one_device() before changing this table.
 */
static const struct pci_device_id amd8111e_pci_tbl[] = {
	{
	 .vendor = PCI_VENDOR_ID_AMD,
	 .device = PCI_DEVICE_ID_AMD8111E_7462,
	},
	{
	 .vendor = 0,	/* all-zero entry terminates the table */
	}
};
/* Export the table so userspace tooling can autoload this module. */
MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
1908ba69a3d7SVarka Bhadram 
/* Legacy-style PM callbacks; amd8111e_suspend()/amd8111e_resume() are
 * defined earlier in this file.
 */
static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume);

/* PCI driver glue: ties the ID table, probe/remove entry points and
 * power-management ops together for registration with the PCI core.
 */
static struct pci_driver amd8111e_driver = {
	.name		= MODULE_NAME,
	.id_table	= amd8111e_pci_tbl,
	.probe		= amd8111e_probe_one,
	.remove		= amd8111e_remove_one,
	.driver.pm	= &amd8111e_pm_ops
};

/* Generates module init/exit that register/unregister the driver. */
module_pci_driver(amd8111e_driver);
1920