xref: /openbmc/linux/drivers/net/ethernet/intel/e1000/e1000_main.c (revision 7ae9fb1b7ecbb5d85d07857943f677fd1a559b18)
1ae06c70bSJeff Kirsher // SPDX-License-Identifier: GPL-2.0
251dce24bSJeff Kirsher /* Copyright(c) 1999 - 2006 Intel Corporation. */
3dee1ad47SJeff Kirsher 
4dee1ad47SJeff Kirsher #include "e1000.h"
5dee1ad47SJeff Kirsher #include <net/ip6_checksum.h>
6dee1ad47SJeff Kirsher #include <linux/io.h>
7dee1ad47SJeff Kirsher #include <linux/prefetch.h>
8dee1ad47SJeff Kirsher #include <linux/bitops.h>
9dee1ad47SJeff Kirsher #include <linux/if_vlan.h>
10dee1ad47SJeff Kirsher 
11dee1ad47SJeff Kirsher char e1000_driver_name[] = "e1000";
12dee1ad47SJeff Kirsher static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
13dee1ad47SJeff Kirsher static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
14dee1ad47SJeff Kirsher 
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

/* Export the table so udev/modprobe can autoload this module by modalias */
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
65dee1ad47SJeff Kirsher 
66dee1ad47SJeff Kirsher int e1000_up(struct e1000_adapter *adapter);
67dee1ad47SJeff Kirsher void e1000_down(struct e1000_adapter *adapter);
68dee1ad47SJeff Kirsher void e1000_reinit_locked(struct e1000_adapter *adapter);
69dee1ad47SJeff Kirsher void e1000_reset(struct e1000_adapter *adapter);
70dee1ad47SJeff Kirsher int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
71dee1ad47SJeff Kirsher int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
72dee1ad47SJeff Kirsher void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
73dee1ad47SJeff Kirsher void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
74dee1ad47SJeff Kirsher static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
75dee1ad47SJeff Kirsher 				    struct e1000_tx_ring *txdr);
76dee1ad47SJeff Kirsher static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
77dee1ad47SJeff Kirsher 				    struct e1000_rx_ring *rxdr);
78dee1ad47SJeff Kirsher static void e1000_free_tx_resources(struct e1000_adapter *adapter,
79dee1ad47SJeff Kirsher 				    struct e1000_tx_ring *tx_ring);
80dee1ad47SJeff Kirsher static void e1000_free_rx_resources(struct e1000_adapter *adapter,
81dee1ad47SJeff Kirsher 				    struct e1000_rx_ring *rx_ring);
82dee1ad47SJeff Kirsher void e1000_update_stats(struct e1000_adapter *adapter);
83dee1ad47SJeff Kirsher 
84dee1ad47SJeff Kirsher static int e1000_init_module(void);
85dee1ad47SJeff Kirsher static void e1000_exit_module(void);
86dee1ad47SJeff Kirsher static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
879f9a12f8SBill Pemberton static void e1000_remove(struct pci_dev *pdev);
88dee1ad47SJeff Kirsher static int e1000_alloc_queues(struct e1000_adapter *adapter);
89dee1ad47SJeff Kirsher static int e1000_sw_init(struct e1000_adapter *adapter);
901f2f83f8SStefan Assmann int e1000_open(struct net_device *netdev);
911f2f83f8SStefan Assmann int e1000_close(struct net_device *netdev);
92dee1ad47SJeff Kirsher static void e1000_configure_tx(struct e1000_adapter *adapter);
93dee1ad47SJeff Kirsher static void e1000_configure_rx(struct e1000_adapter *adapter);
94dee1ad47SJeff Kirsher static void e1000_setup_rctl(struct e1000_adapter *adapter);
95dee1ad47SJeff Kirsher static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
96dee1ad47SJeff Kirsher static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
97dee1ad47SJeff Kirsher static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
98dee1ad47SJeff Kirsher 				struct e1000_tx_ring *tx_ring);
99dee1ad47SJeff Kirsher static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
100dee1ad47SJeff Kirsher 				struct e1000_rx_ring *rx_ring);
101dee1ad47SJeff Kirsher static void e1000_set_rx_mode(struct net_device *netdev);
102dee1ad47SJeff Kirsher static void e1000_update_phy_info_task(struct work_struct *work);
103a4010afeSJesse Brandeburg static void e1000_watchdog(struct work_struct *work);
104dee1ad47SJeff Kirsher static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
105dee1ad47SJeff Kirsher static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
106dee1ad47SJeff Kirsher 				    struct net_device *netdev);
107dee1ad47SJeff Kirsher static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
108dee1ad47SJeff Kirsher static int e1000_set_mac(struct net_device *netdev, void *p);
109dee1ad47SJeff Kirsher static irqreturn_t e1000_intr(int irq, void *data);
110dee1ad47SJeff Kirsher static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
111dee1ad47SJeff Kirsher 			       struct e1000_tx_ring *tx_ring);
112dee1ad47SJeff Kirsher static int e1000_clean(struct napi_struct *napi, int budget);
113dee1ad47SJeff Kirsher static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
114dee1ad47SJeff Kirsher 			       struct e1000_rx_ring *rx_ring,
115dee1ad47SJeff Kirsher 			       int *work_done, int work_to_do);
116dee1ad47SJeff Kirsher static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
117dee1ad47SJeff Kirsher 				     struct e1000_rx_ring *rx_ring,
118dee1ad47SJeff Kirsher 				     int *work_done, int work_to_do);
/* Intentionally-empty Rx buffer allocator.  NOTE(review): presumably
 * installed as adapter->alloc_rx_buf so the refill path becomes a no-op
 * (e.g. while buffers cannot or must not be allocated) — confirm against
 * the code that assigns it.  All parameters are deliberately unused.
 */
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
124dee1ad47SJeff Kirsher static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
125dee1ad47SJeff Kirsher 				   struct e1000_rx_ring *rx_ring,
126dee1ad47SJeff Kirsher 				   int cleaned_count);
127dee1ad47SJeff Kirsher static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
128dee1ad47SJeff Kirsher 					 struct e1000_rx_ring *rx_ring,
129dee1ad47SJeff Kirsher 					 int cleaned_count);
130dee1ad47SJeff Kirsher static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
131dee1ad47SJeff Kirsher static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
132dee1ad47SJeff Kirsher 			   int cmd);
133dee1ad47SJeff Kirsher static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
134dee1ad47SJeff Kirsher static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
1350290bd29SMichael S. Tsirkin static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
136dee1ad47SJeff Kirsher static void e1000_reset_task(struct work_struct *work);
137dee1ad47SJeff Kirsher static void e1000_smartspeed(struct e1000_adapter *adapter);
138dee1ad47SJeff Kirsher static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
139dee1ad47SJeff Kirsher 				       struct sk_buff *skb);
140dee1ad47SJeff Kirsher 
141dee1ad47SJeff Kirsher static bool e1000_vlan_used(struct e1000_adapter *adapter);
142c8f44affSMichał Mirosław static void e1000_vlan_mode(struct net_device *netdev,
143c8f44affSMichał Mirosław 			    netdev_features_t features);
14452f5509fSJiri Pirko static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
14552f5509fSJiri Pirko 				     bool filter_on);
14680d5c368SPatrick McHardy static int e1000_vlan_rx_add_vid(struct net_device *netdev,
14780d5c368SPatrick McHardy 				 __be16 proto, u16 vid);
14880d5c368SPatrick McHardy static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
14980d5c368SPatrick McHardy 				  __be16 proto, u16 vid);
150dee1ad47SJeff Kirsher static void e1000_restore_vlan(struct e1000_adapter *adapter);
151dee1ad47SJeff Kirsher 
152eb6779d4SVaibhav Gupta static int __maybe_unused e1000_suspend(struct device *dev);
153eb6779d4SVaibhav Gupta static int __maybe_unused e1000_resume(struct device *dev);
154dee1ad47SJeff Kirsher static void e1000_shutdown(struct pci_dev *pdev);
155dee1ad47SJeff Kirsher 
156dee1ad47SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
157dee1ad47SJeff Kirsher /* for netdump / net console */
158dee1ad47SJeff Kirsher static void e1000_netpoll (struct net_device *netdev);
159dee1ad47SJeff Kirsher #endif
160dee1ad47SJeff Kirsher 
161dee1ad47SJeff Kirsher #define COPYBREAK_DEFAULT 256
162dee1ad47SJeff Kirsher static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
163dee1ad47SJeff Kirsher module_param(copybreak, uint, 0644);
164dee1ad47SJeff Kirsher MODULE_PARM_DESC(copybreak,
165dee1ad47SJeff Kirsher 	"Maximum size of packet that is copied to a new buffer on receive");
166dee1ad47SJeff Kirsher 
167dee1ad47SJeff Kirsher static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
168dee1ad47SJeff Kirsher 						pci_channel_state_t state);
169dee1ad47SJeff Kirsher static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
170dee1ad47SJeff Kirsher static void e1000_io_resume(struct pci_dev *pdev);
171dee1ad47SJeff Kirsher 
1723646f0e5SStephen Hemminger static const struct pci_error_handlers e1000_err_handler = {
173dee1ad47SJeff Kirsher 	.error_detected = e1000_io_error_detected,
174dee1ad47SJeff Kirsher 	.slot_reset = e1000_io_slot_reset,
175dee1ad47SJeff Kirsher 	.resume = e1000_io_resume,
176dee1ad47SJeff Kirsher };
177dee1ad47SJeff Kirsher 
178eb6779d4SVaibhav Gupta static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
179eb6779d4SVaibhav Gupta 
/* PCI driver glue: probe/remove entry points, the device ID table above,
 * PM callbacks (suspend/resume via e1000_pm_ops), shutdown, and the AER
 * error handlers.
 */
static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
	.driver = {
		.pm = &e1000_pm_ops,
	},
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
191dee1ad47SJeff Kirsher 
192dee1ad47SJeff Kirsher MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
193dee1ad47SJeff Kirsher MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
19498674ebeSJesse Brandeburg MODULE_LICENSE("GPL v2");
195dee1ad47SJeff Kirsher 
196b3f4d599Sstephen hemminger #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
197b3f4d599Sstephen hemminger static int debug = -1;
198dee1ad47SJeff Kirsher module_param(debug, int, 0);
199dee1ad47SJeff Kirsher MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
200dee1ad47SJeff Kirsher 
201dee1ad47SJeff Kirsher /**
202b50f7bcaSJesse Brandeburg  * e1000_get_hw_dev - helper function for getting netdev
203b50f7bcaSJesse Brandeburg  * @hw: pointer to HW struct
204b50f7bcaSJesse Brandeburg  *
205b50f7bcaSJesse Brandeburg  * return device used by hardware layer to print debugging information
206dee1ad47SJeff Kirsher  *
207dee1ad47SJeff Kirsher  **/
e1000_get_hw_dev(struct e1000_hw * hw)208dee1ad47SJeff Kirsher struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
209dee1ad47SJeff Kirsher {
210dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = hw->back;
211dee1ad47SJeff Kirsher 	return adapter->netdev;
212dee1ad47SJeff Kirsher }
213dee1ad47SJeff Kirsher 
214dee1ad47SJeff Kirsher /**
215dee1ad47SJeff Kirsher  * e1000_init_module - Driver Registration Routine
216dee1ad47SJeff Kirsher  *
217dee1ad47SJeff Kirsher  * e1000_init_module is the first routine called when the driver is
218dee1ad47SJeff Kirsher  * loaded. All it does is register with the PCI subsystem.
219dee1ad47SJeff Kirsher  **/
e1000_init_module(void)220dee1ad47SJeff Kirsher static int __init e1000_init_module(void)
221dee1ad47SJeff Kirsher {
222dee1ad47SJeff Kirsher 	int ret;
22334a2a3b8SJeff Kirsher 	pr_info("%s\n", e1000_driver_string);
224dee1ad47SJeff Kirsher 
225dee1ad47SJeff Kirsher 	pr_info("%s\n", e1000_copyright);
226dee1ad47SJeff Kirsher 
227dee1ad47SJeff Kirsher 	ret = pci_register_driver(&e1000_driver);
228dee1ad47SJeff Kirsher 	if (copybreak != COPYBREAK_DEFAULT) {
229dee1ad47SJeff Kirsher 		if (copybreak == 0)
230dee1ad47SJeff Kirsher 			pr_info("copybreak disabled\n");
231dee1ad47SJeff Kirsher 		else
232dee1ad47SJeff Kirsher 			pr_info("copybreak enabled for "
233dee1ad47SJeff Kirsher 				   "packets <= %u bytes\n", copybreak);
234dee1ad47SJeff Kirsher 	}
235dee1ad47SJeff Kirsher 	return ret;
236dee1ad47SJeff Kirsher }
237dee1ad47SJeff Kirsher 
238dee1ad47SJeff Kirsher module_init(e1000_init_module);
239dee1ad47SJeff Kirsher 
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.  Unregistering the PCI driver triggers ->remove()
 * for every bound device.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
250dee1ad47SJeff Kirsher 
251dee1ad47SJeff Kirsher module_exit(e1000_exit_module);
252dee1ad47SJeff Kirsher 
e1000_request_irq(struct e1000_adapter * adapter)253dee1ad47SJeff Kirsher static int e1000_request_irq(struct e1000_adapter *adapter)
254dee1ad47SJeff Kirsher {
255dee1ad47SJeff Kirsher 	struct net_device *netdev = adapter->netdev;
256dee1ad47SJeff Kirsher 	irq_handler_t handler = e1000_intr;
257dee1ad47SJeff Kirsher 	int irq_flags = IRQF_SHARED;
258dee1ad47SJeff Kirsher 	int err;
259dee1ad47SJeff Kirsher 
260dee1ad47SJeff Kirsher 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
261dee1ad47SJeff Kirsher 			  netdev);
262dee1ad47SJeff Kirsher 	if (err) {
263dee1ad47SJeff Kirsher 		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
264dee1ad47SJeff Kirsher 	}
265dee1ad47SJeff Kirsher 
266dee1ad47SJeff Kirsher 	return err;
267dee1ad47SJeff Kirsher }
268dee1ad47SJeff Kirsher 
e1000_free_irq(struct e1000_adapter * adapter)269dee1ad47SJeff Kirsher static void e1000_free_irq(struct e1000_adapter *adapter)
270dee1ad47SJeff Kirsher {
271dee1ad47SJeff Kirsher 	struct net_device *netdev = adapter->netdev;
272dee1ad47SJeff Kirsher 
273dee1ad47SJeff Kirsher 	free_irq(adapter->pdev->irq, netdev);
274dee1ad47SJeff Kirsher }
275dee1ad47SJeff Kirsher 
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks every interrupt cause, flushes the posted write, then waits for
 * a handler already running on another CPU to finish.
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);		/* set every bit in the Interrupt Mask Clear reg */
	E1000_WRITE_FLUSH();	/* make sure the write reaches the device */
	synchronize_irq(adapter->pdev->irq);
}
288dee1ad47SJeff Kirsher 
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 *
 * Unmasks the driver's standard interrupt set and flushes the write.
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
300dee1ad47SJeff Kirsher 
/* Keep adapter->mng_vlan_id in sync with the VLAN id advertised in the
 * manageability cookie (hw->mng_cookie.vlan_id).  If the cookie's VLAN
 * is not already active and the cookie reports VLAN support, register
 * it with the stack; the previous management VLAN is dropped once it is
 * no longer needed for anything else.
 */
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	/* nothing to track unless VLANs are in use on this adapter */
	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		/* remove the old management VLAN only if it was valid,
		 * actually changed, and is not otherwise active
		 */
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		/* cookie VLAN already active: just record it */
		adapter->mng_vlan_id = vid;
	}
}
328dee1ad47SJeff Kirsher 
e1000_init_manageability(struct e1000_adapter * adapter)329dee1ad47SJeff Kirsher static void e1000_init_manageability(struct e1000_adapter *adapter)
330dee1ad47SJeff Kirsher {
331dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
332dee1ad47SJeff Kirsher 
333dee1ad47SJeff Kirsher 	if (adapter->en_mng_pt) {
334dee1ad47SJeff Kirsher 		u32 manc = er32(MANC);
335dee1ad47SJeff Kirsher 
336dee1ad47SJeff Kirsher 		/* disable hardware interception of ARP */
337dee1ad47SJeff Kirsher 		manc &= ~(E1000_MANC_ARP_EN);
338dee1ad47SJeff Kirsher 
339dee1ad47SJeff Kirsher 		ew32(MANC, manc);
340dee1ad47SJeff Kirsher 	}
341dee1ad47SJeff Kirsher }
342dee1ad47SJeff Kirsher 
e1000_release_manageability(struct e1000_adapter * adapter)343dee1ad47SJeff Kirsher static void e1000_release_manageability(struct e1000_adapter *adapter)
344dee1ad47SJeff Kirsher {
345dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
346dee1ad47SJeff Kirsher 
347dee1ad47SJeff Kirsher 	if (adapter->en_mng_pt) {
348dee1ad47SJeff Kirsher 		u32 manc = er32(MANC);
349dee1ad47SJeff Kirsher 
350dee1ad47SJeff Kirsher 		/* re-enable hardware interception of ARP */
351dee1ad47SJeff Kirsher 		manc |= E1000_MANC_ARP_EN;
352dee1ad47SJeff Kirsher 
353dee1ad47SJeff Kirsher 		ew32(MANC, manc);
354dee1ad47SJeff Kirsher 	}
355dee1ad47SJeff Kirsher }
356dee1ad47SJeff Kirsher 
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 *
 * Reprograms receive filtering, VLANs and manageability, then the Tx
 * and Rx units, and finally refills every Rx ring with buffers.
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		/* alloc_rx_buf is the per-adapter refill callback */
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
384dee1ad47SJeff Kirsher 
/**
 * e1000_up - bring the interface up after a reset
 * @adapter: board private structure
 *
 * Re-programs the freshly reset hardware, clears the DOWN flag, enables
 * NAPI and interrupts, wakes the transmit queue, and then forces a link
 * change interrupt so the watchdog runs.  Always returns 0.
 **/
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
404dee1ad47SJeff Kirsher 
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 * Non-copper media (fiber/serdes) is left untouched.
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}
428dee1ad47SJeff Kirsher 
e1000_power_down_phy(struct e1000_adapter * adapter)429dee1ad47SJeff Kirsher static void e1000_power_down_phy(struct e1000_adapter *adapter)
430dee1ad47SJeff Kirsher {
431dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
432dee1ad47SJeff Kirsher 
433dee1ad47SJeff Kirsher 	/* Power down the PHY so no link is implied when interface is down *
434dee1ad47SJeff Kirsher 	 * The PHY cannot be powered down if any of the following is true *
435dee1ad47SJeff Kirsher 	 * (a) WoL is enabled
436dee1ad47SJeff Kirsher 	 * (b) AMT is active
4376cfbd97bSJeff Kirsher 	 * (c) SoL/IDER session is active
4386cfbd97bSJeff Kirsher 	 */
439dee1ad47SJeff Kirsher 	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
440dee1ad47SJeff Kirsher 	   hw->media_type == e1000_media_type_copper) {
441dee1ad47SJeff Kirsher 		u16 mii_reg = 0;
442dee1ad47SJeff Kirsher 
443dee1ad47SJeff Kirsher 		switch (hw->mac_type) {
444dee1ad47SJeff Kirsher 		case e1000_82540:
445dee1ad47SJeff Kirsher 		case e1000_82545:
446dee1ad47SJeff Kirsher 		case e1000_82545_rev_3:
447dee1ad47SJeff Kirsher 		case e1000_82546:
448dee1ad47SJeff Kirsher 		case e1000_ce4100:
449dee1ad47SJeff Kirsher 		case e1000_82546_rev_3:
450dee1ad47SJeff Kirsher 		case e1000_82541:
451dee1ad47SJeff Kirsher 		case e1000_82541_rev_2:
452dee1ad47SJeff Kirsher 		case e1000_82547:
453dee1ad47SJeff Kirsher 		case e1000_82547_rev_2:
454dee1ad47SJeff Kirsher 			if (er32(MANC) & E1000_MANC_SMBUS_EN)
455dee1ad47SJeff Kirsher 				goto out;
456dee1ad47SJeff Kirsher 			break;
457dee1ad47SJeff Kirsher 		default:
458dee1ad47SJeff Kirsher 			goto out;
459dee1ad47SJeff Kirsher 		}
460dee1ad47SJeff Kirsher 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
461dee1ad47SJeff Kirsher 		mii_reg |= MII_CR_POWER_DOWN;
462dee1ad47SJeff Kirsher 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
4634e0d8f7dSJesse Brandeburg 		msleep(1);
464dee1ad47SJeff Kirsher 	}
465dee1ad47SJeff Kirsher out:
466dee1ad47SJeff Kirsher 	return;
467dee1ad47SJeff Kirsher }
468dee1ad47SJeff Kirsher 
/* Mark the adapter DOWN and cancel all deferred work so nothing
 * re-arms itself after teardown.
 */
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/*
	 * Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */
	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}
488a4010afeSJesse Brandeburg 
/**
 * e1000_down - quiesce the device
 * @adapter: board private structure
 *
 * Disables receive and then transmit in hardware, stops NAPI and
 * interrupts, cancels the driver's deferred work, then resets the chip
 * and frees all buffers still sitting on the rings.  Ordering here is
 * deliberate — see the inline comments.
 **/
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently to us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result into transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
536dee1ad47SJeff Kirsher 
/* Restart the interface (down + up) while holding the RESETTING bit so
 * concurrent resets serialize against each other.
 */
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* spin until we own the RESETTING bit */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	/* only run the task if not already down */
	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
}
550dee1ad47SJeff Kirsher 
/**
 * e1000_reset - bring the hardware to a known good state
 * @adapter: board private structure
 *
 * Repartitions the on-chip packet buffer (PBA) between the Rx and Tx
 * FIFOs based on MAC type and max frame size, programs flow-control
 * watermarks from the resulting Rx FIFO size, then resets and
 * re-initializes the MAC.  Statement order matters here (PBA writes
 * must precede the watermark math, reset must precede init).
 */
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	/* pick the default PBA and whether this MAC uses the legacy
	 * fixed-offset adjustment scheme rather than the computed one
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		/* 82547 works around its Tx FIFO hang bug by tracking a
		 * software-managed Tx FIFO carved out above the PBA
		 */
		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
705dee1ad47SJeff Kirsher 
7061aa8b471SBen Hutchings /* Dump the eeprom for users having checksum issues */
e1000_dump_eeprom(struct e1000_adapter * adapter)707dee1ad47SJeff Kirsher static void e1000_dump_eeprom(struct e1000_adapter *adapter)
708dee1ad47SJeff Kirsher {
709dee1ad47SJeff Kirsher 	struct net_device *netdev = adapter->netdev;
710dee1ad47SJeff Kirsher 	struct ethtool_eeprom eeprom;
711dee1ad47SJeff Kirsher 	const struct ethtool_ops *ops = netdev->ethtool_ops;
712dee1ad47SJeff Kirsher 	u8 *data;
713dee1ad47SJeff Kirsher 	int i;
714dee1ad47SJeff Kirsher 	u16 csum_old, csum_new = 0;
715dee1ad47SJeff Kirsher 
716dee1ad47SJeff Kirsher 	eeprom.len = ops->get_eeprom_len(netdev);
717dee1ad47SJeff Kirsher 	eeprom.offset = 0;
718dee1ad47SJeff Kirsher 
719dee1ad47SJeff Kirsher 	data = kmalloc(eeprom.len, GFP_KERNEL);
720e404decbSJoe Perches 	if (!data)
721dee1ad47SJeff Kirsher 		return;
722dee1ad47SJeff Kirsher 
723dee1ad47SJeff Kirsher 	ops->get_eeprom(netdev, &eeprom, data);
724dee1ad47SJeff Kirsher 
725dee1ad47SJeff Kirsher 	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
726dee1ad47SJeff Kirsher 		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
727dee1ad47SJeff Kirsher 	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
728dee1ad47SJeff Kirsher 		csum_new += data[i] + (data[i + 1] << 8);
729dee1ad47SJeff Kirsher 	csum_new = EEPROM_SUM - csum_new;
730dee1ad47SJeff Kirsher 
731dee1ad47SJeff Kirsher 	pr_err("/*********************/\n");
732dee1ad47SJeff Kirsher 	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
733dee1ad47SJeff Kirsher 	pr_err("Calculated              : 0x%04x\n", csum_new);
734dee1ad47SJeff Kirsher 
735dee1ad47SJeff Kirsher 	pr_err("Offset    Values\n");
736dee1ad47SJeff Kirsher 	pr_err("========  ======\n");
737dee1ad47SJeff Kirsher 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
738dee1ad47SJeff Kirsher 
739dee1ad47SJeff Kirsher 	pr_err("Include this output when contacting your support provider.\n");
740dee1ad47SJeff Kirsher 	pr_err("This is not a software error! Something bad happened to\n");
741dee1ad47SJeff Kirsher 	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
742dee1ad47SJeff Kirsher 	pr_err("result in further problems, possibly loss of data,\n");
743dee1ad47SJeff Kirsher 	pr_err("corruption or system hangs!\n");
744dee1ad47SJeff Kirsher 	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
745dee1ad47SJeff Kirsher 	pr_err("which is invalid and requires you to set the proper MAC\n");
746dee1ad47SJeff Kirsher 	pr_err("address manually before continuing to enable this network\n");
747dee1ad47SJeff Kirsher 	pr_err("device. Please inspect the EEPROM dump and report the\n");
748dee1ad47SJeff Kirsher 	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
749dee1ad47SJeff Kirsher 	pr_err("/*********************/\n");
750dee1ad47SJeff Kirsher 
751dee1ad47SJeff Kirsher 	kfree(data);
752dee1ad47SJeff Kirsher }
753dee1ad47SJeff Kirsher 
754dee1ad47SJeff Kirsher /**
755dee1ad47SJeff Kirsher  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
756dee1ad47SJeff Kirsher  * @pdev: PCI device information struct
757dee1ad47SJeff Kirsher  *
758dee1ad47SJeff Kirsher  * Return true if an adapter needs ioport resources
759dee1ad47SJeff Kirsher  **/
e1000_is_need_ioport(struct pci_dev * pdev)760dee1ad47SJeff Kirsher static int e1000_is_need_ioport(struct pci_dev *pdev)
761dee1ad47SJeff Kirsher {
762dee1ad47SJeff Kirsher 	switch (pdev->device) {
763dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82540EM:
764dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82540EM_LOM:
765dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82540EP:
766dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82540EP_LOM:
767dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82540EP_LP:
768dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82541EI:
769dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82541EI_MOBILE:
770dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82541ER:
771dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82541ER_LOM:
772dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82541GI:
773dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82541GI_LF:
774dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82541GI_MOBILE:
775dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82544EI_COPPER:
776dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82544EI_FIBER:
777dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82544GC_COPPER:
778dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82544GC_LOM:
779dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82545EM_COPPER:
780dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82545EM_FIBER:
781dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82546EB_COPPER:
782dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82546EB_FIBER:
783dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
784dee1ad47SJeff Kirsher 		return true;
785dee1ad47SJeff Kirsher 	default:
786dee1ad47SJeff Kirsher 		return false;
787dee1ad47SJeff Kirsher 	}
788dee1ad47SJeff Kirsher }
789dee1ad47SJeff Kirsher 
static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable, force the Tx flag to mirror the Rx flag:
	 * drop it first, then set it back only when Rx accel is on.
	 */
	features &= ~NETIF_F_HW_VLAN_CTAG_TX;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
803dee1ad47SJeff Kirsher 
static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	/* feature bits that differ from the currently active set */
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	/* only Rx checksum / RXALL changes require the reinit below */
	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	/* NOTE(review): positive return appears intentional — it signals
	 * the core that netdev->features was already updated above;
	 * confirm against __netdev_update_features() before changing
	 */
	return 1;
}
826dee1ad47SJeff Kirsher 
/* net_device_ops callbacks wired up for this driver; any op not listed
 * here falls back to the networking core's default behavior
 */
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_eth_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
845dee1ad47SJeff Kirsher 
846dee1ad47SJeff Kirsher /**
847dee1ad47SJeff Kirsher  * e1000_init_hw_struct - initialize members of hw struct
848dee1ad47SJeff Kirsher  * @adapter: board private struct
849dee1ad47SJeff Kirsher  * @hw: structure used by e1000_hw.c
850dee1ad47SJeff Kirsher  *
851dee1ad47SJeff Kirsher  * Factors out initialization of the e1000_hw struct to its own function
852dee1ad47SJeff Kirsher  * that can be called very early at init (just after struct allocation).
853dee1ad47SJeff Kirsher  * Fields are initialized based on PCI device information and
854dee1ad47SJeff Kirsher  * OS network device settings (MTU size).
855dee1ad47SJeff Kirsher  * Returns negative error codes if MAC type setup fails.
856dee1ad47SJeff Kirsher  */
e1000_init_hw_struct(struct e1000_adapter * adapter,struct e1000_hw * hw)857dee1ad47SJeff Kirsher static int e1000_init_hw_struct(struct e1000_adapter *adapter,
858dee1ad47SJeff Kirsher 				struct e1000_hw *hw)
859dee1ad47SJeff Kirsher {
860dee1ad47SJeff Kirsher 	struct pci_dev *pdev = adapter->pdev;
861dee1ad47SJeff Kirsher 
862dee1ad47SJeff Kirsher 	/* PCI config space info */
863dee1ad47SJeff Kirsher 	hw->vendor_id = pdev->vendor;
864dee1ad47SJeff Kirsher 	hw->device_id = pdev->device;
865dee1ad47SJeff Kirsher 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
866dee1ad47SJeff Kirsher 	hw->subsystem_id = pdev->subsystem_device;
867dee1ad47SJeff Kirsher 	hw->revision_id = pdev->revision;
868dee1ad47SJeff Kirsher 
869dee1ad47SJeff Kirsher 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
870dee1ad47SJeff Kirsher 
871dee1ad47SJeff Kirsher 	hw->max_frame_size = adapter->netdev->mtu +
872dee1ad47SJeff Kirsher 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
873dee1ad47SJeff Kirsher 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
874dee1ad47SJeff Kirsher 
875dee1ad47SJeff Kirsher 	/* identify the MAC */
876dee1ad47SJeff Kirsher 	if (e1000_set_mac_type(hw)) {
877dee1ad47SJeff Kirsher 		e_err(probe, "Unknown MAC Type\n");
878dee1ad47SJeff Kirsher 		return -EIO;
879dee1ad47SJeff Kirsher 	}
880dee1ad47SJeff Kirsher 
881dee1ad47SJeff Kirsher 	switch (hw->mac_type) {
882dee1ad47SJeff Kirsher 	default:
883dee1ad47SJeff Kirsher 		break;
884dee1ad47SJeff Kirsher 	case e1000_82541:
885dee1ad47SJeff Kirsher 	case e1000_82547:
886dee1ad47SJeff Kirsher 	case e1000_82541_rev_2:
887dee1ad47SJeff Kirsher 	case e1000_82547_rev_2:
888dee1ad47SJeff Kirsher 		hw->phy_init_script = 1;
889dee1ad47SJeff Kirsher 		break;
890dee1ad47SJeff Kirsher 	}
891dee1ad47SJeff Kirsher 
892dee1ad47SJeff Kirsher 	e1000_set_media_type(hw);
893dee1ad47SJeff Kirsher 	e1000_get_bus_info(hw);
894dee1ad47SJeff Kirsher 
895dee1ad47SJeff Kirsher 	hw->wait_autoneg_complete = false;
896dee1ad47SJeff Kirsher 	hw->tbi_compatibility_en = true;
897dee1ad47SJeff Kirsher 	hw->adaptive_ifs = true;
898dee1ad47SJeff Kirsher 
899dee1ad47SJeff Kirsher 	/* Copper options */
900dee1ad47SJeff Kirsher 
901dee1ad47SJeff Kirsher 	if (hw->media_type == e1000_media_type_copper) {
902dee1ad47SJeff Kirsher 		hw->mdix = AUTO_ALL_MODES;
903dee1ad47SJeff Kirsher 		hw->disable_polarity_correction = false;
904dee1ad47SJeff Kirsher 		hw->master_slave = E1000_MASTER_SLAVE;
905dee1ad47SJeff Kirsher 	}
906dee1ad47SJeff Kirsher 
907dee1ad47SJeff Kirsher 	return 0;
908dee1ad47SJeff Kirsher }
909dee1ad47SJeff Kirsher 
910dee1ad47SJeff Kirsher /**
911dee1ad47SJeff Kirsher  * e1000_probe - Device Initialization Routine
912dee1ad47SJeff Kirsher  * @pdev: PCI device information struct
913dee1ad47SJeff Kirsher  * @ent: entry in e1000_pci_tbl
914dee1ad47SJeff Kirsher  *
915dee1ad47SJeff Kirsher  * Returns 0 on success, negative on failure
916dee1ad47SJeff Kirsher  *
917dee1ad47SJeff Kirsher  * e1000_probe initializes an adapter identified by a pci_dev structure.
918dee1ad47SJeff Kirsher  * The OS initialization, configuring of the adapter private structure,
919dee1ad47SJeff Kirsher  * and a hardware reset occur.
920dee1ad47SJeff Kirsher  **/
e1000_probe(struct pci_dev * pdev,const struct pci_device_id * ent)9211dd06ae8SGreg Kroah-Hartman static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
922dee1ad47SJeff Kirsher {
923dee1ad47SJeff Kirsher 	struct net_device *netdev;
9240b76aae7STushar Dave 	struct e1000_adapter *adapter = NULL;
925dee1ad47SJeff Kirsher 	struct e1000_hw *hw;
926dee1ad47SJeff Kirsher 
927a48954c8SJanusz Wolak 	static int cards_found;
928a48954c8SJanusz Wolak 	static int global_quad_port_a; /* global ksp3 port a indication */
929dee1ad47SJeff Kirsher 	int i, err, pci_using_dac;
930dee1ad47SJeff Kirsher 	u16 eeprom_data = 0;
931dee1ad47SJeff Kirsher 	u16 tmp = 0;
932dee1ad47SJeff Kirsher 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
933dee1ad47SJeff Kirsher 	int bars, need_ioport;
9340b76aae7STushar Dave 	bool disable_dev = false;
935dee1ad47SJeff Kirsher 
936dee1ad47SJeff Kirsher 	/* do not allocate ioport bars when not needed */
937dee1ad47SJeff Kirsher 	need_ioport = e1000_is_need_ioport(pdev);
938dee1ad47SJeff Kirsher 	if (need_ioport) {
939dee1ad47SJeff Kirsher 		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
940dee1ad47SJeff Kirsher 		err = pci_enable_device(pdev);
941dee1ad47SJeff Kirsher 	} else {
942dee1ad47SJeff Kirsher 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
943dee1ad47SJeff Kirsher 		err = pci_enable_device_mem(pdev);
944dee1ad47SJeff Kirsher 	}
945dee1ad47SJeff Kirsher 	if (err)
946dee1ad47SJeff Kirsher 		return err;
947dee1ad47SJeff Kirsher 
948dee1ad47SJeff Kirsher 	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
949dee1ad47SJeff Kirsher 	if (err)
950dee1ad47SJeff Kirsher 		goto err_pci_reg;
951dee1ad47SJeff Kirsher 
952dee1ad47SJeff Kirsher 	pci_set_master(pdev);
953dee1ad47SJeff Kirsher 	err = pci_save_state(pdev);
954dee1ad47SJeff Kirsher 	if (err)
955dee1ad47SJeff Kirsher 		goto err_alloc_etherdev;
956dee1ad47SJeff Kirsher 
957dee1ad47SJeff Kirsher 	err = -ENOMEM;
958dee1ad47SJeff Kirsher 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
959dee1ad47SJeff Kirsher 	if (!netdev)
960dee1ad47SJeff Kirsher 		goto err_alloc_etherdev;
961dee1ad47SJeff Kirsher 
962dee1ad47SJeff Kirsher 	SET_NETDEV_DEV(netdev, &pdev->dev);
963dee1ad47SJeff Kirsher 
964dee1ad47SJeff Kirsher 	pci_set_drvdata(pdev, netdev);
965dee1ad47SJeff Kirsher 	adapter = netdev_priv(netdev);
966dee1ad47SJeff Kirsher 	adapter->netdev = netdev;
967dee1ad47SJeff Kirsher 	adapter->pdev = pdev;
968b3f4d599Sstephen hemminger 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
969dee1ad47SJeff Kirsher 	adapter->bars = bars;
970dee1ad47SJeff Kirsher 	adapter->need_ioport = need_ioport;
971dee1ad47SJeff Kirsher 
972dee1ad47SJeff Kirsher 	hw = &adapter->hw;
973dee1ad47SJeff Kirsher 	hw->back = adapter;
974dee1ad47SJeff Kirsher 
975dee1ad47SJeff Kirsher 	err = -EIO;
976dee1ad47SJeff Kirsher 	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
977dee1ad47SJeff Kirsher 	if (!hw->hw_addr)
978dee1ad47SJeff Kirsher 		goto err_ioremap;
979dee1ad47SJeff Kirsher 
980dee1ad47SJeff Kirsher 	if (adapter->need_ioport) {
981c9c13ba4SDenis Efremov 		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
982dee1ad47SJeff Kirsher 			if (pci_resource_len(pdev, i) == 0)
983dee1ad47SJeff Kirsher 				continue;
984dee1ad47SJeff Kirsher 			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
985dee1ad47SJeff Kirsher 				hw->io_base = pci_resource_start(pdev, i);
986dee1ad47SJeff Kirsher 				break;
987dee1ad47SJeff Kirsher 			}
988dee1ad47SJeff Kirsher 		}
989dee1ad47SJeff Kirsher 	}
990dee1ad47SJeff Kirsher 
991dee1ad47SJeff Kirsher 	/* make ready for any if (hw->...) below */
992dee1ad47SJeff Kirsher 	err = e1000_init_hw_struct(adapter, hw);
993dee1ad47SJeff Kirsher 	if (err)
994dee1ad47SJeff Kirsher 		goto err_sw_init;
995dee1ad47SJeff Kirsher 
9966cfbd97bSJeff Kirsher 	/* there is a workaround being applied below that limits
997dee1ad47SJeff Kirsher 	 * 64-bit DMA addresses to 64-bit hardware.  There are some
998dee1ad47SJeff Kirsher 	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
999dee1ad47SJeff Kirsher 	 */
1000dee1ad47SJeff Kirsher 	pci_using_dac = 0;
1001dee1ad47SJeff Kirsher 	if ((hw->bus_type == e1000_bus_type_pcix) &&
10029931a26eSRussell King 	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1003dee1ad47SJeff Kirsher 		pci_using_dac = 1;
1004dee1ad47SJeff Kirsher 	} else {
10059931a26eSRussell King 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1006dee1ad47SJeff Kirsher 		if (err) {
1007dee1ad47SJeff Kirsher 			pr_err("No usable DMA config, aborting\n");
1008dee1ad47SJeff Kirsher 			goto err_dma;
1009dee1ad47SJeff Kirsher 		}
1010dee1ad47SJeff Kirsher 	}
1011dee1ad47SJeff Kirsher 
1012dee1ad47SJeff Kirsher 	netdev->netdev_ops = &e1000_netdev_ops;
1013dee1ad47SJeff Kirsher 	e1000_set_ethtool_ops(netdev);
1014dee1ad47SJeff Kirsher 	netdev->watchdog_timeo = 5 * HZ;
1015b48b89f9SJakub Kicinski 	netif_napi_add(netdev, &adapter->napi, e1000_clean);
1016dee1ad47SJeff Kirsher 
1017dee1ad47SJeff Kirsher 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1018dee1ad47SJeff Kirsher 
1019dee1ad47SJeff Kirsher 	adapter->bd_number = cards_found;
1020dee1ad47SJeff Kirsher 
1021dee1ad47SJeff Kirsher 	/* setup the private structure */
1022dee1ad47SJeff Kirsher 
1023dee1ad47SJeff Kirsher 	err = e1000_sw_init(adapter);
1024dee1ad47SJeff Kirsher 	if (err)
1025dee1ad47SJeff Kirsher 		goto err_sw_init;
1026dee1ad47SJeff Kirsher 
1027dee1ad47SJeff Kirsher 	err = -EIO;
1028dee1ad47SJeff Kirsher 	if (hw->mac_type == e1000_ce4100) {
102913acde8fSFlorian Fainelli 		hw->ce4100_gbe_mdio_base_virt =
103013acde8fSFlorian Fainelli 					ioremap(pci_resource_start(pdev, BAR_1),
1031dee1ad47SJeff Kirsher 						pci_resource_len(pdev, BAR_1));
1032dee1ad47SJeff Kirsher 
103313acde8fSFlorian Fainelli 		if (!hw->ce4100_gbe_mdio_base_virt)
1034dee1ad47SJeff Kirsher 			goto err_mdio_ioremap;
1035dee1ad47SJeff Kirsher 	}
1036dee1ad47SJeff Kirsher 
1037dee1ad47SJeff Kirsher 	if (hw->mac_type >= e1000_82543) {
1038dee1ad47SJeff Kirsher 		netdev->hw_features = NETIF_F_SG |
1039dee1ad47SJeff Kirsher 				   NETIF_F_HW_CSUM |
1040f646968fSPatrick McHardy 				   NETIF_F_HW_VLAN_CTAG_RX;
1041f646968fSPatrick McHardy 		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1042f646968fSPatrick McHardy 				   NETIF_F_HW_VLAN_CTAG_FILTER;
1043dee1ad47SJeff Kirsher 	}
1044dee1ad47SJeff Kirsher 
1045dee1ad47SJeff Kirsher 	if ((hw->mac_type >= e1000_82544) &&
1046dee1ad47SJeff Kirsher 	   (hw->mac_type != e1000_82547))
1047dee1ad47SJeff Kirsher 		netdev->hw_features |= NETIF_F_TSO;
1048dee1ad47SJeff Kirsher 
104911a78dcfSBen Greear 	netdev->priv_flags |= IFF_SUPP_NOFCS;
105011a78dcfSBen Greear 
1051dee1ad47SJeff Kirsher 	netdev->features |= netdev->hw_features;
10527500673bSTushar Dave 	netdev->hw_features |= (NETIF_F_RXCSUM |
10537500673bSTushar Dave 				NETIF_F_RXALL |
10547500673bSTushar Dave 				NETIF_F_RXFCS);
1055dee1ad47SJeff Kirsher 
1056dee1ad47SJeff Kirsher 	if (pci_using_dac) {
1057dee1ad47SJeff Kirsher 		netdev->features |= NETIF_F_HIGHDMA;
1058dee1ad47SJeff Kirsher 		netdev->vlan_features |= NETIF_F_HIGHDMA;
1059dee1ad47SJeff Kirsher 	}
1060dee1ad47SJeff Kirsher 
10617500673bSTushar Dave 	netdev->vlan_features |= (NETIF_F_TSO |
10627500673bSTushar Dave 				  NETIF_F_HW_CSUM |
10637500673bSTushar Dave 				  NETIF_F_SG);
1064dee1ad47SJeff Kirsher 
1065a22bb0b9SFrancesco Ruggeri 	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1066a22bb0b9SFrancesco Ruggeri 	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1067a22bb0b9SFrancesco Ruggeri 	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
106801789349SJiri Pirko 		netdev->priv_flags |= IFF_UNICAST_FLT;
106901789349SJiri Pirko 
107091c527a5SJarod Wilson 	/* MTU range: 46 - 16110 */
107191c527a5SJarod Wilson 	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
107291c527a5SJarod Wilson 	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
107391c527a5SJarod Wilson 
1074dee1ad47SJeff Kirsher 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1075dee1ad47SJeff Kirsher 
1076dee1ad47SJeff Kirsher 	/* initialize eeprom parameters */
1077dee1ad47SJeff Kirsher 	if (e1000_init_eeprom_params(hw)) {
1078dee1ad47SJeff Kirsher 		e_err(probe, "EEPROM initialization failed\n");
1079dee1ad47SJeff Kirsher 		goto err_eeprom;
1080dee1ad47SJeff Kirsher 	}
1081dee1ad47SJeff Kirsher 
1082dee1ad47SJeff Kirsher 	/* before reading the EEPROM, reset the controller to
10836cfbd97bSJeff Kirsher 	 * put the device in a known good starting state
10846cfbd97bSJeff Kirsher 	 */
1085dee1ad47SJeff Kirsher 
1086dee1ad47SJeff Kirsher 	e1000_reset_hw(hw);
1087dee1ad47SJeff Kirsher 
1088dee1ad47SJeff Kirsher 	/* make sure the EEPROM is good */
1089dee1ad47SJeff Kirsher 	if (e1000_validate_eeprom_checksum(hw) < 0) {
1090dee1ad47SJeff Kirsher 		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1091dee1ad47SJeff Kirsher 		e1000_dump_eeprom(adapter);
10926cfbd97bSJeff Kirsher 		/* set MAC address to all zeroes to invalidate and temporary
1093dee1ad47SJeff Kirsher 		 * disable this device for the user. This blocks regular
1094dee1ad47SJeff Kirsher 		 * traffic while still permitting ethtool ioctls from reaching
1095dee1ad47SJeff Kirsher 		 * the hardware as well as allowing the user to run the
1096dee1ad47SJeff Kirsher 		 * interface after manually setting a hw addr using
1097dee1ad47SJeff Kirsher 		 * `ip set address`
1098dee1ad47SJeff Kirsher 		 */
1099dee1ad47SJeff Kirsher 		memset(hw->mac_addr, 0, netdev->addr_len);
1100dee1ad47SJeff Kirsher 	} else {
1101dee1ad47SJeff Kirsher 		/* copy the MAC address out of the EEPROM */
1102dee1ad47SJeff Kirsher 		if (e1000_read_mac_addr(hw))
1103dee1ad47SJeff Kirsher 			e_err(probe, "EEPROM Read Error\n");
1104dee1ad47SJeff Kirsher 	}
1105dbedd44eSJoe Perches 	/* don't block initialization here due to bad MAC address */
1106a05e4c0aSJakub Kicinski 	eth_hw_addr_set(netdev, hw->mac_addr);
1107dee1ad47SJeff Kirsher 
1108aaeb6cdfSJiri Pirko 	if (!is_valid_ether_addr(netdev->dev_addr))
1109dee1ad47SJeff Kirsher 		e_err(probe, "Invalid MAC Address\n");
1110dee1ad47SJeff Kirsher 
1111dee1ad47SJeff Kirsher 
1112a4010afeSJesse Brandeburg 	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1113a4010afeSJesse Brandeburg 	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1114a4010afeSJesse Brandeburg 			  e1000_82547_tx_fifo_stall_task);
1115a4010afeSJesse Brandeburg 	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1116dee1ad47SJeff Kirsher 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
1117dee1ad47SJeff Kirsher 
1118dee1ad47SJeff Kirsher 	e1000_check_options(adapter);
1119dee1ad47SJeff Kirsher 
1120dee1ad47SJeff Kirsher 	/* Initial Wake on LAN setting
1121dee1ad47SJeff Kirsher 	 * If APM wake is enabled in the EEPROM,
1122dee1ad47SJeff Kirsher 	 * enable the ACPI Magic Packet filter
1123dee1ad47SJeff Kirsher 	 */
1124dee1ad47SJeff Kirsher 
1125dee1ad47SJeff Kirsher 	switch (hw->mac_type) {
1126dee1ad47SJeff Kirsher 	case e1000_82542_rev2_0:
1127dee1ad47SJeff Kirsher 	case e1000_82542_rev2_1:
1128dee1ad47SJeff Kirsher 	case e1000_82543:
1129dee1ad47SJeff Kirsher 		break;
1130dee1ad47SJeff Kirsher 	case e1000_82544:
1131dee1ad47SJeff Kirsher 		e1000_read_eeprom(hw,
1132dee1ad47SJeff Kirsher 			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1133dee1ad47SJeff Kirsher 		eeprom_apme_mask = E1000_EEPROM_82544_APM;
1134dee1ad47SJeff Kirsher 		break;
1135dee1ad47SJeff Kirsher 	case e1000_82546:
1136dee1ad47SJeff Kirsher 	case e1000_82546_rev_3:
1137dee1ad47SJeff Kirsher 		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1138dee1ad47SJeff Kirsher 			e1000_read_eeprom(hw,
1139dee1ad47SJeff Kirsher 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1140dee1ad47SJeff Kirsher 			break;
1141dee1ad47SJeff Kirsher 		}
11425463fce6SJeff Kirsher 		fallthrough;
1143dee1ad47SJeff Kirsher 	default:
1144dee1ad47SJeff Kirsher 		e1000_read_eeprom(hw,
1145dee1ad47SJeff Kirsher 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1146dee1ad47SJeff Kirsher 		break;
1147dee1ad47SJeff Kirsher 	}
1148dee1ad47SJeff Kirsher 	if (eeprom_data & eeprom_apme_mask)
1149dee1ad47SJeff Kirsher 		adapter->eeprom_wol |= E1000_WUFC_MAG;
1150dee1ad47SJeff Kirsher 
1151dee1ad47SJeff Kirsher 	/* now that we have the eeprom settings, apply the special cases
1152dee1ad47SJeff Kirsher 	 * where the eeprom may be wrong or the board simply won't support
11536cfbd97bSJeff Kirsher 	 * wake on lan on a particular port
11546cfbd97bSJeff Kirsher 	 */
1155dee1ad47SJeff Kirsher 	switch (pdev->device) {
1156dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82546GB_PCIE:
1157dee1ad47SJeff Kirsher 		adapter->eeprom_wol = 0;
1158dee1ad47SJeff Kirsher 		break;
1159dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82546EB_FIBER:
1160dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82546GB_FIBER:
1161dee1ad47SJeff Kirsher 		/* Wake events only supported on port A for dual fiber
11626cfbd97bSJeff Kirsher 		 * regardless of eeprom setting
11636cfbd97bSJeff Kirsher 		 */
1164dee1ad47SJeff Kirsher 		if (er32(STATUS) & E1000_STATUS_FUNC_1)
1165dee1ad47SJeff Kirsher 			adapter->eeprom_wol = 0;
1166dee1ad47SJeff Kirsher 		break;
1167dee1ad47SJeff Kirsher 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1168dee1ad47SJeff Kirsher 		/* if quad port adapter, disable WoL on all but port A */
1169dee1ad47SJeff Kirsher 		if (global_quad_port_a != 0)
1170dee1ad47SJeff Kirsher 			adapter->eeprom_wol = 0;
1171dee1ad47SJeff Kirsher 		else
11723db1cd5cSRusty Russell 			adapter->quad_port_a = true;
1173dee1ad47SJeff Kirsher 		/* Reset for multiple quad port adapters */
1174dee1ad47SJeff Kirsher 		if (++global_quad_port_a == 4)
1175dee1ad47SJeff Kirsher 			global_quad_port_a = 0;
1176dee1ad47SJeff Kirsher 		break;
1177dee1ad47SJeff Kirsher 	}
1178dee1ad47SJeff Kirsher 
1179dee1ad47SJeff Kirsher 	/* initialize the wol settings based on the eeprom settings */
1180dee1ad47SJeff Kirsher 	adapter->wol = adapter->eeprom_wol;
1181dee1ad47SJeff Kirsher 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1182dee1ad47SJeff Kirsher 
1183dee1ad47SJeff Kirsher 	/* Auto detect PHY address */
1184dee1ad47SJeff Kirsher 	if (hw->mac_type == e1000_ce4100) {
1185dee1ad47SJeff Kirsher 		for (i = 0; i < 32; i++) {
1186dee1ad47SJeff Kirsher 			hw->phy_addr = i;
1187dee1ad47SJeff Kirsher 			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
11884e01f3a8SJean Sacren 
11894e01f3a8SJean Sacren 			if (tmp != 0 && tmp != 0xFF)
1190dee1ad47SJeff Kirsher 				break;
1191dee1ad47SJeff Kirsher 		}
11924e01f3a8SJean Sacren 
11934e01f3a8SJean Sacren 		if (i >= 32)
11944e01f3a8SJean Sacren 			goto err_eeprom;
1195dee1ad47SJeff Kirsher 	}
1196dee1ad47SJeff Kirsher 
1197dee1ad47SJeff Kirsher 	/* reset the hardware with the new settings */
1198dee1ad47SJeff Kirsher 	e1000_reset(adapter);
1199dee1ad47SJeff Kirsher 
1200dee1ad47SJeff Kirsher 	strcpy(netdev->name, "eth%d");
1201dee1ad47SJeff Kirsher 	err = register_netdev(netdev);
1202dee1ad47SJeff Kirsher 	if (err)
1203dee1ad47SJeff Kirsher 		goto err_register;
1204dee1ad47SJeff Kirsher 
120552f5509fSJiri Pirko 	e1000_vlan_filter_on_off(adapter, false);
1206dee1ad47SJeff Kirsher 
1207dee1ad47SJeff Kirsher 	/* print bus type/speed/width info */
1208dee1ad47SJeff Kirsher 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1209dee1ad47SJeff Kirsher 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1210dee1ad47SJeff Kirsher 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1211dee1ad47SJeff Kirsher 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
1212dee1ad47SJeff Kirsher 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
1213dee1ad47SJeff Kirsher 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1214dee1ad47SJeff Kirsher 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1215dee1ad47SJeff Kirsher 	       netdev->dev_addr);
1216dee1ad47SJeff Kirsher 
1217dee1ad47SJeff Kirsher 	/* carrier off reporting is important to ethtool even BEFORE open */
1218dee1ad47SJeff Kirsher 	netif_carrier_off(netdev);
1219dee1ad47SJeff Kirsher 
1220dee1ad47SJeff Kirsher 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1221dee1ad47SJeff Kirsher 
1222dee1ad47SJeff Kirsher 	cards_found++;
1223dee1ad47SJeff Kirsher 	return 0;
1224dee1ad47SJeff Kirsher 
1225dee1ad47SJeff Kirsher err_register:
1226dee1ad47SJeff Kirsher err_eeprom:
1227dee1ad47SJeff Kirsher 	e1000_phy_hw_reset(hw);
1228dee1ad47SJeff Kirsher 
1229dee1ad47SJeff Kirsher 	if (hw->flash_address)
1230dee1ad47SJeff Kirsher 		iounmap(hw->flash_address);
1231dee1ad47SJeff Kirsher 	kfree(adapter->tx_ring);
1232dee1ad47SJeff Kirsher 	kfree(adapter->rx_ring);
1233dee1ad47SJeff Kirsher err_dma:
1234dee1ad47SJeff Kirsher err_sw_init:
1235dee1ad47SJeff Kirsher err_mdio_ioremap:
123613acde8fSFlorian Fainelli 	iounmap(hw->ce4100_gbe_mdio_base_virt);
1237dee1ad47SJeff Kirsher 	iounmap(hw->hw_addr);
1238dee1ad47SJeff Kirsher err_ioremap:
12390b76aae7STushar Dave 	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1240dee1ad47SJeff Kirsher 	free_netdev(netdev);
1241dee1ad47SJeff Kirsher err_alloc_etherdev:
1242dee1ad47SJeff Kirsher 	pci_release_selected_regions(pdev, bars);
1243dee1ad47SJeff Kirsher err_pci_reg:
12440b76aae7STushar Dave 	if (!adapter || disable_dev)
1245dee1ad47SJeff Kirsher 		pci_disable_device(pdev);
1246dee1ad47SJeff Kirsher 	return err;
1247dee1ad47SJeff Kirsher }
1248dee1ad47SJeff Kirsher 
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 *
 * Teardown is ordered: stop the watchdog/reset work and the interface,
 * unregister from the net stack, then release rings, mappings and PCI
 * resources.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	/* the MDIO region is only ioremapped for CE4100 parts */
	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	/* __E1000_DISABLED guarantees pci_disable_device() runs at most
	 * once (the probe error path uses the same bit); sample it before
	 * free_netdev() since adapter lives inside netdev's private area
	 */
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
1288dee1ad47SJeff Kirsher 
1289dee1ad47SJeff Kirsher /**
1290dee1ad47SJeff Kirsher  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1291dee1ad47SJeff Kirsher  * @adapter: board private structure to initialize
1292dee1ad47SJeff Kirsher  *
1293dee1ad47SJeff Kirsher  * e1000_sw_init initializes the Adapter private data structure.
1294dee1ad47SJeff Kirsher  * e1000_init_hw_struct MUST be called before this function
1295dee1ad47SJeff Kirsher  **/
e1000_sw_init(struct e1000_adapter * adapter)12969f9a12f8SBill Pemberton static int e1000_sw_init(struct e1000_adapter *adapter)
1297dee1ad47SJeff Kirsher {
1298dee1ad47SJeff Kirsher 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1299dee1ad47SJeff Kirsher 
1300dee1ad47SJeff Kirsher 	adapter->num_tx_queues = 1;
1301dee1ad47SJeff Kirsher 	adapter->num_rx_queues = 1;
1302dee1ad47SJeff Kirsher 
1303dee1ad47SJeff Kirsher 	if (e1000_alloc_queues(adapter)) {
1304dee1ad47SJeff Kirsher 		e_err(probe, "Unable to allocate memory for queues\n");
1305dee1ad47SJeff Kirsher 		return -ENOMEM;
1306dee1ad47SJeff Kirsher 	}
1307dee1ad47SJeff Kirsher 
1308dee1ad47SJeff Kirsher 	/* Explicitly disable IRQ since the NIC can be in any state. */
1309dee1ad47SJeff Kirsher 	e1000_irq_disable(adapter);
1310dee1ad47SJeff Kirsher 
1311dee1ad47SJeff Kirsher 	spin_lock_init(&adapter->stats_lock);
1312dee1ad47SJeff Kirsher 
1313dee1ad47SJeff Kirsher 	set_bit(__E1000_DOWN, &adapter->flags);
1314dee1ad47SJeff Kirsher 
1315dee1ad47SJeff Kirsher 	return 0;
1316dee1ad47SJeff Kirsher }
1317dee1ad47SJeff Kirsher 
1318dee1ad47SJeff Kirsher /**
1319dee1ad47SJeff Kirsher  * e1000_alloc_queues - Allocate memory for all rings
1320dee1ad47SJeff Kirsher  * @adapter: board private structure to initialize
1321dee1ad47SJeff Kirsher  *
1322dee1ad47SJeff Kirsher  * We allocate one ring per queue at run-time since we don't know the
1323dee1ad47SJeff Kirsher  * number of queues at compile-time.
1324dee1ad47SJeff Kirsher  **/
e1000_alloc_queues(struct e1000_adapter * adapter)13259f9a12f8SBill Pemberton static int e1000_alloc_queues(struct e1000_adapter *adapter)
1326dee1ad47SJeff Kirsher {
1327dee1ad47SJeff Kirsher 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1328dee1ad47SJeff Kirsher 				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1329dee1ad47SJeff Kirsher 	if (!adapter->tx_ring)
1330dee1ad47SJeff Kirsher 		return -ENOMEM;
1331dee1ad47SJeff Kirsher 
1332dee1ad47SJeff Kirsher 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1333dee1ad47SJeff Kirsher 				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1334dee1ad47SJeff Kirsher 	if (!adapter->rx_ring) {
1335dee1ad47SJeff Kirsher 		kfree(adapter->tx_ring);
1336dee1ad47SJeff Kirsher 		return -ENOMEM;
1337dee1ad47SJeff Kirsher 	}
1338dee1ad47SJeff Kirsher 
1339dee1ad47SJeff Kirsher 	return E1000_SUCCESS;
1340dee1ad47SJeff Kirsher }
1341dee1ad47SJeff Kirsher 
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

	/* error unwind: release in the reverse order of acquisition */
err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
1419dee1ad47SJeff Kirsher 
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	/* wait (bounded by E1000_CHECK_RESET_COUNT iterations) for any
	 * in-progress reset to finish before tearing the interface down
	 */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(count < 0);

	/* signal that we're down so that the reset task will no longer run */
	set_bit(__E1000_DOWN, &adapter->flags);
	clear_bit(__E1000_RESETTING, &adapter->flags);

	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}
1465dee1ad47SJeff Kirsher 
1466dee1ad47SJeff Kirsher /**
1467dee1ad47SJeff Kirsher  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1468dee1ad47SJeff Kirsher  * @adapter: address of board private structure
1469dee1ad47SJeff Kirsher  * @start: address of beginning of memory
1470dee1ad47SJeff Kirsher  * @len: length of memory
1471dee1ad47SJeff Kirsher  **/
e1000_check_64k_bound(struct e1000_adapter * adapter,void * start,unsigned long len)1472dee1ad47SJeff Kirsher static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1473dee1ad47SJeff Kirsher 				  unsigned long len)
1474dee1ad47SJeff Kirsher {
1475dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
1476dee1ad47SJeff Kirsher 	unsigned long begin = (unsigned long)start;
1477dee1ad47SJeff Kirsher 	unsigned long end = begin + len;
1478dee1ad47SJeff Kirsher 
1479dee1ad47SJeff Kirsher 	/* First rev 82545 and 82546 need to not allow any memory
14806cfbd97bSJeff Kirsher 	 * write location to cross 64k boundary due to errata 23
14816cfbd97bSJeff Kirsher 	 */
1482dee1ad47SJeff Kirsher 	if (hw->mac_type == e1000_82545 ||
1483dee1ad47SJeff Kirsher 	    hw->mac_type == e1000_ce4100 ||
1484dee1ad47SJeff Kirsher 	    hw->mac_type == e1000_82546) {
1485c95576a3SJason Yan 		return ((begin ^ (end - 1)) >> 16) == 0;
1486dee1ad47SJeff Kirsher 	}
1487dee1ad47SJeff Kirsher 
1488dee1ad47SJeff Kirsher 	return true;
1489dee1ad47SJeff Kirsher }
1490dee1ad47SJeff Kirsher 
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr:    tx descriptor ring (for a specific queue) to setup
 *
 * Allocates the software buffer_info array and the DMA-coherent
 * descriptor memory, retrying once if the first allocation violates
 * the errata-23 64kB-boundary restriction.
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous: holding on to
		 * the bad allocation forces the allocator to hand back a
		 * different (hopefully aligned) region
		 */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up: free both attempts and the buffer_info */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
1561dee1ad47SJeff Kirsher 
1562dee1ad47SJeff Kirsher /**
1563dee1ad47SJeff Kirsher  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1564dee1ad47SJeff Kirsher  * 				  (Descriptors) for all queues
1565dee1ad47SJeff Kirsher  * @adapter: board private structure
1566dee1ad47SJeff Kirsher  *
1567dee1ad47SJeff Kirsher  * Return 0 on success, negative on failure
1568dee1ad47SJeff Kirsher  **/
e1000_setup_all_tx_resources(struct e1000_adapter * adapter)1569dee1ad47SJeff Kirsher int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1570dee1ad47SJeff Kirsher {
1571dee1ad47SJeff Kirsher 	int i, err = 0;
1572dee1ad47SJeff Kirsher 
1573dee1ad47SJeff Kirsher 	for (i = 0; i < adapter->num_tx_queues; i++) {
1574dee1ad47SJeff Kirsher 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1575dee1ad47SJeff Kirsher 		if (err) {
1576dee1ad47SJeff Kirsher 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1577dee1ad47SJeff Kirsher 			for (i-- ; i >= 0; i--)
1578dee1ad47SJeff Kirsher 				e1000_free_tx_resources(adapter,
1579dee1ad47SJeff Kirsher 							&adapter->tx_ring[i]);
1580dee1ad47SJeff Kirsher 			break;
1581dee1ad47SJeff Kirsher 		}
1582dee1ad47SJeff Kirsher 	}
1583dee1ad47SJeff Kirsher 
1584dee1ad47SJeff Kirsher 	return err;
1585dee1ad47SJeff Kirsher }
1586dee1ad47SJeff Kirsher 
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset: program the ring
 * base/length/head/tail registers, the inter-packet gap, the interrupt
 * delay timers and finally the transmit control register.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		/* ring base is split across the high/low register pair */
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		/* pre-82543 parts use the legacy register offsets */
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);

}
1680dee1ad47SJeff Kirsher 
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr:    rx descriptor ring (for a specific queue) to setup
 *
 * Allocates the software buffer_info array and the DMA-coherent
 * descriptor memory, retrying once if the first allocation violates
 * the errata-23 64kB-boundary restriction (mirrors the Tx path).
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous: keeping the bad
		 * region allocated forces the allocator to return a
		 * different one
		 */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up: free both attempts, then the buffer_info */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
1753dee1ad47SJeff Kirsher 
1754dee1ad47SJeff Kirsher /**
1755dee1ad47SJeff Kirsher  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1756dee1ad47SJeff Kirsher  * 				  (Descriptors) for all queues
1757dee1ad47SJeff Kirsher  * @adapter: board private structure
1758dee1ad47SJeff Kirsher  *
1759dee1ad47SJeff Kirsher  * Return 0 on success, negative on failure
1760dee1ad47SJeff Kirsher  **/
e1000_setup_all_rx_resources(struct e1000_adapter * adapter)1761dee1ad47SJeff Kirsher int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1762dee1ad47SJeff Kirsher {
1763dee1ad47SJeff Kirsher 	int i, err = 0;
1764dee1ad47SJeff Kirsher 
1765dee1ad47SJeff Kirsher 	for (i = 0; i < adapter->num_rx_queues; i++) {
1766dee1ad47SJeff Kirsher 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1767dee1ad47SJeff Kirsher 		if (err) {
1768dee1ad47SJeff Kirsher 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1769dee1ad47SJeff Kirsher 			for (i-- ; i >= 0; i--)
1770dee1ad47SJeff Kirsher 				e1000_free_rx_resources(adapter,
1771dee1ad47SJeff Kirsher 							&adapter->rx_ring[i]);
1772dee1ad47SJeff Kirsher 			break;
1773dee1ad47SJeff Kirsher 		}
1774dee1ad47SJeff Kirsher 	}
1775dee1ad47SJeff Kirsher 
1776dee1ad47SJeff Kirsher 	return err;
1777dee1ad47SJeff Kirsher }
1778dee1ad47SJeff Kirsher 
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 *
 * Builds the RCTL value from software state (multicast filter offset, TBI
 * workaround, MTU, Rx buffer size, RXALL feature) and writes it to the
 * hardware in a single register write at the end.
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	/* clear the multicast-offset field before OR-ing in the new value */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* store bad packets while the TBI compatibility workaround is on */
	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long-packet enable is only needed for jumbo-sized MTUs */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes: BSEX is set by default and cleared again for
	 * the 2048-byte case, the only size used here that doesn't need it
	 */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
1845dee1ad47SJeff Kirsher 
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset: select the clean/alloc
 * handlers, program ring base/length, interrupt moderation and checksum
 * offload, then re-enable receives.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	/* Jumbo MTUs use the page-based receive path, standard MTUs the
	 * frag-based path; rdlen (ring length in bytes) is computed the
	 * same way in both branches.
	 */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			/* convert itr (interrupts/sec) into the ITR
			 * register's value (the /256 reflects the
			 * register's 256ns granularity)
			 */
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		/* pre-82543 MACs use the legacy head/tail register offsets */
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
1916dee1ad47SJeff Kirsher 
1917dee1ad47SJeff Kirsher /**
1918dee1ad47SJeff Kirsher  * e1000_free_tx_resources - Free Tx Resources per Queue
1919dee1ad47SJeff Kirsher  * @adapter: board private structure
1920dee1ad47SJeff Kirsher  * @tx_ring: Tx descriptor ring for a specific queue
1921dee1ad47SJeff Kirsher  *
1922dee1ad47SJeff Kirsher  * Free all transmit software resources
1923dee1ad47SJeff Kirsher  **/
e1000_free_tx_resources(struct e1000_adapter * adapter,struct e1000_tx_ring * tx_ring)1924dee1ad47SJeff Kirsher static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1925dee1ad47SJeff Kirsher 				    struct e1000_tx_ring *tx_ring)
1926dee1ad47SJeff Kirsher {
1927dee1ad47SJeff Kirsher 	struct pci_dev *pdev = adapter->pdev;
1928dee1ad47SJeff Kirsher 
1929dee1ad47SJeff Kirsher 	e1000_clean_tx_ring(adapter, tx_ring);
1930dee1ad47SJeff Kirsher 
1931dee1ad47SJeff Kirsher 	vfree(tx_ring->buffer_info);
1932dee1ad47SJeff Kirsher 	tx_ring->buffer_info = NULL;
1933dee1ad47SJeff Kirsher 
1934dee1ad47SJeff Kirsher 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1935dee1ad47SJeff Kirsher 			  tx_ring->dma);
1936dee1ad47SJeff Kirsher 
1937dee1ad47SJeff Kirsher 	tx_ring->desc = NULL;
1938dee1ad47SJeff Kirsher }
1939dee1ad47SJeff Kirsher 
1940dee1ad47SJeff Kirsher /**
1941dee1ad47SJeff Kirsher  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1942dee1ad47SJeff Kirsher  * @adapter: board private structure
1943dee1ad47SJeff Kirsher  *
1944dee1ad47SJeff Kirsher  * Free all transmit software resources
1945dee1ad47SJeff Kirsher  **/
e1000_free_all_tx_resources(struct e1000_adapter * adapter)1946dee1ad47SJeff Kirsher void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1947dee1ad47SJeff Kirsher {
1948dee1ad47SJeff Kirsher 	int i;
1949dee1ad47SJeff Kirsher 
1950dee1ad47SJeff Kirsher 	for (i = 0; i < adapter->num_tx_queues; i++)
1951dee1ad47SJeff Kirsher 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1952dee1ad47SJeff Kirsher }
1953dee1ad47SJeff Kirsher 
1954580f321dSFlorian Westphal static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter * adapter,struct e1000_tx_buffer * buffer_info,int budget)1955580f321dSFlorian Westphal e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1956dcb95f06SAlexander Lobakin 				 struct e1000_tx_buffer *buffer_info,
1957dcb95f06SAlexander Lobakin 				 int budget)
1958dee1ad47SJeff Kirsher {
1959dee1ad47SJeff Kirsher 	if (buffer_info->dma) {
1960dee1ad47SJeff Kirsher 		if (buffer_info->mapped_as_page)
1961dee1ad47SJeff Kirsher 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1962dee1ad47SJeff Kirsher 				       buffer_info->length, DMA_TO_DEVICE);
1963dee1ad47SJeff Kirsher 		else
1964dee1ad47SJeff Kirsher 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1965dee1ad47SJeff Kirsher 					 buffer_info->length,
1966dee1ad47SJeff Kirsher 					 DMA_TO_DEVICE);
1967dee1ad47SJeff Kirsher 		buffer_info->dma = 0;
1968dee1ad47SJeff Kirsher 	}
1969dee1ad47SJeff Kirsher 	if (buffer_info->skb) {
1970dcb95f06SAlexander Lobakin 		napi_consume_skb(buffer_info->skb, budget);
1971dee1ad47SJeff Kirsher 		buffer_info->skb = NULL;
1972dee1ad47SJeff Kirsher 	}
1973dee1ad47SJeff Kirsher 	buffer_info->time_stamp = 0;
1974dee1ad47SJeff Kirsher 	/* buffer_info must be completely set up in the transmit path */
1975dee1ad47SJeff Kirsher }
1976dee1ad47SJeff Kirsher 
1977dee1ad47SJeff Kirsher /**
1978dee1ad47SJeff Kirsher  * e1000_clean_tx_ring - Free Tx Buffers
1979dee1ad47SJeff Kirsher  * @adapter: board private structure
1980dee1ad47SJeff Kirsher  * @tx_ring: ring to be cleaned
1981dee1ad47SJeff Kirsher  **/
e1000_clean_tx_ring(struct e1000_adapter * adapter,struct e1000_tx_ring * tx_ring)1982dee1ad47SJeff Kirsher static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1983dee1ad47SJeff Kirsher 				struct e1000_tx_ring *tx_ring)
1984dee1ad47SJeff Kirsher {
1985dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
1986580f321dSFlorian Westphal 	struct e1000_tx_buffer *buffer_info;
1987dee1ad47SJeff Kirsher 	unsigned long size;
1988dee1ad47SJeff Kirsher 	unsigned int i;
1989dee1ad47SJeff Kirsher 
1990dee1ad47SJeff Kirsher 	/* Free all the Tx ring sk_buffs */
1991dee1ad47SJeff Kirsher 
1992dee1ad47SJeff Kirsher 	for (i = 0; i < tx_ring->count; i++) {
1993dee1ad47SJeff Kirsher 		buffer_info = &tx_ring->buffer_info[i];
1994dcb95f06SAlexander Lobakin 		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
1995dee1ad47SJeff Kirsher 	}
1996dee1ad47SJeff Kirsher 
19972f66fd36SOtto Estuardo Solares Cabrera 	netdev_reset_queue(adapter->netdev);
1998580f321dSFlorian Westphal 	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1999dee1ad47SJeff Kirsher 	memset(tx_ring->buffer_info, 0, size);
2000dee1ad47SJeff Kirsher 
2001dee1ad47SJeff Kirsher 	/* Zero out the descriptor ring */
2002dee1ad47SJeff Kirsher 
2003dee1ad47SJeff Kirsher 	memset(tx_ring->desc, 0, tx_ring->size);
2004dee1ad47SJeff Kirsher 
2005dee1ad47SJeff Kirsher 	tx_ring->next_to_use = 0;
2006dee1ad47SJeff Kirsher 	tx_ring->next_to_clean = 0;
20073db1cd5cSRusty Russell 	tx_ring->last_tx_tso = false;
2008dee1ad47SJeff Kirsher 
2009dee1ad47SJeff Kirsher 	writel(0, hw->hw_addr + tx_ring->tdh);
2010dee1ad47SJeff Kirsher 	writel(0, hw->hw_addr + tx_ring->tdt);
2011dee1ad47SJeff Kirsher }
2012dee1ad47SJeff Kirsher 
2013dee1ad47SJeff Kirsher /**
2014dee1ad47SJeff Kirsher  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2015dee1ad47SJeff Kirsher  * @adapter: board private structure
2016dee1ad47SJeff Kirsher  **/
e1000_clean_all_tx_rings(struct e1000_adapter * adapter)2017dee1ad47SJeff Kirsher static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2018dee1ad47SJeff Kirsher {
2019dee1ad47SJeff Kirsher 	int i;
2020dee1ad47SJeff Kirsher 
2021dee1ad47SJeff Kirsher 	for (i = 0; i < adapter->num_tx_queues; i++)
2022dee1ad47SJeff Kirsher 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2023dee1ad47SJeff Kirsher }
2024dee1ad47SJeff Kirsher 
2025dee1ad47SJeff Kirsher /**
2026dee1ad47SJeff Kirsher  * e1000_free_rx_resources - Free Rx Resources
2027dee1ad47SJeff Kirsher  * @adapter: board private structure
2028dee1ad47SJeff Kirsher  * @rx_ring: ring to clean the resources from
2029dee1ad47SJeff Kirsher  *
2030dee1ad47SJeff Kirsher  * Free all receive software resources
2031dee1ad47SJeff Kirsher  **/
e1000_free_rx_resources(struct e1000_adapter * adapter,struct e1000_rx_ring * rx_ring)2032dee1ad47SJeff Kirsher static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2033dee1ad47SJeff Kirsher 				    struct e1000_rx_ring *rx_ring)
2034dee1ad47SJeff Kirsher {
2035dee1ad47SJeff Kirsher 	struct pci_dev *pdev = adapter->pdev;
2036dee1ad47SJeff Kirsher 
2037dee1ad47SJeff Kirsher 	e1000_clean_rx_ring(adapter, rx_ring);
2038dee1ad47SJeff Kirsher 
2039dee1ad47SJeff Kirsher 	vfree(rx_ring->buffer_info);
2040dee1ad47SJeff Kirsher 	rx_ring->buffer_info = NULL;
2041dee1ad47SJeff Kirsher 
2042dee1ad47SJeff Kirsher 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2043dee1ad47SJeff Kirsher 			  rx_ring->dma);
2044dee1ad47SJeff Kirsher 
2045dee1ad47SJeff Kirsher 	rx_ring->desc = NULL;
2046dee1ad47SJeff Kirsher }
2047dee1ad47SJeff Kirsher 
2048dee1ad47SJeff Kirsher /**
2049dee1ad47SJeff Kirsher  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2050dee1ad47SJeff Kirsher  * @adapter: board private structure
2051dee1ad47SJeff Kirsher  *
2052dee1ad47SJeff Kirsher  * Free all receive software resources
2053dee1ad47SJeff Kirsher  **/
e1000_free_all_rx_resources(struct e1000_adapter * adapter)2054dee1ad47SJeff Kirsher void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2055dee1ad47SJeff Kirsher {
2056dee1ad47SJeff Kirsher 	int i;
2057dee1ad47SJeff Kirsher 
2058dee1ad47SJeff Kirsher 	for (i = 0; i < adapter->num_rx_queues; i++)
2059dee1ad47SJeff Kirsher 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2060dee1ad47SJeff Kirsher }
2061dee1ad47SJeff Kirsher 
206213809609SFlorian Westphal #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
e1000_frag_len(const struct e1000_adapter * a)206313809609SFlorian Westphal static unsigned int e1000_frag_len(const struct e1000_adapter *a)
206413809609SFlorian Westphal {
206513809609SFlorian Westphal 	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
206613809609SFlorian Westphal 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
206713809609SFlorian Westphal }
206813809609SFlorian Westphal 
e1000_alloc_frag(const struct e1000_adapter * a)206913809609SFlorian Westphal static void *e1000_alloc_frag(const struct e1000_adapter *a)
207013809609SFlorian Westphal {
207113809609SFlorian Westphal 	unsigned int len = e1000_frag_len(a);
207213809609SFlorian Westphal 	u8 *data = netdev_alloc_frag(len);
207313809609SFlorian Westphal 
207413809609SFlorian Westphal 	if (likely(data))
207513809609SFlorian Westphal 		data += E1000_HEADROOM;
207613809609SFlorian Westphal 	return data;
207713809609SFlorian Westphal }
207813809609SFlorian Westphal 
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 *
 * Unmaps and releases every receive buffer still owned by the ring, then
 * zeroes the software bookkeeping, the descriptor memory, and the hardware
 * head/tail pointers.
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_rx_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx netfrags; the buffer representation depends on
	 * which receive path (standard frag vs jumbo page) is active, as
	 * indicated by the installed clean_rx handler
	 */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (adapter->clean_rx == e1000_clean_rx_irq) {
			/* standard path: single-mapped frag data */
			if (buffer_info->dma)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.data) {
				skb_free_frag(buffer_info->rxbuf.data);
				buffer_info->rxbuf.data = NULL;
			}
		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			/* jumbo path: page-mapped buffers */
			if (buffer_info->dma)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       adapter->rx_buffer_len,
					       DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.page) {
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
			}
		}

		buffer_info->dma = 0;
	}

	/* there also may be some cached data from a chained receive */
	napi_free_frags(&adapter->napi);
	rx_ring->rx_skb_top = NULL;

	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* reset the hardware head and tail pointers */
	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
2135dee1ad47SJeff Kirsher 
2136dee1ad47SJeff Kirsher /**
2137dee1ad47SJeff Kirsher  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2138dee1ad47SJeff Kirsher  * @adapter: board private structure
2139dee1ad47SJeff Kirsher  **/
e1000_clean_all_rx_rings(struct e1000_adapter * adapter)2140dee1ad47SJeff Kirsher static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2141dee1ad47SJeff Kirsher {
2142dee1ad47SJeff Kirsher 	int i;
2143dee1ad47SJeff Kirsher 
2144dee1ad47SJeff Kirsher 	for (i = 0; i < adapter->num_rx_queues; i++)
2145dee1ad47SJeff Kirsher 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2146dee1ad47SJeff Kirsher }
2147dee1ad47SJeff Kirsher 
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* MWI must be off while the receive unit is held in reset */
	e1000_pci_clear_mwi(hw);

	/* assert the receive-unit reset bit and wait for it to settle */
	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	/* the reset invalidates whatever the rings still hold */
	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
2168dee1ad47SJeff Kirsher 
/* Counterpart of e1000_enter_82542_rst(): take the receive unit out of
 * reset, restore MWI if the saved PCI command word had it enabled, and
 * reconfigure/refill the (single) receive ring if the interface is up.
 */
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* deassert the receive-unit reset bit and wait for it to settle */
	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
2191dee1ad47SJeff Kirsher 
2192dee1ad47SJeff Kirsher /**
2193dee1ad47SJeff Kirsher  * e1000_set_mac - Change the Ethernet Address of the NIC
2194dee1ad47SJeff Kirsher  * @netdev: network interface device structure
2195dee1ad47SJeff Kirsher  * @p: pointer to an address structure
2196dee1ad47SJeff Kirsher  *
2197dee1ad47SJeff Kirsher  * Returns 0 on success, negative on failure
2198dee1ad47SJeff Kirsher  **/
e1000_set_mac(struct net_device * netdev,void * p)2199dee1ad47SJeff Kirsher static int e1000_set_mac(struct net_device *netdev, void *p)
2200dee1ad47SJeff Kirsher {
2201dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = netdev_priv(netdev);
2202dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
2203dee1ad47SJeff Kirsher 	struct sockaddr *addr = p;
2204dee1ad47SJeff Kirsher 
2205dee1ad47SJeff Kirsher 	if (!is_valid_ether_addr(addr->sa_data))
2206dee1ad47SJeff Kirsher 		return -EADDRNOTAVAIL;
2207dee1ad47SJeff Kirsher 
2208dee1ad47SJeff Kirsher 	/* 82542 2.0 needs to be in reset to write receive address registers */
2209dee1ad47SJeff Kirsher 
2210dee1ad47SJeff Kirsher 	if (hw->mac_type == e1000_82542_rev2_0)
2211dee1ad47SJeff Kirsher 		e1000_enter_82542_rst(adapter);
2212dee1ad47SJeff Kirsher 
2213a05e4c0aSJakub Kicinski 	eth_hw_addr_set(netdev, addr->sa_data);
2214dee1ad47SJeff Kirsher 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2215dee1ad47SJeff Kirsher 
2216dee1ad47SJeff Kirsher 	e1000_rar_set(hw, hw->mac_addr, 0);
2217dee1ad47SJeff Kirsher 
2218dee1ad47SJeff Kirsher 	if (hw->mac_type == e1000_82542_rev2_0)
2219dee1ad47SJeff Kirsher 		e1000_leave_82542_rst(adapter);
2220dee1ad47SJeff Kirsher 
2221dee1ad47SJeff Kirsher 	return 0;
2222dee1ad47SJeff Kirsher }
2223dee1ad47SJeff Kirsher 
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	/* shadow copy of the multicast table array, accumulated here and
	 * written to hardware in one pass at the end
	 */
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray)
		return;

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	/* fall back to unicast-promiscuous when there are more unicast
	 * addresses than exact-match slots (RAR 0 is reserved for the
	 * station address)
	 */
	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table:
			 * the upper hash bits select the MTA register, the
			 * lower five the bit within it
			 */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	/* clear any exact-match slots left over from the previous
	 * configuration
	 */
	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write
	 */
	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/* If we are on an 82544 has an errata where writing odd
		 * offsets overwrites the previous even offset, but writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}
2333dee1ad47SJeff Kirsher 
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info_task(struct work_struct *work)
{
	/* Recover the adapter from the delayed-work member embedded in it */
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);

	/* Snapshot the current PHY diagnostics into adapter->phy_info */
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
}
2349dee1ad47SJeff Kirsher 
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 *
 * Workaround for the 82547 Tx FIFO stall: once the FIFO has fully drained
 * (descriptor ring and FIFO head/tail/saved registers all agree), briefly
 * disable transmits, rewind the Tx FIFO pointers to the head address, then
 * re-enable transmits and wake the queue.  If the FIFO has not drained yet,
 * reschedule this task to check again on the next jiffy.
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* FIFO is empty only when ring tail==head AND the FIFO
		 * tail/head and their saved copies match
		 */
		if ((er32(TDT) == er32(TDH)) &&
		   (er32(TDFT) == er32(TDFH)) &&
		   (er32(TDFTS) == er32(TDFHS))) {
			/* Disable transmits while resetting FIFO pointers */
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			/* Stall cleared: reset bookkeeping and restart Tx */
			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			/* Not drained yet; poll again shortly */
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
}
2384dee1ad47SJeff Kirsher 
e1000_has_link(struct e1000_adapter * adapter)2385dee1ad47SJeff Kirsher bool e1000_has_link(struct e1000_adapter *adapter)
2386dee1ad47SJeff Kirsher {
2387dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
2388dee1ad47SJeff Kirsher 	bool link_active = false;
2389dee1ad47SJeff Kirsher 
2390dee1ad47SJeff Kirsher 	/* get_link_status is set on LSC (link status) interrupt or rx
2391dee1ad47SJeff Kirsher 	 * sequence error interrupt (except on intel ce4100).
2392dee1ad47SJeff Kirsher 	 * get_link_status will stay false until the
2393dee1ad47SJeff Kirsher 	 * e1000_check_for_link establishes link for copper adapters
2394dee1ad47SJeff Kirsher 	 * ONLY
2395dee1ad47SJeff Kirsher 	 */
2396dee1ad47SJeff Kirsher 	switch (hw->media_type) {
2397dee1ad47SJeff Kirsher 	case e1000_media_type_copper:
2398dee1ad47SJeff Kirsher 		if (hw->mac_type == e1000_ce4100)
2399dee1ad47SJeff Kirsher 			hw->get_link_status = 1;
2400dee1ad47SJeff Kirsher 		if (hw->get_link_status) {
2401dee1ad47SJeff Kirsher 			e1000_check_for_link(hw);
2402dee1ad47SJeff Kirsher 			link_active = !hw->get_link_status;
2403dee1ad47SJeff Kirsher 		} else {
2404dee1ad47SJeff Kirsher 			link_active = true;
2405dee1ad47SJeff Kirsher 		}
2406dee1ad47SJeff Kirsher 		break;
2407dee1ad47SJeff Kirsher 	case e1000_media_type_fiber:
2408dee1ad47SJeff Kirsher 		e1000_check_for_link(hw);
2409dee1ad47SJeff Kirsher 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2410dee1ad47SJeff Kirsher 		break;
2411dee1ad47SJeff Kirsher 	case e1000_media_type_internal_serdes:
2412dee1ad47SJeff Kirsher 		e1000_check_for_link(hw);
2413dee1ad47SJeff Kirsher 		link_active = hw->serdes_has_link;
2414dee1ad47SJeff Kirsher 		break;
2415dee1ad47SJeff Kirsher 	default:
2416dee1ad47SJeff Kirsher 		break;
2417dee1ad47SJeff Kirsher 	}
2418dee1ad47SJeff Kirsher 
2419dee1ad47SJeff Kirsher 	return link_active;
2420dee1ad47SJeff Kirsher }
2421dee1ad47SJeff Kirsher 
/**
 * e1000_watchdog - work function
 * @work: work struct contained inside adapter struct
 *
 * Periodic (2 s) housekeeping: detects link up/down transitions and logs
 * them, updates statistics and the adaptive IFS/ITR machinery, schedules a
 * controller reset if Tx work is queued while link is down, and finally
 * reschedules itself unless the interface is going down.
 **/
static void e1000_watchdog(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     watchdog_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	u32 link, tctl;

	link = e1000_has_link(adapter);
	/* carrier already on and link still up: skip transition handling */
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			/* update snapshot of PHY registers on LSC */
			e1000_get_speed_and_duplex(hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = er32(CTRL);
			pr_info("%s NIC Link is Up %d Mbps %s, "
				"Flow Control: %s\n",
				netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex",
				((ctrl & E1000_CTRL_TFCE) && (ctrl &
				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				E1000_CTRL_TFCE) ? "TX" : "None")));

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			/* enable transmits in the hardware */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			/* PHY diagnostics need a settling delay after link up */
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			/* link just dropped: clear cached speed/duplex */
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			pr_info("%s NIC Link is Down\n",
				netdev->name);
			netif_carrier_off(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
		}

		e1000_smartspeed(adapter);
	}

link_up:
	e1000_update_stats(adapter);

	/* per-interval deltas feed the adaptive IFS logic below */
	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(hw);

	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* exit immediately since reset is imminent */
			return;
		}
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
		u32 dif = (adapter->gotcl > adapter->gorcl ?
			    adapter->gotcl - adapter->gorcl :
			    adapter->gorcl - adapter->gotcl) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		/* ITR register counts in 256 ns units */
		ew32(ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reschedule the task */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
}
2552dee1ad47SJeff Kirsher 
/* Latency classes used by the dynamic ITR algorithm: each class maps to an
 * interrupt rate in e1000_set_itr() (lowest_latency = highest int rate,
 * bulk_latency = lowest int rate).
 */
enum latency_range {
	lowest_latency = 0,	/* small, latency-sensitive traffic */
	low_latency = 1,	/* moderate mixed traffic */
	bulk_latency = 2,	/* large/bulk transfers */
	latency_invalid = 255	/* sentinel: no valid measurement */
};
2559dee1ad47SJeff Kirsher 
2560dee1ad47SJeff Kirsher /**
2561dee1ad47SJeff Kirsher  * e1000_update_itr - update the dynamic ITR value based on statistics
2562dee1ad47SJeff Kirsher  * @adapter: pointer to adapter
2563dee1ad47SJeff Kirsher  * @itr_setting: current adapter->itr
2564dee1ad47SJeff Kirsher  * @packets: the number of packets during this measurement interval
2565dee1ad47SJeff Kirsher  * @bytes: the number of bytes during this measurement interval
2566dee1ad47SJeff Kirsher  *
2567dee1ad47SJeff Kirsher  *      Stores a new ITR value based on packets and byte
2568dee1ad47SJeff Kirsher  *      counts during the last interrupt.  The advantage of per interrupt
2569dee1ad47SJeff Kirsher  *      computation is faster updates and more accurate ITR for the current
2570dee1ad47SJeff Kirsher  *      traffic pattern.  Constants in this function were computed
2571dee1ad47SJeff Kirsher  *      based on theoretical maximum wire speed and thresholds were set based
2572dee1ad47SJeff Kirsher  *      on testing data as well as attempting to minimize response time
2573dee1ad47SJeff Kirsher  *      while increasing bulk throughput.
2574dee1ad47SJeff Kirsher  *      this functionality is controlled by the InterruptThrottleRate module
2575dee1ad47SJeff Kirsher  *      parameter (see e1000_param.c)
2576dee1ad47SJeff Kirsher  **/
e1000_update_itr(struct e1000_adapter * adapter,u16 itr_setting,int packets,int bytes)2577dee1ad47SJeff Kirsher static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2578dee1ad47SJeff Kirsher 				     u16 itr_setting, int packets, int bytes)
2579dee1ad47SJeff Kirsher {
2580dee1ad47SJeff Kirsher 	unsigned int retval = itr_setting;
2581dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
2582dee1ad47SJeff Kirsher 
2583dee1ad47SJeff Kirsher 	if (unlikely(hw->mac_type < e1000_82540))
2584dee1ad47SJeff Kirsher 		goto update_itr_done;
2585dee1ad47SJeff Kirsher 
2586dee1ad47SJeff Kirsher 	if (packets == 0)
2587dee1ad47SJeff Kirsher 		goto update_itr_done;
2588dee1ad47SJeff Kirsher 
2589dee1ad47SJeff Kirsher 	switch (itr_setting) {
2590dee1ad47SJeff Kirsher 	case lowest_latency:
2591dee1ad47SJeff Kirsher 		/* jumbo frames get bulk treatment*/
2592dee1ad47SJeff Kirsher 		if (bytes/packets > 8000)
2593dee1ad47SJeff Kirsher 			retval = bulk_latency;
2594dee1ad47SJeff Kirsher 		else if ((packets < 5) && (bytes > 512))
2595dee1ad47SJeff Kirsher 			retval = low_latency;
2596dee1ad47SJeff Kirsher 		break;
2597dee1ad47SJeff Kirsher 	case low_latency:  /* 50 usec aka 20000 ints/s */
2598dee1ad47SJeff Kirsher 		if (bytes > 10000) {
2599dee1ad47SJeff Kirsher 			/* jumbo frames need bulk latency setting */
2600dee1ad47SJeff Kirsher 			if (bytes/packets > 8000)
2601dee1ad47SJeff Kirsher 				retval = bulk_latency;
2602dee1ad47SJeff Kirsher 			else if ((packets < 10) || ((bytes/packets) > 1200))
2603dee1ad47SJeff Kirsher 				retval = bulk_latency;
2604dee1ad47SJeff Kirsher 			else if ((packets > 35))
2605dee1ad47SJeff Kirsher 				retval = lowest_latency;
2606dee1ad47SJeff Kirsher 		} else if (bytes/packets > 2000)
2607dee1ad47SJeff Kirsher 			retval = bulk_latency;
2608dee1ad47SJeff Kirsher 		else if (packets <= 2 && bytes < 512)
2609dee1ad47SJeff Kirsher 			retval = lowest_latency;
2610dee1ad47SJeff Kirsher 		break;
2611dee1ad47SJeff Kirsher 	case bulk_latency: /* 250 usec aka 4000 ints/s */
2612dee1ad47SJeff Kirsher 		if (bytes > 25000) {
2613dee1ad47SJeff Kirsher 			if (packets > 35)
2614dee1ad47SJeff Kirsher 				retval = low_latency;
2615dee1ad47SJeff Kirsher 		} else if (bytes < 6000) {
2616dee1ad47SJeff Kirsher 			retval = low_latency;
2617dee1ad47SJeff Kirsher 		}
2618dee1ad47SJeff Kirsher 		break;
2619dee1ad47SJeff Kirsher 	}
2620dee1ad47SJeff Kirsher 
2621dee1ad47SJeff Kirsher update_itr_done:
2622dee1ad47SJeff Kirsher 	return retval;
2623dee1ad47SJeff Kirsher }
2624dee1ad47SJeff Kirsher 
e1000_set_itr(struct e1000_adapter * adapter)2625dee1ad47SJeff Kirsher static void e1000_set_itr(struct e1000_adapter *adapter)
2626dee1ad47SJeff Kirsher {
2627dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
2628dee1ad47SJeff Kirsher 	u16 current_itr;
2629dee1ad47SJeff Kirsher 	u32 new_itr = adapter->itr;
2630dee1ad47SJeff Kirsher 
2631dee1ad47SJeff Kirsher 	if (unlikely(hw->mac_type < e1000_82540))
2632dee1ad47SJeff Kirsher 		return;
2633dee1ad47SJeff Kirsher 
2634dee1ad47SJeff Kirsher 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2635dee1ad47SJeff Kirsher 	if (unlikely(adapter->link_speed != SPEED_1000)) {
2636dee1ad47SJeff Kirsher 		new_itr = 4000;
2637dee1ad47SJeff Kirsher 		goto set_itr_now;
2638dee1ad47SJeff Kirsher 	}
2639dee1ad47SJeff Kirsher 
26406cfbd97bSJeff Kirsher 	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2641dee1ad47SJeff Kirsher 					   adapter->total_tx_packets,
2642dee1ad47SJeff Kirsher 					   adapter->total_tx_bytes);
2643dee1ad47SJeff Kirsher 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2644dee1ad47SJeff Kirsher 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2645dee1ad47SJeff Kirsher 		adapter->tx_itr = low_latency;
2646dee1ad47SJeff Kirsher 
26476cfbd97bSJeff Kirsher 	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2648dee1ad47SJeff Kirsher 					   adapter->total_rx_packets,
2649dee1ad47SJeff Kirsher 					   adapter->total_rx_bytes);
2650dee1ad47SJeff Kirsher 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2651dee1ad47SJeff Kirsher 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2652dee1ad47SJeff Kirsher 		adapter->rx_itr = low_latency;
2653dee1ad47SJeff Kirsher 
2654dee1ad47SJeff Kirsher 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2655dee1ad47SJeff Kirsher 
2656dee1ad47SJeff Kirsher 	switch (current_itr) {
2657dee1ad47SJeff Kirsher 	/* counts and packets in update_itr are dependent on these numbers */
2658dee1ad47SJeff Kirsher 	case lowest_latency:
2659dee1ad47SJeff Kirsher 		new_itr = 70000;
2660dee1ad47SJeff Kirsher 		break;
2661dee1ad47SJeff Kirsher 	case low_latency:
2662dee1ad47SJeff Kirsher 		new_itr = 20000; /* aka hwitr = ~200 */
2663dee1ad47SJeff Kirsher 		break;
2664dee1ad47SJeff Kirsher 	case bulk_latency:
2665dee1ad47SJeff Kirsher 		new_itr = 4000;
2666dee1ad47SJeff Kirsher 		break;
2667dee1ad47SJeff Kirsher 	default:
2668dee1ad47SJeff Kirsher 		break;
2669dee1ad47SJeff Kirsher 	}
2670dee1ad47SJeff Kirsher 
2671dee1ad47SJeff Kirsher set_itr_now:
2672dee1ad47SJeff Kirsher 	if (new_itr != adapter->itr) {
2673dee1ad47SJeff Kirsher 		/* this attempts to bias the interrupt rate towards Bulk
2674dee1ad47SJeff Kirsher 		 * by adding intermediate steps when interrupt rate is
26756cfbd97bSJeff Kirsher 		 * increasing
26766cfbd97bSJeff Kirsher 		 */
2677dee1ad47SJeff Kirsher 		new_itr = new_itr > adapter->itr ?
2678dee1ad47SJeff Kirsher 			  min(adapter->itr + (new_itr >> 2), new_itr) :
2679dee1ad47SJeff Kirsher 			  new_itr;
2680dee1ad47SJeff Kirsher 		adapter->itr = new_itr;
2681dee1ad47SJeff Kirsher 		ew32(ITR, 1000000000 / (new_itr * 256));
2682dee1ad47SJeff Kirsher 	}
2683dee1ad47SJeff Kirsher }
2684dee1ad47SJeff Kirsher 
2685dee1ad47SJeff Kirsher #define E1000_TX_FLAGS_CSUM		0x00000001
2686dee1ad47SJeff Kirsher #define E1000_TX_FLAGS_VLAN		0x00000002
2687dee1ad47SJeff Kirsher #define E1000_TX_FLAGS_TSO		0x00000004
2688dee1ad47SJeff Kirsher #define E1000_TX_FLAGS_IPV4		0x00000008
268911a78dcfSBen Greear #define E1000_TX_FLAGS_NO_FCS		0x00000010
2690dee1ad47SJeff Kirsher #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2691dee1ad47SJeff Kirsher #define E1000_TX_FLAGS_VLAN_SHIFT	16
2692dee1ad47SJeff Kirsher 
/**
 * e1000_tso - set up a TSO context descriptor for a GSO skb
 * @adapter: board private structure
 * @tx_ring: ring the descriptor is placed on
 * @skb: packet to segment
 * @protocol: network-layer protocol of the packet
 *
 * If @skb is GSO, writes one context descriptor describing the IP and TCP
 * checksum offsets and segmentation parameters, advances next_to_use, and
 * returns true.  Returns false for non-GSO skbs, or a negative errno if the
 * header could not be made writable.  (Return type is int so the errno can
 * be propagated alongside the bool-like true/false.)
 */
static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
		     __be16 protocol)
{
	struct e1000_context_desc *context_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;

	if (skb_is_gso(skb)) {
		int err;

		/* headers are modified below, so they must be writable */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		hdr_len = skb_tcp_all_headers(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			/* hardware fills in per-segment length/checksum, so
			 * zero them and seed the TCP pseudo-header checksum
			 */
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb_is_gso_v6(skb)) {
			tcp_v6_gso_csum_prep(skb);
			/* ipcse == 0: no IP checksum range for IPv6 */
			ipcse = 0;
		}
		/* checksum start/insert offsets for the context descriptor */
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		/* advance ring index with wrap-around */
		if (++i == tx_ring->count)
			i = 0;

		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
2762dee1ad47SJeff Kirsher 
/**
 * e1000_tx_csum - set up a checksum-offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring the descriptor is placed on
 * @skb: packet needing checksum offload
 * @protocol: network-layer protocol of the packet
 *
 * For CHECKSUM_PARTIAL skbs, writes one context descriptor giving the
 * hardware the checksum start and insertion offsets, advances next_to_use,
 * and returns true.  Returns false when no offload is requested.
 */
static bool e1000_tx_csum(struct e1000_adapter *adapter,
			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
			  __be16 protocol)
{
	struct e1000_context_desc *context_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	/* TCP over IPv4/IPv6 gets the TCP command bit; anything else still
	 * gets a generic checksum context, with a rate-limited warning
	 */
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
		break;
	}

	/* offset at which the hardware starts checksumming */
	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	/* insertion offset: start + csum_offset from the stack */
	context_desc->upper_setup.tcp_fields.tucso =
		css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	/* advance ring index with wrap-around */
	if (unlikely(++i == tx_ring->count))
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
2817dee1ad47SJeff Kirsher 
2818dee1ad47SJeff Kirsher #define E1000_MAX_TXD_PWR	12
2819dee1ad47SJeff Kirsher #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2820dee1ad47SJeff Kirsher 
e1000_tx_map(struct e1000_adapter * adapter,struct e1000_tx_ring * tx_ring,struct sk_buff * skb,unsigned int first,unsigned int max_per_txd,unsigned int nr_frags,unsigned int mss)2821dee1ad47SJeff Kirsher static int e1000_tx_map(struct e1000_adapter *adapter,
2822dee1ad47SJeff Kirsher 			struct e1000_tx_ring *tx_ring,
2823dee1ad47SJeff Kirsher 			struct sk_buff *skb, unsigned int first,
2824dee1ad47SJeff Kirsher 			unsigned int max_per_txd, unsigned int nr_frags,
2825dee1ad47SJeff Kirsher 			unsigned int mss)
2826dee1ad47SJeff Kirsher {
2827dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
2828dee1ad47SJeff Kirsher 	struct pci_dev *pdev = adapter->pdev;
2829580f321dSFlorian Westphal 	struct e1000_tx_buffer *buffer_info;
2830dee1ad47SJeff Kirsher 	unsigned int len = skb_headlen(skb);
2831dee1ad47SJeff Kirsher 	unsigned int offset = 0, size, count = 0, i;
283231c15a2fSDean Nelson 	unsigned int f, bytecount, segs;
2833dee1ad47SJeff Kirsher 
2834dee1ad47SJeff Kirsher 	i = tx_ring->next_to_use;
2835dee1ad47SJeff Kirsher 
2836dee1ad47SJeff Kirsher 	while (len) {
2837dee1ad47SJeff Kirsher 		buffer_info = &tx_ring->buffer_info[i];
2838dee1ad47SJeff Kirsher 		size = min(len, max_per_txd);
2839dee1ad47SJeff Kirsher 		/* Workaround for Controller erratum --
2840dee1ad47SJeff Kirsher 		 * descriptor for non-tso packet in a linear SKB that follows a
2841dee1ad47SJeff Kirsher 		 * tso gets written back prematurely before the data is fully
28426cfbd97bSJeff Kirsher 		 * DMA'd to the controller
28436cfbd97bSJeff Kirsher 		 */
2844dee1ad47SJeff Kirsher 		if (!skb->data_len && tx_ring->last_tx_tso &&
2845dee1ad47SJeff Kirsher 		    !skb_is_gso(skb)) {
28463db1cd5cSRusty Russell 			tx_ring->last_tx_tso = false;
2847dee1ad47SJeff Kirsher 			size -= 4;
2848dee1ad47SJeff Kirsher 		}
2849dee1ad47SJeff Kirsher 
2850dee1ad47SJeff Kirsher 		/* Workaround for premature desc write-backs
28516cfbd97bSJeff Kirsher 		 * in TSO mode.  Append 4-byte sentinel desc
28526cfbd97bSJeff Kirsher 		 */
2853dee1ad47SJeff Kirsher 		if (unlikely(mss && !nr_frags && size == len && size > 8))
2854dee1ad47SJeff Kirsher 			size -= 4;
2855dee1ad47SJeff Kirsher 		/* work-around for errata 10 and it applies
2856dee1ad47SJeff Kirsher 		 * to all controllers in PCI-X mode
2857dee1ad47SJeff Kirsher 		 * The fix is to make sure that the first descriptor of a
2858dee1ad47SJeff Kirsher 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2859dee1ad47SJeff Kirsher 		 */
2860dee1ad47SJeff Kirsher 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2861dee1ad47SJeff Kirsher 			     (size > 2015) && count == 0))
2862dee1ad47SJeff Kirsher 			size = 2015;
2863dee1ad47SJeff Kirsher 
2864dee1ad47SJeff Kirsher 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
28656cfbd97bSJeff Kirsher 		 * terminating buffers within evenly-aligned dwords.
28666cfbd97bSJeff Kirsher 		 */
2867dee1ad47SJeff Kirsher 		if (unlikely(adapter->pcix_82544 &&
2868dee1ad47SJeff Kirsher 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2869dee1ad47SJeff Kirsher 		   size > 4))
2870dee1ad47SJeff Kirsher 			size -= 4;
2871dee1ad47SJeff Kirsher 
2872dee1ad47SJeff Kirsher 		buffer_info->length = size;
2873dee1ad47SJeff Kirsher 		/* set time_stamp *before* dma to help avoid a possible race */
2874dee1ad47SJeff Kirsher 		buffer_info->time_stamp = jiffies;
2875dee1ad47SJeff Kirsher 		buffer_info->mapped_as_page = false;
2876dee1ad47SJeff Kirsher 		buffer_info->dma = dma_map_single(&pdev->dev,
2877dee1ad47SJeff Kirsher 						  skb->data + offset,
2878dee1ad47SJeff Kirsher 						  size, DMA_TO_DEVICE);
2879dee1ad47SJeff Kirsher 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2880dee1ad47SJeff Kirsher 			goto dma_error;
2881dee1ad47SJeff Kirsher 		buffer_info->next_to_watch = i;
2882dee1ad47SJeff Kirsher 
2883dee1ad47SJeff Kirsher 		len -= size;
2884dee1ad47SJeff Kirsher 		offset += size;
2885dee1ad47SJeff Kirsher 		count++;
2886dee1ad47SJeff Kirsher 		if (len) {
2887dee1ad47SJeff Kirsher 			i++;
2888dee1ad47SJeff Kirsher 			if (unlikely(i == tx_ring->count))
2889dee1ad47SJeff Kirsher 				i = 0;
2890dee1ad47SJeff Kirsher 		}
2891dee1ad47SJeff Kirsher 	}
2892dee1ad47SJeff Kirsher 
2893dee1ad47SJeff Kirsher 	for (f = 0; f < nr_frags; f++) {
2894d7840976SMatthew Wilcox (Oracle) 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2895dee1ad47SJeff Kirsher 
28969e903e08SEric Dumazet 		len = skb_frag_size(frag);
2897877749bfSIan Campbell 		offset = 0;
2898dee1ad47SJeff Kirsher 
2899dee1ad47SJeff Kirsher 		while (len) {
2900877749bfSIan Campbell 			unsigned long bufend;
2901dee1ad47SJeff Kirsher 			i++;
2902dee1ad47SJeff Kirsher 			if (unlikely(i == tx_ring->count))
2903dee1ad47SJeff Kirsher 				i = 0;
2904dee1ad47SJeff Kirsher 
2905dee1ad47SJeff Kirsher 			buffer_info = &tx_ring->buffer_info[i];
2906dee1ad47SJeff Kirsher 			size = min(len, max_per_txd);
2907dee1ad47SJeff Kirsher 			/* Workaround for premature desc write-backs
29086cfbd97bSJeff Kirsher 			 * in TSO mode.  Append 4-byte sentinel desc
29096cfbd97bSJeff Kirsher 			 */
29106cfbd97bSJeff Kirsher 			if (unlikely(mss && f == (nr_frags-1) &&
29116cfbd97bSJeff Kirsher 			    size == len && size > 8))
2912dee1ad47SJeff Kirsher 				size -= 4;
2913dee1ad47SJeff Kirsher 			/* Workaround for potential 82544 hang in PCI-X.
2914dee1ad47SJeff Kirsher 			 * Avoid terminating buffers within evenly-aligned
29156cfbd97bSJeff Kirsher 			 * dwords.
29166cfbd97bSJeff Kirsher 			 */
2917877749bfSIan Campbell 			bufend = (unsigned long)
2918877749bfSIan Campbell 				page_to_phys(skb_frag_page(frag));
2919877749bfSIan Campbell 			bufend += offset + size - 1;
2920dee1ad47SJeff Kirsher 			if (unlikely(adapter->pcix_82544 &&
2921877749bfSIan Campbell 				     !(bufend & 4) &&
2922dee1ad47SJeff Kirsher 				     size > 4))
2923dee1ad47SJeff Kirsher 				size -= 4;
2924dee1ad47SJeff Kirsher 
2925dee1ad47SJeff Kirsher 			buffer_info->length = size;
2926dee1ad47SJeff Kirsher 			buffer_info->time_stamp = jiffies;
2927dee1ad47SJeff Kirsher 			buffer_info->mapped_as_page = true;
2928877749bfSIan Campbell 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2929877749bfSIan Campbell 						offset, size, DMA_TO_DEVICE);
2930dee1ad47SJeff Kirsher 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2931dee1ad47SJeff Kirsher 				goto dma_error;
2932dee1ad47SJeff Kirsher 			buffer_info->next_to_watch = i;
2933dee1ad47SJeff Kirsher 
2934dee1ad47SJeff Kirsher 			len -= size;
2935dee1ad47SJeff Kirsher 			offset += size;
2936dee1ad47SJeff Kirsher 			count++;
2937dee1ad47SJeff Kirsher 		}
2938dee1ad47SJeff Kirsher 	}
2939dee1ad47SJeff Kirsher 
294031c15a2fSDean Nelson 	segs = skb_shinfo(skb)->gso_segs ?: 1;
294131c15a2fSDean Nelson 	/* multiply data chunks by size of headers */
294231c15a2fSDean Nelson 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
294331c15a2fSDean Nelson 
2944dee1ad47SJeff Kirsher 	tx_ring->buffer_info[i].skb = skb;
294531c15a2fSDean Nelson 	tx_ring->buffer_info[i].segs = segs;
294631c15a2fSDean Nelson 	tx_ring->buffer_info[i].bytecount = bytecount;
2947dee1ad47SJeff Kirsher 	tx_ring->buffer_info[first].next_to_watch = i;
2948dee1ad47SJeff Kirsher 
2949dee1ad47SJeff Kirsher 	return count;
2950dee1ad47SJeff Kirsher 
2951dee1ad47SJeff Kirsher dma_error:
2952dee1ad47SJeff Kirsher 	dev_err(&pdev->dev, "TX DMA map failed\n");
2953dee1ad47SJeff Kirsher 	buffer_info->dma = 0;
2954dee1ad47SJeff Kirsher 	if (count)
2955dee1ad47SJeff Kirsher 		count--;
2956dee1ad47SJeff Kirsher 
2957dee1ad47SJeff Kirsher 	while (count--) {
2958dee1ad47SJeff Kirsher 		if (i == 0)
2959dee1ad47SJeff Kirsher 			i += tx_ring->count;
2960dee1ad47SJeff Kirsher 		i--;
2961dee1ad47SJeff Kirsher 		buffer_info = &tx_ring->buffer_info[i];
2962dcb95f06SAlexander Lobakin 		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
2963dee1ad47SJeff Kirsher 	}
2964dee1ad47SJeff Kirsher 
2965dee1ad47SJeff Kirsher 	return 0;
2966dee1ad47SJeff Kirsher }
2967dee1ad47SJeff Kirsher 
e1000_tx_queue(struct e1000_adapter * adapter,struct e1000_tx_ring * tx_ring,int tx_flags,int count)2968dee1ad47SJeff Kirsher static void e1000_tx_queue(struct e1000_adapter *adapter,
2969dee1ad47SJeff Kirsher 			   struct e1000_tx_ring *tx_ring, int tx_flags,
2970dee1ad47SJeff Kirsher 			   int count)
2971dee1ad47SJeff Kirsher {
2972dee1ad47SJeff Kirsher 	struct e1000_tx_desc *tx_desc = NULL;
2973580f321dSFlorian Westphal 	struct e1000_tx_buffer *buffer_info;
2974dee1ad47SJeff Kirsher 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2975dee1ad47SJeff Kirsher 	unsigned int i;
2976dee1ad47SJeff Kirsher 
2977dee1ad47SJeff Kirsher 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2978dee1ad47SJeff Kirsher 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2979dee1ad47SJeff Kirsher 			     E1000_TXD_CMD_TSE;
2980dee1ad47SJeff Kirsher 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2981dee1ad47SJeff Kirsher 
2982dee1ad47SJeff Kirsher 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2983dee1ad47SJeff Kirsher 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2984dee1ad47SJeff Kirsher 	}
2985dee1ad47SJeff Kirsher 
2986dee1ad47SJeff Kirsher 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2987dee1ad47SJeff Kirsher 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2988dee1ad47SJeff Kirsher 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2989dee1ad47SJeff Kirsher 	}
2990dee1ad47SJeff Kirsher 
2991dee1ad47SJeff Kirsher 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2992dee1ad47SJeff Kirsher 		txd_lower |= E1000_TXD_CMD_VLE;
2993dee1ad47SJeff Kirsher 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2994dee1ad47SJeff Kirsher 	}
2995dee1ad47SJeff Kirsher 
299611a78dcfSBen Greear 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
299711a78dcfSBen Greear 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
299811a78dcfSBen Greear 
2999dee1ad47SJeff Kirsher 	i = tx_ring->next_to_use;
3000dee1ad47SJeff Kirsher 
3001dee1ad47SJeff Kirsher 	while (count--) {
3002dee1ad47SJeff Kirsher 		buffer_info = &tx_ring->buffer_info[i];
3003dee1ad47SJeff Kirsher 		tx_desc = E1000_TX_DESC(*tx_ring, i);
3004dee1ad47SJeff Kirsher 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3005dee1ad47SJeff Kirsher 		tx_desc->lower.data =
3006dee1ad47SJeff Kirsher 			cpu_to_le32(txd_lower | buffer_info->length);
3007dee1ad47SJeff Kirsher 		tx_desc->upper.data = cpu_to_le32(txd_upper);
3008a48954c8SJanusz Wolak 		if (unlikely(++i == tx_ring->count))
3009a48954c8SJanusz Wolak 			i = 0;
3010dee1ad47SJeff Kirsher 	}
3011dee1ad47SJeff Kirsher 
3012dee1ad47SJeff Kirsher 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3013dee1ad47SJeff Kirsher 
301411a78dcfSBen Greear 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
301511a78dcfSBen Greear 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
301611a78dcfSBen Greear 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
301711a78dcfSBen Greear 
3018dee1ad47SJeff Kirsher 	/* Force memory writes to complete before letting h/w
3019dee1ad47SJeff Kirsher 	 * know there are new descriptors to fetch.  (Only
3020dee1ad47SJeff Kirsher 	 * applicable for weak-ordered memory model archs,
30216cfbd97bSJeff Kirsher 	 * such as IA-64).
30226cfbd97bSJeff Kirsher 	 */
3023583cf7beSVenkatesh Srinivas 	dma_wmb();
3024dee1ad47SJeff Kirsher 
3025dee1ad47SJeff Kirsher 	tx_ring->next_to_use = i;
3026dee1ad47SJeff Kirsher }
3027dee1ad47SJeff Kirsher 
30281aa8b471SBen Hutchings /* 82547 workaround to avoid controller hang in half-duplex environment.
3029dee1ad47SJeff Kirsher  * The workaround is to avoid queuing a large packet that would span
3030dee1ad47SJeff Kirsher  * the internal Tx FIFO ring boundary by notifying the stack to resend
3031dee1ad47SJeff Kirsher  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3032dee1ad47SJeff Kirsher  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3033dee1ad47SJeff Kirsher  * to the beginning of the Tx FIFO.
30341aa8b471SBen Hutchings  */
3035dee1ad47SJeff Kirsher 
3036dee1ad47SJeff Kirsher #define E1000_FIFO_HDR			0x10
3037dee1ad47SJeff Kirsher #define E1000_82547_PAD_LEN		0x3E0
3038dee1ad47SJeff Kirsher 
e1000_82547_fifo_workaround(struct e1000_adapter * adapter,struct sk_buff * skb)3039dee1ad47SJeff Kirsher static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3040dee1ad47SJeff Kirsher 				       struct sk_buff *skb)
3041dee1ad47SJeff Kirsher {
3042dee1ad47SJeff Kirsher 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3043dee1ad47SJeff Kirsher 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3044dee1ad47SJeff Kirsher 
3045dee1ad47SJeff Kirsher 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3046dee1ad47SJeff Kirsher 
3047dee1ad47SJeff Kirsher 	if (adapter->link_duplex != HALF_DUPLEX)
3048dee1ad47SJeff Kirsher 		goto no_fifo_stall_required;
3049dee1ad47SJeff Kirsher 
3050dee1ad47SJeff Kirsher 	if (atomic_read(&adapter->tx_fifo_stall))
3051dee1ad47SJeff Kirsher 		return 1;
3052dee1ad47SJeff Kirsher 
3053dee1ad47SJeff Kirsher 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3054dee1ad47SJeff Kirsher 		atomic_set(&adapter->tx_fifo_stall, 1);
3055dee1ad47SJeff Kirsher 		return 1;
3056dee1ad47SJeff Kirsher 	}
3057dee1ad47SJeff Kirsher 
3058dee1ad47SJeff Kirsher no_fifo_stall_required:
3059dee1ad47SJeff Kirsher 	adapter->tx_fifo_head += skb_fifo_len;
3060dee1ad47SJeff Kirsher 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3061dee1ad47SJeff Kirsher 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3062dee1ad47SJeff Kirsher 	return 0;
3063dee1ad47SJeff Kirsher }
3064dee1ad47SJeff Kirsher 
__e1000_maybe_stop_tx(struct net_device * netdev,int size)3065dee1ad47SJeff Kirsher static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3066dee1ad47SJeff Kirsher {
3067dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = netdev_priv(netdev);
3068dee1ad47SJeff Kirsher 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3069dee1ad47SJeff Kirsher 
3070dee1ad47SJeff Kirsher 	netif_stop_queue(netdev);
3071dee1ad47SJeff Kirsher 	/* Herbert's original patch had:
3072dee1ad47SJeff Kirsher 	 *  smp_mb__after_netif_stop_queue();
30736cfbd97bSJeff Kirsher 	 * but since that doesn't exist yet, just open code it.
30746cfbd97bSJeff Kirsher 	 */
3075dee1ad47SJeff Kirsher 	smp_mb();
3076dee1ad47SJeff Kirsher 
3077dee1ad47SJeff Kirsher 	/* We need to check again in a case another CPU has just
30786cfbd97bSJeff Kirsher 	 * made room available.
30796cfbd97bSJeff Kirsher 	 */
3080dee1ad47SJeff Kirsher 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3081dee1ad47SJeff Kirsher 		return -EBUSY;
3082dee1ad47SJeff Kirsher 
3083dee1ad47SJeff Kirsher 	/* A reprieve! */
3084dee1ad47SJeff Kirsher 	netif_start_queue(netdev);
3085dee1ad47SJeff Kirsher 	++adapter->restart_queue;
3086dee1ad47SJeff Kirsher 	return 0;
3087dee1ad47SJeff Kirsher }
3088dee1ad47SJeff Kirsher 
static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	/* Fast path: enough free descriptors, nothing to do.  Otherwise
	 * fall through to the slow path that stops the queue.
	 */
	if (E1000_DESC_UNUSED(tx_ring) < size)
		return __e1000_maybe_stop_tx(netdev, size);
	return 0;
}
3096dee1ad47SJeff Kirsher 
3097847a1d67SAlexander Duyck #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
/**
 * e1000_xmit_frame - transmit entry point (ndo_start_xmit)
 * @skb: socket buffer to transmit
 * @netdev: network interface device structure
 *
 * Estimates the number of descriptors the skb will consume (including
 * several hardware-erratum workarounds), stops the queue if the ring is
 * too full, sets up TSO/checksum offload, maps the buffers and hands the
 * descriptors to the hardware.
 *
 * Returns NETDEV_TX_OK when the skb was consumed (queued or dropped), or
 * NETDEV_TX_BUSY when the ring is full / an 82547 FIFO stall is pending,
 * in which case the stack re-submits the skb later.
 **/
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;
	__be16 protocol = vlan_get_protocol(skb);

	/* This goes back to the question of how to logically map a Tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple Tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again.
	 */
	tx_ring = adapter->tx_ring;

	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
	 * packets may get corrupted during padding by HW.
	 * To WA this issue, pad all small packets manually.
	 */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_tcp_all_headers(skb);
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
			case e1000_82544: {
				unsigned int pull_size;

				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword
				 */
				if ((unsigned long)(skb_tail_pointer(skb) - 1)
				    & 4)
					break;
				/* pull 4 bytes of paged data into the linear
				 * head so the head does not end on an
				 * evenly-aligned dword
				 */
				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			}
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	/* one more descriptor of headroom -- NOTE(review): presumably
	 * covers the sentinel split done in e1000_tx_map; confirm
	 */
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			(len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	/* 82547 half-duplex FIFO hang avoidance: refuse the packet and
	 * let the delayed work restart the queue once the FIFO drains
	 */
	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb, protocol);
	if (tso < 0) {
		/* offload context setup failed -- drop the packet */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		/* The descriptors needed is higher than other Intel drivers
		 * due to a number of workarounds.  The breakdown is below:
		 * Data descriptors: MAX_SKB_FRAGS + 1
		 * Context Descriptor: 1
		 * Keep head from touching tail: 2
		 * Workarounds: 3
		 */
		int desc_needed = MAX_SKB_FRAGS + 7;

		netdev_sent_queue(netdev, skb->len);
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);

		/* 82544 potentially requires twice as many data descriptors
		 * in order to guarantee buffers don't end on evenly-aligned
		 * dwords
		 */
		if (adapter->pcix_82544)
			desc_needed += MAX_SKB_FRAGS + 1;

		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);

		/* only write the tail register when the stack is done
		 * batching (xmit_more) or the queue just stopped
		 */
		if (!netdev_xmit_more() ||
		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
		}
	} else {
		/* mapping failed: drop the skb and roll the ring back */
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
3284dee1ad47SJeff Kirsher 
3285b04e36baSTushar Dave #define NUM_REGS 38 /* 1 based count */
e1000_regdump(struct e1000_adapter * adapter)3286b04e36baSTushar Dave static void e1000_regdump(struct e1000_adapter *adapter)
3287b04e36baSTushar Dave {
3288b04e36baSTushar Dave 	struct e1000_hw *hw = &adapter->hw;
3289b04e36baSTushar Dave 	u32 regs[NUM_REGS];
3290b04e36baSTushar Dave 	u32 *regs_buff = regs;
3291b04e36baSTushar Dave 	int i = 0;
3292b04e36baSTushar Dave 
3293e29b5d8fSTushar Dave 	static const char * const reg_name[] = {
3294b04e36baSTushar Dave 		"CTRL",  "STATUS",
3295b04e36baSTushar Dave 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3296b04e36baSTushar Dave 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3297b04e36baSTushar Dave 		"TIDV", "TXDCTL", "TADV", "TARC0",
3298b04e36baSTushar Dave 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3299b04e36baSTushar Dave 		"TXDCTL1", "TARC1",
3300b04e36baSTushar Dave 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3301b04e36baSTushar Dave 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3302b04e36baSTushar Dave 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3303b04e36baSTushar Dave 	};
3304b04e36baSTushar Dave 
3305b04e36baSTushar Dave 	regs_buff[0]  = er32(CTRL);
3306b04e36baSTushar Dave 	regs_buff[1]  = er32(STATUS);
3307b04e36baSTushar Dave 
3308b04e36baSTushar Dave 	regs_buff[2]  = er32(RCTL);
3309b04e36baSTushar Dave 	regs_buff[3]  = er32(RDLEN);
3310b04e36baSTushar Dave 	regs_buff[4]  = er32(RDH);
3311b04e36baSTushar Dave 	regs_buff[5]  = er32(RDT);
3312b04e36baSTushar Dave 	regs_buff[6]  = er32(RDTR);
3313b04e36baSTushar Dave 
3314b04e36baSTushar Dave 	regs_buff[7]  = er32(TCTL);
3315b04e36baSTushar Dave 	regs_buff[8]  = er32(TDBAL);
3316b04e36baSTushar Dave 	regs_buff[9]  = er32(TDBAH);
3317b04e36baSTushar Dave 	regs_buff[10] = er32(TDLEN);
3318b04e36baSTushar Dave 	regs_buff[11] = er32(TDH);
3319b04e36baSTushar Dave 	regs_buff[12] = er32(TDT);
3320b04e36baSTushar Dave 	regs_buff[13] = er32(TIDV);
3321b04e36baSTushar Dave 	regs_buff[14] = er32(TXDCTL);
3322b04e36baSTushar Dave 	regs_buff[15] = er32(TADV);
3323b04e36baSTushar Dave 	regs_buff[16] = er32(TARC0);
3324b04e36baSTushar Dave 
3325b04e36baSTushar Dave 	regs_buff[17] = er32(TDBAL1);
3326b04e36baSTushar Dave 	regs_buff[18] = er32(TDBAH1);
3327b04e36baSTushar Dave 	regs_buff[19] = er32(TDLEN1);
3328b04e36baSTushar Dave 	regs_buff[20] = er32(TDH1);
3329b04e36baSTushar Dave 	regs_buff[21] = er32(TDT1);
3330b04e36baSTushar Dave 	regs_buff[22] = er32(TXDCTL1);
3331b04e36baSTushar Dave 	regs_buff[23] = er32(TARC1);
3332b04e36baSTushar Dave 	regs_buff[24] = er32(CTRL_EXT);
3333b04e36baSTushar Dave 	regs_buff[25] = er32(ERT);
3334b04e36baSTushar Dave 	regs_buff[26] = er32(RDBAL0);
3335b04e36baSTushar Dave 	regs_buff[27] = er32(RDBAH0);
3336b04e36baSTushar Dave 	regs_buff[28] = er32(TDFH);
3337b04e36baSTushar Dave 	regs_buff[29] = er32(TDFT);
3338b04e36baSTushar Dave 	regs_buff[30] = er32(TDFHS);
3339b04e36baSTushar Dave 	regs_buff[31] = er32(TDFTS);
3340b04e36baSTushar Dave 	regs_buff[32] = er32(TDFPC);
3341b04e36baSTushar Dave 	regs_buff[33] = er32(RDFH);
3342b04e36baSTushar Dave 	regs_buff[34] = er32(RDFT);
3343b04e36baSTushar Dave 	regs_buff[35] = er32(RDFHS);
3344b04e36baSTushar Dave 	regs_buff[36] = er32(RDFTS);
3345b04e36baSTushar Dave 	regs_buff[37] = er32(RDFPC);
3346b04e36baSTushar Dave 
3347b04e36baSTushar Dave 	pr_info("Register dump\n");
3348e29b5d8fSTushar Dave 	for (i = 0; i < NUM_REGS; i++)
3349e29b5d8fSTushar Dave 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3350b04e36baSTushar Dave }
3351b04e36baSTushar Dave 
3352b04e36baSTushar Dave /*
3353b04e36baSTushar Dave  * e1000_dump: Print registers, tx ring and rx ring
3354b04e36baSTushar Dave  */
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	/* dump is gated on the adapter's message-level debug bits */
	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);

	/* transmit dump */
	pr_info("TX Desc ring0 dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");

	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		/* mark the ring cursors: NTU = next_to_use,
		 * NTC = next_to_clean
		 */
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		/* leading 'd'/'c' distinguishes data vs context
		 * descriptors via the DTYP bit (bit 20 of qword b)
		 */
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	/* receive dump */
	pr_info("\nRX Desc ring dump\n");

	/* Legacy Receive Descriptor Format
	 *
	 * +-----------------------------------------------------+
	 * |                Buffer Address [63:0]                |
	 * +-----------------------------------------------------+
	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
	 * +-----------------------------------------------------+
	 * 63       48 47    40 39      32 31         16 15      0
	 */
	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");

	if (!netif_msg_rx_status(adapter))
		goto exit;

	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
	} /* for */

	/* dump the descriptor caches */
	/* NOTE(review): 0x6000/0x7000 appear to be on-chip descriptor
	 * cache windows read directly via MMIO -- confirm against the
	 * hardware spec before relying on the addresses
	 */
	/* rx */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
	/* tx */
	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
3489b04e36baSTushar Dave 
3490dee1ad47SJeff Kirsher /**
3491dee1ad47SJeff Kirsher  * e1000_tx_timeout - Respond to a Tx Hang
3492dee1ad47SJeff Kirsher  * @netdev: network interface device structure
3493b50f7bcaSJesse Brandeburg  * @txqueue: number of the Tx queue that hung (unused)
3494dee1ad47SJeff Kirsher  **/
e1000_tx_timeout(struct net_device * netdev,unsigned int __always_unused txqueue)3495b50f7bcaSJesse Brandeburg static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3496dee1ad47SJeff Kirsher {
3497dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = netdev_priv(netdev);
3498dee1ad47SJeff Kirsher 
3499dee1ad47SJeff Kirsher 	/* Do the reset outside of interrupt context */
3500dee1ad47SJeff Kirsher 	adapter->tx_timeout_count++;
3501dee1ad47SJeff Kirsher 	schedule_work(&adapter->reset_task);
3502dee1ad47SJeff Kirsher }
3503dee1ad47SJeff Kirsher 
e1000_reset_task(struct work_struct * work)3504dee1ad47SJeff Kirsher static void e1000_reset_task(struct work_struct *work)
3505dee1ad47SJeff Kirsher {
3506dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter =
3507dee1ad47SJeff Kirsher 		container_of(work, struct e1000_adapter, reset_task);
3508dee1ad47SJeff Kirsher 
3509b04e36baSTushar Dave 	e_err(drv, "Reset adapter\n");
3510b2f963bfSVladimir Davydov 	e1000_reinit_locked(adapter);
3511dee1ad47SJeff Kirsher }
3512dee1ad47SJeff Kirsher 
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* max_frame covers the MTU plus Ethernet header and FCS */
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		/* pre-82543 parts cannot do jumbo frames at all */
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* Serialize against any other reset/reconfigure in flight; spin
	 * (sleeping 1 ms per try) until we own the RESETTING bit.
	 */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev)) {
		/* prevent buffers from being reallocated */
		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
		e1000_down(adapter);
	}

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	/* Pick the Rx buffer length: 2 KB for standard frames, otherwise the
	 * largest size the architecture's page size allows (selected at
	 * compile time below).
	 */
	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	netdev_dbg(netdev, "changing MTU from %d to %d\n",
		   netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	/* Re-enable the interface with the new sizing, or just reset the
	 * hardware if it is currently down.
	 */
	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
3585dee1ad47SJeff Kirsher 
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 *
 * Reads the hardware statistics registers, accumulates them into
 * adapter->stats, and mirrors the relevant values into the generic
 * netdev->stats structure.  Runs under adapter->stats_lock because
 * e1000_tbi_adjust_stats() touches the same counters from IRQ context.
 **/
void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	/* NOTE(review): counters below are accumulated with "+=", which
	 * assumes the hardware registers clear on read — confirm against the
	 * controller datasheet if changing this routine.
	 */

	/* Rx counters */
	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	/* Rx packet-size histogram (64 .. 1522+ bytes) */
	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	/* error, flow-control and Tx counters */
	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	/* Tx packet-size histogram */
	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	/* these registers only exist on 82543 and newer parts */
	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	/* some parts report bogus carrier stats in full duplex; zero them */
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		/* idle-error count lives in the low byte of PHY_1000T_STATUS */
		if ((adapter->link_speed == SPEED_1000) &&
		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		   (hw->phy_type == e1000_phy_m88) &&
		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
3739dee1ad47SJeff Kirsher 
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Reads ICR (which also acknowledges the pending causes), kicks the
 * watchdog on link events, masks further interrupts and hands the rest
 * of the work to NAPI.  Interrupts are re-enabled from e1000_clean().
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* reading ICR clears the asserted interrupt causes */
	u32 icr = er32(ICR);

	if (unlikely((!icr)))
		return IRQ_NONE;  /* Not our interrupt */

	/* we might have caused the interrupt, but the above
	 * read cleared it, and just in case the driver is
	 * down there is nothing to do so return handled
	 */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;

	/* link status change / Rx sequence error: let the watchdog
	 * re-check the link as soon as possible
	 */
	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->watchdog_task, 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

	if (likely(napi_schedule_prep(&adapter->napi))) {
		/* reset the per-poll byte/packet totals used by the
		 * interrupt-throttling (ITR) calculation
		 */
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue
		 */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}
3789dee1ad47SJeff Kirsher 
3790dee1ad47SJeff Kirsher /**
3791dee1ad47SJeff Kirsher  * e1000_clean - NAPI Rx polling callback
3792b50f7bcaSJesse Brandeburg  * @napi: napi struct containing references to driver info
3793b50f7bcaSJesse Brandeburg  * @budget: budget given to driver for receive packets
3794dee1ad47SJeff Kirsher  **/
e1000_clean(struct napi_struct * napi,int budget)3795dee1ad47SJeff Kirsher static int e1000_clean(struct napi_struct *napi, int budget)
3796dee1ad47SJeff Kirsher {
37976cfbd97bSJeff Kirsher 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
37986cfbd97bSJeff Kirsher 						     napi);
3799dee1ad47SJeff Kirsher 	int tx_clean_complete = 0, work_done = 0;
3800dee1ad47SJeff Kirsher 
3801dee1ad47SJeff Kirsher 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3802dee1ad47SJeff Kirsher 
3803dee1ad47SJeff Kirsher 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3804dee1ad47SJeff Kirsher 
38050bcd952fSJesse Brandeburg 	if (!tx_clean_complete || work_done == budget)
38060bcd952fSJesse Brandeburg 		return budget;
3807dee1ad47SJeff Kirsher 
38080bcd952fSJesse Brandeburg 	/* Exit the polling mode, but don't re-enable interrupts if stack might
38090bcd952fSJesse Brandeburg 	 * poll us due to busy-polling
38100bcd952fSJesse Brandeburg 	 */
38110bcd952fSJesse Brandeburg 	if (likely(napi_complete_done(napi, work_done))) {
3812dee1ad47SJeff Kirsher 		if (likely(adapter->itr_setting & 3))
3813dee1ad47SJeff Kirsher 			e1000_set_itr(adapter);
3814dee1ad47SJeff Kirsher 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3815dee1ad47SJeff Kirsher 			e1000_irq_enable(adapter);
3816dee1ad47SJeff Kirsher 	}
3817dee1ad47SJeff Kirsher 
3818dee1ad47SJeff Kirsher 	return work_done;
3819dee1ad47SJeff Kirsher }
3820dee1ad47SJeff Kirsher 
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 *
 * Walks the ring from next_to_clean, freeing buffers for every
 * descriptor chain whose end-of-packet descriptor has the DD (done)
 * bit set.  Returns true when the whole ring was cleaned (i.e. no
 * more completed work remains), false if the count limit was hit.
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	/* eop indexes the last descriptor of the current packet */
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		dma_rmb();	/* read buffer_info after eop_desc */
		/* free every descriptor up to and including the EOP one */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				/* stats are stored on the EOP buffer only */
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}

			}
			/* 64: in-softirq recycling budget passed through to
			 * napi_consume_skb()
			 */
			e1000_unmap_and_free_tx_resource(adapter, buffer_info,
							 64);
			/* clear DD so this descriptor is not seen as done again */
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
	 * which will reuse the cleaned buffers.
	 */
	smp_store_release(&tx_ring->next_to_clean, i);

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	/* restart the queue if it was stopped and enough space was freed */
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		/* hung = pending work older than the timeout and the Tx unit
		 * is not paused by flow control (TXOFF)
		 */
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
				(unsigned long)(tx_ring - adapter->tx_ring),
				readl(hw->hw_addr + tx_ring->tdh),
				readl(hw->hw_addr + tx_ring->tdt),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	/* feed per-poll totals into ITR tuning and netdev stats */
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
3935dee1ad47SJeff Kirsher 
3936dee1ad47SJeff Kirsher /**
3937dee1ad47SJeff Kirsher  * e1000_rx_checksum - Receive Checksum Offload for 82543
3938dee1ad47SJeff Kirsher  * @adapter:     board private structure
3939dee1ad47SJeff Kirsher  * @status_err:  receive descriptor status and error fields
3940dee1ad47SJeff Kirsher  * @csum:        receive descriptor csum field
3941b50f7bcaSJesse Brandeburg  * @skb:         socket buffer with received data
3942dee1ad47SJeff Kirsher  **/
e1000_rx_checksum(struct e1000_adapter * adapter,u32 status_err,u32 csum,struct sk_buff * skb)3943dee1ad47SJeff Kirsher static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3944dee1ad47SJeff Kirsher 			      u32 csum, struct sk_buff *skb)
3945dee1ad47SJeff Kirsher {
3946dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
3947dee1ad47SJeff Kirsher 	u16 status = (u16)status_err;
3948dee1ad47SJeff Kirsher 	u8 errors = (u8)(status_err >> 24);
3949dee1ad47SJeff Kirsher 
3950dee1ad47SJeff Kirsher 	skb_checksum_none_assert(skb);
3951dee1ad47SJeff Kirsher 
3952dee1ad47SJeff Kirsher 	/* 82543 or newer only */
3953a48954c8SJanusz Wolak 	if (unlikely(hw->mac_type < e1000_82543))
3954a48954c8SJanusz Wolak 		return;
3955dee1ad47SJeff Kirsher 	/* Ignore Checksum bit is set */
3956a48954c8SJanusz Wolak 	if (unlikely(status & E1000_RXD_STAT_IXSM))
3957a48954c8SJanusz Wolak 		return;
3958dee1ad47SJeff Kirsher 	/* TCP/UDP checksum error bit is set */
3959dee1ad47SJeff Kirsher 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3960dee1ad47SJeff Kirsher 		/* let the stack verify checksum errors */
3961dee1ad47SJeff Kirsher 		adapter->hw_csum_err++;
3962dee1ad47SJeff Kirsher 		return;
3963dee1ad47SJeff Kirsher 	}
3964dee1ad47SJeff Kirsher 	/* TCP/UDP Checksum has not been calculated */
3965dee1ad47SJeff Kirsher 	if (!(status & E1000_RXD_STAT_TCPCS))
3966dee1ad47SJeff Kirsher 		return;
3967dee1ad47SJeff Kirsher 
3968dee1ad47SJeff Kirsher 	/* It must be a TCP or UDP packet with a valid checksum */
3969dee1ad47SJeff Kirsher 	if (likely(status & E1000_RXD_STAT_TCPCS)) {
3970dee1ad47SJeff Kirsher 		/* TCP checksum is good */
3971dee1ad47SJeff Kirsher 		skb->ip_summed = CHECKSUM_UNNECESSARY;
3972dee1ad47SJeff Kirsher 	}
3973dee1ad47SJeff Kirsher 	adapter->hw_csum_good++;
3974dee1ad47SJeff Kirsher }
3975dee1ad47SJeff Kirsher 
3976dee1ad47SJeff Kirsher /**
397713809609SFlorian Westphal  * e1000_consume_page - helper function for jumbo Rx path
3978b50f7bcaSJesse Brandeburg  * @bi: software descriptor shadow data
3979b50f7bcaSJesse Brandeburg  * @skb: skb being modified
3980b50f7bcaSJesse Brandeburg  * @length: length of data being added
3981dee1ad47SJeff Kirsher  **/
e1000_consume_page(struct e1000_rx_buffer * bi,struct sk_buff * skb,u16 length)398293f0afe9SFlorian Westphal static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3983dee1ad47SJeff Kirsher 			       u16 length)
3984dee1ad47SJeff Kirsher {
398513809609SFlorian Westphal 	bi->rxbuf.page = NULL;
3986dee1ad47SJeff Kirsher 	skb->len += length;
3987dee1ad47SJeff Kirsher 	skb->data_len += length;
3988ed64b3ccSEric Dumazet 	skb->truesize += PAGE_SIZE;
3989dee1ad47SJeff Kirsher }
3990dee1ad47SJeff Kirsher 
3991dee1ad47SJeff Kirsher /**
3992dee1ad47SJeff Kirsher  * e1000_receive_skb - helper function to handle rx indications
3993dee1ad47SJeff Kirsher  * @adapter: board private structure
3994dee1ad47SJeff Kirsher  * @status: descriptor status field as written by hardware
3995dee1ad47SJeff Kirsher  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3996dee1ad47SJeff Kirsher  * @skb: pointer to sk_buff to be indicated to stack
3997dee1ad47SJeff Kirsher  */
e1000_receive_skb(struct e1000_adapter * adapter,u8 status,__le16 vlan,struct sk_buff * skb)3998dee1ad47SJeff Kirsher static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3999dee1ad47SJeff Kirsher 			      __le16 vlan, struct sk_buff *skb)
4000dee1ad47SJeff Kirsher {
4001dee1ad47SJeff Kirsher 	skb->protocol = eth_type_trans(skb, adapter->netdev);
4002dee1ad47SJeff Kirsher 
4003dee1ad47SJeff Kirsher 	if (status & E1000_RXD_STAT_VP) {
4004dee1ad47SJeff Kirsher 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4005dee1ad47SJeff Kirsher 
400686a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4007dee1ad47SJeff Kirsher 	}
4008dee1ad47SJeff Kirsher 	napi_gro_receive(&adapter->napi, skb);
4009dee1ad47SJeff Kirsher }
4010dee1ad47SJeff Kirsher 
4011dee1ad47SJeff Kirsher /**
40124f0aeb1eSFlorian Westphal  * e1000_tbi_adjust_stats
40134f0aeb1eSFlorian Westphal  * @hw: Struct containing variables accessed by shared code
4014b50f7bcaSJesse Brandeburg  * @stats: point to stats struct
40154f0aeb1eSFlorian Westphal  * @frame_len: The length of the frame in question
40164f0aeb1eSFlorian Westphal  * @mac_addr: The Ethernet destination address of the frame in question
40174f0aeb1eSFlorian Westphal  *
40184f0aeb1eSFlorian Westphal  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
40194f0aeb1eSFlorian Westphal  */
e1000_tbi_adjust_stats(struct e1000_hw * hw,struct e1000_hw_stats * stats,u32 frame_len,const u8 * mac_addr)40204f0aeb1eSFlorian Westphal static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
40214f0aeb1eSFlorian Westphal 				   struct e1000_hw_stats *stats,
40224f0aeb1eSFlorian Westphal 				   u32 frame_len, const u8 *mac_addr)
40234f0aeb1eSFlorian Westphal {
40244f0aeb1eSFlorian Westphal 	u64 carry_bit;
40254f0aeb1eSFlorian Westphal 
40264f0aeb1eSFlorian Westphal 	/* First adjust the frame length. */
40274f0aeb1eSFlorian Westphal 	frame_len--;
40284f0aeb1eSFlorian Westphal 	/* We need to adjust the statistics counters, since the hardware
40294f0aeb1eSFlorian Westphal 	 * counters overcount this packet as a CRC error and undercount
40304f0aeb1eSFlorian Westphal 	 * the packet as a good packet
40314f0aeb1eSFlorian Westphal 	 */
40324f0aeb1eSFlorian Westphal 	/* This packet should not be counted as a CRC error. */
40334f0aeb1eSFlorian Westphal 	stats->crcerrs--;
40344f0aeb1eSFlorian Westphal 	/* This packet does count as a Good Packet Received. */
40354f0aeb1eSFlorian Westphal 	stats->gprc++;
40364f0aeb1eSFlorian Westphal 
40374f0aeb1eSFlorian Westphal 	/* Adjust the Good Octets received counters */
40384f0aeb1eSFlorian Westphal 	carry_bit = 0x80000000 & stats->gorcl;
40394f0aeb1eSFlorian Westphal 	stats->gorcl += frame_len;
40404f0aeb1eSFlorian Westphal 	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
40414f0aeb1eSFlorian Westphal 	 * Received Count) was one before the addition,
40424f0aeb1eSFlorian Westphal 	 * AND it is zero after, then we lost the carry out,
40434f0aeb1eSFlorian Westphal 	 * need to add one to Gorch (Good Octets Received Count High).
40444f0aeb1eSFlorian Westphal 	 * This could be simplified if all environments supported
40454f0aeb1eSFlorian Westphal 	 * 64-bit integers.
40464f0aeb1eSFlorian Westphal 	 */
40474f0aeb1eSFlorian Westphal 	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
40484f0aeb1eSFlorian Westphal 		stats->gorch++;
40494f0aeb1eSFlorian Westphal 	/* Is this a broadcast or multicast?  Check broadcast first,
40504f0aeb1eSFlorian Westphal 	 * since the test for a multicast frame will test positive on
40514f0aeb1eSFlorian Westphal 	 * a broadcast frame.
40524f0aeb1eSFlorian Westphal 	 */
40534f0aeb1eSFlorian Westphal 	if (is_broadcast_ether_addr(mac_addr))
40544f0aeb1eSFlorian Westphal 		stats->bprc++;
40554f0aeb1eSFlorian Westphal 	else if (is_multicast_ether_addr(mac_addr))
40564f0aeb1eSFlorian Westphal 		stats->mprc++;
40574f0aeb1eSFlorian Westphal 
40584f0aeb1eSFlorian Westphal 	if (frame_len == hw->max_frame_size) {
40594f0aeb1eSFlorian Westphal 		/* In this case, the hardware has overcounted the number of
40604f0aeb1eSFlorian Westphal 		 * oversize frames.
40614f0aeb1eSFlorian Westphal 		 */
40624f0aeb1eSFlorian Westphal 		if (stats->roc > 0)
40634f0aeb1eSFlorian Westphal 			stats->roc--;
40644f0aeb1eSFlorian Westphal 	}
40654f0aeb1eSFlorian Westphal 
40664f0aeb1eSFlorian Westphal 	/* Adjust the bin counters when the extra byte put the frame in the
40674f0aeb1eSFlorian Westphal 	 * wrong bin. Remember that the frame_len was adjusted above.
40684f0aeb1eSFlorian Westphal 	 */
40694f0aeb1eSFlorian Westphal 	if (frame_len == 64) {
40704f0aeb1eSFlorian Westphal 		stats->prc64++;
40714f0aeb1eSFlorian Westphal 		stats->prc127--;
40724f0aeb1eSFlorian Westphal 	} else if (frame_len == 127) {
40734f0aeb1eSFlorian Westphal 		stats->prc127++;
40744f0aeb1eSFlorian Westphal 		stats->prc255--;
40754f0aeb1eSFlorian Westphal 	} else if (frame_len == 255) {
40764f0aeb1eSFlorian Westphal 		stats->prc255++;
40774f0aeb1eSFlorian Westphal 		stats->prc511--;
40784f0aeb1eSFlorian Westphal 	} else if (frame_len == 511) {
40794f0aeb1eSFlorian Westphal 		stats->prc511++;
40804f0aeb1eSFlorian Westphal 		stats->prc1023--;
40814f0aeb1eSFlorian Westphal 	} else if (frame_len == 1023) {
40824f0aeb1eSFlorian Westphal 		stats->prc1023++;
40834f0aeb1eSFlorian Westphal 		stats->prc1522--;
40844f0aeb1eSFlorian Westphal 	} else if (frame_len == 1522) {
40854f0aeb1eSFlorian Westphal 		stats->prc1522++;
40864f0aeb1eSFlorian Westphal 	}
40874f0aeb1eSFlorian Westphal }
40884f0aeb1eSFlorian Westphal 
e1000_tbi_should_accept(struct e1000_adapter * adapter,u8 status,u8 errors,u32 length,const u8 * data)40892037110cSFlorian Westphal static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
40902037110cSFlorian Westphal 				    u8 status, u8 errors,
40912037110cSFlorian Westphal 				    u32 length, const u8 *data)
40922037110cSFlorian Westphal {
40932037110cSFlorian Westphal 	struct e1000_hw *hw = &adapter->hw;
40942037110cSFlorian Westphal 	u8 last_byte = *(data + length - 1);
40952037110cSFlorian Westphal 
40962037110cSFlorian Westphal 	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
40972037110cSFlorian Westphal 		unsigned long irq_flags;
40982037110cSFlorian Westphal 
40992037110cSFlorian Westphal 		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
41002037110cSFlorian Westphal 		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
41012037110cSFlorian Westphal 		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
41022037110cSFlorian Westphal 
41032037110cSFlorian Westphal 		return true;
41042037110cSFlorian Westphal 	}
41052037110cSFlorian Westphal 
41062037110cSFlorian Westphal 	return false;
41072037110cSFlorian Westphal }
41082037110cSFlorian Westphal 
e1000_alloc_rx_skb(struct e1000_adapter * adapter,unsigned int bufsz)41092b294b18SFlorian Westphal static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
41102b294b18SFlorian Westphal 					  unsigned int bufsz)
41112b294b18SFlorian Westphal {
411267fd893eSAlexander Duyck 	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
41132b294b18SFlorian Westphal 
41142b294b18SFlorian Westphal 	if (unlikely(!skb))
41152b294b18SFlorian Westphal 		adapter->alloc_rx_buff_failed++;
41162b294b18SFlorian Westphal 	return skb;
41172b294b18SFlorian Westphal }
41182b294b18SFlorian Westphal 
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* Walk descriptors the hardware has marked done (DD). A jumbo
	 * frame may span several page-sized buffers; partial buffers are
	 * accumulated in rx_ring->rx_skb_top until EOP is seen.
	 */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;

		/* advance to the next descriptor, wrapping at ring end */
		if (++i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		/* page is done being written by hardware; release the mapping */
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 *mapped = page_address(buffer_info->rxbuf.page);

			/* TBI workaround: some "errored" frames are in
			 * fact acceptable; length-- drops the stray byte.
			 */
			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, mapped)) {
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				/* user asked to receive bad frames too */
				goto process_skb;
			} else {
				/* an error means any chain goes out the window
				 * too
				 */
				dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

/* alias for the in-progress multi-descriptor chain head */
#define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = napi_get_frags(&adapter->napi);
				if (!rxtop)
					break;

				skb_fill_page_desc(rxtop, 0,
						   buffer_info->rxbuf.page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->rxbuf.page, 0, length);
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->rxbuf.page, 0, length);
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				struct page *p;
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				p = buffer_info->rxbuf.page;
				if (length <= copybreak) {
					/* small frame: copy into a fresh skb
					 * so the page can be reused directly
					 */
					if (likely(!(netdev->features & NETIF_F_RXFCS)))
						length -= 4;
					skb = e1000_alloc_rx_skb(adapter,
								 length);
					if (!skb)
						break;

					memcpy(skb_tail_pointer(skb),
					       page_address(p), length);

					/* re-use the page, so don't erase
					 * buffer_info->rxbuf.page
					 */
					skb_put(skb, length);
					e1000_rx_checksum(adapter,
							  status | rx_desc->errors << 24,
							  le16_to_cpu(rx_desc->csum), skb);

					total_rx_bytes += skb->len;
					total_rx_packets++;

					/* copybreak path delivers directly and
					 * skips the shared tail processing
					 */
					e1000_receive_skb(adapter, status,
							  rx_desc->special, skb);
					goto next_desc;
				} else {
					skb = napi_get_frags(&adapter->napi);
					if (!skb) {
						adapter->alloc_rx_buff_failed++;
						break;
					}
					skb_fill_page_desc(skb, 0, p, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		/* restore the VLAN tag the hardware stripped, if any */
		if (status & E1000_RXD_STAT_VP) {
			__le16 vlan = rx_desc->special;
			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		napi_gro_frags(&adapter->napi);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill any descriptors the loop consumed */
	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4315dee1ad47SJeff Kirsher 
43166cfbd97bSJeff Kirsher /* this should improve performance for small packets with large amounts
4317dee1ad47SJeff Kirsher  * of reassembly being done in the stack
4318dee1ad47SJeff Kirsher  */
e1000_copybreak(struct e1000_adapter * adapter,struct e1000_rx_buffer * buffer_info,u32 length,const void * data)43192b294b18SFlorian Westphal static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
432093f0afe9SFlorian Westphal 				       struct e1000_rx_buffer *buffer_info,
43212b294b18SFlorian Westphal 				       u32 length, const void *data)
4322dee1ad47SJeff Kirsher {
43232b294b18SFlorian Westphal 	struct sk_buff *skb;
4324dee1ad47SJeff Kirsher 
4325dee1ad47SJeff Kirsher 	if (length > copybreak)
43262b294b18SFlorian Westphal 		return NULL;
4327dee1ad47SJeff Kirsher 
43282b294b18SFlorian Westphal 	skb = e1000_alloc_rx_skb(adapter, length);
43292b294b18SFlorian Westphal 	if (!skb)
43302b294b18SFlorian Westphal 		return NULL;
4331dee1ad47SJeff Kirsher 
43322b294b18SFlorian Westphal 	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
43332b294b18SFlorian Westphal 				length, DMA_FROM_DEVICE);
43342b294b18SFlorian Westphal 
433559ae1d12SJohannes Berg 	skb_put_data(skb, data, length);
43362b294b18SFlorian Westphal 
43372b294b18SFlorian Westphal 	return skb;
4338dee1ad47SJeff Kirsher }
4339dee1ad47SJeff Kirsher 
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* process descriptors the hardware has marked done (DD) */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 *data;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		length = le16_to_cpu(rx_desc->length);

		data = buffer_info->rxbuf.data;
		prefetch(data);
		/* try the small-packet copy path first; on success the RX
		 * buffer stays mapped and owned by the ring
		 */
		skb = e1000_copybreak(adapter, buffer_info, length, data);
		if (!skb) {
			/* large frame: wrap the existing buffer in an skb and
			 * give up ownership of the fragment to the stack
			 */
			unsigned int frag_len = e1000_frag_len(adapter);

			skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				break;
			}

			skb_reserve(skb, E1000_HEADROOM);
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->rxbuf.data = NULL;
		}

		/* advance to the next descriptor, wrapping at ring end */
		if (++i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		/* !EOP means multiple descriptors were used to store a single
		 * packet, if thats the case we need to toss it.  In fact, we
		 * to toss every packet with the EOP bit clear and the next
		 * frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
			dev_kfree_skb(skb);
			/* EOP ends the fragment run; stop discarding */
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			/* TBI workaround: accept certain "errored" frames,
			 * dropping the stray extra byte
			 */
			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, data)) {
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				/* user asked to receive bad frames too */
				goto process_skb;
			} else {
				dev_kfree_skb(skb);
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		/* rxbuf.data is NULL on the napi_build_skb path (buffer was
		 * handed off); non-NULL means skb came from copybreak
		 */
		if (buffer_info->rxbuf.data == NULL)
			skb_put(skb, length);
		else /* copybreak skb */
			skb_trim(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill any descriptors the loop consumed */
	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4488dee1ad47SJeff Kirsher 
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* allocate a new page if necessary */
		if (!buffer_info->rxbuf.page) {
			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->rxbuf.page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		/* map the page for device DMA unless it is still mapped
		 * from a previous pass (dma != 0)
		 */
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->rxbuf.page, 0,
							adapter->rx_buffer_len,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				/* roll back the page allocation and stop;
				 * the remaining slots stay unfilled
				 */
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		/* hand the buffer's bus address to the hardware */
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* tail register points at the last valid descriptor,
		 * i.e. one before next_to_use (with wrap)
		 */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		dma_wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
4553dee1ad47SJeff Kirsher 
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to ring struct
 * @cleaned_count: number of new Rx buffers to try to allocate
 *
 * Refills up to @cleaned_count descriptors starting at next_to_use with
 * freshly allocated page-fragment buffers, DMA-mapped for device writes.
 * Any allocation/mapping failure stops the loop early and bumps
 * alloc_rx_buff_failed so a later pass can retry.  If any descriptors were
 * refilled, the hardware RDT tail register is advanced.
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		void *data;

		/* Slot still holds a buffer (and its mapping); just rewrite
		 * the descriptor from the cached dma address below.
		 */
		if (buffer_info->rxbuf.data)
			goto skip;

		data = e1000_alloc_frag(adapter);
		if (!data) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
			void *olddata = data;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, data);
			/* Try again, without freeing the previous */
			data = e1000_alloc_frag(adapter);
			/* Failed allocation, critical failure */
			if (!data) {
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
				/* give up */
				skb_free_frag(data);
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			/* Use new allocation */
			skb_free_frag(olddata);
		}
		buffer_info->dma = dma_map_single(&pdev->dev,
						  data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			skb_free_frag(data);
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					(void *)(unsigned long)buffer_info->dma,
					adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);

			skb_free_frag(data);
			buffer_info->rxbuf.data = NULL;
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break;
		}
		buffer_info->rxbuf.data = data;
 skip:
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* tail points at the last valid descriptor, one behind
		 * next_to_use; wrap if we are at slot 0
		 */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		dma_wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
4670dee1ad47SJeff Kirsher 
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: address of board private structure
 *
 * Called periodically while the link is down.  When repeated 1000BASE-T
 * master/slave configuration faults are seen, gigabit advertisement is
 * dropped and autonegotiation restarted so a link can come up at a lower
 * speed; after E1000_SMARTSPEED_MAX iterations the original advertisement
 * is restored and the cycle starts over.  Only applies to IGP PHYs with
 * autoneg advertising 1000/Full.
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			/* stop forcing master/slave and renegotiate */
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			   !e1000_read_phy_reg(hw, PHY_CTRL,
					       &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
4727dee1ad47SJeff Kirsher 
4728dee1ad47SJeff Kirsher /**
4729b50f7bcaSJesse Brandeburg  * e1000_ioctl - handle ioctl calls
4730b50f7bcaSJesse Brandeburg  * @netdev: pointer to our netdev
4731b50f7bcaSJesse Brandeburg  * @ifr: pointer to interface request structure
4732b50f7bcaSJesse Brandeburg  * @cmd: ioctl data
4733dee1ad47SJeff Kirsher  **/
e1000_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)4734dee1ad47SJeff Kirsher static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4735dee1ad47SJeff Kirsher {
4736dee1ad47SJeff Kirsher 	switch (cmd) {
4737dee1ad47SJeff Kirsher 	case SIOCGMIIPHY:
4738dee1ad47SJeff Kirsher 	case SIOCGMIIREG:
4739dee1ad47SJeff Kirsher 	case SIOCSMIIREG:
4740dee1ad47SJeff Kirsher 		return e1000_mii_ioctl(netdev, ifr, cmd);
4741dee1ad47SJeff Kirsher 	default:
4742dee1ad47SJeff Kirsher 		return -EOPNOTSUPP;
4743dee1ad47SJeff Kirsher 	}
4744dee1ad47SJeff Kirsher }
4745dee1ad47SJeff Kirsher 
/**
 * e1000_mii_ioctl - MII register access ioctls (copper media only)
 * @netdev: pointer to our netdev
 * @ifr: pointer to interface request structure
 * @cmd: ioctl data
 *
 * SIOCGMIIPHY returns the PHY address; SIOCGMIIREG/SIOCSMIIREG read/write
 * a PHY register under stats_lock.  A write to PHY_CTRL additionally
 * re-applies the implied speed/duplex/autoneg settings and resets the
 * device so the new link configuration takes effect.
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		/* stats_lock serializes MDIO accesses with the watchdog */
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				   &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		/* only the 32 standard MII registers may be written */
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					/* advertise all speeds/duplexes */
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
					/* decode forced speed from BMCR
					 * speed-select bits
					 */
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				/* apply the new link config via reset */
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				/* these take effect only after a PHY reset */
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			/* NOTE(review): unreachable — copper was required
			 * above; kept as-is.
			 */
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
4842dee1ad47SJeff Kirsher 
e1000_pci_set_mwi(struct e1000_hw * hw)4843dee1ad47SJeff Kirsher void e1000_pci_set_mwi(struct e1000_hw *hw)
4844dee1ad47SJeff Kirsher {
4845dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = hw->back;
4846dee1ad47SJeff Kirsher 	int ret_val = pci_set_mwi(adapter->pdev);
4847dee1ad47SJeff Kirsher 
4848dee1ad47SJeff Kirsher 	if (ret_val)
4849dee1ad47SJeff Kirsher 		e_err(probe, "Error in setting MWI\n");
4850dee1ad47SJeff Kirsher }
4851dee1ad47SJeff Kirsher 
e1000_pci_clear_mwi(struct e1000_hw * hw)4852dee1ad47SJeff Kirsher void e1000_pci_clear_mwi(struct e1000_hw *hw)
4853dee1ad47SJeff Kirsher {
4854dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = hw->back;
4855dee1ad47SJeff Kirsher 
4856dee1ad47SJeff Kirsher 	pci_clear_mwi(adapter->pdev);
4857dee1ad47SJeff Kirsher }
4858dee1ad47SJeff Kirsher 
e1000_pcix_get_mmrbc(struct e1000_hw * hw)4859dee1ad47SJeff Kirsher int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4860dee1ad47SJeff Kirsher {
4861dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = hw->back;
4862dee1ad47SJeff Kirsher 	return pcix_get_mmrbc(adapter->pdev);
4863dee1ad47SJeff Kirsher }
4864dee1ad47SJeff Kirsher 
e1000_pcix_set_mmrbc(struct e1000_hw * hw,int mmrbc)4865dee1ad47SJeff Kirsher void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4866dee1ad47SJeff Kirsher {
4867dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = hw->back;
4868dee1ad47SJeff Kirsher 	pcix_set_mmrbc(adapter->pdev, mmrbc);
4869dee1ad47SJeff Kirsher }
4870dee1ad47SJeff Kirsher 
/* 32-bit write to the adapter's I/O-port space (used by shared hw code). */
void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
4875dee1ad47SJeff Kirsher 
e1000_vlan_used(struct e1000_adapter * adapter)4876dee1ad47SJeff Kirsher static bool e1000_vlan_used(struct e1000_adapter *adapter)
4877dee1ad47SJeff Kirsher {
4878dee1ad47SJeff Kirsher 	u16 vid;
4879dee1ad47SJeff Kirsher 
4880dee1ad47SJeff Kirsher 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4881dee1ad47SJeff Kirsher 		return true;
4882dee1ad47SJeff Kirsher 	return false;
4883dee1ad47SJeff Kirsher }
4884dee1ad47SJeff Kirsher 
/* Program CTRL.VME so hardware VLAN tag insert/strip tracks the
 * NETIF_F_HW_VLAN_CTAG_RX feature flag.
 */
static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl = er32(CTRL);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		ctrl |= E1000_CTRL_VME;		/* enable VLAN tag insert/strip */
	else
		ctrl &= ~E1000_CTRL_VME;	/* disable VLAN tag insert/strip */

	ew32(CTRL, ctrl);
}
/* Turn hardware VLAN receive filtering on or off, with interrupts
 * quiesced while the registers are reprogrammed.  Filtering is only
 * enabled when the interface is not promiscuous; the management VLAN
 * is refreshed whenever filtering is (re)enabled.
 */
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);

	rctl = er32(RCTL);
	if (filter_on) {
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
	} else {
		rctl &= ~E1000_RCTL_VFE;
	}
	ew32(RCTL, rctl);

	if (filter_on)
		e1000_update_mng_vlan(adapter);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4929dee1ad47SJeff Kirsher 
/* ndo_set_features-style hook: apply the VLAN strip/insert feature
 * change with interrupts quiesced (unless the adapter is already down).
 */
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4943dee1ad47SJeff Kirsher 
/* Add @vid to the hardware VLAN filter table (VFTA) and to the driver's
 * active_vlans bitmap.  The first VLAN added also turns hardware VLAN
 * filtering on.  The management VLAN is handled by firmware and must not
 * be programmed into the VFTA.
 */
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    vid == adapter->mng_vlan_id)
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* VFTA is a 128 x 32-bit bitmap: word = vid[11:5], bit = vid[4:0] */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index) | (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
4969dee1ad47SJeff Kirsher 
/* Remove @vid from the hardware VLAN filter table and the active_vlans
 * bitmap; when the last VLAN is gone, hardware filtering is turned off.
 */
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* NOTE(review): this disable/enable pair is back-to-back with no
	 * critical section between them; it looks vestigial (presumably
	 * code once sat between the two) — confirm against git history
	 * before removing.
	 */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	/* last active VLAN gone -> disable hardware VLAN filtering */
	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
4995dee1ad47SJeff Kirsher 
/* Re-program the VLAN filter with every VLAN in active_vlans after a
 * reset has cleared the hardware state.  No-op when no VLANs are in use.
 */
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
5007dee1ad47SJeff Kirsher 
/* Translate an ethtool-style speed (@spd) and duplex (@dplx) pair into
 * the hw forced_speed_duplex / autoneg settings.  1000/Full is handled
 * via autonegotiation (the only legal way to run gigabit).  Returns 0,
 * or -EINVAL for unsupported combinations.
 */
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 gbps Full duplex */
	/* NOTE(review): the && rejects only when speed != 1000 AND
	 * duplex != full, so e.g. 100/Full on fiber passes this check —
	 * confirm whether || was intended (behavior preserved as-is).
	 */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	hw->mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
5057dee1ad47SJeff Kirsher 
/* Common suspend/shutdown path: stop the interface, program Wake-on-LAN
 * filters (WUFC/WUC) if wake is configured, release manageability, and
 * disable the PCI device.  *@enable_wake reports whether the caller
 * should arm PME (forced true when management pass-through is active,
 * so the BMC keeps working).
 */
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		int count = E1000_CHECK_RESET_COUNT;

		/* wait (bounded) for an in-flight reset to finish */
		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

	status = er32(STATUS);
	/* link is up now, so waking on link-change makes no sense */
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	/* __E1000_DISABLED guards against double pci_disable_device() */
	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
		pci_disable_device(pdev);

	return 0;
}
5137dee1ad47SJeff Kirsher 
e1000_suspend(struct device * dev)5138eb6779d4SVaibhav Gupta static int __maybe_unused e1000_suspend(struct device *dev)
5139dee1ad47SJeff Kirsher {
5140dee1ad47SJeff Kirsher 	int retval;
5141eb6779d4SVaibhav Gupta 	struct pci_dev *pdev = to_pci_dev(dev);
5142dee1ad47SJeff Kirsher 	bool wake;
5143dee1ad47SJeff Kirsher 
5144dee1ad47SJeff Kirsher 	retval = __e1000_shutdown(pdev, &wake);
5145eb6779d4SVaibhav Gupta 	device_set_wakeup_enable(dev, wake);
5146eb6779d4SVaibhav Gupta 
5147dee1ad47SJeff Kirsher 	return retval;
5148dee1ad47SJeff Kirsher }
5149dee1ad47SJeff Kirsher 
/* PM resume callback: re-enable the PCI device, clear wake state,
 * reset and restart the adapter, and reattach the netdev.
 */
static int __maybe_unused e1000_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	/* some parts need I/O-port access in addition to memory BARs */
	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__E1000_DISABLED, &adapter->flags);
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	/* clear any latched wake-up status bits */
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
5194dee1ad47SJeff Kirsher 
e1000_shutdown(struct pci_dev * pdev)5195dee1ad47SJeff Kirsher static void e1000_shutdown(struct pci_dev *pdev)
5196dee1ad47SJeff Kirsher {
5197dee1ad47SJeff Kirsher 	bool wake;
5198dee1ad47SJeff Kirsher 
5199dee1ad47SJeff Kirsher 	__e1000_shutdown(pdev, &wake);
5200dee1ad47SJeff Kirsher 
5201dee1ad47SJeff Kirsher 	if (system_state == SYSTEM_POWER_OFF) {
5202dee1ad47SJeff Kirsher 		pci_wake_from_d3(pdev, wake);
5203dee1ad47SJeff Kirsher 		pci_set_power_state(pdev, PCI_D3hot);
5204dee1ad47SJeff Kirsher 	}
5205dee1ad47SJeff Kirsher }
5206dee1ad47SJeff Kirsher 
5207dee1ad47SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
52086cfbd97bSJeff Kirsher /* Polling 'interrupt' - used by things like netconsole to send skbs
5209dee1ad47SJeff Kirsher  * without having to re-enable interrupts. It's not called while
5210dee1ad47SJeff Kirsher  * the interrupt routine is executing.
5211dee1ad47SJeff Kirsher  */
e1000_netpoll(struct net_device * netdev)5212dee1ad47SJeff Kirsher static void e1000_netpoll(struct net_device *netdev)
5213dee1ad47SJeff Kirsher {
5214dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = netdev_priv(netdev);
5215dee1ad47SJeff Kirsher 
521631119129SWANG Cong 	if (disable_hardirq(adapter->pdev->irq))
5217dee1ad47SJeff Kirsher 		e1000_intr(adapter->pdev->irq, netdev);
5218dee1ad47SJeff Kirsher 	enable_irq(adapter->pdev->irq);
5219dee1ad47SJeff Kirsher }
5220dee1ad47SJeff Kirsher #endif
5221dee1ad47SJeff Kirsher 
5222dee1ad47SJeff Kirsher /**
5223dee1ad47SJeff Kirsher  * e1000_io_error_detected - called when PCI error is detected
5224dee1ad47SJeff Kirsher  * @pdev: Pointer to PCI device
5225dee1ad47SJeff Kirsher  * @state: The current pci connection state
5226dee1ad47SJeff Kirsher  *
5227dee1ad47SJeff Kirsher  * This function is called after a PCI bus error affecting
5228dee1ad47SJeff Kirsher  * this device has been detected.
5229dee1ad47SJeff Kirsher  */
e1000_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)5230dee1ad47SJeff Kirsher static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5231dee1ad47SJeff Kirsher 						pci_channel_state_t state)
5232dee1ad47SJeff Kirsher {
5233dee1ad47SJeff Kirsher 	struct net_device *netdev = pci_get_drvdata(pdev);
5234dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = netdev_priv(netdev);
5235dee1ad47SJeff Kirsher 
5236dee1ad47SJeff Kirsher 	netif_device_detach(netdev);
5237dee1ad47SJeff Kirsher 
5238dee1ad47SJeff Kirsher 	if (state == pci_channel_io_perm_failure)
5239dee1ad47SJeff Kirsher 		return PCI_ERS_RESULT_DISCONNECT;
5240dee1ad47SJeff Kirsher 
5241dee1ad47SJeff Kirsher 	if (netif_running(netdev))
5242dee1ad47SJeff Kirsher 		e1000_down(adapter);
52430b76aae7STushar Dave 
52440b76aae7STushar Dave 	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5245dee1ad47SJeff Kirsher 		pci_disable_device(pdev);
5246dee1ad47SJeff Kirsher 
52474b63b27fSHao Chen 	/* Request a slot reset. */
5248dee1ad47SJeff Kirsher 	return PCI_ERS_RESULT_NEED_RESET;
5249dee1ad47SJeff Kirsher }
5250dee1ad47SJeff Kirsher 
5251dee1ad47SJeff Kirsher /**
5252dee1ad47SJeff Kirsher  * e1000_io_slot_reset - called after the pci bus has been reset.
5253dee1ad47SJeff Kirsher  * @pdev: Pointer to PCI device
5254dee1ad47SJeff Kirsher  *
5255dee1ad47SJeff Kirsher  * Restart the card from scratch, as if from a cold-boot. Implementation
5256dee1ad47SJeff Kirsher  * resembles the first-half of the e1000_resume routine.
5257dee1ad47SJeff Kirsher  */
e1000_io_slot_reset(struct pci_dev * pdev)5258dee1ad47SJeff Kirsher static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5259dee1ad47SJeff Kirsher {
5260dee1ad47SJeff Kirsher 	struct net_device *netdev = pci_get_drvdata(pdev);
5261dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = netdev_priv(netdev);
5262dee1ad47SJeff Kirsher 	struct e1000_hw *hw = &adapter->hw;
5263dee1ad47SJeff Kirsher 	int err;
5264dee1ad47SJeff Kirsher 
5265dee1ad47SJeff Kirsher 	if (adapter->need_ioport)
5266dee1ad47SJeff Kirsher 		err = pci_enable_device(pdev);
5267dee1ad47SJeff Kirsher 	else
5268dee1ad47SJeff Kirsher 		err = pci_enable_device_mem(pdev);
5269dee1ad47SJeff Kirsher 	if (err) {
5270dee1ad47SJeff Kirsher 		pr_err("Cannot re-enable PCI device after reset.\n");
5271dee1ad47SJeff Kirsher 		return PCI_ERS_RESULT_DISCONNECT;
5272dee1ad47SJeff Kirsher 	}
52730b76aae7STushar Dave 
52740b76aae7STushar Dave 	/* flush memory to make sure state is correct */
52750b76aae7STushar Dave 	smp_mb__before_atomic();
52760b76aae7STushar Dave 	clear_bit(__E1000_DISABLED, &adapter->flags);
5277dee1ad47SJeff Kirsher 	pci_set_master(pdev);
5278dee1ad47SJeff Kirsher 
5279dee1ad47SJeff Kirsher 	pci_enable_wake(pdev, PCI_D3hot, 0);
5280dee1ad47SJeff Kirsher 	pci_enable_wake(pdev, PCI_D3cold, 0);
5281dee1ad47SJeff Kirsher 
5282dee1ad47SJeff Kirsher 	e1000_reset(adapter);
5283dee1ad47SJeff Kirsher 	ew32(WUS, ~0);
5284dee1ad47SJeff Kirsher 
5285dee1ad47SJeff Kirsher 	return PCI_ERS_RESULT_RECOVERED;
5286dee1ad47SJeff Kirsher }
5287dee1ad47SJeff Kirsher 
5288dee1ad47SJeff Kirsher /**
5289dee1ad47SJeff Kirsher  * e1000_io_resume - called when traffic can start flowing again.
5290dee1ad47SJeff Kirsher  * @pdev: Pointer to PCI device
5291dee1ad47SJeff Kirsher  *
5292dee1ad47SJeff Kirsher  * This callback is called when the error recovery driver tells us that
5293dee1ad47SJeff Kirsher  * its OK to resume normal operation. Implementation resembles the
5294dee1ad47SJeff Kirsher  * second-half of the e1000_resume routine.
5295dee1ad47SJeff Kirsher  */
e1000_io_resume(struct pci_dev * pdev)5296dee1ad47SJeff Kirsher static void e1000_io_resume(struct pci_dev *pdev)
5297dee1ad47SJeff Kirsher {
5298dee1ad47SJeff Kirsher 	struct net_device *netdev = pci_get_drvdata(pdev);
5299dee1ad47SJeff Kirsher 	struct e1000_adapter *adapter = netdev_priv(netdev);
5300dee1ad47SJeff Kirsher 
5301dee1ad47SJeff Kirsher 	e1000_init_manageability(adapter);
5302dee1ad47SJeff Kirsher 
5303dee1ad47SJeff Kirsher 	if (netif_running(netdev)) {
5304dee1ad47SJeff Kirsher 		if (e1000_up(adapter)) {
5305dee1ad47SJeff Kirsher 			pr_info("can't bring device back up after reset\n");
5306dee1ad47SJeff Kirsher 			return;
5307dee1ad47SJeff Kirsher 		}
5308dee1ad47SJeff Kirsher 	}
5309dee1ad47SJeff Kirsher 
5310dee1ad47SJeff Kirsher 	netif_device_attach(netdev);
5311dee1ad47SJeff Kirsher }
5312dee1ad47SJeff Kirsher 
5313dee1ad47SJeff Kirsher /* e1000_main.c */
5314