1 /*******************************************************************************
2 
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 
27 *******************************************************************************/
28 
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35 
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41 
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
50 	INTEL_E1000_ETHERNET_DEVICE(0x1000),
51 	INTEL_E1000_ETHERNET_DEVICE(0x1001),
52 	INTEL_E1000_ETHERNET_DEVICE(0x1004),
53 	INTEL_E1000_ETHERNET_DEVICE(0x1008),
54 	INTEL_E1000_ETHERNET_DEVICE(0x1009),
55 	INTEL_E1000_ETHERNET_DEVICE(0x100C),
56 	INTEL_E1000_ETHERNET_DEVICE(0x100D),
57 	INTEL_E1000_ETHERNET_DEVICE(0x100E),
58 	INTEL_E1000_ETHERNET_DEVICE(0x100F),
59 	INTEL_E1000_ETHERNET_DEVICE(0x1010),
60 	INTEL_E1000_ETHERNET_DEVICE(0x1011),
61 	INTEL_E1000_ETHERNET_DEVICE(0x1012),
62 	INTEL_E1000_ETHERNET_DEVICE(0x1013),
63 	INTEL_E1000_ETHERNET_DEVICE(0x1014),
64 	INTEL_E1000_ETHERNET_DEVICE(0x1015),
65 	INTEL_E1000_ETHERNET_DEVICE(0x1016),
66 	INTEL_E1000_ETHERNET_DEVICE(0x1017),
67 	INTEL_E1000_ETHERNET_DEVICE(0x1018),
68 	INTEL_E1000_ETHERNET_DEVICE(0x1019),
69 	INTEL_E1000_ETHERNET_DEVICE(0x101A),
70 	INTEL_E1000_ETHERNET_DEVICE(0x101D),
71 	INTEL_E1000_ETHERNET_DEVICE(0x101E),
72 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
73 	INTEL_E1000_ETHERNET_DEVICE(0x1027),
74 	INTEL_E1000_ETHERNET_DEVICE(0x1028),
75 	INTEL_E1000_ETHERNET_DEVICE(0x1075),
76 	INTEL_E1000_ETHERNET_DEVICE(0x1076),
77 	INTEL_E1000_ETHERNET_DEVICE(0x1077),
78 	INTEL_E1000_ETHERNET_DEVICE(0x1078),
79 	INTEL_E1000_ETHERNET_DEVICE(0x1079),
80 	INTEL_E1000_ETHERNET_DEVICE(0x107A),
81 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
82 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
83 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
84 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
85 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86 	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87 	/* required last entry */
88 	{0,}
89 };
90 
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
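/* MODULE_DEVICE_TABLE() above exports e1000_pci_tbl in the module alias
 * section so udev/modprobe can autoload the driver when a matching PCI
 * device appears; `modinfo e1000` lists aliases of the form
 * pci:v00008086d0000100Esv*sd*bc*sc*i* (0x8086 being the Intel vendor ID
 * and 0x100E one of the device IDs in the table).
 */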
92 
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102                              struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104                              struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106                              struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108                              struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110 
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void __devexit e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 static int e1000_open(struct net_device *netdev);
118 static int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125                                 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127                                 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133 				    struct net_device *netdev);
134 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139 			       struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142 			       struct e1000_rx_ring *rx_ring,
143 			       int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145 				     struct e1000_rx_ring *rx_ring,
146 				     int *work_done, int work_to_do);
147 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
148 				   struct e1000_rx_ring *rx_ring,
149 				   int cleaned_count);
150 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151 					 struct e1000_rx_ring *rx_ring,
152 					 int cleaned_count);
153 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155 			   int cmd);
156 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158 static void e1000_tx_timeout(struct net_device *dev);
159 static void e1000_reset_task(struct work_struct *work);
160 static void e1000_smartspeed(struct e1000_adapter *adapter);
161 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162                                        struct sk_buff *skb);
163 
164 static bool e1000_vlan_used(struct e1000_adapter *adapter);
165 static void e1000_vlan_mode(struct net_device *netdev,
166 			    netdev_features_t features);
167 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168 				     bool filter_on);
169 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
170 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
171 static void e1000_restore_vlan(struct e1000_adapter *adapter);
172 
173 #ifdef CONFIG_PM
174 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
175 static int e1000_resume(struct pci_dev *pdev);
176 #endif
177 static void e1000_shutdown(struct pci_dev *pdev);
178 
179 #ifdef CONFIG_NET_POLL_CONTROLLER
180 /* for netdump / net console */
181 static void e1000_netpoll (struct net_device *netdev);
182 #endif
183 
184 #define COPYBREAK_DEFAULT 256
185 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
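/* copybreak can be set at load time, e.g. `modprobe e1000 copybreak=128`,
 * and, given the 0644 permissions below, is also writable at runtime via
 * /sys/module/e1000/parameters/copybreak; 0 disables the copy entirely.
 */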
186 module_param(copybreak, uint, 0644);
187 MODULE_PARM_DESC(copybreak,
188 	"Maximum size of packet that is copied to a new buffer on receive");
189 
190 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
191                      pci_channel_state_t state);
192 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
193 static void e1000_io_resume(struct pci_dev *pdev);
194 
195 static struct pci_error_handlers e1000_err_handler = {
196 	.error_detected = e1000_io_error_detected,
197 	.slot_reset = e1000_io_slot_reset,
198 	.resume = e1000_io_resume,
199 };
200 
201 static struct pci_driver e1000_driver = {
202 	.name     = e1000_driver_name,
203 	.id_table = e1000_pci_tbl,
204 	.probe    = e1000_probe,
205 	.remove   = __devexit_p(e1000_remove),
206 #ifdef CONFIG_PM
207 	/* Power Management Hooks */
208 	.suspend  = e1000_suspend,
209 	.resume   = e1000_resume,
210 #endif
211 	.shutdown = e1000_shutdown,
212 	.err_handler = &e1000_err_handler
213 };
214 
215 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
216 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_VERSION);
219 
220 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
221 static int debug = -1;
222 module_param(debug, int, 0);
223 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
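/* debug=-1 (the default) keeps DEFAULT_MSG_ENABLE; values 0..31 are passed
 * to netif_msg_init() and set the low N message-type bits, e.g.
 * `modprobe e1000 debug=16` enables the first 16 NETIF_MSG_* categories.
 */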
224 
225 /**
226  * e1000_get_hw_dev - return device
227  * used by hardware layer to print debugging information
228  *
229  **/
230 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
231 {
232 	struct e1000_adapter *adapter = hw->back;
233 	return adapter->netdev;
234 }
235 
236 /**
237  * e1000_init_module - Driver Registration Routine
238  *
239  * e1000_init_module is the first routine called when the driver is
240  * loaded. All it does is register with the PCI subsystem.
241  **/
242 
243 static int __init e1000_init_module(void)
244 {
245 	int ret;
246 	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
247 
248 	pr_info("%s\n", e1000_copyright);
249 
250 	ret = pci_register_driver(&e1000_driver);
251 	if (copybreak != COPYBREAK_DEFAULT) {
252 		if (copybreak == 0)
253 			pr_info("copybreak disabled\n");
254 		else
255 			pr_info("copybreak enabled for "
256 				   "packets <= %u bytes\n", copybreak);
257 	}
258 	return ret;
259 }
260 
261 module_init(e1000_init_module);
262 
263 /**
264  * e1000_exit_module - Driver Exit Cleanup Routine
265  *
266  * e1000_exit_module is called just before the driver is removed
267  * from memory.
268  **/
269 
270 static void __exit e1000_exit_module(void)
271 {
272 	pci_unregister_driver(&e1000_driver);
273 }
274 
275 module_exit(e1000_exit_module);
276 
277 static int e1000_request_irq(struct e1000_adapter *adapter)
278 {
279 	struct net_device *netdev = adapter->netdev;
280 	irq_handler_t handler = e1000_intr;
281 	int irq_flags = IRQF_SHARED;
282 	int err;
283 
284 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285 	                  netdev);
286 	if (err) {
		e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
288 	}
289 
290 	return err;
291 }
292 
293 static void e1000_free_irq(struct e1000_adapter *adapter)
294 {
295 	struct net_device *netdev = adapter->netdev;
296 
297 	free_irq(adapter->pdev->irq, netdev);
298 }
299 
300 /**
301  * e1000_irq_disable - Mask off interrupt generation on the NIC
302  * @adapter: board private structure
303  **/
304 
305 static void e1000_irq_disable(struct e1000_adapter *adapter)
306 {
307 	struct e1000_hw *hw = &adapter->hw;
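	/* Writing all 1s to IMC masks every interrupt cause; the register
	 * flush pushes the posted write out before synchronize_irq() waits
	 * for any handler already running on another CPU to finish.
	 */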
308 
309 	ew32(IMC, ~0);
310 	E1000_WRITE_FLUSH();
311 	synchronize_irq(adapter->pdev->irq);
312 }
313 
314 /**
315  * e1000_irq_enable - Enable default interrupt generation settings
316  * @adapter: board private structure
317  **/
318 
319 static void e1000_irq_enable(struct e1000_adapter *adapter)
320 {
321 	struct e1000_hw *hw = &adapter->hw;
322 
323 	ew32(IMS, IMS_ENABLE_MASK);
324 	E1000_WRITE_FLUSH();
325 }
326 
327 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
328 {
329 	struct e1000_hw *hw = &adapter->hw;
330 	struct net_device *netdev = adapter->netdev;
331 	u16 vid = hw->mng_cookie.vlan_id;
332 	u16 old_vid = adapter->mng_vlan_id;
333 
334 	if (!e1000_vlan_used(adapter))
335 		return;
336 
337 	if (!test_bit(vid, adapter->active_vlans)) {
338 		if (hw->mng_cookie.status &
339 		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
340 			e1000_vlan_rx_add_vid(netdev, vid);
341 			adapter->mng_vlan_id = vid;
342 		} else {
343 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
344 		}
345 		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
346 		    (vid != old_vid) &&
347 		    !test_bit(old_vid, adapter->active_vlans))
348 			e1000_vlan_rx_kill_vid(netdev, old_vid);
349 	} else {
350 		adapter->mng_vlan_id = vid;
351 	}
352 }
353 
354 static void e1000_init_manageability(struct e1000_adapter *adapter)
355 {
356 	struct e1000_hw *hw = &adapter->hw;
357 
358 	if (adapter->en_mng_pt) {
359 		u32 manc = er32(MANC);
360 
361 		/* disable hardware interception of ARP */
362 		manc &= ~(E1000_MANC_ARP_EN);
363 
364 		ew32(MANC, manc);
365 	}
366 }
367 
368 static void e1000_release_manageability(struct e1000_adapter *adapter)
369 {
370 	struct e1000_hw *hw = &adapter->hw;
371 
372 	if (adapter->en_mng_pt) {
373 		u32 manc = er32(MANC);
374 
375 		/* re-enable hardware interception of ARP */
376 		manc |= E1000_MANC_ARP_EN;
377 
378 		ew32(MANC, manc);
379 	}
380 }
381 
382 /**
383  * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
385  **/
386 static void e1000_configure(struct e1000_adapter *adapter)
387 {
388 	struct net_device *netdev = adapter->netdev;
389 	int i;
390 
391 	e1000_set_rx_mode(netdev);
392 
393 	e1000_restore_vlan(adapter);
394 	e1000_init_manageability(adapter);
395 
396 	e1000_configure_tx(adapter);
397 	e1000_setup_rctl(adapter);
398 	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean; keeping one slot free means an
	 * equal next_to_use and next_to_clean can only mean the ring is
	 * empty, never full */
402 	for (i = 0; i < adapter->num_rx_queues; i++) {
403 		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
404 		adapter->alloc_rx_buf(adapter, ring,
405 		                      E1000_DESC_UNUSED(ring));
406 	}
407 }
408 
409 int e1000_up(struct e1000_adapter *adapter)
410 {
411 	struct e1000_hw *hw = &adapter->hw;
412 
413 	/* hardware has been reset, we need to reload some things */
414 	e1000_configure(adapter);
415 
416 	clear_bit(__E1000_DOWN, &adapter->flags);
417 
418 	napi_enable(&adapter->napi);
419 
420 	e1000_irq_enable(adapter);
421 
422 	netif_wake_queue(adapter->netdev);
423 
424 	/* fire a link change interrupt to start the watchdog */
425 	ew32(ICS, E1000_ICS_LSC);
426 	return 0;
427 }
428 
429 /**
430  * e1000_power_up_phy - restore link in case the phy was powered down
431  * @adapter: address of board private structure
432  *
433  * The phy may be powered down to save power and turn off link when the
434  * driver is unloaded and wake on lan is not enabled (among others)
435  * *** this routine MUST be followed by a call to e1000_reset ***
436  *
437  **/
438 
439 void e1000_power_up_phy(struct e1000_adapter *adapter)
440 {
441 	struct e1000_hw *hw = &adapter->hw;
442 	u16 mii_reg = 0;
443 
444 	/* Just clear the power down bit to wake the phy back up */
445 	if (hw->media_type == e1000_media_type_copper) {
446 		/* according to the manual, the phy will retain its
447 		 * settings across a power-down/up cycle */
448 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
449 		mii_reg &= ~MII_CR_POWER_DOWN;
450 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
451 	}
452 }
453 
454 static void e1000_power_down_phy(struct e1000_adapter *adapter)
455 {
456 	struct e1000_hw *hw = &adapter->hw;
457 
	/* Power down the PHY so no link is implied when interface is down.
	 * The PHY cannot be powered down if any of the following is true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
463 	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
464 	   hw->media_type == e1000_media_type_copper) {
465 		u16 mii_reg = 0;
466 
467 		switch (hw->mac_type) {
468 		case e1000_82540:
469 		case e1000_82545:
470 		case e1000_82545_rev_3:
471 		case e1000_82546:
472 		case e1000_ce4100:
473 		case e1000_82546_rev_3:
474 		case e1000_82541:
475 		case e1000_82541_rev_2:
476 		case e1000_82547:
477 		case e1000_82547_rev_2:
478 			if (er32(MANC) & E1000_MANC_SMBUS_EN)
479 				goto out;
480 			break;
481 		default:
482 			goto out;
483 		}
484 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
485 		mii_reg |= MII_CR_POWER_DOWN;
486 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
487 		msleep(1);
488 	}
489 out:
490 	return;
491 }
492 
493 static void e1000_down_and_stop(struct e1000_adapter *adapter)
494 {
495 	set_bit(__E1000_DOWN, &adapter->flags);
496 	cancel_work_sync(&adapter->reset_task);
497 	cancel_delayed_work_sync(&adapter->watchdog_task);
498 	cancel_delayed_work_sync(&adapter->phy_info_task);
499 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
500 }
501 
502 void e1000_down(struct e1000_adapter *adapter)
503 {
504 	struct e1000_hw *hw = &adapter->hw;
505 	struct net_device *netdev = adapter->netdev;
506 	u32 rctl, tctl;
507 
508 
509 	/* disable receives in the hardware */
510 	rctl = er32(RCTL);
511 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
512 	/* flush and sleep below */
513 
514 	netif_tx_disable(netdev);
515 
516 	/* disable transmits in the hardware */
517 	tctl = er32(TCTL);
518 	tctl &= ~E1000_TCTL_EN;
519 	ew32(TCTL, tctl);
520 	/* flush both disables and wait for them to finish */
521 	E1000_WRITE_FLUSH();
522 	msleep(10);
523 
524 	napi_disable(&adapter->napi);
525 
526 	e1000_irq_disable(adapter);
527 
528 	/*
529 	 * Setting DOWN must be after irq_disable to prevent
530 	 * a screaming interrupt.  Setting DOWN also prevents
531 	 * tasks from rescheduling.
532 	 */
533 	e1000_down_and_stop(adapter);
534 
535 	adapter->link_speed = 0;
536 	adapter->link_duplex = 0;
537 	netif_carrier_off(netdev);
538 
539 	e1000_reset(adapter);
540 	e1000_clean_all_tx_rings(adapter);
541 	e1000_clean_all_rx_rings(adapter);
542 }
543 
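/* Like e1000_reinit_locked(), but for callers that do not hold the RTNL
 * (e.g. work-queue context): the __E1000_RESETTING bit serializes
 * concurrent resets and adapter->mutex guards the down/up sequence.
 */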
544 static void e1000_reinit_safe(struct e1000_adapter *adapter)
545 {
546 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
547 		msleep(1);
548 	mutex_lock(&adapter->mutex);
549 	e1000_down(adapter);
550 	e1000_up(adapter);
551 	mutex_unlock(&adapter->mutex);
552 	clear_bit(__E1000_RESETTING, &adapter->flags);
553 }
554 
555 void e1000_reinit_locked(struct e1000_adapter *adapter)
556 {
557 	/* if rtnl_lock is not held the call path is bogus */
558 	ASSERT_RTNL();
559 	WARN_ON(in_interrupt());
560 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
561 		msleep(1);
562 	e1000_down(adapter);
563 	e1000_up(adapter);
564 	clear_bit(__E1000_RESETTING, &adapter->flags);
565 }
566 
567 void e1000_reset(struct e1000_adapter *adapter)
568 {
569 	struct e1000_hw *hw = &adapter->hw;
570 	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
571 	bool legacy_pba_adjust = false;
572 	u16 hwm;
573 
	/* Repartition PBA for MTUs greater than 9K.
	 * CTRL.RST is required for the new partitioning to take effect.
	 */
577 
578 	switch (hw->mac_type) {
579 	case e1000_82542_rev2_0:
580 	case e1000_82542_rev2_1:
581 	case e1000_82543:
582 	case e1000_82544:
583 	case e1000_82540:
584 	case e1000_82541:
585 	case e1000_82541_rev_2:
586 		legacy_pba_adjust = true;
587 		pba = E1000_PBA_48K;
588 		break;
589 	case e1000_82545:
590 	case e1000_82545_rev_3:
591 	case e1000_82546:
592 	case e1000_ce4100:
593 	case e1000_82546_rev_3:
594 		pba = E1000_PBA_48K;
595 		break;
596 	case e1000_82547:
597 	case e1000_82547_rev_2:
598 		legacy_pba_adjust = true;
599 		pba = E1000_PBA_30K;
600 		break;
601 	case e1000_undefined:
602 	case e1000_num_macs:
603 		break;
604 	}
605 
606 	if (legacy_pba_adjust) {
607 		if (hw->max_frame_size > E1000_RXBUFFER_8192)
608 			pba -= 8; /* allocate more FIFO for Tx */
609 
610 		if (hw->mac_type == e1000_82547) {
611 			adapter->tx_fifo_head = 0;
612 			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
613 			adapter->tx_fifo_size =
614 				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
615 			atomic_set(&adapter->tx_fifo_stall, 0);
616 		}
617 	} else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
618 		/* adjust PBA for jumbo frames */
619 		ew32(PBA, pba);
620 
621 		/* To maintain wire speed transmits, the Tx FIFO should be
622 		 * large enough to accommodate two full transmit packets,
623 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
624 		 * the Rx FIFO should be large enough to accommodate at least
625 		 * one full receive packet and is similarly rounded up and
626 		 * expressed in KB. */
627 		pba = er32(PBA);
628 		/* upper 16 bits has Tx packet buffer allocation size in KB */
629 		tx_space = pba >> 16;
630 		/* lower 16 bits has Rx packet buffer allocation size in KB */
631 		pba &= 0xffff;
632 		/*
633 		 * the tx fifo also stores 16 bytes of information about the tx
634 		 * but don't include ethernet FCS because hardware appends it
635 		 */
636 		min_tx_space = (hw->max_frame_size +
637 		                sizeof(struct e1000_tx_desc) -
638 		                ETH_FCS_LEN) * 2;
639 		min_tx_space = ALIGN(min_tx_space, 1024);
640 		min_tx_space >>= 10;
641 		/* software strips receive CRC, so leave room for it */
642 		min_rx_space = hw->max_frame_size;
643 		min_rx_space = ALIGN(min_rx_space, 1024);
644 		min_rx_space >>= 10;
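		/* e.g. with the default 1500-byte MTU (max_frame_size 1518)
		 * and the 16-byte legacy Tx descriptor:
		 *   min_tx_space = ALIGN((1518 + 16 - 4) * 2, 1024) >> 10 = 3
		 *   min_rx_space = ALIGN(1518, 1024) >> 10 = 2
		 * i.e. at least 3KB of Tx and 2KB of Rx packet buffer.
		 */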
645 
646 		/* If current Tx allocation is less than the min Tx FIFO size,
647 		 * and the min Tx FIFO size is less than the current Rx FIFO
648 		 * allocation, take space away from current Rx allocation */
649 		if (tx_space < min_tx_space &&
650 		    ((min_tx_space - tx_space) < pba)) {
651 			pba = pba - (min_tx_space - tx_space);
652 
653 			/* PCI/PCIx hardware has PBA alignment constraints */
654 			switch (hw->mac_type) {
655 			case e1000_82545 ... e1000_82546_rev_3:
656 				pba &= ~(E1000_PBA_8K - 1);
657 				break;
658 			default:
659 				break;
660 			}
661 
662 			/* if short on rx space, rx wins and must trump tx
663 			 * adjustment or use Early Receive if available */
664 			if (pba < min_rx_space)
665 				pba = min_rx_space;
666 		}
667 	}
668 
669 	ew32(PBA, pba);
670 
671 	/*
672 	 * flow control settings:
673 	 * The high water mark must be low enough to fit one full frame
674 	 * (or the size used for early receive) above it in the Rx FIFO.
675 	 * Set it to the lower of:
676 	 * - 90% of the Rx FIFO size, and
677 	 * - the full Rx FIFO size minus the early receive size (for parts
678 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
679 	 * - the full Rx FIFO size minus one full frame
680 	 */
681 	hwm = min(((pba << 10) * 9 / 10),
682 		  ((pba << 10) - hw->max_frame_size));
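	/* e.g. a 48KB Rx packet buffer with a 1518-byte max frame gives
	 * hwm = min(49152 * 9 / 10, 49152 - 1518) = 44236, which the
	 * 8-byte-granularity mask below rounds down to 44232.
	 */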
683 
684 	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
685 	hw->fc_low_water = hw->fc_high_water - 8;
686 	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
687 	hw->fc_send_xon = 1;
688 	hw->fc = hw->original_fc;
689 
690 	/* Allow time for pending master requests to run */
691 	e1000_reset_hw(hw);
692 	if (hw->mac_type >= e1000_82544)
693 		ew32(WUC, 0);
694 
695 	if (e1000_init_hw(hw))
696 		e_dev_err("Hardware Error\n");
697 	e1000_update_mng_vlan(adapter);
698 
699 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
700 	if (hw->mac_type >= e1000_82544 &&
701 	    hw->autoneg == 1 &&
702 	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
703 		u32 ctrl = er32(CTRL);
		/* clear the phy power management bit if we are in gig only
		 * mode; if left set, the phy will attempt negotiation to
		 * 100Mb, which can cause a loss of link at power off or
		 * driver unload */
707 		ctrl &= ~E1000_CTRL_SWDPIN3;
708 		ew32(CTRL, ctrl);
709 	}
710 
711 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
712 	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
713 
714 	e1000_reset_adaptive(hw);
715 	e1000_phy_get_info(hw, &adapter->phy_info);
716 
717 	e1000_release_manageability(adapter);
718 }
719 
720 /**
721  *  Dump the eeprom for users having checksum issues
722  **/
723 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
724 {
725 	struct net_device *netdev = adapter->netdev;
726 	struct ethtool_eeprom eeprom;
727 	const struct ethtool_ops *ops = netdev->ethtool_ops;
728 	u8 *data;
729 	int i;
730 	u16 csum_old, csum_new = 0;
731 
732 	eeprom.len = ops->get_eeprom_len(netdev);
733 	eeprom.offset = 0;
734 
735 	data = kmalloc(eeprom.len, GFP_KERNEL);
736 	if (!data)
737 		return;
738 
739 	ops->get_eeprom(netdev, &eeprom, data);
740 
741 	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
742 		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
743 	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
744 		csum_new += data[i] + (data[i + 1] << 8);
745 	csum_new = EEPROM_SUM - csum_new;
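	/* The EEPROM is valid when the 16-bit words from offset 0 up to and
	 * including EEPROM_CHECKSUM_REG sum to EEPROM_SUM, so the expected
	 * checksum word is EEPROM_SUM minus the sum of the words preceding
	 * it, as computed above.
	 */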
746 
747 	pr_err("/*********************/\n");
748 	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
749 	pr_err("Calculated              : 0x%04x\n", csum_new);
750 
751 	pr_err("Offset    Values\n");
752 	pr_err("========  ======\n");
753 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
754 
755 	pr_err("Include this output when contacting your support provider.\n");
756 	pr_err("This is not a software error! Something bad happened to\n");
757 	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
758 	pr_err("result in further problems, possibly loss of data,\n");
759 	pr_err("corruption or system hangs!\n");
760 	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
761 	pr_err("which is invalid and requires you to set the proper MAC\n");
762 	pr_err("address manually before continuing to enable this network\n");
763 	pr_err("device. Please inspect the EEPROM dump and report the\n");
764 	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
765 	pr_err("/*********************/\n");
766 
767 	kfree(data);
768 }
769 
770 /**
771  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
772  * @pdev: PCI device information struct
773  *
774  * Return true if an adapter needs ioport resources
775  **/
776 static int e1000_is_need_ioport(struct pci_dev *pdev)
777 {
778 	switch (pdev->device) {
779 	case E1000_DEV_ID_82540EM:
780 	case E1000_DEV_ID_82540EM_LOM:
781 	case E1000_DEV_ID_82540EP:
782 	case E1000_DEV_ID_82540EP_LOM:
783 	case E1000_DEV_ID_82540EP_LP:
784 	case E1000_DEV_ID_82541EI:
785 	case E1000_DEV_ID_82541EI_MOBILE:
786 	case E1000_DEV_ID_82541ER:
787 	case E1000_DEV_ID_82541ER_LOM:
788 	case E1000_DEV_ID_82541GI:
789 	case E1000_DEV_ID_82541GI_LF:
790 	case E1000_DEV_ID_82541GI_MOBILE:
791 	case E1000_DEV_ID_82544EI_COPPER:
792 	case E1000_DEV_ID_82544EI_FIBER:
793 	case E1000_DEV_ID_82544GC_COPPER:
794 	case E1000_DEV_ID_82544GC_LOM:
795 	case E1000_DEV_ID_82545EM_COPPER:
796 	case E1000_DEV_ID_82545EM_FIBER:
797 	case E1000_DEV_ID_82546EB_COPPER:
798 	case E1000_DEV_ID_82546EB_FIBER:
799 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
800 		return true;
801 	default:
802 		return false;
803 	}
804 }
805 
806 static netdev_features_t e1000_fix_features(struct net_device *netdev,
807 	netdev_features_t features)
808 {
809 	/*
810 	 * Since there is no support for separate rx/tx vlan accel
811 	 * enable/disable make sure tx flag is always in same state as rx.
812 	 */
813 	if (features & NETIF_F_HW_VLAN_RX)
814 		features |= NETIF_F_HW_VLAN_TX;
815 	else
816 		features &= ~NETIF_F_HW_VLAN_TX;
817 
818 	return features;
819 }
820 
821 static int e1000_set_features(struct net_device *netdev,
822 	netdev_features_t features)
823 {
824 	struct e1000_adapter *adapter = netdev_priv(netdev);
825 	netdev_features_t changed = features ^ netdev->features;
826 
827 	if (changed & NETIF_F_HW_VLAN_RX)
828 		e1000_vlan_mode(netdev, features);
829 
830 	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
831 		return 0;
832 
833 	netdev->features = features;
834 	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
835 
836 	if (netif_running(netdev))
837 		e1000_reinit_locked(adapter);
838 	else
839 		e1000_reset(adapter);
840 
841 	return 0;
842 }
843 
844 static const struct net_device_ops e1000_netdev_ops = {
845 	.ndo_open		= e1000_open,
846 	.ndo_stop		= e1000_close,
847 	.ndo_start_xmit		= e1000_xmit_frame,
848 	.ndo_get_stats		= e1000_get_stats,
849 	.ndo_set_rx_mode	= e1000_set_rx_mode,
850 	.ndo_set_mac_address	= e1000_set_mac,
851 	.ndo_tx_timeout		= e1000_tx_timeout,
852 	.ndo_change_mtu		= e1000_change_mtu,
853 	.ndo_do_ioctl		= e1000_ioctl,
854 	.ndo_validate_addr	= eth_validate_addr,
855 	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
856 	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
857 #ifdef CONFIG_NET_POLL_CONTROLLER
858 	.ndo_poll_controller	= e1000_netpoll,
859 #endif
860 	.ndo_fix_features	= e1000_fix_features,
861 	.ndo_set_features	= e1000_set_features,
862 };
863 
864 /**
865  * e1000_init_hw_struct - initialize members of hw struct
866  * @adapter: board private struct
867  * @hw: structure used by e1000_hw.c
868  *
869  * Factors out initialization of the e1000_hw struct to its own function
870  * that can be called very early at init (just after struct allocation).
871  * Fields are initialized based on PCI device information and
872  * OS network device settings (MTU size).
873  * Returns negative error codes if MAC type setup fails.
874  */
875 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
876 				struct e1000_hw *hw)
877 {
878 	struct pci_dev *pdev = adapter->pdev;
879 
880 	/* PCI config space info */
881 	hw->vendor_id = pdev->vendor;
882 	hw->device_id = pdev->device;
883 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
884 	hw->subsystem_id = pdev->subsystem_device;
885 	hw->revision_id = pdev->revision;
886 
887 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
888 
889 	hw->max_frame_size = adapter->netdev->mtu +
890 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
891 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
892 
893 	/* identify the MAC */
894 	if (e1000_set_mac_type(hw)) {
895 		e_err(probe, "Unknown MAC Type\n");
896 		return -EIO;
897 	}
898 
899 	switch (hw->mac_type) {
900 	default:
901 		break;
902 	case e1000_82541:
903 	case e1000_82547:
904 	case e1000_82541_rev_2:
905 	case e1000_82547_rev_2:
906 		hw->phy_init_script = 1;
907 		break;
908 	}
909 
910 	e1000_set_media_type(hw);
911 	e1000_get_bus_info(hw);
912 
913 	hw->wait_autoneg_complete = false;
914 	hw->tbi_compatibility_en = true;
915 	hw->adaptive_ifs = true;
916 
917 	/* Copper options */
918 
919 	if (hw->media_type == e1000_media_type_copper) {
920 		hw->mdix = AUTO_ALL_MODES;
921 		hw->disable_polarity_correction = false;
922 		hw->master_slave = E1000_MASTER_SLAVE;
923 	}
924 
925 	return 0;
926 }
927 
928 /**
929  * e1000_probe - Device Initialization Routine
930  * @pdev: PCI device information struct
931  * @ent: entry in e1000_pci_tbl
932  *
933  * Returns 0 on success, negative on failure
934  *
935  * e1000_probe initializes an adapter identified by a pci_dev structure.
936  * The OS initialization, configuring of the adapter private structure,
937  * and a hardware reset occur.
938  **/
939 static int __devinit e1000_probe(struct pci_dev *pdev,
940 				 const struct pci_device_id *ent)
941 {
942 	struct net_device *netdev;
943 	struct e1000_adapter *adapter;
944 	struct e1000_hw *hw;
945 
946 	static int cards_found = 0;
947 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
948 	int i, err, pci_using_dac;
949 	u16 eeprom_data = 0;
950 	u16 tmp = 0;
951 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
952 	int bars, need_ioport;
953 
954 	/* do not allocate ioport bars when not needed */
955 	need_ioport = e1000_is_need_ioport(pdev);
956 	if (need_ioport) {
957 		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
958 		err = pci_enable_device(pdev);
959 	} else {
960 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
961 		err = pci_enable_device_mem(pdev);
962 	}
963 	if (err)
964 		return err;
965 
966 	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
967 	if (err)
968 		goto err_pci_reg;
969 
970 	pci_set_master(pdev);
971 	err = pci_save_state(pdev);
972 	if (err)
973 		goto err_alloc_etherdev;
974 
975 	err = -ENOMEM;
976 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
977 	if (!netdev)
978 		goto err_alloc_etherdev;
979 
980 	SET_NETDEV_DEV(netdev, &pdev->dev);
981 
982 	pci_set_drvdata(pdev, netdev);
983 	adapter = netdev_priv(netdev);
984 	adapter->netdev = netdev;
985 	adapter->pdev = pdev;
986 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
987 	adapter->bars = bars;
988 	adapter->need_ioport = need_ioport;
989 
990 	hw = &adapter->hw;
991 	hw->back = adapter;
992 
993 	err = -EIO;
994 	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
995 	if (!hw->hw_addr)
996 		goto err_ioremap;
997 
998 	if (adapter->need_ioport) {
999 		for (i = BAR_1; i <= BAR_5; i++) {
1000 			if (pci_resource_len(pdev, i) == 0)
1001 				continue;
1002 			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1003 				hw->io_base = pci_resource_start(pdev, i);
1004 				break;
1005 			}
1006 		}
1007 	}
1008 
1009 	/* make ready for any if (hw->...) below */
1010 	err = e1000_init_hw_struct(adapter, hw);
1011 	if (err)
1012 		goto err_sw_init;
1013 
1014 	/*
1015 	 * there is a workaround being applied below that limits
1016 	 * 64-bit DMA addresses to 64-bit hardware.  There are some
1017 	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1018 	 */
1019 	pci_using_dac = 0;
1020 	if ((hw->bus_type == e1000_bus_type_pcix) &&
1021 	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1022 		/*
1023 		 * according to DMA-API-HOWTO, coherent calls will always
1024 		 * succeed if the set call did
1025 		 */
1026 		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1027 		pci_using_dac = 1;
1028 	} else {
1029 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1030 		if (err) {
1031 			pr_err("No usable DMA config, aborting\n");
1032 			goto err_dma;
1033 		}
1034 		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1035 	}
1036 
1037 	netdev->netdev_ops = &e1000_netdev_ops;
1038 	e1000_set_ethtool_ops(netdev);
1039 	netdev->watchdog_timeo = 5 * HZ;
1040 	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1041 
1042 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1043 
1044 	adapter->bd_number = cards_found;
1045 
1046 	/* setup the private structure */
1047 
1048 	err = e1000_sw_init(adapter);
1049 	if (err)
1050 		goto err_sw_init;
1051 
1052 	err = -EIO;
1053 	if (hw->mac_type == e1000_ce4100) {
1054 		hw->ce4100_gbe_mdio_base_virt =
1055 					ioremap(pci_resource_start(pdev, BAR_1),
1056 		                                pci_resource_len(pdev, BAR_1));
1057 
1058 		if (!hw->ce4100_gbe_mdio_base_virt)
1059 			goto err_mdio_ioremap;
1060 	}
1061 
1062 	if (hw->mac_type >= e1000_82543) {
1063 		netdev->hw_features = NETIF_F_SG |
1064 				   NETIF_F_HW_CSUM |
1065 				   NETIF_F_HW_VLAN_RX;
1066 		netdev->features = NETIF_F_HW_VLAN_TX |
1067 				   NETIF_F_HW_VLAN_FILTER;
1068 	}
1069 
1070 	if ((hw->mac_type >= e1000_82544) &&
1071 	   (hw->mac_type != e1000_82547))
1072 		netdev->hw_features |= NETIF_F_TSO;
1073 
1074 	netdev->priv_flags |= IFF_SUPP_NOFCS;
1075 
1076 	netdev->features |= netdev->hw_features;
1077 	netdev->hw_features |= NETIF_F_RXCSUM;
1078 	netdev->hw_features |= NETIF_F_RXALL;
1079 	netdev->hw_features |= NETIF_F_RXFCS;
1080 
1081 	if (pci_using_dac) {
1082 		netdev->features |= NETIF_F_HIGHDMA;
1083 		netdev->vlan_features |= NETIF_F_HIGHDMA;
1084 	}
1085 
1086 	netdev->vlan_features |= NETIF_F_TSO;
1087 	netdev->vlan_features |= NETIF_F_HW_CSUM;
1088 	netdev->vlan_features |= NETIF_F_SG;
1089 
1090 	netdev->priv_flags |= IFF_UNICAST_FLT;
1091 
1092 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1093 
1094 	/* initialize eeprom parameters */
1095 	if (e1000_init_eeprom_params(hw)) {
1096 		e_err(probe, "EEPROM initialization failed\n");
1097 		goto err_eeprom;
1098 	}
1099 
1100 	/* before reading the EEPROM, reset the controller to
1101 	 * put the device in a known good starting state */
1102 
1103 	e1000_reset_hw(hw);
1104 
1105 	/* make sure the EEPROM is good */
1106 	if (e1000_validate_eeprom_checksum(hw) < 0) {
1107 		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1108 		e1000_dump_eeprom(adapter);
1109 		/*
1110 		 * set MAC address to all zeroes to invalidate and temporary
1111 		 * disable this device for the user. This blocks regular
1112 		 * traffic while still permitting ethtool ioctls from reaching
1113 		 * the hardware as well as allowing the user to run the
1114 		 * interface after manually setting a hw addr using
1115 		 * `ip set address`
1116 		 */
1117 		memset(hw->mac_addr, 0, netdev->addr_len);
1118 	} else {
1119 		/* copy the MAC address out of the EEPROM */
1120 		if (e1000_read_mac_addr(hw))
1121 			e_err(probe, "EEPROM Read Error\n");
1122 	}
	/* don't block initialization here due to bad MAC address */
1124 	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1125 	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1126 
1127 	if (!is_valid_ether_addr(netdev->perm_addr))
1128 		e_err(probe, "Invalid MAC Address\n");
1129 
1130 
1131 	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1132 	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1133 			  e1000_82547_tx_fifo_stall_task);
1134 	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1135 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
1136 
1137 	e1000_check_options(adapter);
1138 
1139 	/* Initial Wake on LAN setting
1140 	 * If APM wake is enabled in the EEPROM,
1141 	 * enable the ACPI Magic Packet filter
1142 	 */
1143 
1144 	switch (hw->mac_type) {
1145 	case e1000_82542_rev2_0:
1146 	case e1000_82542_rev2_1:
1147 	case e1000_82543:
1148 		break;
1149 	case e1000_82544:
1150 		e1000_read_eeprom(hw,
1151 			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1152 		eeprom_apme_mask = E1000_EEPROM_82544_APM;
1153 		break;
1154 	case e1000_82546:
1155 	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1157 			e1000_read_eeprom(hw,
1158 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1159 			break;
1160 		}
1161 		/* Fall Through */
1162 	default:
1163 		e1000_read_eeprom(hw,
1164 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1165 		break;
1166 	}
1167 	if (eeprom_data & eeprom_apme_mask)
1168 		adapter->eeprom_wol |= E1000_WUFC_MAG;
1169 
1170 	/* now that we have the eeprom settings, apply the special cases
1171 	 * where the eeprom may be wrong or the board simply won't support
1172 	 * wake on lan on a particular port */
1173 	switch (pdev->device) {
1174 	case E1000_DEV_ID_82546GB_PCIE:
1175 		adapter->eeprom_wol = 0;
1176 		break;
1177 	case E1000_DEV_ID_82546EB_FIBER:
1178 	case E1000_DEV_ID_82546GB_FIBER:
1179 		/* Wake events only supported on port A for dual fiber
1180 		 * regardless of eeprom setting */
1181 		if (er32(STATUS) & E1000_STATUS_FUNC_1)
1182 			adapter->eeprom_wol = 0;
1183 		break;
1184 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1185 		/* if quad port adapter, disable WoL on all but port A */
1186 		if (global_quad_port_a != 0)
1187 			adapter->eeprom_wol = 0;
1188 		else
1189 			adapter->quad_port_a = true;
1190 		/* Reset for multiple quad port adapters */
1191 		if (++global_quad_port_a == 4)
1192 			global_quad_port_a = 0;
1193 		break;
1194 	}
1195 
1196 	/* initialize the wol settings based on the eeprom settings */
1197 	adapter->wol = adapter->eeprom_wol;
1198 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1199 
1200 	/* Auto detect PHY address */
1201 	if (hw->mac_type == e1000_ce4100) {
1202 		for (i = 0; i < 32; i++) {
1203 			hw->phy_addr = i;
1204 			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1205 			if (tmp == 0 || tmp == 0xFF) {
1206 				if (i == 31)
1207 					goto err_eeprom;
1208 				continue;
1209 			} else
1210 				break;
1211 		}
1212 	}
1213 
1214 	/* reset the hardware with the new settings */
1215 	e1000_reset(adapter);
1216 
1217 	strcpy(netdev->name, "eth%d");
1218 	err = register_netdev(netdev);
1219 	if (err)
1220 		goto err_register;
1221 
1222 	e1000_vlan_filter_on_off(adapter, false);
1223 
1224 	/* print bus type/speed/width info */
1225 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1226 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1227 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1228 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
1229 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
1230 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1231 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1232 	       netdev->dev_addr);
1233 
1234 	/* carrier off reporting is important to ethtool even BEFORE open */
1235 	netif_carrier_off(netdev);
1236 
1237 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1238 
1239 	cards_found++;
1240 	return 0;
1241 
1242 err_register:
1243 err_eeprom:
1244 	e1000_phy_hw_reset(hw);
1245 
1246 	if (hw->flash_address)
1247 		iounmap(hw->flash_address);
1248 	kfree(adapter->tx_ring);
1249 	kfree(adapter->rx_ring);
1250 err_dma:
1251 err_sw_init:
1252 err_mdio_ioremap:
1253 	iounmap(hw->ce4100_gbe_mdio_base_virt);
1254 	iounmap(hw->hw_addr);
1255 err_ioremap:
1256 	free_netdev(netdev);
1257 err_alloc_etherdev:
1258 	pci_release_selected_regions(pdev, bars);
1259 err_pci_reg:
1260 	pci_disable_device(pdev);
1261 	return err;
1262 }
1263 
1264 /**
1265  * e1000_remove - Device Removal Routine
1266  * @pdev: PCI device information struct
1267  *
1268  * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
1270  * Hot-Plug event, or because the driver is going to be removed from
1271  * memory.
1272  **/
1273 
1274 static void __devexit e1000_remove(struct pci_dev *pdev)
1275 {
1276 	struct net_device *netdev = pci_get_drvdata(pdev);
1277 	struct e1000_adapter *adapter = netdev_priv(netdev);
1278 	struct e1000_hw *hw = &adapter->hw;
1279 
1280 	e1000_down_and_stop(adapter);
1281 	e1000_release_manageability(adapter);
1282 
1283 	unregister_netdev(netdev);
1284 
1285 	e1000_phy_hw_reset(hw);
1286 
1287 	kfree(adapter->tx_ring);
1288 	kfree(adapter->rx_ring);
1289 
1290 	if (hw->mac_type == e1000_ce4100)
1291 		iounmap(hw->ce4100_gbe_mdio_base_virt);
1292 	iounmap(hw->hw_addr);
1293 	if (hw->flash_address)
1294 		iounmap(hw->flash_address);
1295 	pci_release_selected_regions(pdev, adapter->bars);
1296 
1297 	free_netdev(netdev);
1298 
1299 	pci_disable_device(pdev);
1300 }
1301 
1302 /**
1303  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1304  * @adapter: board private structure to initialize
1305  *
1306  * e1000_sw_init initializes the Adapter private data structure.
1307  * e1000_init_hw_struct MUST be called before this function
1308  **/
1309 
1310 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1311 {
1312 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1313 
1314 	adapter->num_tx_queues = 1;
1315 	adapter->num_rx_queues = 1;
1316 
1317 	if (e1000_alloc_queues(adapter)) {
1318 		e_err(probe, "Unable to allocate memory for queues\n");
1319 		return -ENOMEM;
1320 	}
1321 
1322 	/* Explicitly disable IRQ since the NIC can be in any state. */
1323 	e1000_irq_disable(adapter);
1324 
1325 	spin_lock_init(&adapter->stats_lock);
1326 	mutex_init(&adapter->mutex);
1327 
1328 	set_bit(__E1000_DOWN, &adapter->flags);
1329 
1330 	return 0;
1331 }
1332 
1333 /**
1334  * e1000_alloc_queues - Allocate memory for all rings
1335  * @adapter: board private structure to initialize
1336  *
1337  * We allocate one ring per queue at run-time since we don't know the
1338  * number of queues at compile-time.
1339  **/
1340 
1341 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1342 {
1343 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1344 	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
1345 	if (!adapter->tx_ring)
1346 		return -ENOMEM;
1347 
1348 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1349 	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
1350 	if (!adapter->rx_ring) {
1351 		kfree(adapter->tx_ring);
1352 		return -ENOMEM;
1353 	}
1354 
1355 	return E1000_SUCCESS;
1356 }
1357 
1358 /**
1359  * e1000_open - Called when a network interface is made active
1360  * @netdev: network interface device structure
1361  *
1362  * Returns 0 on success, negative value on failure
1363  *
1364  * The open entry point is called when a network interface is made
1365  * active by the system (IFF_UP).  At this point all resources needed
1366  * for transmit and receive operations are allocated, the interrupt
1367  * handler is registered with the OS, the watchdog task is started,
1368  * and the stack is notified that the interface is ready.
1369  **/
1370 
1371 static int e1000_open(struct net_device *netdev)
1372 {
1373 	struct e1000_adapter *adapter = netdev_priv(netdev);
1374 	struct e1000_hw *hw = &adapter->hw;
1375 	int err;
1376 
1377 	/* disallow open during test */
1378 	if (test_bit(__E1000_TESTING, &adapter->flags))
1379 		return -EBUSY;
1380 
1381 	netif_carrier_off(netdev);
1382 
1383 	/* allocate transmit descriptors */
1384 	err = e1000_setup_all_tx_resources(adapter);
1385 	if (err)
1386 		goto err_setup_tx;
1387 
1388 	/* allocate receive descriptors */
1389 	err = e1000_setup_all_rx_resources(adapter);
1390 	if (err)
1391 		goto err_setup_rx;
1392 
1393 	e1000_power_up_phy(adapter);
1394 
1395 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1396 	if ((hw->mng_cookie.status &
1397 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1398 		e1000_update_mng_vlan(adapter);
1399 	}
1400 
	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call request_irq, so we have to set up our
	 * clean_rx handler before we do so.  */
1405 	e1000_configure(adapter);
1406 
1407 	err = e1000_request_irq(adapter);
1408 	if (err)
1409 		goto err_req_irq;
1410 
1411 	/* From here on the code is the same as e1000_up() */
1412 	clear_bit(__E1000_DOWN, &adapter->flags);
1413 
1414 	napi_enable(&adapter->napi);
1415 
1416 	e1000_irq_enable(adapter);
1417 
1418 	netif_start_queue(netdev);
1419 
1420 	/* fire a link status change interrupt to start the watchdog */
1421 	ew32(ICS, E1000_ICS_LSC);
1422 
1423 	return E1000_SUCCESS;
1424 
1425 err_req_irq:
1426 	e1000_power_down_phy(adapter);
1427 	e1000_free_all_rx_resources(adapter);
1428 err_setup_rx:
1429 	e1000_free_all_tx_resources(adapter);
1430 err_setup_tx:
1431 	e1000_reset(adapter);
1432 
1433 	return err;
1434 }
1435 
1436 /**
1437  * e1000_close - Disables a network interface
1438  * @netdev: network interface device structure
1439  *
1440  * Returns 0, this is not allowed to fail
1441  *
1442  * The close entry point is called when an interface is de-activated
1443  * by the OS.  The hardware is still under the drivers control, but
1444  * needs to be disabled.  A global MAC reset is issued to stop the
1445  * hardware, and all transmit and receive resources are freed.
1446  **/
1447 
1448 static int e1000_close(struct net_device *netdev)
1449 {
1450 	struct e1000_adapter *adapter = netdev_priv(netdev);
1451 	struct e1000_hw *hw = &adapter->hw;
1452 
1453 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1454 	e1000_down(adapter);
1455 	e1000_power_down_phy(adapter);
1456 	e1000_free_irq(adapter);
1457 
1458 	e1000_free_all_tx_resources(adapter);
1459 	e1000_free_all_rx_resources(adapter);
1460 
1461 	/* kill manageability vlan ID if supported, but not if a vlan with
1462 	 * the same ID is registered on the host OS (let 8021q kill it) */
1463 	if ((hw->mng_cookie.status &
1464 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1465 	     !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1466 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1467 	}
1468 
1469 	return 0;
1470 }
1471 
1472 /**
1473  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1474  * @adapter: address of board private structure
1475  * @start: address of beginning of memory
1476  * @len: length of memory
1477  **/
1478 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1479 				  unsigned long len)
1480 {
1481 	struct e1000_hw *hw = &adapter->hw;
1482 	unsigned long begin = (unsigned long)start;
1483 	unsigned long end = begin + len;
1484 
	/* First rev 82545 and 82546 parts must not let any memory write
	 * location cross a 64kB boundary due to errata 23 */
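	/* e.g. start = 0x1f800, len = 0x1000: 0x1f800 ^ 0x207ff = 0x3ffff,
	 * which is non-zero after shifting right by 16, so the buffer
	 * crosses a 64kB boundary and the check fails; a buffer of the same
	 * length starting at 0x20000 stays within one 64kB page and passes.
	 */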
1487 	if (hw->mac_type == e1000_82545 ||
1488 	    hw->mac_type == e1000_ce4100 ||
1489 	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) == 0;
1491 	}
1492 
1493 	return true;
1494 }
1495 
1496 /**
1497  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1498  * @adapter: board private structure
1499  * @txdr:    tx descriptor ring (for a specific queue) to setup
1500  *
1501  * Return 0 on success, negative on failure
1502  **/
1503 
1504 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1505 				    struct e1000_tx_ring *txdr)
1506 {
1507 	struct pci_dev *pdev = adapter->pdev;
1508 	int size;
1509 
1510 	size = sizeof(struct e1000_buffer) * txdr->count;
1511 	txdr->buffer_info = vzalloc(size);
1512 	if (!txdr->buffer_info) {
1513 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
1514 		      "ring\n");
1515 		return -ENOMEM;
1516 	}
1517 
1518 	/* round up to nearest 4K */
1519 
1520 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1521 	txdr->size = ALIGN(txdr->size, 4096);
1522 
1523 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1524 					GFP_KERNEL);
1525 	if (!txdr->desc) {
1526 setup_tx_desc_die:
1527 		vfree(txdr->buffer_info);
1528 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
1529 		      "ring\n");
1530 		return -ENOMEM;
1531 	}
1532 
1533 	/* Fix for errata 23, can't cross 64kB boundary */
1534 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1535 		void *olddesc = txdr->desc;
1536 		dma_addr_t olddma = txdr->dma;
1537 		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1538 		      txdr->size, txdr->desc);
1539 		/* Try again, without freeing the previous */
1540 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1541 						&txdr->dma, GFP_KERNEL);
1542 		/* Failed allocation, critical failure */
1543 		if (!txdr->desc) {
1544 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1545 					  olddma);
1546 			goto setup_tx_desc_die;
1547 		}
1548 
1549 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1550 			/* give up */
1551 			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1552 					  txdr->dma);
1553 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1554 					  olddma);
1555 			e_err(probe, "Unable to allocate aligned memory "
1556 			      "for the transmit descriptor ring\n");
1557 			vfree(txdr->buffer_info);
1558 			return -ENOMEM;
1559 		} else {
1560 			/* Free old allocation, new allocation was successful */
1561 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1562 					  olddma);
1563 		}
1564 	}
1565 	memset(txdr->desc, 0, txdr->size);
1566 
1567 	txdr->next_to_use = 0;
1568 	txdr->next_to_clean = 0;
1569 
1570 	return 0;
1571 }
1572 
1573 /**
1574  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1575  * 				  (Descriptors) for all queues
1576  * @adapter: board private structure
1577  *
1578  * Return 0 on success, negative on failure
1579  **/
1580 
1581 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1582 {
1583 	int i, err = 0;
1584 
1585 	for (i = 0; i < adapter->num_tx_queues; i++) {
1586 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1587 		if (err) {
1588 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1589 			for (i-- ; i >= 0; i--)
1590 				e1000_free_tx_resources(adapter,
1591 							&adapter->tx_ring[i]);
1592 			break;
1593 		}
1594 	}
1595 
1596 	return err;
1597 }
1598 
1599 /**
1600  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1601  * @adapter: board private structure
1602  *
1603  * Configure the Tx unit of the MAC after a reset.
1604  **/
1605 
1606 static void e1000_configure_tx(struct e1000_adapter *adapter)
1607 {
1608 	u64 tdba;
1609 	struct e1000_hw *hw = &adapter->hw;
1610 	u32 tdlen, tctl, tipg;
1611 	u32 ipgr1, ipgr2;
1612 
1613 	/* Setup the HW Tx Head and Tail descriptor pointers */
1614 
1615 	switch (adapter->num_tx_queues) {
1616 	case 1:
1617 	default:
1618 		tdba = adapter->tx_ring[0].dma;
1619 		tdlen = adapter->tx_ring[0].count *
1620 			sizeof(struct e1000_tx_desc);
1621 		ew32(TDLEN, tdlen);
1622 		ew32(TDBAH, (tdba >> 32));
1623 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1624 		ew32(TDT, 0);
1625 		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
1628 		break;
1629 	}
1630 
1631 	/* Set the default values for the Tx Inter Packet Gap timer */
1632 	if ((hw->media_type == e1000_media_type_fiber ||
1633 	     hw->media_type == e1000_media_type_internal_serdes))
1634 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1635 	else
1636 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1637 
1638 	switch (hw->mac_type) {
1639 	case e1000_82542_rev2_0:
1640 	case e1000_82542_rev2_1:
1641 		tipg = DEFAULT_82542_TIPG_IPGT;
1642 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1643 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1644 		break;
1645 	default:
1646 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1647 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1648 		break;
1649 	}
1650 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1651 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1652 	ew32(TIPG, tipg);
1653 
1654 	/* Set the Tx Interrupt Delay register */
1655 
1656 	ew32(TIDV, adapter->tx_int_delay);
1657 	if (hw->mac_type >= e1000_82540)
1658 		ew32(TADV, adapter->tx_abs_int_delay);
1659 
1660 	/* Program the Transmit Control Register */
1661 
1662 	tctl = er32(TCTL);
1663 	tctl &= ~E1000_TCTL_CT;
1664 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1665 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1666 
1667 	e1000_config_collision_dist(hw);
1668 
1669 	/* Setup Transmit Descriptor Settings for eop descriptor */
1670 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1671 
1672 	/* only set IDE if we are delaying interrupts using the timers */
1673 	if (adapter->tx_int_delay)
1674 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1675 
1676 	if (hw->mac_type < e1000_82543)
1677 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1678 	else
1679 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1680 
1681 	/* Cache if we're 82544 running in PCI-X because we'll
1682 	 * need this to apply a workaround later in the send path. */
1683 	if (hw->mac_type == e1000_82544 &&
1684 	    hw->bus_type == e1000_bus_type_pcix)
1685 		adapter->pcix_82544 = true;
1686 
1687 	ew32(TCTL, tctl);
1688 
1689 }
1690 
1691 /**
1692  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1693  * @adapter: board private structure
1694  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1695  *
1696  * Returns 0 on success, negative on failure
1697  **/
1698 
1699 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1700 				    struct e1000_rx_ring *rxdr)
1701 {
1702 	struct pci_dev *pdev = adapter->pdev;
1703 	int size, desc_len;
1704 
1705 	size = sizeof(struct e1000_buffer) * rxdr->count;
1706 	rxdr->buffer_info = vzalloc(size);
1707 	if (!rxdr->buffer_info) {
1708 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
1709 		      "ring\n");
1710 		return -ENOMEM;
1711 	}
1712 
1713 	desc_len = sizeof(struct e1000_rx_desc);
1714 
1715 	/* Round up to nearest 4K */
1716 
1717 	rxdr->size = rxdr->count * desc_len;
1718 	rxdr->size = ALIGN(rxdr->size, 4096);
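	/* e.g. (illustrative) 80 descriptors * 16-byte legacy Rx
	 * descriptors = 1280 bytes, which ALIGN() rounds up to a single
	 * 4096-byte allocation.
	 */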
1719 
1720 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1721 					GFP_KERNEL);
1722 
1723 	if (!rxdr->desc) {
1724 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
1725 		      "ring\n");
1726 setup_rx_desc_die:
1727 		vfree(rxdr->buffer_info);
1728 		return -ENOMEM;
1729 	}
1730 
1731 	/* Fix for errata 23, can't cross 64kB boundary */
1732 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1733 		void *olddesc = rxdr->desc;
1734 		dma_addr_t olddma = rxdr->dma;
1735 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1736 		      rxdr->size, rxdr->desc);
1737 		/* Try again, without freeing the previous */
1738 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1739 						&rxdr->dma, GFP_KERNEL);
1740 		/* Failed allocation, critical failure */
1741 		if (!rxdr->desc) {
1742 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1743 					  olddma);
1744 			e_err(probe, "Unable to allocate memory for the Rx "
1745 			      "descriptor ring\n");
1746 			goto setup_rx_desc_die;
1747 		}
1748 
1749 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1750 			/* give up */
1751 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1752 					  rxdr->dma);
1753 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1754 					  olddma);
1755 			e_err(probe, "Unable to allocate aligned memory for "
1756 			      "the Rx descriptor ring\n");
1757 			goto setup_rx_desc_die;
1758 		} else {
1759 			/* Free old allocation, new allocation was successful */
1760 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1761 					  olddma);
1762 		}
1763 	}
1764 	memset(rxdr->desc, 0, rxdr->size);
1765 
1766 	rxdr->next_to_clean = 0;
1767 	rxdr->next_to_use = 0;
1768 	rxdr->rx_skb_top = NULL;
1769 
1770 	return 0;
1771 }
1772 
1773 /**
1774  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1775  * 				  (Descriptors) for all queues
1776  * @adapter: board private structure
1777  *
1778  * Return 0 on success, negative on failure
1779  **/
1780 
1781 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1782 {
1783 	int i, err = 0;
1784 
1785 	for (i = 0; i < adapter->num_rx_queues; i++) {
1786 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1787 		if (err) {
1788 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1789 			for (i-- ; i >= 0; i--)
1790 				e1000_free_rx_resources(adapter,
1791 							&adapter->rx_ring[i]);
1792 			break;
1793 		}
1794 	}
1795 
1796 	return err;
1797 }
1798 
1799 /**
1800  * e1000_setup_rctl - configure the receive control registers
1801  * @adapter: Board private structure
1802  **/
1803 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1804 {
1805 	struct e1000_hw *hw = &adapter->hw;
1806 	u32 rctl;
1807 
1808 	rctl = er32(RCTL);
1809 
1810 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1811 
1812 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1813 		E1000_RCTL_RDMTS_HALF |
1814 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1815 
1816 	if (hw->tbi_compatibility_on == 1)
1817 		rctl |= E1000_RCTL_SBP;
1818 	else
1819 		rctl &= ~E1000_RCTL_SBP;
1820 
1821 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1822 		rctl &= ~E1000_RCTL_LPE;
1823 	else
1824 		rctl |= E1000_RCTL_LPE;
1825 
1826 	/* Setup buffer sizes */
1827 	rctl &= ~E1000_RCTL_SZ_4096;
1828 	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}
1845 
1846 	/* This is useful for sniffing bad packets. */
1847 	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode */
1850 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1851 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1852 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1853 
1854 		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1855 			  E1000_RCTL_DPF | /* Allow filtered pause */
1856 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1857 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1858 		 * and that breaks VLANs.
1859 		 */
1860 	}
1861 
1862 	ew32(RCTL, rctl);
1863 }
1864 
1865 /**
1866  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1867  * @adapter: board private structure
1868  *
1869  * Configure the Rx unit of the MAC after a reset.
1870  **/
1871 
1872 static void e1000_configure_rx(struct e1000_adapter *adapter)
1873 {
1874 	u64 rdba;
1875 	struct e1000_hw *hw = &adapter->hw;
1876 	u32 rdlen, rctl, rxcsum;
1877 
1878 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1879 		rdlen = adapter->rx_ring[0].count *
1880 		        sizeof(struct e1000_rx_desc);
1881 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1882 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1883 	} else {
1884 		rdlen = adapter->rx_ring[0].count *
1885 		        sizeof(struct e1000_rx_desc);
1886 		adapter->clean_rx = e1000_clean_rx_irq;
1887 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1888 	}
1889 
1890 	/* disable receives while setting up the descriptors */
1891 	rctl = er32(RCTL);
1892 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1893 
1894 	/* set the Receive Delay Timer Register */
1895 	ew32(RDTR, adapter->rx_int_delay);
1896 
1897 	if (hw->mac_type >= e1000_82540) {
1898 		ew32(RADV, adapter->rx_abs_int_delay);
1899 		if (adapter->itr_setting != 0)
1900 			ew32(ITR, 1000000000 / (adapter->itr * 256));
1901 	}
1902 
1903 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1904 	 * the Base and Length of the Rx Descriptor Ring */
1905 	switch (adapter->num_rx_queues) {
1906 	case 1:
1907 	default:
1908 		rdba = adapter->rx_ring[0].dma;
1909 		ew32(RDLEN, rdlen);
1910 		ew32(RDBAH, (rdba >> 32));
1911 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1912 		ew32(RDT, 0);
1913 		ew32(RDH, 0);
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
1916 		break;
1917 	}
1918 
1919 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1920 	if (hw->mac_type >= e1000_82543) {
1921 		rxcsum = er32(RXCSUM);
1922 		if (adapter->rx_csum)
1923 			rxcsum |= E1000_RXCSUM_TUOFL;
1924 		else
1925 			/* don't need to clear IPPCSE as it defaults to 0 */
1926 			rxcsum &= ~E1000_RXCSUM_TUOFL;
1927 		ew32(RXCSUM, rxcsum);
1928 	}
1929 
1930 	/* Enable Receives */
1931 	ew32(RCTL, rctl | E1000_RCTL_EN);
1932 }
1933 
1934 /**
1935  * e1000_free_tx_resources - Free Tx Resources per Queue
1936  * @adapter: board private structure
1937  * @tx_ring: Tx descriptor ring for a specific queue
1938  *
1939  * Free all transmit software resources
1940  **/
1941 
1942 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1943 				    struct e1000_tx_ring *tx_ring)
1944 {
1945 	struct pci_dev *pdev = adapter->pdev;
1946 
1947 	e1000_clean_tx_ring(adapter, tx_ring);
1948 
1949 	vfree(tx_ring->buffer_info);
1950 	tx_ring->buffer_info = NULL;
1951 
1952 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1953 			  tx_ring->dma);
1954 
1955 	tx_ring->desc = NULL;
1956 }
1957 
1958 /**
1959  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1960  * @adapter: board private structure
1961  *
1962  * Free all transmit software resources
1963  **/
1964 
1965 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1966 {
1967 	int i;
1968 
1969 	for (i = 0; i < adapter->num_tx_queues; i++)
1970 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1971 }
1972 
1973 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1974 					     struct e1000_buffer *buffer_info)
1975 {
1976 	if (buffer_info->dma) {
1977 		if (buffer_info->mapped_as_page)
1978 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1979 				       buffer_info->length, DMA_TO_DEVICE);
1980 		else
1981 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1982 					 buffer_info->length,
1983 					 DMA_TO_DEVICE);
1984 		buffer_info->dma = 0;
1985 	}
1986 	if (buffer_info->skb) {
1987 		dev_kfree_skb_any(buffer_info->skb);
1988 		buffer_info->skb = NULL;
1989 	}
1990 	buffer_info->time_stamp = 0;
1991 	/* buffer_info must be completely set up in the transmit path */
1992 }
1993 
1994 /**
1995  * e1000_clean_tx_ring - Free Tx Buffers
1996  * @adapter: board private structure
1997  * @tx_ring: ring to be cleaned
1998  **/
1999 
2000 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2001 				struct e1000_tx_ring *tx_ring)
2002 {
2003 	struct e1000_hw *hw = &adapter->hw;
2004 	struct e1000_buffer *buffer_info;
2005 	unsigned long size;
2006 	unsigned int i;
2007 
2008 	/* Free all the Tx ring sk_buffs */
2009 
2010 	for (i = 0; i < tx_ring->count; i++) {
2011 		buffer_info = &tx_ring->buffer_info[i];
2012 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2013 	}
2014 
2015 	size = sizeof(struct e1000_buffer) * tx_ring->count;
2016 	memset(tx_ring->buffer_info, 0, size);
2017 
2018 	/* Zero out the descriptor ring */
2019 
2020 	memset(tx_ring->desc, 0, tx_ring->size);
2021 
2022 	tx_ring->next_to_use = 0;
2023 	tx_ring->next_to_clean = 0;
2024 	tx_ring->last_tx_tso = false;
2025 
2026 	writel(0, hw->hw_addr + tx_ring->tdh);
2027 	writel(0, hw->hw_addr + tx_ring->tdt);
2028 }
2029 
2030 /**
2031  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2032  * @adapter: board private structure
2033  **/
2034 
2035 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2036 {
2037 	int i;
2038 
2039 	for (i = 0; i < adapter->num_tx_queues; i++)
2040 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2041 }
2042 
2043 /**
2044  * e1000_free_rx_resources - Free Rx Resources
2045  * @adapter: board private structure
2046  * @rx_ring: ring to clean the resources from
2047  *
2048  * Free all receive software resources
2049  **/
2050 
2051 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2052 				    struct e1000_rx_ring *rx_ring)
2053 {
2054 	struct pci_dev *pdev = adapter->pdev;
2055 
2056 	e1000_clean_rx_ring(adapter, rx_ring);
2057 
2058 	vfree(rx_ring->buffer_info);
2059 	rx_ring->buffer_info = NULL;
2060 
2061 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2062 			  rx_ring->dma);
2063 
2064 	rx_ring->desc = NULL;
2065 }
2066 
2067 /**
2068  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2069  * @adapter: board private structure
2070  *
2071  * Free all receive software resources
2072  **/
2073 
2074 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2075 {
2076 	int i;
2077 
2078 	for (i = 0; i < adapter->num_rx_queues; i++)
2079 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2080 }
2081 
2082 /**
2083  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2084  * @adapter: board private structure
2085  * @rx_ring: ring to free buffers from
2086  **/
2087 
2088 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2089 				struct e1000_rx_ring *rx_ring)
2090 {
2091 	struct e1000_hw *hw = &adapter->hw;
2092 	struct e1000_buffer *buffer_info;
2093 	struct pci_dev *pdev = adapter->pdev;
2094 	unsigned long size;
2095 	unsigned int i;
2096 
2097 	/* Free all the Rx ring sk_buffs */
2098 	for (i = 0; i < rx_ring->count; i++) {
2099 		buffer_info = &rx_ring->buffer_info[i];
2100 		if (buffer_info->dma &&
2101 		    adapter->clean_rx == e1000_clean_rx_irq) {
2102 			dma_unmap_single(&pdev->dev, buffer_info->dma,
2103 			                 buffer_info->length,
2104 					 DMA_FROM_DEVICE);
2105 		} else if (buffer_info->dma &&
2106 		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2107 			dma_unmap_page(&pdev->dev, buffer_info->dma,
2108 				       buffer_info->length,
2109 				       DMA_FROM_DEVICE);
2110 		}
2111 
2112 		buffer_info->dma = 0;
2113 		if (buffer_info->page) {
2114 			put_page(buffer_info->page);
2115 			buffer_info->page = NULL;
2116 		}
2117 		if (buffer_info->skb) {
2118 			dev_kfree_skb(buffer_info->skb);
2119 			buffer_info->skb = NULL;
2120 		}
2121 	}
2122 
2123 	/* there also may be some cached data from a chained receive */
2124 	if (rx_ring->rx_skb_top) {
2125 		dev_kfree_skb(rx_ring->rx_skb_top);
2126 		rx_ring->rx_skb_top = NULL;
2127 	}
2128 
2129 	size = sizeof(struct e1000_buffer) * rx_ring->count;
2130 	memset(rx_ring->buffer_info, 0, size);
2131 
2132 	/* Zero out the descriptor ring */
2133 	memset(rx_ring->desc, 0, rx_ring->size);
2134 
2135 	rx_ring->next_to_clean = 0;
2136 	rx_ring->next_to_use = 0;
2137 
2138 	writel(0, hw->hw_addr + rx_ring->rdh);
2139 	writel(0, hw->hw_addr + rx_ring->rdt);
2140 }
2141 
2142 /**
2143  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2144  * @adapter: board private structure
2145  **/
2146 
2147 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2148 {
2149 	int i;
2150 
2151 	for (i = 0; i < adapter->num_rx_queues; i++)
2152 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2153 }
2154 
2155 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2156  * and memory write and invalidate disabled for certain operations
2157  */
2158 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2159 {
2160 	struct e1000_hw *hw = &adapter->hw;
2161 	struct net_device *netdev = adapter->netdev;
2162 	u32 rctl;
2163 
2164 	e1000_pci_clear_mwi(hw);
2165 
2166 	rctl = er32(RCTL);
2167 	rctl |= E1000_RCTL_RST;
2168 	ew32(RCTL, rctl);
2169 	E1000_WRITE_FLUSH();
2170 	mdelay(5);
2171 
2172 	if (netif_running(netdev))
2173 		e1000_clean_all_rx_rings(adapter);
2174 }
2175 
2176 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2177 {
2178 	struct e1000_hw *hw = &adapter->hw;
2179 	struct net_device *netdev = adapter->netdev;
2180 	u32 rctl;
2181 
2182 	rctl = er32(RCTL);
2183 	rctl &= ~E1000_RCTL_RST;
2184 	ew32(RCTL, rctl);
2185 	E1000_WRITE_FLUSH();
2186 	mdelay(5);
2187 
2188 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2189 		e1000_pci_set_mwi(hw);
2190 
2191 	if (netif_running(netdev)) {
2192 		/* No need to loop, because 82542 supports only 1 queue */
2193 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2194 		e1000_configure_rx(adapter);
2195 		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2196 	}
2197 }
2198 
2199 /**
2200  * e1000_set_mac - Change the Ethernet Address of the NIC
2201  * @netdev: network interface device structure
2202  * @p: pointer to an address structure
2203  *
2204  * Returns 0 on success, negative on failure
2205  **/
2206 
2207 static int e1000_set_mac(struct net_device *netdev, void *p)
2208 {
2209 	struct e1000_adapter *adapter = netdev_priv(netdev);
2210 	struct e1000_hw *hw = &adapter->hw;
2211 	struct sockaddr *addr = p;
2212 
2213 	if (!is_valid_ether_addr(addr->sa_data))
2214 		return -EADDRNOTAVAIL;
2215 
2216 	/* 82542 2.0 needs to be in reset to write receive address registers */
2217 
2218 	if (hw->mac_type == e1000_82542_rev2_0)
2219 		e1000_enter_82542_rst(adapter);
2220 
2221 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2222 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2223 
2224 	e1000_rar_set(hw, hw->mac_addr, 0);
2225 
2226 	if (hw->mac_type == e1000_82542_rev2_0)
2227 		e1000_leave_82542_rst(adapter);
2228 
2229 	return 0;
2230 }
2231 
2232 /**
2233  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2234  * @netdev: network interface device structure
2235  *
2236  * The set_rx_mode entry point is called whenever the unicast or multicast
2237  * address lists or the network interface flags are updated. This routine is
2238  * responsible for configuring the hardware for proper unicast, multicast,
2239  * promiscuous mode, and all-multi behavior.
2240  **/
2241 
2242 static void e1000_set_rx_mode(struct net_device *netdev)
2243 {
2244 	struct e1000_adapter *adapter = netdev_priv(netdev);
2245 	struct e1000_hw *hw = &adapter->hw;
2246 	struct netdev_hw_addr *ha;
2247 	bool use_uc = false;
2248 	u32 rctl;
2249 	u32 hash_value;
2250 	int i, rar_entries = E1000_RAR_ENTRIES;
2251 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2252 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2253 
2254 	if (!mcarray) {
2255 		e_err(probe, "memory allocation failed\n");
2256 		return;
2257 	}
2258 
2259 	/* Check for Promiscuous and All Multicast modes */
2260 
2261 	rctl = er32(RCTL);
2262 
2263 	if (netdev->flags & IFF_PROMISC) {
2264 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2265 		rctl &= ~E1000_RCTL_VFE;
2266 	} else {
2267 		if (netdev->flags & IFF_ALLMULTI)
2268 			rctl |= E1000_RCTL_MPE;
2269 		else
2270 			rctl &= ~E1000_RCTL_MPE;
2271 		/* Enable VLAN filter if there is a VLAN */
2272 		if (e1000_vlan_used(adapter))
2273 			rctl |= E1000_RCTL_VFE;
2274 	}
2275 
2276 	if (netdev_uc_count(netdev) > rar_entries - 1) {
2277 		rctl |= E1000_RCTL_UPE;
2278 	} else if (!(netdev->flags & IFF_PROMISC)) {
2279 		rctl &= ~E1000_RCTL_UPE;
2280 		use_uc = true;
2281 	}
2282 
2283 	ew32(RCTL, rctl);
2284 
2285 	/* 82542 2.0 needs to be in reset to write receive address registers */
2286 
2287 	if (hw->mac_type == e1000_82542_rev2_0)
2288 		e1000_enter_82542_rst(adapter);
2289 
	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address.  If there are fewer
	 * than 14 addresses, clear the remaining filters.
	 */
2297 	i = 1;
2298 	if (use_uc)
2299 		netdev_for_each_uc_addr(ha, netdev) {
2300 			if (i == rar_entries)
2301 				break;
2302 			e1000_rar_set(hw, ha->addr, i++);
2303 		}
2304 
2305 	netdev_for_each_mc_addr(ha, netdev) {
2306 		if (i == rar_entries) {
2307 			/* load any remaining addresses into the hash table */
2308 			u32 hash_reg, hash_bit, mta;
2309 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2310 			hash_reg = (hash_value >> 5) & 0x7F;
2311 			hash_bit = hash_value & 0x1F;
2312 			mta = (1 << hash_bit);
2313 			mcarray[hash_reg] |= mta;
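			/* e.g. (illustrative) hash_value 0x0F65 selects
			 * MTA register 0x7B (0x0F65 >> 5) and bit 5
			 * (0x0F65 & 0x1F) within it.
			 */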
2314 		} else {
2315 			e1000_rar_set(hw, ha->addr, i++);
2316 		}
2317 	}
2318 
2319 	for (; i < rar_entries; i++) {
2320 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2321 		E1000_WRITE_FLUSH();
2322 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2323 		E1000_WRITE_FLUSH();
2324 	}
2325 
	/* Write the hash table completely.  Writing from the last register
	 * down avoids problems with write-combining chipsets and removes
	 * the need to flush after each write.
	 */
2328 	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/*
		 * The 82544 has an erratum where writing an odd MTA offset
		 * overwrites the previous even offset.  Writing the range
		 * backwards works around this because the odd offset is
		 * always written first.
		 */
2335 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2336 	}
2337 	E1000_WRITE_FLUSH();
2338 
2339 	if (hw->mac_type == e1000_82542_rev2_0)
2340 		e1000_leave_82542_rst(adapter);
2341 
2342 	kfree(mcarray);
2343 }
2344 
2345 /**
2346  * e1000_update_phy_info_task - get phy info
2347  * @work: work struct contained inside adapter struct
2348  *
2349  * Need to wait a few seconds after link up to get diagnostic information from
2350  * the phy
2351  */
2352 static void e1000_update_phy_info_task(struct work_struct *work)
2353 {
2354 	struct e1000_adapter *adapter = container_of(work,
2355 						     struct e1000_adapter,
2356 						     phy_info_task.work);
2357 	if (test_bit(__E1000_DOWN, &adapter->flags))
2358 		return;
2359 	mutex_lock(&adapter->mutex);
2360 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2361 	mutex_unlock(&adapter->mutex);
2362 }
2363 
2364 /**
 * e1000_82547_tx_fifo_stall_task - restart transmits once the Tx FIFO drains
2366  * @work: work struct contained inside adapter struct
2367  **/
2368 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2369 {
2370 	struct e1000_adapter *adapter = container_of(work,
2371 						     struct e1000_adapter,
2372 						     fifo_stall_task.work);
2373 	struct e1000_hw *hw = &adapter->hw;
2374 	struct net_device *netdev = adapter->netdev;
2375 	u32 tctl;
2376 
2377 	if (test_bit(__E1000_DOWN, &adapter->flags))
2378 		return;
2379 	mutex_lock(&adapter->mutex);
2380 	if (atomic_read(&adapter->tx_fifo_stall)) {
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
2384 			tctl = er32(TCTL);
2385 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2386 			ew32(TDFT, adapter->tx_head_addr);
2387 			ew32(TDFH, adapter->tx_head_addr);
2388 			ew32(TDFTS, adapter->tx_head_addr);
2389 			ew32(TDFHS, adapter->tx_head_addr);
2390 			ew32(TCTL, tctl);
2391 			E1000_WRITE_FLUSH();
2392 
2393 			adapter->tx_fifo_head = 0;
2394 			atomic_set(&adapter->tx_fifo_stall, 0);
2395 			netif_wake_queue(netdev);
2396 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2397 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2398 		}
2399 	}
2400 	mutex_unlock(&adapter->mutex);
2401 }
2402 
2403 bool e1000_has_link(struct e1000_adapter *adapter)
2404 {
2405 	struct e1000_hw *hw = &adapter->hw;
2406 	bool link_active = false;
2407 
2408 	/* get_link_status is set on LSC (link status) interrupt or rx
2409 	 * sequence error interrupt (except on intel ce4100).
2410 	 * get_link_status will stay false until the
2411 	 * e1000_check_for_link establishes link for copper adapters
2412 	 * ONLY
2413 	 */
2414 	switch (hw->media_type) {
2415 	case e1000_media_type_copper:
2416 		if (hw->mac_type == e1000_ce4100)
2417 			hw->get_link_status = 1;
2418 		if (hw->get_link_status) {
2419 			e1000_check_for_link(hw);
2420 			link_active = !hw->get_link_status;
2421 		} else {
2422 			link_active = true;
2423 		}
2424 		break;
2425 	case e1000_media_type_fiber:
2426 		e1000_check_for_link(hw);
2427 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2428 		break;
2429 	case e1000_media_type_internal_serdes:
2430 		e1000_check_for_link(hw);
2431 		link_active = hw->serdes_has_link;
2432 		break;
2433 	default:
2434 		break;
2435 	}
2436 
2437 	return link_active;
2438 }
2439 
2440 /**
2441  * e1000_watchdog - work function
2442  * @work: work struct contained inside adapter struct
2443  **/
2444 static void e1000_watchdog(struct work_struct *work)
2445 {
2446 	struct e1000_adapter *adapter = container_of(work,
2447 						     struct e1000_adapter,
2448 						     watchdog_task.work);
2449 	struct e1000_hw *hw = &adapter->hw;
2450 	struct net_device *netdev = adapter->netdev;
2451 	struct e1000_tx_ring *txdr = adapter->tx_ring;
2452 	u32 link, tctl;
2453 
2454 	if (test_bit(__E1000_DOWN, &adapter->flags))
2455 		return;
2456 
2457 	mutex_lock(&adapter->mutex);
2458 	link = e1000_has_link(adapter);
2459 	if ((netif_carrier_ok(netdev)) && link)
2460 		goto link_up;
2461 
2462 	if (link) {
2463 		if (!netif_carrier_ok(netdev)) {
2464 			u32 ctrl;
2465 			bool txb2b = true;
2466 			/* update snapshot of PHY registers on LSC */
2467 			e1000_get_speed_and_duplex(hw,
2468 			                           &adapter->link_speed,
2469 			                           &adapter->link_duplex);
2470 
2471 			ctrl = er32(CTRL);
2472 			pr_info("%s NIC Link is Up %d Mbps %s, "
2473 				"Flow Control: %s\n",
2474 				netdev->name,
2475 				adapter->link_speed,
2476 				adapter->link_duplex == FULL_DUPLEX ?
2477 				"Full Duplex" : "Half Duplex",
2478 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2479 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2480 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2481 				E1000_CTRL_TFCE) ? "TX" : "None")));
2482 
2483 			/* adjust timeout factor according to speed/duplex */
2484 			adapter->tx_timeout_factor = 1;
2485 			switch (adapter->link_speed) {
2486 			case SPEED_10:
2487 				txb2b = false;
2488 				adapter->tx_timeout_factor = 16;
2489 				break;
2490 			case SPEED_100:
2491 				txb2b = false;
2492 				/* maybe add some timeout factor ? */
2493 				break;
2494 			}
2495 
2496 			/* enable transmits in the hardware */
2497 			tctl = er32(TCTL);
2498 			tctl |= E1000_TCTL_EN;
2499 			ew32(TCTL, tctl);
2500 
2501 			netif_carrier_on(netdev);
2502 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2503 				schedule_delayed_work(&adapter->phy_info_task,
2504 						      2 * HZ);
2505 			adapter->smartspeed = 0;
2506 		}
2507 	} else {
2508 		if (netif_carrier_ok(netdev)) {
2509 			adapter->link_speed = 0;
2510 			adapter->link_duplex = 0;
2511 			pr_info("%s NIC Link is Down\n",
2512 				netdev->name);
2513 			netif_carrier_off(netdev);
2514 
2515 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2516 				schedule_delayed_work(&adapter->phy_info_task,
2517 						      2 * HZ);
2518 		}
2519 
2520 		e1000_smartspeed(adapter);
2521 	}
2522 
2523 link_up:
2524 	e1000_update_stats(adapter);
2525 
2526 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2527 	adapter->tpt_old = adapter->stats.tpt;
2528 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2529 	adapter->colc_old = adapter->stats.colc;
2530 
2531 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2532 	adapter->gorcl_old = adapter->stats.gorcl;
2533 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2534 	adapter->gotcl_old = adapter->stats.gotcl;
2535 
2536 	e1000_update_adaptive(hw);
2537 
2538 	if (!netif_carrier_ok(netdev)) {
2539 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2540 			/* We've lost link, so the controller stops DMA,
2541 			 * but we've got queued Tx work that's never going
2542 			 * to get done, so reset controller to flush Tx.
2543 			 * (Do the reset outside of interrupt context). */
2544 			adapter->tx_timeout_count++;
2545 			schedule_work(&adapter->reset_task);
2546 			/* exit immediately since reset is imminent */
2547 			goto unlock;
2548 		}
2549 	}
2550 
2551 	/* Simple mode for Interrupt Throttle Rate (ITR) */
2552 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2553 		/*
2554 		 * Symmetric Tx/Rx gets a reduced ITR=2000;
2555 		 * Total asymmetrical Tx or Rx gets ITR=8000;
2556 		 * everyone else is between 2000-8000.
2557 		 */
2558 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2559 		u32 dif = (adapter->gotcl > adapter->gorcl ?
2560 			    adapter->gotcl - adapter->gorcl :
2561 			    adapter->gorcl - adapter->gotcl) / 10000;
2562 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2563 
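		/* ITR is programmed in 256 ns units: e.g. itr = 8000 ints/s
		 * is an interval of 125000 ns, and 1000000000 / (8000 * 256)
		 * = 488 of those units.
		 */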
2564 		ew32(ITR, 1000000000 / (itr * 256));
2565 	}
2566 
2567 	/* Cause software interrupt to ensure rx ring is cleaned */
2568 	ew32(ICS, E1000_ICS_RXDMT0);
2569 
2570 	/* Force detection of hung controller every watchdog period */
2571 	adapter->detect_tx_hung = true;
2572 
2573 	/* Reschedule the task */
2574 	if (!test_bit(__E1000_DOWN, &adapter->flags))
2575 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2576 
2577 unlock:
2578 	mutex_unlock(&adapter->mutex);
2579 }
2580 
2581 enum latency_range {
2582 	lowest_latency = 0,
2583 	low_latency = 1,
2584 	bulk_latency = 2,
2585 	latency_invalid = 255
2586 };
2587 
2588 /**
2589  * e1000_update_itr - update the dynamic ITR value based on statistics
2590  * @adapter: pointer to adapter
2591  * @itr_setting: current adapter->itr
2592  * @packets: the number of packets during this measurement interval
2593  * @bytes: the number of bytes during this measurement interval
2594  *
2595  *      Stores a new ITR value based on packets and byte
2596  *      counts during the last interrupt.  The advantage of per interrupt
2597  *      computation is faster updates and more accurate ITR for the current
2598  *      traffic pattern.  Constants in this function were computed
2599  *      based on theoretical maximum wire speed and thresholds were set based
2600  *      on testing data as well as attempting to minimize response time
2601  *      while increasing bulk throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
2603  *      parameter (see e1000_param.c)
2604  **/
2605 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2606 				     u16 itr_setting, int packets, int bytes)
2607 {
2608 	unsigned int retval = itr_setting;
2609 	struct e1000_hw *hw = &adapter->hw;
2610 
2611 	if (unlikely(hw->mac_type < e1000_82540))
2612 		goto update_itr_done;
2613 
2614 	if (packets == 0)
2615 		goto update_itr_done;
2616 
2617 	switch (itr_setting) {
2618 	case lowest_latency:
		/* jumbo frames get bulk treatment */
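		/* e.g. 3 packets totalling 30000 bytes average 10000
		 * bytes/packet and are bumped straight to bulk_latency
		 */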
2620 		if (bytes/packets > 8000)
2621 			retval = bulk_latency;
2622 		else if ((packets < 5) && (bytes > 512))
2623 			retval = low_latency;
2624 		break;
2625 	case low_latency:  /* 50 usec aka 20000 ints/s */
2626 		if (bytes > 10000) {
2627 			/* jumbo frames need bulk latency setting */
2628 			if (bytes/packets > 8000)
2629 				retval = bulk_latency;
2630 			else if ((packets < 10) || ((bytes/packets) > 1200))
2631 				retval = bulk_latency;
2632 			else if ((packets > 35))
2633 				retval = lowest_latency;
2634 		} else if (bytes/packets > 2000)
2635 			retval = bulk_latency;
2636 		else if (packets <= 2 && bytes < 512)
2637 			retval = lowest_latency;
2638 		break;
2639 	case bulk_latency: /* 250 usec aka 4000 ints/s */
2640 		if (bytes > 25000) {
2641 			if (packets > 35)
2642 				retval = low_latency;
2643 		} else if (bytes < 6000) {
2644 			retval = low_latency;
2645 		}
2646 		break;
2647 	}
2648 
2649 update_itr_done:
2650 	return retval;
2651 }
2652 
2653 static void e1000_set_itr(struct e1000_adapter *adapter)
2654 {
2655 	struct e1000_hw *hw = &adapter->hw;
2656 	u16 current_itr;
2657 	u32 new_itr = adapter->itr;
2658 
2659 	if (unlikely(hw->mac_type < e1000_82540))
2660 		return;
2661 
2662 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2663 	if (unlikely(adapter->link_speed != SPEED_1000)) {
2664 		current_itr = 0;
2665 		new_itr = 4000;
2666 		goto set_itr_now;
2667 	}
2668 
2669 	adapter->tx_itr = e1000_update_itr(adapter,
2670 	                            adapter->tx_itr,
2671 	                            adapter->total_tx_packets,
2672 	                            adapter->total_tx_bytes);
2673 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2674 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2675 		adapter->tx_itr = low_latency;
2676 
2677 	adapter->rx_itr = e1000_update_itr(adapter,
2678 	                            adapter->rx_itr,
2679 	                            adapter->total_rx_packets,
2680 	                            adapter->total_rx_bytes);
2681 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2682 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2683 		adapter->rx_itr = low_latency;
2684 
2685 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2686 
2687 	switch (current_itr) {
2688 	/* counts and packets in update_itr are dependent on these numbers */
2689 	case lowest_latency:
2690 		new_itr = 70000;
2691 		break;
2692 	case low_latency:
2693 		new_itr = 20000; /* aka hwitr = ~200 */
2694 		break;
2695 	case bulk_latency:
2696 		new_itr = 4000;
2697 		break;
2698 	default:
2699 		break;
2700 	}
2701 
2702 set_itr_now:
2703 	if (new_itr != adapter->itr) {
2704 		/* this attempts to bias the interrupt rate towards Bulk
2705 		 * by adding intermediate steps when interrupt rate is
2706 		 * increasing */
2707 		new_itr = new_itr > adapter->itr ?
2708 		             min(adapter->itr + (new_itr >> 2), new_itr) :
2709 		             new_itr;
2710 		adapter->itr = new_itr;
2711 		ew32(ITR, 1000000000 / (new_itr * 256));
2712 	}
2713 }
2714 
2715 #define E1000_TX_FLAGS_CSUM		0x00000001
2716 #define E1000_TX_FLAGS_VLAN		0x00000002
2717 #define E1000_TX_FLAGS_TSO		0x00000004
2718 #define E1000_TX_FLAGS_IPV4		0x00000008
2719 #define E1000_TX_FLAGS_NO_FCS		0x00000010
2720 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2721 #define E1000_TX_FLAGS_VLAN_SHIFT	16
2722 
2723 static int e1000_tso(struct e1000_adapter *adapter,
2724 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2725 {
2726 	struct e1000_context_desc *context_desc;
2727 	struct e1000_buffer *buffer_info;
2728 	unsigned int i;
2729 	u32 cmd_length = 0;
2730 	u16 ipcse = 0, tucse, mss;
2731 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2732 	int err;
2733 
2734 	if (skb_is_gso(skb)) {
2735 		if (skb_header_cloned(skb)) {
2736 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2737 			if (err)
2738 				return err;
2739 		}
2740 
2741 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2742 		mss = skb_shinfo(skb)->gso_size;
2743 		if (skb->protocol == htons(ETH_P_IP)) {
2744 			struct iphdr *iph = ip_hdr(skb);
2745 			iph->tot_len = 0;
2746 			iph->check = 0;
2747 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2748 								 iph->daddr, 0,
2749 								 IPPROTO_TCP,
2750 								 0);
2751 			cmd_length = E1000_TXD_CMD_IP;
2752 			ipcse = skb_transport_offset(skb) - 1;
2753 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
2754 			ipv6_hdr(skb)->payload_len = 0;
2755 			tcp_hdr(skb)->check =
2756 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2757 						 &ipv6_hdr(skb)->daddr,
2758 						 0, IPPROTO_TCP, 0);
2759 			ipcse = 0;
2760 		}
2761 		ipcss = skb_network_offset(skb);
2762 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2763 		tucss = skb_transport_offset(skb);
2764 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2765 		tucse = 0;
2766 
2767 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2768 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2769 
2770 		i = tx_ring->next_to_use;
2771 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2772 		buffer_info = &tx_ring->buffer_info[i];
2773 
2774 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2775 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2776 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2777 		context_desc->upper_setup.tcp_fields.tucss = tucss;
2778 		context_desc->upper_setup.tcp_fields.tucso = tucso;
2779 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2780 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2781 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2782 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2783 
2784 		buffer_info->time_stamp = jiffies;
2785 		buffer_info->next_to_watch = i;
2786 
		if (++i == tx_ring->count)
			i = 0;
2788 		tx_ring->next_to_use = i;
2789 
2790 		return true;
2791 	}
2792 	return false;
2793 }
2794 
2795 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2796 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2797 {
2798 	struct e1000_context_desc *context_desc;
2799 	struct e1000_buffer *buffer_info;
2800 	unsigned int i;
2801 	u8 css;
2802 	u32 cmd_len = E1000_TXD_CMD_DEXT;
2803 
2804 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2805 		return false;
2806 
2807 	switch (skb->protocol) {
2808 	case cpu_to_be16(ETH_P_IP):
2809 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2810 			cmd_len |= E1000_TXD_CMD_TCP;
2811 		break;
2812 	case cpu_to_be16(ETH_P_IPV6):
2813 		/* XXX not handling all IPV6 headers */
2814 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2815 			cmd_len |= E1000_TXD_CMD_TCP;
2816 		break;
2817 	default:
2818 		if (unlikely(net_ratelimit()))
2819 			e_warn(drv, "checksum_partial proto=%x!\n",
2820 			       skb->protocol);
2821 		break;
2822 	}
2823 
2824 	css = skb_checksum_start_offset(skb);
2825 
2826 	i = tx_ring->next_to_use;
2827 	buffer_info = &tx_ring->buffer_info[i];
2828 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2829 
2830 	context_desc->lower_setup.ip_config = 0;
2831 	context_desc->upper_setup.tcp_fields.tucss = css;
2832 	context_desc->upper_setup.tcp_fields.tucso =
2833 		css + skb->csum_offset;
2834 	context_desc->upper_setup.tcp_fields.tucse = 0;
2835 	context_desc->tcp_seg_setup.data = 0;
2836 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2837 
2838 	buffer_info->time_stamp = jiffies;
2839 	buffer_info->next_to_watch = i;
2840 
	if (unlikely(++i == tx_ring->count))
		i = 0;
2842 	tx_ring->next_to_use = i;
2843 
2844 	return true;
2845 }
2846 
2847 #define E1000_MAX_TXD_PWR	12
2848 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
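/* i.e. at most 1 << 12 = 4096 bytes of payload per data descriptor */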
2849 
2850 static int e1000_tx_map(struct e1000_adapter *adapter,
2851 			struct e1000_tx_ring *tx_ring,
2852 			struct sk_buff *skb, unsigned int first,
2853 			unsigned int max_per_txd, unsigned int nr_frags,
2854 			unsigned int mss)
2855 {
2856 	struct e1000_hw *hw = &adapter->hw;
2857 	struct pci_dev *pdev = adapter->pdev;
2858 	struct e1000_buffer *buffer_info;
2859 	unsigned int len = skb_headlen(skb);
2860 	unsigned int offset = 0, size, count = 0, i;
2861 	unsigned int f, bytecount, segs;
2862 
2863 	i = tx_ring->next_to_use;
2864 
2865 	while (len) {
2866 		buffer_info = &tx_ring->buffer_info[i];
2867 		size = min(len, max_per_txd);
2868 		/* Workaround for Controller erratum --
2869 		 * descriptor for non-tso packet in a linear SKB that follows a
2870 		 * tso gets written back prematurely before the data is fully
2871 		 * DMA'd to the controller */
2872 		if (!skb->data_len && tx_ring->last_tx_tso &&
2873 		    !skb_is_gso(skb)) {
2874 			tx_ring->last_tx_tso = false;
2875 			size -= 4;
2876 		}
2877 
2878 		/* Workaround for premature desc write-backs
2879 		 * in TSO mode.  Append 4-byte sentinel desc */
2880 		if (unlikely(mss && !nr_frags && size == len && size > 8))
2881 			size -= 4;
		/* Workaround for errata 10; it applies to all controllers
		 * in PCI-X mode.  The fix is to make sure that the first
		 * descriptor of a packet is smaller than
		 * 2048 - 16 - 16 (or 2016) bytes.
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;
2890 
2891 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2892 		 * terminating buffers within evenly-aligned dwords. */
2893 		if (unlikely(adapter->pcix_82544 &&
2894 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2895 		   size > 4))
2896 			size -= 4;
2897 
2898 		buffer_info->length = size;
2899 		/* set time_stamp *before* dma to help avoid a possible race */
2900 		buffer_info->time_stamp = jiffies;
2901 		buffer_info->mapped_as_page = false;
2902 		buffer_info->dma = dma_map_single(&pdev->dev,
2903 						  skb->data + offset,
2904 						  size,	DMA_TO_DEVICE);
2905 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2906 			goto dma_error;
2907 		buffer_info->next_to_watch = i;
2908 
2909 		len -= size;
2910 		offset += size;
2911 		count++;
2912 		if (len) {
2913 			i++;
2914 			if (unlikely(i == tx_ring->count))
2915 				i = 0;
2916 		}
2917 	}
2918 
2919 	for (f = 0; f < nr_frags; f++) {
2920 		const struct skb_frag_struct *frag;
2921 
2922 		frag = &skb_shinfo(skb)->frags[f];
2923 		len = skb_frag_size(frag);
2924 		offset = 0;
2925 
2926 		while (len) {
2927 			unsigned long bufend;
2928 			i++;
2929 			if (unlikely(i == tx_ring->count))
2930 				i = 0;
2931 
2932 			buffer_info = &tx_ring->buffer_info[i];
2933 			size = min(len, max_per_txd);
2934 			/* Workaround for premature desc write-backs
2935 			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && f == (nr_frags-1) &&
				     size == len && size > 8))
2937 				size -= 4;
2938 			/* Workaround for potential 82544 hang in PCI-X.
2939 			 * Avoid terminating buffers within evenly-aligned
2940 			 * dwords. */
2941 			bufend = (unsigned long)
2942 				page_to_phys(skb_frag_page(frag));
2943 			bufend += offset + size - 1;
2944 			if (unlikely(adapter->pcix_82544 &&
2945 				     !(bufend & 4) &&
2946 				     size > 4))
2947 				size -= 4;
2948 
2949 			buffer_info->length = size;
2950 			buffer_info->time_stamp = jiffies;
2951 			buffer_info->mapped_as_page = true;
2952 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2953 						offset, size, DMA_TO_DEVICE);
2954 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2955 				goto dma_error;
2956 			buffer_info->next_to_watch = i;
2957 
2958 			len -= size;
2959 			offset += size;
2960 			count++;
2961 		}
2962 	}
2963 
2964 	segs = skb_shinfo(skb)->gso_segs ?: 1;
2965 	/* multiply data chunks by size of headers */
2966 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
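	/* e.g. (illustrative) a 4-segment TSO skb with a 66-byte linear
	 * header area counts that header 3 extra times toward the byte
	 * statistics.
	 */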
2967 
2968 	tx_ring->buffer_info[i].skb = skb;
2969 	tx_ring->buffer_info[i].segs = segs;
2970 	tx_ring->buffer_info[i].bytecount = bytecount;
2971 	tx_ring->buffer_info[first].next_to_watch = i;
2972 
2973 	return count;
2974 
2975 dma_error:
2976 	dev_err(&pdev->dev, "TX DMA map failed\n");
2977 	buffer_info->dma = 0;
2978 	if (count)
2979 		count--;
2980 
2981 	while (count--) {
		if (i == 0)
2983 			i += tx_ring->count;
2984 		i--;
2985 		buffer_info = &tx_ring->buffer_info[i];
2986 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2987 	}
2988 
2989 	return 0;
2990 }
2991 
2992 static void e1000_tx_queue(struct e1000_adapter *adapter,
2993 			   struct e1000_tx_ring *tx_ring, int tx_flags,
2994 			   int count)
2995 {
2996 	struct e1000_hw *hw = &adapter->hw;
2997 	struct e1000_tx_desc *tx_desc = NULL;
2998 	struct e1000_buffer *buffer_info;
2999 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3000 	unsigned int i;
3001 
3002 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3003 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3004 		             E1000_TXD_CMD_TSE;
3005 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3006 
3007 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3008 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3009 	}
3010 
3011 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3012 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3013 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3014 	}
3015 
3016 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3017 		txd_lower |= E1000_TXD_CMD_VLE;
3018 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3019 	}
3020 
3021 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3022 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
3023 
3024 	i = tx_ring->next_to_use;
3025 
3026 	while (count--) {
3027 		buffer_info = &tx_ring->buffer_info[i];
3028 		tx_desc = E1000_TX_DESC(*tx_ring, i);
3029 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3030 		tx_desc->lower.data =
3031 			cpu_to_le32(txd_lower | buffer_info->length);
3032 		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
3034 	}
3035 
3036 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3037 
3038 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3039 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3040 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3041 
3042 	/* Force memory writes to complete before letting h/w
3043 	 * know there are new descriptors to fetch.  (Only
3044 	 * applicable for weak-ordered memory model archs,
3045 	 * such as IA-64). */
3046 	wmb();
3047 
3048 	tx_ring->next_to_use = i;
3049 	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
3052 	mmiowb();
3053 }
3054 
/* 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 */
3063 
3064 #define E1000_FIFO_HDR			0x10
3065 #define E1000_82547_PAD_LEN		0x3E0
3066 
3067 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3068 				       struct sk_buff *skb)
3069 {
3070 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3071 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3072 
3073 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
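	/* e.g. a 1514-byte frame plus the 16-byte FIFO header is 1530
	 * bytes, which ALIGN() rounds up to 1536.
	 */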
3074 
3075 	if (adapter->link_duplex != HALF_DUPLEX)
3076 		goto no_fifo_stall_required;
3077 
3078 	if (atomic_read(&adapter->tx_fifo_stall))
3079 		return 1;
3080 
3081 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3082 		atomic_set(&adapter->tx_fifo_stall, 1);
3083 		return 1;
3084 	}
3085 
3086 no_fifo_stall_required:
3087 	adapter->tx_fifo_head += skb_fifo_len;
3088 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3089 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3090 	return 0;
3091 }
3092 
3093 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3094 {
3095 	struct e1000_adapter *adapter = netdev_priv(netdev);
3096 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3097 
3098 	netif_stop_queue(netdev);
3099 	/* Herbert's original patch had:
3100 	 *  smp_mb__after_netif_stop_queue();
3101 	 * but since that doesn't exist yet, just open code it. */
3102 	smp_mb();
3103 
3104 	/* We need to check again in a case another CPU has just
3105 	 * made room available. */
3106 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3107 		return -EBUSY;
3108 
3109 	/* A reprieve! */
3110 	netif_start_queue(netdev);
3111 	++adapter->restart_queue;
3112 	return 0;
3113 }
3114 
3115 static int e1000_maybe_stop_tx(struct net_device *netdev,
3116                                struct e1000_tx_ring *tx_ring, int size)
3117 {
3118 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3119 		return 0;
3120 	return __e1000_maybe_stop_tx(netdev, size);
3121 }
3122 
3123 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
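/* e.g. a 9000-byte mapping with max_txd_pwr = 12 (4096-byte chunks)
 * costs (9000 >> 12) + 1 = 3 descriptors.
 */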
3124 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3125 				    struct net_device *netdev)
3126 {
3127 	struct e1000_adapter *adapter = netdev_priv(netdev);
3128 	struct e1000_hw *hw = &adapter->hw;
3129 	struct e1000_tx_ring *tx_ring;
3130 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3131 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3132 	unsigned int tx_flags = 0;
3133 	unsigned int len = skb_headlen(skb);
3134 	unsigned int nr_frags;
3135 	unsigned int mss;
3136 	int count = 0;
3137 	int tso;
3138 	unsigned int f;
3139 
3140 	/* This goes back to the question of how to logically map a tx queue
3141 	 * to a flow.  Right now, performance is impacted slightly negatively
3142 	 * if using multiple tx queues.  If the stack breaks away from a
3143 	 * single qdisc implementation, we can look at this again. */
3144 	tx_ring = adapter->tx_ring;
3145 
3146 	if (unlikely(skb->len <= 0)) {
3147 		dev_kfree_skb_any(skb);
3148 		return NETDEV_TX_OK;
3149 	}
3150 
3151 	mss = skb_shinfo(skb)->gso_size;
3152 	/* The controller does a simple calculation to
3153 	 * make sure there is enough room in the FIFO before
3154 	 * initiating the DMA for each buffer.  The calc is:
3155 	 * 4 = ceil(buffer len/mss).  To make sure we don't
3156 	 * overrun the FIFO, adjust the max buffer len if mss
3157 	 * drops. */
3158 	if (mss) {
3159 		u8 hdr_len;
3160 		max_per_txd = min(mss << 2, max_per_txd);
3161 		max_txd_pwr = fls(max_per_txd) - 1;
3162 
3163 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3164 		if (skb->data_len && hdr_len == len) {
3165 			switch (hw->mac_type) {
3166 				unsigned int pull_size;
3167 			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements.
				 * NOTE: this is a TSO-only workaround; if the
				 * end byte alignment is not correct, move us
				 * into the next dword.
				 */
3174 				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3175 					break;
3177 				pull_size = min((unsigned int)4, skb->data_len);
3178 				if (!__pskb_pull_tail(skb, pull_size)) {
3179 					e_err(drv, "__pskb_pull_tail "
3180 					      "failed.\n");
3181 					dev_kfree_skb_any(skb);
3182 					return NETDEV_TX_OK;
3183 				}
3184 				len = skb_headlen(skb);
3185 				break;
3186 			default:
3187 				/* do nothing */
3188 				break;
3189 			}
3190 		}
3191 	}
3192 
3193 	/* reserve a descriptor for the offload context */
3194 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3195 		count++;
3196 	count++;
3197 
3198 	/* Controller Erratum workaround */
3199 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3200 		count++;
3201 
3202 	count += TXD_USE_COUNT(len, max_txd_pwr);
3203 
3204 	if (adapter->pcix_82544)
3205 		count++;
3206 
	/* Workaround for errata 10; it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
3210 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3211 			(len > 2015)))
3212 		count++;
3213 
3214 	nr_frags = skb_shinfo(skb)->nr_frags;
3215 	for (f = 0; f < nr_frags; f++)
3216 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3217 				       max_txd_pwr);
3218 	if (adapter->pcix_82544)
3219 		count += nr_frags;
3220 
3221 	/* need: count + 2 desc gap to keep tail from touching
3222 	 * head, otherwise try next time */
3223 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3224 		return NETDEV_TX_BUSY;
3225 
3226 	if (unlikely((hw->mac_type == e1000_82547) &&
3227 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3228 		netif_stop_queue(netdev);
3229 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3230 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3231 		return NETDEV_TX_BUSY;
3232 	}
3233 
3234 	if (vlan_tx_tag_present(skb)) {
3235 		tx_flags |= E1000_TX_FLAGS_VLAN;
3236 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3237 	}
3238 
3239 	first = tx_ring->next_to_use;
3240 
3241 	tso = e1000_tso(adapter, tx_ring, skb);
3242 	if (tso < 0) {
3243 		dev_kfree_skb_any(skb);
3244 		return NETDEV_TX_OK;
3245 	}
3246 
3247 	if (likely(tso)) {
3248 		if (likely(hw->mac_type != e1000_82544))
3249 			tx_ring->last_tx_tso = true;
3250 		tx_flags |= E1000_TX_FLAGS_TSO;
3251 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3252 		tx_flags |= E1000_TX_FLAGS_CSUM;
3253 
3254 	if (likely(skb->protocol == htons(ETH_P_IP)))
3255 		tx_flags |= E1000_TX_FLAGS_IPV4;
3256 
3257 	if (unlikely(skb->no_fcs))
3258 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3259 
3260 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3261 	                     nr_frags, mss);
3262 
3263 	if (count) {
3264 		skb_tx_timestamp(skb);
3265 
3266 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3267 		/* Make sure there is space in the ring for the next send. */
3268 		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3269 
3270 	} else {
3271 		dev_kfree_skb_any(skb);
3272 		tx_ring->buffer_info[first].time_stamp = 0;
3273 		tx_ring->next_to_use = first;
3274 	}
3275 
3276 	return NETDEV_TX_OK;
3277 }
3278 
3279 #define NUM_REGS 38 /* 1 based count */
3280 static void e1000_regdump(struct e1000_adapter *adapter)
3281 {
3282 	struct e1000_hw *hw = &adapter->hw;
3283 	u32 regs[NUM_REGS];
3284 	u32 *regs_buff = regs;
3285 	int i = 0;
3286 
3287 	static const char * const reg_name[] = {
3288 		"CTRL",  "STATUS",
3289 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3290 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3291 		"TIDV", "TXDCTL", "TADV", "TARC0",
3292 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3293 		"TXDCTL1", "TARC1",
3294 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3295 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3296 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3297 	};
3298 
3299 	regs_buff[0]  = er32(CTRL);
3300 	regs_buff[1]  = er32(STATUS);
3301 
3302 	regs_buff[2]  = er32(RCTL);
3303 	regs_buff[3]  = er32(RDLEN);
3304 	regs_buff[4]  = er32(RDH);
3305 	regs_buff[5]  = er32(RDT);
3306 	regs_buff[6]  = er32(RDTR);
3307 
3308 	regs_buff[7]  = er32(TCTL);
3309 	regs_buff[8]  = er32(TDBAL);
3310 	regs_buff[9]  = er32(TDBAH);
3311 	regs_buff[10] = er32(TDLEN);
3312 	regs_buff[11] = er32(TDH);
3313 	regs_buff[12] = er32(TDT);
3314 	regs_buff[13] = er32(TIDV);
3315 	regs_buff[14] = er32(TXDCTL);
3316 	regs_buff[15] = er32(TADV);
3317 	regs_buff[16] = er32(TARC0);
3318 
3319 	regs_buff[17] = er32(TDBAL1);
3320 	regs_buff[18] = er32(TDBAH1);
3321 	regs_buff[19] = er32(TDLEN1);
3322 	regs_buff[20] = er32(TDH1);
3323 	regs_buff[21] = er32(TDT1);
3324 	regs_buff[22] = er32(TXDCTL1);
3325 	regs_buff[23] = er32(TARC1);
3326 	regs_buff[24] = er32(CTRL_EXT);
3327 	regs_buff[25] = er32(ERT);
3328 	regs_buff[26] = er32(RDBAL0);
3329 	regs_buff[27] = er32(RDBAH0);
3330 	regs_buff[28] = er32(TDFH);
3331 	regs_buff[29] = er32(TDFT);
3332 	regs_buff[30] = er32(TDFHS);
3333 	regs_buff[31] = er32(TDFTS);
3334 	regs_buff[32] = er32(TDFPC);
3335 	regs_buff[33] = er32(RDFH);
3336 	regs_buff[34] = er32(RDFT);
3337 	regs_buff[35] = er32(RDFHS);
3338 	regs_buff[36] = er32(RDFTS);
3339 	regs_buff[37] = er32(RDFPC);
3340 
3341 	pr_info("Register dump\n");
3342 	for (i = 0; i < NUM_REGS; i++)
3343 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3344 }
3345 
3346 /*
3347  * e1000_dump: Print registers, tx ring and rx ring
3348  */
3349 static void e1000_dump(struct e1000_adapter *adapter)
3350 {
3351 	/* this code doesn't handle multiple rings */
3352 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3353 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3354 	int i;
3355 
3356 	if (!netif_msg_hw(adapter))
3357 		return;
3358 
3359 	/* Print Registers */
3360 	e1000_regdump(adapter);
3361 
3362 	/*
3363 	 * transmit dump
3364 	 */
3365 	pr_info("TX Desc ring0 dump\n");
3366 
3367 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3368 	 *
3369 	 * Legacy Transmit Descriptor
3370 	 *   +--------------------------------------------------------------+
3371 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3372 	 *   +--------------------------------------------------------------+
3373 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3374 	 *   +--------------------------------------------------------------+
3375 	 *   63       48 47        36 35    32 31     24 23    16 15        0
3376 	 *
3377 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3378 	 *   63      48 47    40 39       32 31             16 15    8 7      0
3379 	 *   +----------------------------------------------------------------+
3380 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3381 	 *   +----------------------------------------------------------------+
3382 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3383 	 *   +----------------------------------------------------------------+
3384 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3385 	 *
3386 	 * Extended Data Descriptor (DTYP=0x1)
3387 	 *   +----------------------------------------------------------------+
3388 	 * 0 |                     Buffer Address [63:0]                      |
3389 	 *   +----------------------------------------------------------------+
3390 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3391 	 *   +----------------------------------------------------------------+
3392 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3393 	 */
3394 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3395 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3396 
3397 	if (!netif_msg_tx_done(adapter))
3398 		goto rx_ring_summary;
3399 
3400 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3401 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3402 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3403 		struct my_u { __le64 a; __le64 b; };
3404 		struct my_u *u = (struct my_u *)tx_desc;
3405 		const char *type;
3406 
3407 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3408 			type = "NTC/U";
3409 		else if (i == tx_ring->next_to_use)
3410 			type = "NTU";
3411 		else if (i == tx_ring->next_to_clean)
3412 			type = "NTC";
3413 		else
3414 			type = "";
3415 
3416 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3417 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3418 			le64_to_cpu(u->a), le64_to_cpu(u->b),
3419 			(u64)buffer_info->dma, buffer_info->length,
3420 			buffer_info->next_to_watch,
3421 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3422 	}
3423 
3424 rx_ring_summary:
3425 	/*
3426 	 * receive dump
3427 	 */
3428 	pr_info("\nRX Desc ring dump\n");
3429 
3430 	/* Legacy Receive Descriptor Format
3431 	 *
3432 	 * +-----------------------------------------------------+
3433 	 * |                Buffer Address [63:0]                |
3434 	 * +-----------------------------------------------------+
3435 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3436 	 * +-----------------------------------------------------+
3437 	 * 63       48 47    40 39      32 31         16 15      0
3438 	 */
3439 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3440 
3441 	if (!netif_msg_rx_status(adapter))
3442 		goto exit;
3443 
3444 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3445 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3446 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3447 		struct my_u { __le64 a; __le64 b; };
3448 		struct my_u *u = (struct my_u *)rx_desc;
3449 		const char *type;
3450 
3451 		if (i == rx_ring->next_to_use)
3452 			type = "NTU";
3453 		else if (i == rx_ring->next_to_clean)
3454 			type = "NTC";
3455 		else
3456 			type = "";
3457 
3458 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3459 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3460 			(u64)buffer_info->dma, buffer_info->skb, type);
3461 	} /* for */
3462 
3463 	/* dump the descriptor caches */
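	/* offsets 0x6000-0x63FF (Rx) and 0x7000-0x73FF (Tx) are read as raw
	 * 32-bit words and printed four per line as two 64-bit values
	 */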
3464 	/* rx */
3465 	pr_info("Rx descriptor cache in 64bit format\n");
3466 	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3467 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3468 			i,
3469 			readl(adapter->hw.hw_addr + i+4),
3470 			readl(adapter->hw.hw_addr + i),
3471 			readl(adapter->hw.hw_addr + i+12),
3472 			readl(adapter->hw.hw_addr + i+8));
3473 	}
3474 	/* tx */
3475 	pr_info("Tx descriptor cache in 64bit format\n");
3476 	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3477 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3478 			i,
3479 			readl(adapter->hw.hw_addr + i+4),
3480 			readl(adapter->hw.hw_addr + i),
3481 			readl(adapter->hw.hw_addr + i+12),
3482 			readl(adapter->hw.hw_addr + i+8));
3483 	}
3484 exit:
3485 	return;
3486 }
3487 
3488 /**
3489  * e1000_tx_timeout - Respond to a Tx Hang
3490  * @netdev: network interface device structure
3491  **/
3492 
3493 static void e1000_tx_timeout(struct net_device *netdev)
3494 {
3495 	struct e1000_adapter *adapter = netdev_priv(netdev);
3496 
3497 	/* Do the reset outside of interrupt context */
3498 	adapter->tx_timeout_count++;
3499 	schedule_work(&adapter->reset_task);
3500 }
3501 
3502 static void e1000_reset_task(struct work_struct *work)
3503 {
3504 	struct e1000_adapter *adapter =
3505 		container_of(work, struct e1000_adapter, reset_task);
3506 
3507 	if (test_bit(__E1000_DOWN, &adapter->flags))
3508 		return;
3509 	e_err(drv, "Reset adapter\n");
3510 	e1000_reinit_safe(adapter);
3511 }
3512 
3513 /**
3514  * e1000_get_stats - Get System Network Statistics
3515  * @netdev: network interface device structure
3516  *
3517  * Returns the address of the device statistics structure.
3518  * The statistics are actually updated from the watchdog.
3519  **/
3520 
3521 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3522 {
3523 	/* only return the current stats */
3524 	return &netdev->stats;
3525 }
3526 
3527 /**
3528  * e1000_change_mtu - Change the Maximum Transfer Unit
3529  * @netdev: network interface device structure
3530  * @new_mtu: new value for maximum frame size
3531  *
3532  * Returns 0 on success, negative on failure
3533  **/
3534 
3535 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3536 {
3537 	struct e1000_adapter *adapter = netdev_priv(netdev);
3538 	struct e1000_hw *hw = &adapter->hw;
3539 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3540 
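	/* max_frame is the resulting on-wire frame size: the requested MTU
	 * plus the Ethernet header and FCS
	 */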
3541 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3542 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3543 		e_err(probe, "Invalid MTU setting\n");
3544 		return -EINVAL;
3545 	}
3546 
3547 	/* Adapter-specific max frame size limits. */
3548 	switch (hw->mac_type) {
3549 	case e1000_undefined ... e1000_82542_rev2_1:
3550 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3551 			e_err(probe, "Jumbo Frames not supported.\n");
3552 			return -EINVAL;
3553 		}
3554 		break;
3555 	default:
3556 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3557 		break;
3558 	}
3559 
3560 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3561 		msleep(1);
3562 	/* e1000_down has a dependency on max_frame_size */
3563 	hw->max_frame_size = max_frame;
3564 	if (netif_running(netdev))
3565 		e1000_down(adapter);
3566 
3567 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3568 	 * means we reserve 2 more, this pushes us to allocate from the next
3569 	 * larger slab size.
3570 	 * i.e. RXBUFFER_2048 --> size-4096 slab
3571 	 *  however with the new *_jumbo_rx* routines, jumbo receives will use
3572 	 *  fragmented skbs */
3573 
3574 	if (max_frame <= E1000_RXBUFFER_2048)
3575 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3576 	else
3577 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3578 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3579 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3580 		adapter->rx_buffer_len = PAGE_SIZE;
3581 #endif
3582 
3583 	/* adjust allocation if LPE protects us, and we aren't using SBP */
3584 	if (!hw->tbi_compatibility_on &&
3585 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3586 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3587 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3588 
3589 	pr_info("%s changing MTU from %d to %d\n",
3590 		netdev->name, netdev->mtu, new_mtu);
3591 	netdev->mtu = new_mtu;
3592 
3593 	if (netif_running(netdev))
3594 		e1000_up(adapter);
3595 	else
3596 		e1000_reset(adapter);
3597 
3598 	clear_bit(__E1000_RESETTING, &adapter->flags);
3599 
3600 	return 0;
3601 }
3602 
3603 /**
3604  * e1000_update_stats - Update the board statistics counters
3605  * @adapter: board private structure
3606  **/
3607 
3608 void e1000_update_stats(struct e1000_adapter *adapter)
3609 {
3610 	struct net_device *netdev = adapter->netdev;
3611 	struct e1000_hw *hw = &adapter->hw;
3612 	struct pci_dev *pdev = adapter->pdev;
3613 	unsigned long flags;
3614 	u16 phy_tmp;
3615 
3616 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3617 
3618 	/*
3619 	 * Prevent stats update while adapter is being reset, or if the pci
3620 	 * connection is down.
3621 	 */
3622 	if (adapter->link_speed == 0)
3623 		return;
3624 	if (pci_channel_offline(pdev))
3625 		return;
3626 
3627 	spin_lock_irqsave(&adapter->stats_lock, flags);
3628 
3629 	/* these counters are modified from e1000_tbi_adjust_stats,
3630 	 * called from the interrupt context, so they must only
3631 	 * be written while holding adapter->stats_lock
3632 	 */
3633 
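	/* the 64-bit byte counters (e.g. GORCL/GORCH, TORL/TORH) are exposed
	 * as low/high register pairs and accumulated into the software copies
	 */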
3634 	adapter->stats.crcerrs += er32(CRCERRS);
3635 	adapter->stats.gprc += er32(GPRC);
3636 	adapter->stats.gorcl += er32(GORCL);
3637 	adapter->stats.gorch += er32(GORCH);
3638 	adapter->stats.bprc += er32(BPRC);
3639 	adapter->stats.mprc += er32(MPRC);
3640 	adapter->stats.roc += er32(ROC);
3641 
3642 	adapter->stats.prc64 += er32(PRC64);
3643 	adapter->stats.prc127 += er32(PRC127);
3644 	adapter->stats.prc255 += er32(PRC255);
3645 	adapter->stats.prc511 += er32(PRC511);
3646 	adapter->stats.prc1023 += er32(PRC1023);
3647 	adapter->stats.prc1522 += er32(PRC1522);
3648 
3649 	adapter->stats.symerrs += er32(SYMERRS);
3650 	adapter->stats.mpc += er32(MPC);
3651 	adapter->stats.scc += er32(SCC);
3652 	adapter->stats.ecol += er32(ECOL);
3653 	adapter->stats.mcc += er32(MCC);
3654 	adapter->stats.latecol += er32(LATECOL);
3655 	adapter->stats.dc += er32(DC);
3656 	adapter->stats.sec += er32(SEC);
3657 	adapter->stats.rlec += er32(RLEC);
3658 	adapter->stats.xonrxc += er32(XONRXC);
3659 	adapter->stats.xontxc += er32(XONTXC);
3660 	adapter->stats.xoffrxc += er32(XOFFRXC);
3661 	adapter->stats.xofftxc += er32(XOFFTXC);
3662 	adapter->stats.fcruc += er32(FCRUC);
3663 	adapter->stats.gptc += er32(GPTC);
3664 	adapter->stats.gotcl += er32(GOTCL);
3665 	adapter->stats.gotch += er32(GOTCH);
3666 	adapter->stats.rnbc += er32(RNBC);
3667 	adapter->stats.ruc += er32(RUC);
3668 	adapter->stats.rfc += er32(RFC);
3669 	adapter->stats.rjc += er32(RJC);
3670 	adapter->stats.torl += er32(TORL);
3671 	adapter->stats.torh += er32(TORH);
3672 	adapter->stats.totl += er32(TOTL);
3673 	adapter->stats.toth += er32(TOTH);
3674 	adapter->stats.tpr += er32(TPR);
3675 
3676 	adapter->stats.ptc64 += er32(PTC64);
3677 	adapter->stats.ptc127 += er32(PTC127);
3678 	adapter->stats.ptc255 += er32(PTC255);
3679 	adapter->stats.ptc511 += er32(PTC511);
3680 	adapter->stats.ptc1023 += er32(PTC1023);
3681 	adapter->stats.ptc1522 += er32(PTC1522);
3682 
3683 	adapter->stats.mptc += er32(MPTC);
3684 	adapter->stats.bptc += er32(BPTC);
3685 
3686 	/* used for adaptive IFS */
3687 
3688 	hw->tx_packet_delta = er32(TPT);
3689 	adapter->stats.tpt += hw->tx_packet_delta;
3690 	hw->collision_delta = er32(COLC);
3691 	adapter->stats.colc += hw->collision_delta;
3692 
3693 	if (hw->mac_type >= e1000_82543) {
3694 		adapter->stats.algnerrc += er32(ALGNERRC);
3695 		adapter->stats.rxerrc += er32(RXERRC);
3696 		adapter->stats.tncrs += er32(TNCRS);
3697 		adapter->stats.cexterr += er32(CEXTERR);
3698 		adapter->stats.tsctc += er32(TSCTC);
3699 		adapter->stats.tsctfc += er32(TSCTFC);
3700 	}
3701 
3702 	/* Fill out the OS statistics structure */
3703 	netdev->stats.multicast = adapter->stats.mprc;
3704 	netdev->stats.collisions = adapter->stats.colc;
3705 
3706 	/* Rx Errors */
3707 
	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
3710 	netdev->stats.rx_errors = adapter->stats.rxerrc +
3711 		adapter->stats.crcerrs + adapter->stats.algnerrc +
3712 		adapter->stats.ruc + adapter->stats.roc +
3713 		adapter->stats.cexterr;
3714 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3715 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3716 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3717 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3718 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3719 
3720 	/* Tx Errors */
3721 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3722 	netdev->stats.tx_errors = adapter->stats.txerrc;
3723 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3724 	netdev->stats.tx_window_errors = adapter->stats.latecol;
3725 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3726 	if (hw->bad_tx_carr_stats_fd &&
3727 	    adapter->link_duplex == FULL_DUPLEX) {
3728 		netdev->stats.tx_carrier_errors = 0;
3729 		adapter->stats.tncrs = 0;
3730 	}
3731 
3732 	/* Tx Dropped needs to be maintained elsewhere */
3733 
3734 	/* Phy Stats */
3735 	if (hw->media_type == e1000_media_type_copper) {
3736 		if ((adapter->link_speed == SPEED_1000) &&
3737 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3738 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3739 			adapter->phy_stats.idle_errors += phy_tmp;
3740 		}
3741 
3742 		if ((hw->mac_type <= e1000_82546) &&
3743 		   (hw->phy_type == e1000_phy_m88) &&
3744 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3745 			adapter->phy_stats.receive_errors += phy_tmp;
3746 	}
3747 
3748 	/* Management Stats */
3749 	if (hw->has_smbus) {
3750 		adapter->stats.mgptc += er32(MGTPTC);
3751 		adapter->stats.mgprc += er32(MGTPRC);
3752 		adapter->stats.mgpdc += er32(MGTPDC);
3753 	}
3754 
3755 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3756 }
3757 
3758 /**
3759  * e1000_intr - Interrupt Handler
3760  * @irq: interrupt number
3761  * @data: pointer to a network interface device structure
3762  **/
3763 
3764 static irqreturn_t e1000_intr(int irq, void *data)
3765 {
3766 	struct net_device *netdev = data;
3767 	struct e1000_adapter *adapter = netdev_priv(netdev);
3768 	struct e1000_hw *hw = &adapter->hw;
3769 	u32 icr = er32(ICR);
3770 
3771 	if (unlikely((!icr)))
3772 		return IRQ_NONE;  /* Not our interrupt */
3773 
3774 	/*
3775 	 * we might have caused the interrupt, but the above
3776 	 * read cleared it, and just in case the driver is
3777 	 * down there is nothing to do so return handled
3778 	 */
3779 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3780 		return IRQ_HANDLED;
3781 
3782 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3783 		hw->get_link_status = 1;
3784 		/* guard against interrupt when we're going down */
3785 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3786 			schedule_delayed_work(&adapter->watchdog_task, 1);
3787 	}
3788 
3789 	/* disable interrupts, without the synchronize_irq bit */
3790 	ew32(IMC, ~0);
3791 	E1000_WRITE_FLUSH();
3792 
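	/* interrupts stay masked while the NAPI poll runs; e1000_clean()
	 * re-enables them via e1000_irq_enable() once it finishes under budget
	 */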
3793 	if (likely(napi_schedule_prep(&adapter->napi))) {
3794 		adapter->total_tx_bytes = 0;
3795 		adapter->total_tx_packets = 0;
3796 		adapter->total_rx_bytes = 0;
3797 		adapter->total_rx_packets = 0;
3798 		__napi_schedule(&adapter->napi);
3799 	} else {
3800 		/* this really should not happen! if it does it is basically a
3801 		 * bug, but not a hard error, so enable ints and continue */
3802 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3803 			e1000_irq_enable(adapter);
3804 	}
3805 
3806 	return IRQ_HANDLED;
3807 }
3808 
3809 /**
3810  * e1000_clean - NAPI Rx polling callback
 * @napi: napi struct containing the adapter
 * @budget: amount of work the driver is allowed to do this pass
3812  **/
3813 static int e1000_clean(struct napi_struct *napi, int budget)
3814 {
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
						     napi);
3816 	int tx_clean_complete = 0, work_done = 0;
3817 
3818 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3819 
3820 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3821 
3822 	if (!tx_clean_complete)
3823 		work_done = budget;
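	/* reporting the full budget keeps NAPI polling so the remaining Tx
	 * work is retried on the next pass
	 */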
3824 
3825 	/* If budget not fully consumed, exit the polling mode */
3826 	if (work_done < budget) {
3827 		if (likely(adapter->itr_setting & 3))
3828 			e1000_set_itr(adapter);
3829 		napi_complete(napi);
3830 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3831 			e1000_irq_enable(adapter);
3832 	}
3833 
3834 	return work_done;
3835 }
3836 
3837 /**
3838  * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
3840  **/
3841 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3842 			       struct e1000_tx_ring *tx_ring)
3843 {
3844 	struct e1000_hw *hw = &adapter->hw;
3845 	struct net_device *netdev = adapter->netdev;
3846 	struct e1000_tx_desc *tx_desc, *eop_desc;
3847 	struct e1000_buffer *buffer_info;
3848 	unsigned int i, eop;
3849 	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3851 
3852 	i = tx_ring->next_to_clean;
3853 	eop = tx_ring->buffer_info[i].next_to_watch;
3854 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3855 
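	/* next_to_watch marks the last descriptor of a frame; once hardware
	 * sets DD in its status the whole frame can be reclaimed
	 */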
3856 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3857 	       (count < tx_ring->count)) {
3858 		bool cleaned = false;
3859 		rmb();	/* read buffer_info after eop_desc */
3860 		for ( ; !cleaned; count++) {
3861 			tx_desc = E1000_TX_DESC(*tx_ring, i);
3862 			buffer_info = &tx_ring->buffer_info[i];
3863 			cleaned = (i == eop);
3864 
3865 			if (cleaned) {
3866 				total_tx_packets += buffer_info->segs;
3867 				total_tx_bytes += buffer_info->bytecount;
3868 			}
3869 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3870 			tx_desc->upper.data = 0;
3871 
			if (unlikely(++i == tx_ring->count))
				i = 0;
3873 		}
3874 
3875 		eop = tx_ring->buffer_info[i].next_to_watch;
3876 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3877 	}
3878 
3879 	tx_ring->next_to_clean = i;
3880 
3881 #define TX_WAKE_THRESHOLD 32
3882 	if (unlikely(count && netif_carrier_ok(netdev) &&
3883 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3884 		/* Make sure that anybody stopping the queue after this
3885 		 * sees the new next_to_clean.
3886 		 */
3887 		smp_mb();
3888 
3889 		if (netif_queue_stopped(netdev) &&
3890 		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3891 			netif_wake_queue(netdev);
3892 			++adapter->restart_queue;
3893 		}
3894 	}
3895 
3896 	if (adapter->detect_tx_hung) {
3897 		/* Detect a transmit hang in hardware, this serializes the
3898 		 * check with the clearing of time_stamp and movement of i */
3899 		adapter->detect_tx_hung = false;
3900 		if (tx_ring->buffer_info[eop].time_stamp &&
3901 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3902 		               (adapter->tx_timeout_factor * HZ)) &&
3903 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3904 
3905 			/* detected Tx unit hang */
3906 			e_err(drv, "Detected Tx Unit Hang\n"
3907 			      "  Tx Queue             <%lu>\n"
3908 			      "  TDH                  <%x>\n"
3909 			      "  TDT                  <%x>\n"
3910 			      "  next_to_use          <%x>\n"
3911 			      "  next_to_clean        <%x>\n"
3912 			      "buffer_info[next_to_clean]\n"
3913 			      "  time_stamp           <%lx>\n"
3914 			      "  next_to_watch        <%x>\n"
3915 			      "  jiffies              <%lx>\n"
3916 			      "  next_to_watch.status <%x>\n",
				(unsigned long)(tx_ring - adapter->tx_ring),
3919 				readl(hw->hw_addr + tx_ring->tdh),
3920 				readl(hw->hw_addr + tx_ring->tdt),
3921 				tx_ring->next_to_use,
3922 				tx_ring->next_to_clean,
3923 				tx_ring->buffer_info[eop].time_stamp,
3924 				eop,
3925 				jiffies,
3926 				eop_desc->upper.fields.status);
3927 			e1000_dump(adapter);
3928 			netif_stop_queue(netdev);
3929 		}
3930 	}
3931 	adapter->total_tx_bytes += total_tx_bytes;
3932 	adapter->total_tx_packets += total_tx_packets;
3933 	netdev->stats.tx_bytes += total_tx_bytes;
3934 	netdev->stats.tx_packets += total_tx_packets;
3935 	return count < tx_ring->count;
3936 }
3937 
3938 /**
3939  * e1000_rx_checksum - Receive Checksum Offload for 82543
3940  * @adapter:     board private structure
3941  * @status_err:  receive descriptor status and error fields
3942  * @csum:        receive descriptor csum field
 * @skb:         socket buffer with received data
3944  **/
3945 
3946 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3947 			      u32 csum, struct sk_buff *skb)
3948 {
3949 	struct e1000_hw *hw = &adapter->hw;
3950 	u16 status = (u16)status_err;
3951 	u8 errors = (u8)(status_err >> 24);
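	/* callers pack the descriptor status byte into the low bits of
	 * status_err and the error byte into bits 31:24
	 */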
3952 
3953 	skb_checksum_none_assert(skb);
3954 
	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
3959 	/* TCP/UDP checksum error bit is set */
3960 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3961 		/* let the stack verify checksum errors */
3962 		adapter->hw_csum_err++;
3963 		return;
3964 	}
3965 	/* TCP/UDP Checksum has not been calculated */
3966 	if (!(status & E1000_RXD_STAT_TCPCS))
3967 		return;
3968 
	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
3974 	adapter->hw_csum_good++;
3975 }
3976 
3977 /**
3978  * e1000_consume_page - helper function
3979  **/
3980 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3981                                u16 length)
3982 {
3983 	bi->page = NULL;
3984 	skb->len += length;
3985 	skb->data_len += length;
3986 	skb->truesize += PAGE_SIZE;
3987 }
3988 
3989 /**
3990  * e1000_receive_skb - helper function to handle rx indications
3991  * @adapter: board private structure
3992  * @status: descriptor status field as written by hardware
3993  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3994  * @skb: pointer to sk_buff to be indicated to stack
3995  */
3996 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3997 			      __le16 vlan, struct sk_buff *skb)
3998 {
3999 	skb->protocol = eth_type_trans(skb, adapter->netdev);
4000 
4001 	if (status & E1000_RXD_STAT_VP) {
4002 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4003 
4004 		__vlan_hwaccel_put_tag(skb, vid);
4005 	}
4006 	napi_gro_receive(&adapter->napi, skb);
4007 }
4008 
4009 /**
4010  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4011  * @adapter: board private structure
4012  * @rx_ring: ring to clean
4013  * @work_done: amount of napi work completed this call
4014  * @work_to_do: max amount of work allowed for this call to do
4015  *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
4018  */
4019 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4020 				     struct e1000_rx_ring *rx_ring,
4021 				     int *work_done, int work_to_do)
4022 {
4023 	struct e1000_hw *hw = &adapter->hw;
4024 	struct net_device *netdev = adapter->netdev;
4025 	struct pci_dev *pdev = adapter->pdev;
4026 	struct e1000_rx_desc *rx_desc, *next_rxd;
4027 	struct e1000_buffer *buffer_info, *next_buffer;
4028 	unsigned long irq_flags;
4029 	u32 length;
4030 	unsigned int i;
4031 	int cleaned_count = 0;
4032 	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4034 
4035 	i = rx_ring->next_to_clean;
4036 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4037 	buffer_info = &rx_ring->buffer_info[i];
4038 
4039 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4040 		struct sk_buff *skb;
4041 		u8 status;
4042 
4043 		if (*work_done >= work_to_do)
4044 			break;
4045 		(*work_done)++;
4046 		rmb(); /* read descriptor and rx_buffer_info after status DD */
4047 
4048 		status = rx_desc->status;
4049 		skb = buffer_info->skb;
4050 		buffer_info->skb = NULL;
4051 
		if (++i == rx_ring->count)
			i = 0;
4053 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4054 		prefetch(next_rxd);
4055 
4056 		next_buffer = &rx_ring->buffer_info[i];
4057 
4058 		cleaned = true;
4059 		cleaned_count++;
4060 		dma_unmap_page(&pdev->dev, buffer_info->dma,
4061 			       buffer_info->length, DMA_FROM_DEVICE);
4062 		buffer_info->dma = 0;
4063 
4064 		length = le16_to_cpu(rx_desc->length);
4065 
4066 		/* errors is only valid for DD + EOP descriptors */
4067 		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4068 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4069 			u8 last_byte = *(skb->data + length - 1);
4070 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4071 				       last_byte)) {
4072 				spin_lock_irqsave(&adapter->stats_lock,
4073 				                  irq_flags);
4074 				e1000_tbi_adjust_stats(hw, &adapter->stats,
4075 				                       length, skb->data);
4076 				spin_unlock_irqrestore(&adapter->stats_lock,
4077 				                       irq_flags);
4078 				length--;
4079 			} else {
4080 				if (netdev->features & NETIF_F_RXALL)
4081 					goto process_skb;
4082 				/* recycle both page and skb */
4083 				buffer_info->skb = skb;
4084 				/* an error means any chain goes out the window
4085 				 * too */
4086 				if (rx_ring->rx_skb_top)
4087 					dev_kfree_skb(rx_ring->rx_skb_top);
4088 				rx_ring->rx_skb_top = NULL;
4089 				goto next_desc;
4090 			}
4091 		}
4092 
4093 #define rxtop rx_ring->rx_skb_top
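		/* rx_skb_top carries the partially assembled frame: page
		 * fragments from each non-EOP descriptor are appended to it
		 * until the EOP descriptor completes the chain
		 */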
4094 process_skb:
4095 		if (!(status & E1000_RXD_STAT_EOP)) {
4096 			/* this descriptor is only the beginning (or middle) */
4097 			if (!rxtop) {
4098 				/* this is the beginning of a chain */
4099 				rxtop = skb;
4100 				skb_fill_page_desc(rxtop, 0, buffer_info->page,
4101 				                   0, length);
4102 			} else {
4103 				/* this is the middle of a chain */
4104 				skb_fill_page_desc(rxtop,
4105 				    skb_shinfo(rxtop)->nr_frags,
4106 				    buffer_info->page, 0, length);
4107 				/* re-use the skb, only consumed the page */
4108 				buffer_info->skb = skb;
4109 			}
4110 			e1000_consume_page(buffer_info, rxtop, length);
4111 			goto next_desc;
4112 		} else {
4113 			if (rxtop) {
4114 				/* end of the chain */
4115 				skb_fill_page_desc(rxtop,
4116 				    skb_shinfo(rxtop)->nr_frags,
4117 				    buffer_info->page, 0, length);
4118 				/* re-use the current skb, we only consumed the
4119 				 * page */
4120 				buffer_info->skb = skb;
4121 				skb = rxtop;
4122 				rxtop = NULL;
4123 				e1000_consume_page(buffer_info, skb, length);
4124 			} else {
				/* no chain, got EOP, this buf is the whole
				 * packet; copybreak small packets to save the
				 * put_page/alloc_page round trip
				 */
4127 				if (length <= copybreak &&
4128 				    skb_tailroom(skb) >= length) {
4129 					u8 *vaddr;
4130 					vaddr = kmap_atomic(buffer_info->page);
4131 					memcpy(skb_tail_pointer(skb), vaddr, length);
4132 					kunmap_atomic(vaddr);
4133 					/* re-use the page, so don't erase
4134 					 * buffer_info->page */
4135 					skb_put(skb, length);
4136 				} else {
4137 					skb_fill_page_desc(skb, 0,
4138 					                   buffer_info->page, 0,
4139 				                           length);
4140 					e1000_consume_page(buffer_info, skb,
4141 					                   length);
4142 				}
4143 			}
4144 		}
4145 
4146 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4147 		e1000_rx_checksum(adapter,
4148 		                  (u32)(status) |
4149 		                  ((u32)(rx_desc->errors) << 24),
4150 		                  le16_to_cpu(rx_desc->csum), skb);
4151 
4152 		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4153 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4154 			pskb_trim(skb, skb->len - 4);
4155 		total_rx_packets++;
4156 
4157 		/* eth type trans needs skb->data to point to something */
4158 		if (!pskb_may_pull(skb, ETH_HLEN)) {
4159 			e_err(drv, "pskb_may_pull failed.\n");
4160 			dev_kfree_skb(skb);
4161 			goto next_desc;
4162 		}
4163 
4164 		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4165 
4166 next_desc:
4167 		rx_desc->status = 0;
4168 
4169 		/* return some buffers to hardware, one at a time is too slow */
4170 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4171 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4172 			cleaned_count = 0;
4173 		}
4174 
4175 		/* use prefetched values */
4176 		rx_desc = next_rxd;
4177 		buffer_info = next_buffer;
4178 	}
4179 	rx_ring->next_to_clean = i;
4180 
4181 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4182 	if (cleaned_count)
4183 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4184 
4185 	adapter->total_rx_packets += total_rx_packets;
4186 	adapter->total_rx_bytes += total_rx_bytes;
4187 	netdev->stats.rx_bytes += total_rx_bytes;
4188 	netdev->stats.rx_packets += total_rx_packets;
4189 	return cleaned;
4190 }
4191 
4192 /*
4193  * this should improve performance for small packets with large amounts
4194  * of reassembly being done in the stack
4195  */
4196 static void e1000_check_copybreak(struct net_device *netdev,
4197 				 struct e1000_buffer *buffer_info,
4198 				 u32 length, struct sk_buff **skb)
4199 {
4200 	struct sk_buff *new_skb;
4201 
4202 	if (length > copybreak)
4203 		return;
4204 
4205 	new_skb = netdev_alloc_skb_ip_align(netdev, length);
4206 	if (!new_skb)
4207 		return;
4208 
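	/* copy the packet, including the NET_IP_ALIGN padding in front of the
	 * data, into the small skb and hand the original buffer back so it
	 * can be reused
	 */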
4209 	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4210 				       (*skb)->data - NET_IP_ALIGN,
4211 				       length + NET_IP_ALIGN);
4212 	/* save the skb in buffer_info as good */
4213 	buffer_info->skb = *skb;
4214 	*skb = new_skb;
4215 }
4216 
4217 /**
4218  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4219  * @adapter: board private structure
4220  * @rx_ring: ring to clean
4221  * @work_done: amount of napi work completed this call
4222  * @work_to_do: max amount of work allowed for this call to do
4223  */
4224 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4225 			       struct e1000_rx_ring *rx_ring,
4226 			       int *work_done, int work_to_do)
4227 {
4228 	struct e1000_hw *hw = &adapter->hw;
4229 	struct net_device *netdev = adapter->netdev;
4230 	struct pci_dev *pdev = adapter->pdev;
4231 	struct e1000_rx_desc *rx_desc, *next_rxd;
4232 	struct e1000_buffer *buffer_info, *next_buffer;
4233 	unsigned long flags;
4234 	u32 length;
4235 	unsigned int i;
4236 	int cleaned_count = 0;
4237 	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4239 
4240 	i = rx_ring->next_to_clean;
4241 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4242 	buffer_info = &rx_ring->buffer_info[i];
4243 
4244 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4245 		struct sk_buff *skb;
4246 		u8 status;
4247 
4248 		if (*work_done >= work_to_do)
4249 			break;
4250 		(*work_done)++;
4251 		rmb(); /* read descriptor and rx_buffer_info after status DD */
4252 
4253 		status = rx_desc->status;
4254 		skb = buffer_info->skb;
4255 		buffer_info->skb = NULL;
4256 
4257 		prefetch(skb->data - NET_IP_ALIGN);
4258 
		if (++i == rx_ring->count)
			i = 0;
4260 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4261 		prefetch(next_rxd);
4262 
4263 		next_buffer = &rx_ring->buffer_info[i];
4264 
4265 		cleaned = true;
4266 		cleaned_count++;
4267 		dma_unmap_single(&pdev->dev, buffer_info->dma,
4268 				 buffer_info->length, DMA_FROM_DEVICE);
4269 		buffer_info->dma = 0;
4270 
4271 		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
4278 		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4279 			adapter->discarding = true;
4280 
4281 		if (adapter->discarding) {
4282 			/* All receives must fit into a single buffer */
4283 			e_dbg("Receive packet consumed multiple buffers\n");
4284 			/* recycle */
4285 			buffer_info->skb = skb;
4286 			if (status & E1000_RXD_STAT_EOP)
4287 				adapter->discarding = false;
4288 			goto next_desc;
4289 		}
4290 
4291 		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4292 			u8 last_byte = *(skb->data + length - 1);
4293 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4294 				       last_byte)) {
4295 				spin_lock_irqsave(&adapter->stats_lock, flags);
4296 				e1000_tbi_adjust_stats(hw, &adapter->stats,
4297 				                       length, skb->data);
4298 				spin_unlock_irqrestore(&adapter->stats_lock,
4299 				                       flags);
4300 				length--;
4301 			} else {
4302 				if (netdev->features & NETIF_F_RXALL)
4303 					goto process_skb;
4304 				/* recycle */
4305 				buffer_info->skb = skb;
4306 				goto next_desc;
4307 			}
4308 		}
4309 
4310 process_skb:
4311 		total_rx_bytes += (length - 4); /* don't count FCS */
4312 		total_rx_packets++;
4313 
4314 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4315 			/* adjust length to remove Ethernet CRC, this must be
4316 			 * done after the TBI_ACCEPT workaround above
4317 			 */
4318 			length -= 4;
4319 
4320 		e1000_check_copybreak(netdev, buffer_info, length, &skb);
4321 
4322 		skb_put(skb, length);
4323 
4324 		/* Receive Checksum Offload */
4325 		e1000_rx_checksum(adapter,
4326 				  (u32)(status) |
4327 				  ((u32)(rx_desc->errors) << 24),
4328 				  le16_to_cpu(rx_desc->csum), skb);
4329 
4330 		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4331 
4332 next_desc:
4333 		rx_desc->status = 0;
4334 
4335 		/* return some buffers to hardware, one at a time is too slow */
4336 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4337 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4338 			cleaned_count = 0;
4339 		}
4340 
4341 		/* use prefetched values */
4342 		rx_desc = next_rxd;
4343 		buffer_info = next_buffer;
4344 	}
4345 	rx_ring->next_to_clean = i;
4346 
4347 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4348 	if (cleaned_count)
4349 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4350 
4351 	adapter->total_rx_packets += total_rx_packets;
4352 	adapter->total_rx_bytes += total_rx_bytes;
4353 	netdev->stats.rx_bytes += total_rx_bytes;
4354 	netdev->stats.rx_packets += total_rx_packets;
4355 	return cleaned;
4356 }
4357 
4358 /**
4359  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4360  * @adapter: address of board private structure
4361  * @rx_ring: pointer to receive ring structure
4362  * @cleaned_count: number of buffers to allocate this pass
4363  **/
4364 
4365 static void
4366 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4367                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4368 {
4369 	struct net_device *netdev = adapter->netdev;
4370 	struct pci_dev *pdev = adapter->pdev;
4371 	struct e1000_rx_desc *rx_desc;
4372 	struct e1000_buffer *buffer_info;
4373 	struct sk_buff *skb;
4374 	unsigned int i;
	unsigned int bufsz = 256 - 16; /* for skb_reserve */
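	/* the skb allocated here is only a small shell; in the jumbo path the
	 * packet data is DMA'd into the page attached as buffer_info->page
	 */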
4376 
4377 	i = rx_ring->next_to_use;
4378 	buffer_info = &rx_ring->buffer_info[i];
4379 
4380 	while (cleaned_count--) {
4381 		skb = buffer_info->skb;
4382 		if (skb) {
4383 			skb_trim(skb, 0);
4384 			goto check_page;
4385 		}
4386 
4387 		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4388 		if (unlikely(!skb)) {
4389 			/* Better luck next round */
4390 			adapter->alloc_rx_buff_failed++;
4391 			break;
4392 		}
4393 
4394 		/* Fix for errata 23, can't cross 64kB boundary */
4395 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4396 			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at %p\n",
			      bufsz, skb->data);
4399 			/* Try again, without freeing the previous */
4400 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4401 			/* Failed allocation, critical failure */
4402 			if (!skb) {
4403 				dev_kfree_skb(oldskb);
4404 				adapter->alloc_rx_buff_failed++;
4405 				break;
4406 			}
4407 
4408 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4409 				/* give up */
4410 				dev_kfree_skb(skb);
4411 				dev_kfree_skb(oldskb);
4412 				break; /* while (cleaned_count--) */
4413 			}
4414 
4415 			/* Use new allocation */
4416 			dev_kfree_skb(oldskb);
4417 		}
4418 		buffer_info->skb = skb;
4419 		buffer_info->length = adapter->rx_buffer_len;
4420 check_page:
4421 		/* allocate a new page if necessary */
4422 		if (!buffer_info->page) {
4423 			buffer_info->page = alloc_page(GFP_ATOMIC);
4424 			if (unlikely(!buffer_info->page)) {
4425 				adapter->alloc_rx_buff_failed++;
4426 				break;
4427 			}
4428 		}
4429 
4430 		if (!buffer_info->dma) {
4431 			buffer_info->dma = dma_map_page(&pdev->dev,
4432 			                                buffer_info->page, 0,
4433 							buffer_info->length,
4434 							DMA_FROM_DEVICE);
4435 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4436 				put_page(buffer_info->page);
4437 				dev_kfree_skb(skb);
4438 				buffer_info->page = NULL;
4439 				buffer_info->skb = NULL;
4440 				buffer_info->dma = 0;
4441 				adapter->alloc_rx_buff_failed++;
4442 				break; /* while !buffer_info->skb */
4443 			}
4444 		}
4445 
4446 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4447 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4448 
4449 		if (unlikely(++i == rx_ring->count))
4450 			i = 0;
4451 		buffer_info = &rx_ring->buffer_info[i];
4452 	}
4453 
4454 	if (likely(rx_ring->next_to_use != i)) {
4455 		rx_ring->next_to_use = i;
4456 		if (unlikely(i-- == 0))
4457 			i = (rx_ring->count - 1);
4458 
4459 		/* Force memory writes to complete before letting h/w
4460 		 * know there are new descriptors to fetch.  (Only
4461 		 * applicable for weak-ordered memory model archs,
4462 		 * such as IA-64). */
4463 		wmb();
4464 		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4465 	}
4466 }
4467 
4468 /**
4469  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to the receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
4471  **/
4472 
4473 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4474 				   struct e1000_rx_ring *rx_ring,
4475 				   int cleaned_count)
4476 {
4477 	struct e1000_hw *hw = &adapter->hw;
4478 	struct net_device *netdev = adapter->netdev;
4479 	struct pci_dev *pdev = adapter->pdev;
4480 	struct e1000_rx_desc *rx_desc;
4481 	struct e1000_buffer *buffer_info;
4482 	struct sk_buff *skb;
4483 	unsigned int i;
4484 	unsigned int bufsz = adapter->rx_buffer_len;
4485 
4486 	i = rx_ring->next_to_use;
4487 	buffer_info = &rx_ring->buffer_info[i];
4488 
4489 	while (cleaned_count--) {
4490 		skb = buffer_info->skb;
4491 		if (skb) {
4492 			skb_trim(skb, 0);
4493 			goto map_skb;
4494 		}
4495 
4496 		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4497 		if (unlikely(!skb)) {
4498 			/* Better luck next round */
4499 			adapter->alloc_rx_buff_failed++;
4500 			break;
4501 		}
4502 
4503 		/* Fix for errata 23, can't cross 64kB boundary */
4504 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4505 			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at %p\n",
			      bufsz, skb->data);
4508 			/* Try again, without freeing the previous */
4509 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4510 			/* Failed allocation, critical failure */
4511 			if (!skb) {
4512 				dev_kfree_skb(oldskb);
4513 				adapter->alloc_rx_buff_failed++;
4514 				break;
4515 			}
4516 
4517 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4518 				/* give up */
4519 				dev_kfree_skb(skb);
4520 				dev_kfree_skb(oldskb);
4521 				adapter->alloc_rx_buff_failed++;
4522 				break; /* while !buffer_info->skb */
4523 			}
4524 
4525 			/* Use new allocation */
4526 			dev_kfree_skb(oldskb);
4527 		}
4528 		buffer_info->skb = skb;
4529 		buffer_info->length = adapter->rx_buffer_len;
4530 map_skb:
4531 		buffer_info->dma = dma_map_single(&pdev->dev,
4532 						  skb->data,
4533 						  buffer_info->length,
4534 						  DMA_FROM_DEVICE);
4535 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4536 			dev_kfree_skb(skb);
4537 			buffer_info->skb = NULL;
4538 			buffer_info->dma = 0;
4539 			adapter->alloc_rx_buff_failed++;
4540 			break; /* while !buffer_info->skb */
4541 		}
4542 
4543 		/*
4544 		 * XXX if it was allocated cleanly it will never map to a
4545 		 * boundary crossing
4546 		 */
4547 
4548 		/* Fix for errata 23, can't cross 64kB boundary */
4549 		if (!e1000_check_64k_bound(adapter,
4550 					(void *)(unsigned long)buffer_info->dma,
4551 					adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at %p\n",
			      adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
4555 			dev_kfree_skb(skb);
4556 			buffer_info->skb = NULL;
4557 
4558 			dma_unmap_single(&pdev->dev, buffer_info->dma,
4559 					 adapter->rx_buffer_len,
4560 					 DMA_FROM_DEVICE);
4561 			buffer_info->dma = 0;
4562 
4563 			adapter->alloc_rx_buff_failed++;
4564 			break; /* while !buffer_info->skb */
4565 		}
4566 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4567 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4568 
4569 		if (unlikely(++i == rx_ring->count))
4570 			i = 0;
4571 		buffer_info = &rx_ring->buffer_info[i];
4572 	}
4573 
4574 	if (likely(rx_ring->next_to_use != i)) {
4575 		rx_ring->next_to_use = i;
4576 		if (unlikely(i-- == 0))
4577 			i = (rx_ring->count - 1);
4578 
4579 		/* Force memory writes to complete before letting h/w
4580 		 * know there are new descriptors to fetch.  (Only
4581 		 * applicable for weak-ordered memory model archs,
4582 		 * such as IA-64). */
4583 		wmb();
4584 		writel(i, hw->hw_addr + rx_ring->rdt);
4585 	}
4586 }
4587 
4588 /**
4589  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
4591  **/
4592 
4593 static void e1000_smartspeed(struct e1000_adapter *adapter)
4594 {
4595 	struct e1000_hw *hw = &adapter->hw;
4596 	u16 phy_status;
4597 	u16 phy_ctrl;
4598 
4599 	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4600 	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4601 		return;
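	/* adapter->smartspeed steps through the workaround: at 0 look for
	 * back-to-back Master/Slave config faults and, if seen, disable the
	 * manual Master/Slave setting; at E1000_SMARTSPEED_DOWNSHIFT turn it
	 * back on; the counter wraps at E1000_SMARTSPEED_MAX
	 */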
4602 
4603 	if (adapter->smartspeed == 0) {
4604 		/* If Master/Slave config fault is asserted twice,
4605 		 * we assume back-to-back */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
4610 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4611 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4612 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4613 			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4614 					    phy_ctrl);
4615 			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4619 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4620 					     MII_CR_RESTART_AUTO_NEG);
4621 				e1000_write_phy_reg(hw, PHY_CTRL,
4622 						    phy_ctrl);
4623 			}
4624 		}
4625 		return;
4626 	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4627 		/* If still no link, perhaps using 2/3 pair cable */
4628 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4629 		phy_ctrl |= CR_1000T_MS_ENABLE;
4630 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4631 		if (!e1000_phy_setup_autoneg(hw) &&
4632 		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4633 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4634 				     MII_CR_RESTART_AUTO_NEG);
4635 			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4636 		}
4637 	}
4638 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4639 	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4640 		adapter->smartspeed = 0;
4641 }
4642 
4643 /**
 * e1000_ioctl - handle device-specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command to execute
4648  **/
4649 
4650 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4651 {
4652 	switch (cmd) {
4653 	case SIOCGMIIPHY:
4654 	case SIOCGMIIREG:
4655 	case SIOCSMIIREG:
4656 		return e1000_mii_ioctl(netdev, ifr, cmd);
4657 	default:
4658 		return -EOPNOTSUPP;
4659 	}
4660 }
4661 
4662 /**
 * e1000_mii_ioctl - handle MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command to execute
4667  **/
4668 
4669 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4670 			   int cmd)
4671 {
4672 	struct e1000_adapter *adapter = netdev_priv(netdev);
4673 	struct e1000_hw *hw = &adapter->hw;
4674 	struct mii_ioctl_data *data = if_mii(ifr);
4675 	int retval;
4676 	u16 mii_reg;
4677 	unsigned long flags;
4678 
4679 	if (hw->media_type != e1000_media_type_copper)
4680 		return -EOPNOTSUPP;
4681 
4682 	switch (cmd) {
4683 	case SIOCGMIIPHY:
4684 		data->phy_id = hw->phy_addr;
4685 		break;
4686 	case SIOCGMIIREG:
4687 		spin_lock_irqsave(&adapter->stats_lock, flags);
4688 		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4689 				   &data->val_out)) {
4690 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4691 			return -EIO;
4692 		}
4693 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4694 		break;
4695 	case SIOCSMIIREG:
4696 		if (data->reg_num & ~(0x1F))
4697 			return -EFAULT;
4698 		mii_reg = data->val_in;
4699 		spin_lock_irqsave(&adapter->stats_lock, flags);
4700 		if (e1000_write_phy_reg(hw, data->reg_num,
4701 					mii_reg)) {
4702 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4703 			return -EIO;
4704 		}
4705 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4706 		if (hw->media_type == e1000_media_type_copper) {
4707 			switch (data->reg_num) {
4708 			case PHY_CTRL:
4709 				if (mii_reg & MII_CR_POWER_DOWN)
4710 					break;
4711 				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4712 					hw->autoneg = 1;
4713 					hw->autoneg_advertised = 0x2F;
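					/* 0x2F advertises 10/100 half and
					 * full duplex plus 1000 full
					 * (everything except 1000 half)
					 */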
4714 				} else {
4715 					u32 speed;
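					/* decode a forced setting from the
					 * MII control register: bit 0x40
					 * selects 1000, bit 0x2000 selects
					 * 100, otherwise 10; bit 0x100 means
					 * full duplex
					 */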
4716 					if (mii_reg & 0x40)
4717 						speed = SPEED_1000;
4718 					else if (mii_reg & 0x2000)
4719 						speed = SPEED_100;
4720 					else
4721 						speed = SPEED_10;
4722 					retval = e1000_set_spd_dplx(
4723 						adapter, speed,
4724 						((mii_reg & 0x100)
4725 						 ? DUPLEX_FULL :
4726 						 DUPLEX_HALF));
4727 					if (retval)
4728 						return retval;
4729 				}
4730 				if (netif_running(adapter->netdev))
4731 					e1000_reinit_locked(adapter);
4732 				else
4733 					e1000_reset(adapter);
4734 				break;
4735 			case M88E1000_PHY_SPEC_CTRL:
4736 			case M88E1000_EXT_PHY_SPEC_CTRL:
4737 				if (e1000_phy_reset(hw))
4738 					return -EIO;
4739 				break;
4740 			}
4741 		} else {
4742 			switch (data->reg_num) {
4743 			case PHY_CTRL:
4744 				if (mii_reg & MII_CR_POWER_DOWN)
4745 					break;
4746 				if (netif_running(adapter->netdev))
4747 					e1000_reinit_locked(adapter);
4748 				else
4749 					e1000_reset(adapter);
4750 				break;
4751 			}
4752 		}
4753 		break;
4754 	default:
4755 		return -EOPNOTSUPP;
4756 	}
4757 	return E1000_SUCCESS;
4758 }
4759 
4760 void e1000_pci_set_mwi(struct e1000_hw *hw)
4761 {
4762 	struct e1000_adapter *adapter = hw->back;
4763 	int ret_val = pci_set_mwi(adapter->pdev);
4764 
4765 	if (ret_val)
4766 		e_err(probe, "Error in setting MWI\n");
4767 }
4768 
4769 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4770 {
4771 	struct e1000_adapter *adapter = hw->back;
4772 
4773 	pci_clear_mwi(adapter->pdev);
4774 }
4775 
4776 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4777 {
4778 	struct e1000_adapter *adapter = hw->back;
4779 	return pcix_get_mmrbc(adapter->pdev);
4780 }
4781 
4782 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4783 {
4784 	struct e1000_adapter *adapter = hw->back;
4785 	pcix_set_mmrbc(adapter->pdev, mmrbc);
4786 }
4787 
4788 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4789 {
4790 	outl(value, port);
4791 }
4792 
4793 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4794 {
4795 	u16 vid;
4796 
4797 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4798 		return true;
4799 	return false;
4800 }
4801 
4802 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4803 			      netdev_features_t features)
4804 {
4805 	struct e1000_hw *hw = &adapter->hw;
4806 	u32 ctrl;
4807 
4808 	ctrl = er32(CTRL);
4809 	if (features & NETIF_F_HW_VLAN_RX) {
4810 		/* enable VLAN tag insert/strip */
4811 		ctrl |= E1000_CTRL_VME;
4812 	} else {
4813 		/* disable VLAN tag insert/strip */
4814 		ctrl &= ~E1000_CTRL_VME;
4815 	}
4816 	ew32(CTRL, ctrl);
4817 }
4818 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4819 				     bool filter_on)
4820 {
4821 	struct e1000_hw *hw = &adapter->hw;
4822 	u32 rctl;
4823 
4824 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4825 		e1000_irq_disable(adapter);
4826 
4827 	__e1000_vlan_mode(adapter, adapter->netdev->features);
4828 	if (filter_on) {
4829 		/* enable VLAN receive filtering */
4830 		rctl = er32(RCTL);
4831 		rctl &= ~E1000_RCTL_CFIEN;
4832 		if (!(adapter->netdev->flags & IFF_PROMISC))
4833 			rctl |= E1000_RCTL_VFE;
4834 		ew32(RCTL, rctl);
4835 		e1000_update_mng_vlan(adapter);
4836 	} else {
4837 		/* disable VLAN receive filtering */
4838 		rctl = er32(RCTL);
4839 		rctl &= ~E1000_RCTL_VFE;
4840 		ew32(RCTL, rctl);
4841 	}
4842 
4843 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4844 		e1000_irq_enable(adapter);
4845 }
4846 
4847 static void e1000_vlan_mode(struct net_device *netdev,
4848 			    netdev_features_t features)
4849 {
4850 	struct e1000_adapter *adapter = netdev_priv(netdev);
4851 
4852 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4853 		e1000_irq_disable(adapter);
4854 
4855 	__e1000_vlan_mode(adapter, features);
4856 
4857 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4858 		e1000_irq_enable(adapter);
4859 }
4860 
4861 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4862 {
4863 	struct e1000_adapter *adapter = netdev_priv(netdev);
4864 	struct e1000_hw *hw = &adapter->hw;
4865 	u32 vfta, index;
4866 
4867 	if ((hw->mng_cookie.status &
4868 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4869 	    (vid == adapter->mng_vlan_id))
4870 		return 0;
4871 
4872 	if (!e1000_vlan_used(adapter))
4873 		e1000_vlan_filter_on_off(adapter, true);
4874 
4875 	/* add VID to filter table */
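	/* the VFTA is 128 32-bit registers: VID bits [11:5] select the
	 * register and bits [4:0] select the bit within it
	 */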
4876 	index = (vid >> 5) & 0x7F;
4877 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4878 	vfta |= (1 << (vid & 0x1F));
4879 	e1000_write_vfta(hw, index, vfta);
4880 
4881 	set_bit(vid, adapter->active_vlans);
4882 
4883 	return 0;
4884 }
4885 
4886 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4887 {
4888 	struct e1000_adapter *adapter = netdev_priv(netdev);
4889 	struct e1000_hw *hw = &adapter->hw;
4890 	u32 vfta, index;
4891 
4892 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4893 		e1000_irq_disable(adapter);
4894 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4895 		e1000_irq_enable(adapter);
4896 
4897 	/* remove VID from filter table */
4898 	index = (vid >> 5) & 0x7F;
4899 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4900 	vfta &= ~(1 << (vid & 0x1F));
4901 	e1000_write_vfta(hw, index, vfta);
4902 
4903 	clear_bit(vid, adapter->active_vlans);
4904 
4905 	if (!e1000_vlan_used(adapter))
4906 		e1000_vlan_filter_on_off(adapter, false);
4907 
4908 	return 0;
4909 }
4910 
4911 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4912 {
4913 	u16 vid;
4914 
4915 	if (!e1000_vlan_used(adapter))
4916 		return;
4917 
4918 	e1000_vlan_filter_on_off(adapter, true);
4919 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4920 		e1000_vlan_rx_add_vid(adapter->netdev, vid);
4921 }
4922 
4923 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4924 {
4925 	struct e1000_hw *hw = &adapter->hw;
4926 
4927 	hw->autoneg = 0;
4928 
4929 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
4930 	 * for the switch() below to work */
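	/* e.g. SPEED_100 (100) + DUPLEX_FULL (1) gives 101, which cannot
	 * collide with any other valid speed/duplex sum
	 */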
4931 	if ((spd & 1) || (dplx & ~1))
4932 		goto err_inval;
4933 
	/* Fiber NICs only allow 1000 Mbps full duplex */
4935 	if ((hw->media_type == e1000_media_type_fiber) &&
4936 	    spd != SPEED_1000 &&
4937 	    dplx != DUPLEX_FULL)
4938 		goto err_inval;
4939 
4940 	switch (spd + dplx) {
4941 	case SPEED_10 + DUPLEX_HALF:
4942 		hw->forced_speed_duplex = e1000_10_half;
4943 		break;
4944 	case SPEED_10 + DUPLEX_FULL:
4945 		hw->forced_speed_duplex = e1000_10_full;
4946 		break;
4947 	case SPEED_100 + DUPLEX_HALF:
4948 		hw->forced_speed_duplex = e1000_100_half;
4949 		break;
4950 	case SPEED_100 + DUPLEX_FULL:
4951 		hw->forced_speed_duplex = e1000_100_full;
4952 		break;
4953 	case SPEED_1000 + DUPLEX_FULL:
4954 		hw->autoneg = 1;
4955 		hw->autoneg_advertised = ADVERTISE_1000_FULL;
4956 		break;
4957 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
4958 	default:
4959 		goto err_inval;
4960 	}
4961 	return 0;
4962 
4963 err_inval:
4964 	e_err(probe, "Unsupported Speed/Duplex configuration\n");
4965 	return -EINVAL;
4966 }
4967 
4968 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4969 {
4970 	struct net_device *netdev = pci_get_drvdata(pdev);
4971 	struct e1000_adapter *adapter = netdev_priv(netdev);
4972 	struct e1000_hw *hw = &adapter->hw;
4973 	u32 ctrl, ctrl_ext, rctl, status;
4974 	u32 wufc = adapter->wol;
4975 #ifdef CONFIG_PM
4976 	int retval = 0;
4977 #endif
4978 
4979 	netif_device_detach(netdev);
4980 
4981 	if (netif_running(netdev)) {
4982 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4983 		e1000_down(adapter);
4984 	}
4985 
4986 #ifdef CONFIG_PM
4987 	retval = pci_save_state(pdev);
4988 	if (retval)
4989 		return retval;
4990 #endif
4991 
4992 	status = er32(STATUS);
4993 	if (status & E1000_STATUS_LU)
4994 		wufc &= ~E1000_WUFC_LNKC;
4995 
4996 	if (wufc) {
4997 		e1000_setup_rctl(adapter);
4998 		e1000_set_rx_mode(netdev);
4999 
5000 		rctl = er32(RCTL);
5001 
5002 		/* turn on all-multi mode if wake on multicast is enabled */
5003 		if (wufc & E1000_WUFC_MC)
5004 			rctl |= E1000_RCTL_MPE;
5005 
5006 		/* enable receives in the hardware */
5007 		ew32(RCTL, rctl | E1000_RCTL_EN);
5008 
5009 		if (hw->mac_type >= e1000_82540) {
5010 			ctrl = er32(CTRL);
5011 			/* advertise wake from D3Cold */
5012 			#define E1000_CTRL_ADVD3WUC 0x00100000
5013 			/* phy power management enable */
5014 			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5015 			ctrl |= E1000_CTRL_ADVD3WUC |
5016 				E1000_CTRL_EN_PHY_PWR_MGMT;
5017 			ew32(CTRL, ctrl);
5018 		}
5019 
5020 		if (hw->media_type == e1000_media_type_fiber ||
5021 		    hw->media_type == e1000_media_type_internal_serdes) {
5022 			/* keep the laser running in D3 */
5023 			ctrl_ext = er32(CTRL_EXT);
5024 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5025 			ew32(CTRL_EXT, ctrl_ext);
5026 		}
5027 
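		/* arm PME and program the wake-up filter with the requested
		 * wake events
		 */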
5028 		ew32(WUC, E1000_WUC_PME_EN);
5029 		ew32(WUFC, wufc);
5030 	} else {
5031 		ew32(WUC, 0);
5032 		ew32(WUFC, 0);
5033 	}
5034 
5035 	e1000_release_manageability(adapter);
5036 
5037 	*enable_wake = !!wufc;
5038 
5039 	/* make sure adapter isn't asleep if manageability is enabled */
5040 	if (adapter->en_mng_pt)
5041 		*enable_wake = true;
5042 
5043 	if (netif_running(netdev))
5044 		e1000_free_irq(adapter);
5045 
5046 	pci_disable_device(pdev);
5047 
5048 	return 0;
5049 }
5050 
5051 #ifdef CONFIG_PM
5052 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5053 {
5054 	int retval;
5055 	bool wake;
5056 
5057 	retval = __e1000_shutdown(pdev, &wake);
5058 	if (retval)
5059 		return retval;
5060 
5061 	if (wake) {
5062 		pci_prepare_to_sleep(pdev);
5063 	} else {
5064 		pci_wake_from_d3(pdev, false);
5065 		pci_set_power_state(pdev, PCI_D3hot);
5066 	}
5067 
5068 	return 0;
5069 }
5070 
5071 static int e1000_resume(struct pci_dev *pdev)
5072 {
5073 	struct net_device *netdev = pci_get_drvdata(pdev);
5074 	struct e1000_adapter *adapter = netdev_priv(netdev);
5075 	struct e1000_hw *hw = &adapter->hw;
	int err;
5077 
5078 	pci_set_power_state(pdev, PCI_D0);
5079 	pci_restore_state(pdev);
5080 	pci_save_state(pdev);
5081 
5082 	if (adapter->need_ioport)
5083 		err = pci_enable_device(pdev);
5084 	else
5085 		err = pci_enable_device_mem(pdev);
5086 	if (err) {
5087 		pr_err("Cannot enable PCI device from suspend\n");
5088 		return err;
5089 	}
5090 	pci_set_master(pdev);
5091 
5092 	pci_enable_wake(pdev, PCI_D3hot, 0);
5093 	pci_enable_wake(pdev, PCI_D3cold, 0);
5094 
5095 	if (netif_running(netdev)) {
5096 		err = e1000_request_irq(adapter);
5097 		if (err)
5098 			return err;
5099 	}
5100 
5101 	e1000_power_up_phy(adapter);
5102 	e1000_reset(adapter);
5103 	ew32(WUS, ~0);
5104 
5105 	e1000_init_manageability(adapter);
5106 
5107 	if (netif_running(netdev))
5108 		e1000_up(adapter);
5109 
5110 	netif_device_attach(netdev);
5111 
5112 	return 0;
5113 }
5114 #endif
5115 
5116 static void e1000_shutdown(struct pci_dev *pdev)
5117 {
5118 	bool wake;
5119 
5120 	__e1000_shutdown(pdev, &wake);
5121 
5122 	if (system_state == SYSTEM_POWER_OFF) {
5123 		pci_wake_from_d3(pdev, wake);
5124 		pci_set_power_state(pdev, PCI_D3hot);
5125 	}
5126 }
5127 
5128 #ifdef CONFIG_NET_POLL_CONTROLLER
5129 /*
5130  * Polling 'interrupt' - used by things like netconsole to send skbs
5131  * without having to re-enable interrupts. It's not called while
5132  * the interrupt routine is executing.
5133  */
5134 static void e1000_netpoll(struct net_device *netdev)
5135 {
5136 	struct e1000_adapter *adapter = netdev_priv(netdev);
5137 
5138 	disable_irq(adapter->pdev->irq);
5139 	e1000_intr(adapter->pdev->irq, netdev);
5140 	enable_irq(adapter->pdev->irq);
5141 }
5142 #endif
5143 
5144 /**
5145  * e1000_io_error_detected - called when PCI error is detected
5146  * @pdev: Pointer to PCI device
5147  * @state: The current pci connection state
5148  *
5149  * This function is called after a PCI bus error affecting
5150  * this device has been detected.
5151  */
5152 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5153 						pci_channel_state_t state)
5154 {
5155 	struct net_device *netdev = pci_get_drvdata(pdev);
5156 	struct e1000_adapter *adapter = netdev_priv(netdev);
5157 
5158 	netif_device_detach(netdev);
5159 
5160 	if (state == pci_channel_io_perm_failure)
5161 		return PCI_ERS_RESULT_DISCONNECT;
5162 
5163 	if (netif_running(netdev))
5164 		e1000_down(adapter);
5165 	pci_disable_device(pdev);
5166 
	/* Request a slot reset. */
5168 	return PCI_ERS_RESULT_NEED_RESET;
5169 }
5170 
5171 /**
5172  * e1000_io_slot_reset - called after the pci bus has been reset.
5173  * @pdev: Pointer to PCI device
5174  *
5175  * Restart the card from scratch, as if from a cold-boot. Implementation
5176  * resembles the first-half of the e1000_resume routine.
5177  */
5178 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5179 {
5180 	struct net_device *netdev = pci_get_drvdata(pdev);
5181 	struct e1000_adapter *adapter = netdev_priv(netdev);
5182 	struct e1000_hw *hw = &adapter->hw;
5183 	int err;
5184 
5185 	if (adapter->need_ioport)
5186 		err = pci_enable_device(pdev);
5187 	else
5188 		err = pci_enable_device_mem(pdev);
5189 	if (err) {
5190 		pr_err("Cannot re-enable PCI device after reset.\n");
5191 		return PCI_ERS_RESULT_DISCONNECT;
5192 	}
5193 	pci_set_master(pdev);
5194 
5195 	pci_enable_wake(pdev, PCI_D3hot, 0);
5196 	pci_enable_wake(pdev, PCI_D3cold, 0);
5197 
5198 	e1000_reset(adapter);
5199 	ew32(WUS, ~0);
5200 
5201 	return PCI_ERS_RESULT_RECOVERED;
5202 }
5203 
5204 /**
5205  * e1000_io_resume - called when traffic can start flowing again.
5206  * @pdev: Pointer to PCI device
5207  *
5208  * This callback is called when the error recovery driver tells us that
5209  * its OK to resume normal operation. Implementation resembles the
5210  * second-half of the e1000_resume routine.
5211  */
5212 static void e1000_io_resume(struct pci_dev *pdev)
5213 {
5214 	struct net_device *netdev = pci_get_drvdata(pdev);
5215 	struct e1000_adapter *adapter = netdev_priv(netdev);
5216 
5217 	e1000_init_manageability(adapter);
5218 
5219 	if (netif_running(netdev)) {
5220 		if (e1000_up(adapter)) {
5221 			pr_info("can't bring device back up after reset\n");
5222 			return;
5223 		}
5224 	}
5225 
5226 	netif_device_attach(netdev);
5227 }
5228 
5229 /* e1000_main.c */
5230