1 /* Intel(R) Gigabit Ethernet Linux driver
2  * Copyright(c) 2007-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, see <http://www.gnu.org/licenses/>.
15  *
16  * The full GNU General Public License is included in this distribution in
17  * the file called "COPYING".
18  *
19  * Contact Information:
20  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22  */
23 
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25 
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/init.h>
29 #include <linux/bitops.h>
30 #include <linux/vmalloc.h>
31 #include <linux/pagemap.h>
32 #include <linux/netdevice.h>
33 #include <linux/ipv6.h>
34 #include <linux/slab.h>
35 #include <net/checksum.h>
36 #include <net/ip6_checksum.h>
37 #include <linux/net_tstamp.h>
38 #include <linux/mii.h>
39 #include <linux/ethtool.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <linux/pci.h>
43 #include <linux/pci-aspm.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/ip.h>
47 #include <linux/tcp.h>
48 #include <linux/sctp.h>
49 #include <linux/if_ether.h>
50 #include <linux/aer.h>
51 #include <linux/prefetch.h>
52 #include <linux/pm_runtime.h>
53 #ifdef CONFIG_IGB_DCA
54 #include <linux/dca.h>
55 #endif
56 #include <linux/i2c.h>
57 #include "igb.h"
58 
59 #define MAJ 5
60 #define MIN 2
61 #define BUILD 15
62 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
63 __stringify(BUILD) "-k"
64 char igb_driver_name[] = "igb";
65 char igb_driver_version[] = DRV_VERSION;
66 static const char igb_driver_string[] =
67 				"Intel(R) Gigabit Ethernet Network Driver";
68 static const char igb_copyright[] =
69 				"Copyright (c) 2007-2014 Intel Corporation.";
70 
71 static const struct e1000_info *igb_info_tbl[] = {
72 	[board_82575] = &e1000_82575_info,
73 };
74 
75 static const struct pci_device_id igb_pci_tbl[] = {
76 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
77 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
78 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
79 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
80 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
81 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
82 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
83 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
84 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
85 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
86 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
87 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
88 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
89 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
90 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
91 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
92 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
93 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
94 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
95 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
96 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
97 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
98 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
99 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
100 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
101 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
102 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
103 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
104 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
105 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
106 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
107 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
108 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
109 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
110 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
111 	/* required last entry */
112 	{0, }
113 };
114 
115 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
116 
117 static int igb_setup_all_tx_resources(struct igb_adapter *);
118 static int igb_setup_all_rx_resources(struct igb_adapter *);
119 static void igb_free_all_tx_resources(struct igb_adapter *);
120 static void igb_free_all_rx_resources(struct igb_adapter *);
121 static void igb_setup_mrqc(struct igb_adapter *);
122 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
123 static void igb_remove(struct pci_dev *pdev);
124 static int igb_sw_init(struct igb_adapter *);
125 static int igb_open(struct net_device *);
126 static int igb_close(struct net_device *);
127 static void igb_configure(struct igb_adapter *);
128 static void igb_configure_tx(struct igb_adapter *);
129 static void igb_configure_rx(struct igb_adapter *);
130 static void igb_clean_all_tx_rings(struct igb_adapter *);
131 static void igb_clean_all_rx_rings(struct igb_adapter *);
132 static void igb_clean_tx_ring(struct igb_ring *);
133 static void igb_clean_rx_ring(struct igb_ring *);
134 static void igb_set_rx_mode(struct net_device *);
135 static void igb_update_phy_info(unsigned long);
136 static void igb_watchdog(unsigned long);
137 static void igb_watchdog_task(struct work_struct *);
138 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
139 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
140 					  struct rtnl_link_stats64 *stats);
141 static int igb_change_mtu(struct net_device *, int);
142 static int igb_set_mac(struct net_device *, void *);
143 static void igb_set_uta(struct igb_adapter *adapter);
144 static irqreturn_t igb_intr(int irq, void *);
145 static irqreturn_t igb_intr_msi(int irq, void *);
146 static irqreturn_t igb_msix_other(int irq, void *);
147 static irqreturn_t igb_msix_ring(int irq, void *);
148 #ifdef CONFIG_IGB_DCA
149 static void igb_update_dca(struct igb_q_vector *);
150 static void igb_setup_dca(struct igb_adapter *);
151 #endif /* CONFIG_IGB_DCA */
152 static int igb_poll(struct napi_struct *, int);
153 static bool igb_clean_tx_irq(struct igb_q_vector *);
154 static bool igb_clean_rx_irq(struct igb_q_vector *, int);
155 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
156 static void igb_tx_timeout(struct net_device *);
157 static void igb_reset_task(struct work_struct *);
158 static void igb_vlan_mode(struct net_device *netdev,
159 			  netdev_features_t features);
160 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
161 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
162 static void igb_restore_vlan(struct igb_adapter *);
163 static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
164 static void igb_ping_all_vfs(struct igb_adapter *);
165 static void igb_msg_task(struct igb_adapter *);
166 static void igb_vmm_control(struct igb_adapter *);
167 static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
168 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
169 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
170 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
171 			       int vf, u16 vlan, u8 qos);
172 static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
173 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
174 				   bool setting);
175 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
176 				 struct ifla_vf_info *ivi);
177 static void igb_check_vf_rate_limit(struct igb_adapter *);
178 
179 #ifdef CONFIG_PCI_IOV
180 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
181 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
182 #endif
183 
184 #ifdef CONFIG_PM
185 #ifdef CONFIG_PM_SLEEP
186 static int igb_suspend(struct device *);
187 #endif
188 static int igb_resume(struct device *);
189 #ifdef CONFIG_PM_RUNTIME
190 static int igb_runtime_suspend(struct device *dev);
191 static int igb_runtime_resume(struct device *dev);
192 static int igb_runtime_idle(struct device *dev);
193 #endif
194 static const struct dev_pm_ops igb_pm_ops = {
195 	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
196 	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
197 			igb_runtime_idle)
198 };
199 #endif
200 static void igb_shutdown(struct pci_dev *);
201 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
202 #ifdef CONFIG_IGB_DCA
203 static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
204 static struct notifier_block dca_notifier = {
205 	.notifier_call	= igb_notify_dca,
206 	.next		= NULL,
207 	.priority	= 0
208 };
209 #endif
210 #ifdef CONFIG_NET_POLL_CONTROLLER
211 /* for netdump / net console */
212 static void igb_netpoll(struct net_device *);
213 #endif
214 #ifdef CONFIG_PCI_IOV
215 static unsigned int max_vfs;
216 module_param(max_vfs, uint, 0);
217 MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
218 #endif /* CONFIG_PCI_IOV */
219 
220 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
221 		     pci_channel_state_t);
222 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
223 static void igb_io_resume(struct pci_dev *);
224 
225 static const struct pci_error_handlers igb_err_handler = {
226 	.error_detected = igb_io_error_detected,
227 	.slot_reset = igb_io_slot_reset,
228 	.resume = igb_io_resume,
229 };
230 
231 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
232 
233 static struct pci_driver igb_driver = {
234 	.name     = igb_driver_name,
235 	.id_table = igb_pci_tbl,
236 	.probe    = igb_probe,
237 	.remove   = igb_remove,
238 #ifdef CONFIG_PM
239 	.driver.pm = &igb_pm_ops,
240 #endif
241 	.shutdown = igb_shutdown,
242 	.sriov_configure = igb_pci_sriov_configure,
243 	.err_handler = &igb_err_handler
244 };
245 
246 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
247 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
248 MODULE_LICENSE("GPL");
249 MODULE_VERSION(DRV_VERSION);
250 
251 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
252 static int debug = -1;
253 module_param(debug, int, 0);
254 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
255 
256 struct igb_reg_info {
257 	u32 ofs;
258 	char *name;
259 };
260 
261 static const struct igb_reg_info igb_reg_info_tbl[] = {
262 
263 	/* General Registers */
264 	{E1000_CTRL, "CTRL"},
265 	{E1000_STATUS, "STATUS"},
266 	{E1000_CTRL_EXT, "CTRL_EXT"},
267 
268 	/* Interrupt Registers */
269 	{E1000_ICR, "ICR"},
270 
271 	/* RX Registers */
272 	{E1000_RCTL, "RCTL"},
273 	{E1000_RDLEN(0), "RDLEN"},
274 	{E1000_RDH(0), "RDH"},
275 	{E1000_RDT(0), "RDT"},
276 	{E1000_RXDCTL(0), "RXDCTL"},
277 	{E1000_RDBAL(0), "RDBAL"},
278 	{E1000_RDBAH(0), "RDBAH"},
279 
280 	/* TX Registers */
281 	{E1000_TCTL, "TCTL"},
282 	{E1000_TDBAL(0), "TDBAL"},
283 	{E1000_TDBAH(0), "TDBAH"},
284 	{E1000_TDLEN(0), "TDLEN"},
285 	{E1000_TDH(0), "TDH"},
286 	{E1000_TDT(0), "TDT"},
287 	{E1000_TXDCTL(0), "TXDCTL"},
288 	{E1000_TDFH, "TDFH"},
289 	{E1000_TDFT, "TDFT"},
290 	{E1000_TDFHS, "TDFHS"},
291 	{E1000_TDFPC, "TDFPC"},
292 
293 	/* List Terminator */
294 	{}
295 };
296 
297 /* igb_regdump - register printout routine */
298 static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
299 {
300 	int n = 0;
301 	char rname[16];
302 	u32 regs[8];
303 
304 	switch (reginfo->ofs) {
305 	case E1000_RDLEN(0):
306 		for (n = 0; n < 4; n++)
307 			regs[n] = rd32(E1000_RDLEN(n));
308 		break;
309 	case E1000_RDH(0):
310 		for (n = 0; n < 4; n++)
311 			regs[n] = rd32(E1000_RDH(n));
312 		break;
313 	case E1000_RDT(0):
314 		for (n = 0; n < 4; n++)
315 			regs[n] = rd32(E1000_RDT(n));
316 		break;
317 	case E1000_RXDCTL(0):
318 		for (n = 0; n < 4; n++)
319 			regs[n] = rd32(E1000_RXDCTL(n));
320 		break;
321 	case E1000_RDBAL(0):
322 		for (n = 0; n < 4; n++)
323 			regs[n] = rd32(E1000_RDBAL(n));
324 		break;
325 	case E1000_RDBAH(0):
326 		for (n = 0; n < 4; n++)
327 			regs[n] = rd32(E1000_RDBAH(n));
328 		break;
329 	case E1000_TDBAL(0):
330 		for (n = 0; n < 4; n++)
331 			regs[n] = rd32(E1000_RDBAL(n));
332 		break;
333 	case E1000_TDBAH(0):
334 		for (n = 0; n < 4; n++)
335 			regs[n] = rd32(E1000_TDBAH(n));
336 		break;
337 	case E1000_TDLEN(0):
338 		for (n = 0; n < 4; n++)
339 			regs[n] = rd32(E1000_TDLEN(n));
340 		break;
341 	case E1000_TDH(0):
342 		for (n = 0; n < 4; n++)
343 			regs[n] = rd32(E1000_TDH(n));
344 		break;
345 	case E1000_TDT(0):
346 		for (n = 0; n < 4; n++)
347 			regs[n] = rd32(E1000_TDT(n));
348 		break;
349 	case E1000_TXDCTL(0):
350 		for (n = 0; n < 4; n++)
351 			regs[n] = rd32(E1000_TXDCTL(n));
352 		break;
353 	default:
354 		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
355 		return;
356 	}
357 
358 	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
359 	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
360 		regs[2], regs[3]);
361 }
362 
363 /* igb_dump - Print registers, Tx-rings and Rx-rings */
364 static void igb_dump(struct igb_adapter *adapter)
365 {
366 	struct net_device *netdev = adapter->netdev;
367 	struct e1000_hw *hw = &adapter->hw;
368 	struct igb_reg_info *reginfo;
369 	struct igb_ring *tx_ring;
370 	union e1000_adv_tx_desc *tx_desc;
371 	struct my_u0 { u64 a; u64 b; } *u0;
372 	struct igb_ring *rx_ring;
373 	union e1000_adv_rx_desc *rx_desc;
374 	u32 staterr;
375 	u16 i, n;
376 
377 	if (!netif_msg_hw(adapter))
378 		return;
379 
380 	/* Print netdevice Info */
381 	if (netdev) {
382 		dev_info(&adapter->pdev->dev, "Net device Info\n");
383 		pr_info("Device Name     state            trans_start      last_rx\n");
384 		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
385 			netdev->state, netdev->trans_start, netdev->last_rx);
386 	}
387 
388 	/* Print Registers */
389 	dev_info(&adapter->pdev->dev, "Register Dump\n");
390 	pr_info(" Register Name   Value\n");
391 	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
392 	     reginfo->name; reginfo++) {
393 		igb_regdump(hw, reginfo);
394 	}
395 
396 	/* Print TX Ring Summary */
397 	if (!netdev || !netif_running(netdev))
398 		goto exit;
399 
400 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
401 	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
402 	for (n = 0; n < adapter->num_tx_queues; n++) {
403 		struct igb_tx_buffer *buffer_info;
404 		tx_ring = adapter->tx_ring[n];
405 		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
406 		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
407 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
408 			(u64)dma_unmap_addr(buffer_info, dma),
409 			dma_unmap_len(buffer_info, len),
410 			buffer_info->next_to_watch,
411 			(u64)buffer_info->time_stamp);
412 	}
413 
414 	/* Print TX Rings */
415 	if (!netif_msg_tx_done(adapter))
416 		goto rx_ring_summary;
417 
418 	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
419 
420 	/* Transmit Descriptor Formats
421 	 *
422 	 * Advanced Transmit Descriptor
423 	 *   +--------------------------------------------------------------+
424 	 * 0 |         Buffer Address [63:0]                                |
425 	 *   +--------------------------------------------------------------+
426 	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
427 	 *   +--------------------------------------------------------------+
428 	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
429 	 */
430 
431 	for (n = 0; n < adapter->num_tx_queues; n++) {
432 		tx_ring = adapter->tx_ring[n];
433 		pr_info("------------------------------------\n");
434 		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
435 		pr_info("------------------------------------\n");
436 		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");
437 
438 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
439 			const char *next_desc;
440 			struct igb_tx_buffer *buffer_info;
441 			tx_desc = IGB_TX_DESC(tx_ring, i);
442 			buffer_info = &tx_ring->tx_buffer_info[i];
443 			u0 = (struct my_u0 *)tx_desc;
444 			if (i == tx_ring->next_to_use &&
445 			    i == tx_ring->next_to_clean)
446 				next_desc = " NTC/U";
447 			else if (i == tx_ring->next_to_use)
448 				next_desc = " NTU";
449 			else if (i == tx_ring->next_to_clean)
450 				next_desc = " NTC";
451 			else
452 				next_desc = "";
453 
454 			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
455 				i, le64_to_cpu(u0->a),
456 				le64_to_cpu(u0->b),
457 				(u64)dma_unmap_addr(buffer_info, dma),
458 				dma_unmap_len(buffer_info, len),
459 				buffer_info->next_to_watch,
460 				(u64)buffer_info->time_stamp,
461 				buffer_info->skb, next_desc);
462 
463 			if (netif_msg_pktdata(adapter) && buffer_info->skb)
464 				print_hex_dump(KERN_INFO, "",
465 					DUMP_PREFIX_ADDRESS,
466 					16, 1, buffer_info->skb->data,
467 					dma_unmap_len(buffer_info, len),
468 					true);
469 		}
470 	}
471 
472 	/* Print RX Rings Summary */
473 rx_ring_summary:
474 	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
475 	pr_info("Queue [NTU] [NTC]\n");
476 	for (n = 0; n < adapter->num_rx_queues; n++) {
477 		rx_ring = adapter->rx_ring[n];
478 		pr_info(" %5d %5X %5X\n",
479 			n, rx_ring->next_to_use, rx_ring->next_to_clean);
480 	}
481 
482 	/* Print RX Rings */
483 	if (!netif_msg_rx_status(adapter))
484 		goto exit;
485 
486 	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
487 
488 	/* Advanced Receive Descriptor (Read) Format
489 	 *    63                                           1        0
490 	 *    +-----------------------------------------------------+
491 	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
492 	 *    +----------------------------------------------+------+
493 	 *  8 |       Header Buffer Address [63:1]           |  DD  |
494 	 *    +-----------------------------------------------------+
495 	 *
496 	 *
497 	 * Advanced Receive Descriptor (Write-Back) Format
498 	 *
499 	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
500 	 *   +------------------------------------------------------+
501 	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
502 	 *   | Checksum   Ident  |   |           |    | Type | Type |
503 	 *   +------------------------------------------------------+
504 	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
505 	 *   +------------------------------------------------------+
506 	 *   63       48 47    32 31            20 19               0
507 	 */
508 
509 	for (n = 0; n < adapter->num_rx_queues; n++) {
510 		rx_ring = adapter->rx_ring[n];
511 		pr_info("------------------------------------\n");
512 		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
513 		pr_info("------------------------------------\n");
514 		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
515 		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
516 
517 		for (i = 0; i < rx_ring->count; i++) {
518 			const char *next_desc;
519 			struct igb_rx_buffer *buffer_info;
520 			buffer_info = &rx_ring->rx_buffer_info[i];
521 			rx_desc = IGB_RX_DESC(rx_ring, i);
522 			u0 = (struct my_u0 *)rx_desc;
523 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
524 
525 			if (i == rx_ring->next_to_use)
526 				next_desc = " NTU";
527 			else if (i == rx_ring->next_to_clean)
528 				next_desc = " NTC";
529 			else
530 				next_desc = "";
531 
532 			if (staterr & E1000_RXD_STAT_DD) {
533 				/* Descriptor Done */
534 				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
535 					"RWB", i,
536 					le64_to_cpu(u0->a),
537 					le64_to_cpu(u0->b),
538 					next_desc);
539 			} else {
540 				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
541 					"R  ", i,
542 					le64_to_cpu(u0->a),
543 					le64_to_cpu(u0->b),
544 					(u64)buffer_info->dma,
545 					next_desc);
546 
547 				if (netif_msg_pktdata(adapter) &&
548 				    buffer_info->dma && buffer_info->page) {
549 					print_hex_dump(KERN_INFO, "",
550 					  DUMP_PREFIX_ADDRESS,
551 					  16, 1,
552 					  page_address(buffer_info->page) +
553 						      buffer_info->page_offset,
554 					  IGB_RX_BUFSZ, true);
555 				}
556 			}
557 		}
558 	}
559 
560 exit:
561 	return;
562 }
563 
564 /**
565  *  igb_get_i2c_data - Reads the I2C SDA data bit
566  *  @hw: pointer to hardware structure
567  *  @i2cctl: Current value of I2CCTL register
568  *
569  *  Returns the I2C data bit value
570  **/
571 static int igb_get_i2c_data(void *data)
572 {
573 	struct igb_adapter *adapter = (struct igb_adapter *)data;
574 	struct e1000_hw *hw = &adapter->hw;
575 	s32 i2cctl = rd32(E1000_I2CPARAMS);
576 
577 	return !!(i2cctl & E1000_I2C_DATA_IN);
578 }
579 
580 /**
581  *  igb_set_i2c_data - Sets the I2C data bit
582  *  @data: pointer to hardware structure
583  *  @state: I2C data value (0 or 1) to set
584  *
585  *  Sets the I2C data bit
586  **/
587 static void igb_set_i2c_data(void *data, int state)
588 {
589 	struct igb_adapter *adapter = (struct igb_adapter *)data;
590 	struct e1000_hw *hw = &adapter->hw;
591 	s32 i2cctl = rd32(E1000_I2CPARAMS);
592 
593 	if (state)
594 		i2cctl |= E1000_I2C_DATA_OUT;
595 	else
596 		i2cctl &= ~E1000_I2C_DATA_OUT;
597 
598 	i2cctl &= ~E1000_I2C_DATA_OE_N;
599 	i2cctl |= E1000_I2C_CLK_OE_N;
600 	wr32(E1000_I2CPARAMS, i2cctl);
601 	wrfl();
602 
603 }
604 
605 /**
606  *  igb_set_i2c_clk - Sets the I2C SCL clock
607  *  @data: pointer to hardware structure
608  *  @state: state to set clock
609  *
610  *  Sets the I2C clock line to state
611  **/
612 static void igb_set_i2c_clk(void *data, int state)
613 {
614 	struct igb_adapter *adapter = (struct igb_adapter *)data;
615 	struct e1000_hw *hw = &adapter->hw;
616 	s32 i2cctl = rd32(E1000_I2CPARAMS);
617 
618 	if (state) {
619 		i2cctl |= E1000_I2C_CLK_OUT;
620 		i2cctl &= ~E1000_I2C_CLK_OE_N;
621 	} else {
622 		i2cctl &= ~E1000_I2C_CLK_OUT;
623 		i2cctl &= ~E1000_I2C_CLK_OE_N;
624 	}
625 	wr32(E1000_I2CPARAMS, i2cctl);
626 	wrfl();
627 }
628 
629 /**
630  *  igb_get_i2c_clk - Gets the I2C SCL clock state
631  *  @data: pointer to hardware structure
632  *
633  *  Gets the I2C clock state
634  **/
635 static int igb_get_i2c_clk(void *data)
636 {
637 	struct igb_adapter *adapter = (struct igb_adapter *)data;
638 	struct e1000_hw *hw = &adapter->hw;
639 	s32 i2cctl = rd32(E1000_I2CPARAMS);
640 
641 	return !!(i2cctl & E1000_I2C_CLK_IN);
642 }
643 
644 static const struct i2c_algo_bit_data igb_i2c_algo = {
645 	.setsda		= igb_set_i2c_data,
646 	.setscl		= igb_set_i2c_clk,
647 	.getsda		= igb_get_i2c_data,
648 	.getscl		= igb_get_i2c_clk,
649 	.udelay		= 5,
650 	.timeout	= 20,
651 };
652 
653 /**
654  *  igb_get_hw_dev - return device
655  *  @hw: pointer to hardware structure
656  *
657  *  used by hardware layer to print debugging information
658  **/
659 struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
660 {
661 	struct igb_adapter *adapter = hw->back;
662 	return adapter->netdev;
663 }
664 
665 /**
666  *  igb_init_module - Driver Registration Routine
667  *
668  *  igb_init_module is the first routine called when the driver is
669  *  loaded. All it does is register with the PCI subsystem.
670  **/
671 static int __init igb_init_module(void)
672 {
673 	int ret;
674 
675 	pr_info("%s - version %s\n",
676 	       igb_driver_string, igb_driver_version);
677 	pr_info("%s\n", igb_copyright);
678 
679 #ifdef CONFIG_IGB_DCA
680 	dca_register_notify(&dca_notifier);
681 #endif
682 	ret = pci_register_driver(&igb_driver);
683 	return ret;
684 }
685 
686 module_init(igb_init_module);
687 
688 /**
689  *  igb_exit_module - Driver Exit Cleanup Routine
690  *
691  *  igb_exit_module is called just before the driver is removed
692  *  from memory.
693  **/
694 static void __exit igb_exit_module(void)
695 {
696 #ifdef CONFIG_IGB_DCA
697 	dca_unregister_notify(&dca_notifier);
698 #endif
699 	pci_unregister_driver(&igb_driver);
700 }
701 
702 module_exit(igb_exit_module);
703 
704 #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
705 /**
706  *  igb_cache_ring_register - Descriptor ring to register mapping
707  *  @adapter: board private structure to initialize
708  *
709  *  Once we know the feature-set enabled for the device, we'll cache
710  *  the register offset the descriptor ring is assigned to.
711  **/
712 static void igb_cache_ring_register(struct igb_adapter *adapter)
713 {
714 	int i = 0, j = 0;
715 	u32 rbase_offset = adapter->vfs_allocated_count;
716 
717 	switch (adapter->hw.mac.type) {
718 	case e1000_82576:
719 		/* The queues are allocated for virtualization such that VF 0
720 		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
721 		 * In order to avoid collision we start at the first free queue
722 		 * and continue consuming queues in the same sequence
723 		 */
724 		if (adapter->vfs_allocated_count) {
725 			for (; i < adapter->rss_queues; i++)
726 				adapter->rx_ring[i]->reg_idx = rbase_offset +
727 							       Q_IDX_82576(i);
728 		}
729 		/* Fall through */
730 	case e1000_82575:
731 	case e1000_82580:
732 	case e1000_i350:
733 	case e1000_i354:
734 	case e1000_i210:
735 	case e1000_i211:
736 		/* Fall through */
737 	default:
738 		for (; i < adapter->num_rx_queues; i++)
739 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
740 		for (; j < adapter->num_tx_queues; j++)
741 			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
742 		break;
743 	}
744 }
745 
746 u32 igb_rd32(struct e1000_hw *hw, u32 reg)
747 {
748 	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
749 	u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
750 	u32 value = 0;
751 
752 	if (E1000_REMOVED(hw_addr))
753 		return ~value;
754 
755 	value = readl(&hw_addr[reg]);
756 
757 	/* reads should not return all F's */
758 	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
759 		struct net_device *netdev = igb->netdev;
760 		hw->hw_addr = NULL;
761 		netif_device_detach(netdev);
762 		netdev_err(netdev, "PCIe link lost, device now detached\n");
763 	}
764 
765 	return value;
766 }
767 
768 /**
769  *  igb_write_ivar - configure ivar for given MSI-X vector
770  *  @hw: pointer to the HW structure
771  *  @msix_vector: vector number we are allocating to a given ring
772  *  @index: row index of IVAR register to write within IVAR table
773  *  @offset: column offset of in IVAR, should be multiple of 8
774  *
775  *  This function is intended to handle the writing of the IVAR register
776  *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
777  *  each containing an cause allocation for an Rx and Tx ring, and a
778  *  variable number of rows depending on the number of queues supported.
779  **/
780 static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
781 			   int index, int offset)
782 {
783 	u32 ivar = array_rd32(E1000_IVAR0, index);
784 
785 	/* clear any bits that are currently set */
786 	ivar &= ~((u32)0xFF << offset);
787 
788 	/* write vector and valid bit */
789 	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
790 
791 	array_wr32(E1000_IVAR0, index, ivar);
792 }
793 
794 #define IGB_N0_QUEUE -1
795 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
796 {
797 	struct igb_adapter *adapter = q_vector->adapter;
798 	struct e1000_hw *hw = &adapter->hw;
799 	int rx_queue = IGB_N0_QUEUE;
800 	int tx_queue = IGB_N0_QUEUE;
801 	u32 msixbm = 0;
802 
803 	if (q_vector->rx.ring)
804 		rx_queue = q_vector->rx.ring->reg_idx;
805 	if (q_vector->tx.ring)
806 		tx_queue = q_vector->tx.ring->reg_idx;
807 
808 	switch (hw->mac.type) {
809 	case e1000_82575:
810 		/* The 82575 assigns vectors using a bitmask, which matches the
811 		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
812 		 * or more queues to a vector, we write the appropriate bits
813 		 * into the MSIXBM register for that vector.
814 		 */
815 		if (rx_queue > IGB_N0_QUEUE)
816 			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
817 		if (tx_queue > IGB_N0_QUEUE)
818 			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
819 		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
820 			msixbm |= E1000_EIMS_OTHER;
821 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
822 		q_vector->eims_value = msixbm;
823 		break;
824 	case e1000_82576:
825 		/* 82576 uses a table that essentially consists of 2 columns
826 		 * with 8 rows.  The ordering is column-major so we use the
827 		 * lower 3 bits as the row index, and the 4th bit as the
828 		 * column offset.
829 		 */
830 		if (rx_queue > IGB_N0_QUEUE)
831 			igb_write_ivar(hw, msix_vector,
832 				       rx_queue & 0x7,
833 				       (rx_queue & 0x8) << 1);
834 		if (tx_queue > IGB_N0_QUEUE)
835 			igb_write_ivar(hw, msix_vector,
836 				       tx_queue & 0x7,
837 				       ((tx_queue & 0x8) << 1) + 8);
838 		q_vector->eims_value = 1 << msix_vector;
839 		break;
840 	case e1000_82580:
841 	case e1000_i350:
842 	case e1000_i354:
843 	case e1000_i210:
844 	case e1000_i211:
845 		/* On 82580 and newer adapters the scheme is similar to 82576
846 		 * however instead of ordering column-major we have things
847 		 * ordered row-major.  So we traverse the table by using
848 		 * bit 0 as the column offset, and the remaining bits as the
849 		 * row index.
850 		 */
851 		if (rx_queue > IGB_N0_QUEUE)
852 			igb_write_ivar(hw, msix_vector,
853 				       rx_queue >> 1,
854 				       (rx_queue & 0x1) << 4);
855 		if (tx_queue > IGB_N0_QUEUE)
856 			igb_write_ivar(hw, msix_vector,
857 				       tx_queue >> 1,
858 				       ((tx_queue & 0x1) << 4) + 8);
859 		q_vector->eims_value = 1 << msix_vector;
860 		break;
861 	default:
862 		BUG();
863 		break;
864 	}
865 
866 	/* add q_vector eims value to global eims_enable_mask */
867 	adapter->eims_enable_mask |= q_vector->eims_value;
868 
869 	/* configure q_vector to set itr on first interrupt */
870 	q_vector->set_itr = 1;
871 }
872 
873 /**
874  *  igb_configure_msix - Configure MSI-X hardware
875  *  @adapter: board private structure to initialize
876  *
877  *  igb_configure_msix sets up the hardware to properly
878  *  generate MSI-X interrupts.
879  **/
880 static void igb_configure_msix(struct igb_adapter *adapter)
881 {
882 	u32 tmp;
883 	int i, vector = 0;
884 	struct e1000_hw *hw = &adapter->hw;
885 
886 	adapter->eims_enable_mask = 0;
887 
888 	/* set vector for other causes, i.e. link changes */
889 	switch (hw->mac.type) {
890 	case e1000_82575:
891 		tmp = rd32(E1000_CTRL_EXT);
892 		/* enable MSI-X PBA support*/
893 		tmp |= E1000_CTRL_EXT_PBA_CLR;
894 
895 		/* Auto-Mask interrupts upon ICR read. */
896 		tmp |= E1000_CTRL_EXT_EIAME;
897 		tmp |= E1000_CTRL_EXT_IRCA;
898 
899 		wr32(E1000_CTRL_EXT, tmp);
900 
901 		/* enable msix_other interrupt */
902 		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
903 		adapter->eims_other = E1000_EIMS_OTHER;
904 
905 		break;
906 
907 	case e1000_82576:
908 	case e1000_82580:
909 	case e1000_i350:
910 	case e1000_i354:
911 	case e1000_i210:
912 	case e1000_i211:
913 		/* Turn on MSI-X capability first, or our settings
914 		 * won't stick.  And it will take days to debug.
915 		 */
916 		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
917 		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
918 		     E1000_GPIE_NSICR);
919 
920 		/* enable msix_other interrupt */
921 		adapter->eims_other = 1 << vector;
922 		tmp = (vector++ | E1000_IVAR_VALID) << 8;
923 
924 		wr32(E1000_IVAR_MISC, tmp);
925 		break;
926 	default:
927 		/* do nothing, since nothing else supports MSI-X */
928 		break;
929 	} /* switch (hw->mac.type) */
930 
931 	adapter->eims_enable_mask |= adapter->eims_other;
932 
933 	for (i = 0; i < adapter->num_q_vectors; i++)
934 		igb_assign_vector(adapter->q_vector[i], vector++);
935 
936 	wrfl();
937 }
938 
939 /**
940  *  igb_request_msix - Initialize MSI-X interrupts
941  *  @adapter: board private structure to initialize
942  *
943  *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
944  *  kernel.
945  **/
946 static int igb_request_msix(struct igb_adapter *adapter)
947 {
948 	struct net_device *netdev = adapter->netdev;
949 	struct e1000_hw *hw = &adapter->hw;
950 	int i, err = 0, vector = 0, free_vector = 0;
951 
952 	err = request_irq(adapter->msix_entries[vector].vector,
953 			  igb_msix_other, 0, netdev->name, adapter);
954 	if (err)
955 		goto err_out;
956 
957 	for (i = 0; i < adapter->num_q_vectors; i++) {
958 		struct igb_q_vector *q_vector = adapter->q_vector[i];
959 
960 		vector++;
961 
962 		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
963 
964 		if (q_vector->rx.ring && q_vector->tx.ring)
965 			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
966 				q_vector->rx.ring->queue_index);
967 		else if (q_vector->tx.ring)
968 			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
969 				q_vector->tx.ring->queue_index);
970 		else if (q_vector->rx.ring)
971 			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
972 				q_vector->rx.ring->queue_index);
973 		else
974 			sprintf(q_vector->name, "%s-unused", netdev->name);
975 
976 		err = request_irq(adapter->msix_entries[vector].vector,
977 				  igb_msix_ring, 0, q_vector->name,
978 				  q_vector);
979 		if (err)
980 			goto err_free;
981 	}
982 
983 	igb_configure_msix(adapter);
984 	return 0;
985 
986 err_free:
987 	/* free already assigned IRQs */
988 	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
989 
990 	vector--;
991 	for (i = 0; i < vector; i++) {
992 		free_irq(adapter->msix_entries[free_vector++].vector,
993 			 adapter->q_vector[i]);
994 	}
995 err_out:
996 	return err;
997 }
998 
999 /**
1000  *  igb_free_q_vector - Free memory allocated for specific interrupt vector
1001  *  @adapter: board private structure to initialize
1002  *  @v_idx: Index of vector to be freed
1003  *
1004  *  This function frees the memory allocated to the q_vector.
1005  **/
1006 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1007 {
1008 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1009 
1010 	adapter->q_vector[v_idx] = NULL;
1011 
1012 	/* igb_get_stats64() might access the rings on this vector,
1013 	 * we must wait a grace period before freeing it.
1014 	 */
1015 	kfree_rcu(q_vector, rcu);
1016 }
1017 
1018 /**
1019  *  igb_reset_q_vector - Reset config for interrupt vector
1020  *  @adapter: board private structure to initialize
1021  *  @v_idx: Index of vector to be reset
1022  *
1023  *  If NAPI is enabled it will delete any references to the
1024  *  NAPI struct. This is preparation for igb_free_q_vector.
1025  **/
1026 static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1027 {
1028 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1029 
1030 	/* Coming from igb_set_interrupt_capability, the vectors are not yet
1031 	 * allocated. So, q_vector is NULL so we should stop here.
1032 	 */
1033 	if (!q_vector)
1034 		return;
1035 
1036 	if (q_vector->tx.ring)
1037 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1038 
1039 	if (q_vector->rx.ring)
1040 		adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
1041 
1042 	netif_napi_del(&q_vector->napi);
1043 
1044 }
1045 
1046 static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1047 {
1048 	int v_idx = adapter->num_q_vectors;
1049 
1050 	if (adapter->flags & IGB_FLAG_HAS_MSIX)
1051 		pci_disable_msix(adapter->pdev);
1052 	else if (adapter->flags & IGB_FLAG_HAS_MSI)
1053 		pci_disable_msi(adapter->pdev);
1054 
1055 	while (v_idx--)
1056 		igb_reset_q_vector(adapter, v_idx);
1057 }
1058 
1059 /**
1060  *  igb_free_q_vectors - Free memory allocated for interrupt vectors
1061  *  @adapter: board private structure to initialize
1062  *
1063  *  This function frees the memory allocated to the q_vectors.  In addition if
1064  *  NAPI is enabled it will delete any references to the NAPI struct prior
1065  *  to freeing the q_vector.
1066  **/
1067 static void igb_free_q_vectors(struct igb_adapter *adapter)
1068 {
1069 	int v_idx = adapter->num_q_vectors;
1070 
1071 	adapter->num_tx_queues = 0;
1072 	adapter->num_rx_queues = 0;
1073 	adapter->num_q_vectors = 0;
1074 
1075 	while (v_idx--) {
1076 		igb_reset_q_vector(adapter, v_idx);
1077 		igb_free_q_vector(adapter, v_idx);
1078 	}
1079 }
1080 
1081 /**
1082  *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1083  *  @adapter: board private structure to initialize
1084  *
1085  *  This function resets the device so that it has 0 Rx queues, Tx queues, and
1086  *  MSI-X interrupts allocated.
1087  */
1088 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1089 {
1090 	igb_free_q_vectors(adapter);
1091 	igb_reset_interrupt_capability(adapter);
1092 }
1093 
1094 /**
1095  *  igb_set_interrupt_capability - set MSI or MSI-X if supported
1096  *  @adapter: board private structure to initialize
1097  *  @msix: boolean value of MSIX capability
1098  *
1099  *  Attempt to configure interrupts using the best available
1100  *  capabilities of the hardware and kernel.
1101  **/
1102 static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1103 {
1104 	int err;
1105 	int numvecs, i;
1106 
1107 	if (!msix)
1108 		goto msi_only;
1109 	adapter->flags |= IGB_FLAG_HAS_MSIX;
1110 
1111 	/* Number of supported queues. */
1112 	adapter->num_rx_queues = adapter->rss_queues;
1113 	if (adapter->vfs_allocated_count)
1114 		adapter->num_tx_queues = 1;
1115 	else
1116 		adapter->num_tx_queues = adapter->rss_queues;
1117 
1118 	/* start with one vector for every Rx queue */
1119 	numvecs = adapter->num_rx_queues;
1120 
1121 	/* if Tx handler is separate add 1 for every Tx queue */
1122 	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1123 		numvecs += adapter->num_tx_queues;
1124 
1125 	/* store the number of vectors reserved for queues */
1126 	adapter->num_q_vectors = numvecs;
1127 
1128 	/* add 1 vector for link status interrupts */
1129 	numvecs++;
1130 	for (i = 0; i < numvecs; i++)
1131 		adapter->msix_entries[i].entry = i;
1132 
1133 	err = pci_enable_msix_range(adapter->pdev,
1134 				    adapter->msix_entries,
1135 				    numvecs,
1136 				    numvecs);
1137 	if (err > 0)
1138 		return;
1139 
1140 	igb_reset_interrupt_capability(adapter);
1141 
1142 	/* If we can't do MSI-X, try MSI */
1143 msi_only:
1144 	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1145 #ifdef CONFIG_PCI_IOV
1146 	/* disable SR-IOV for non MSI-X configurations */
1147 	if (adapter->vf_data) {
1148 		struct e1000_hw *hw = &adapter->hw;
1149 		/* disable iov and allow time for transactions to clear */
1150 		pci_disable_sriov(adapter->pdev);
1151 		msleep(500);
1152 
1153 		kfree(adapter->vf_data);
1154 		adapter->vf_data = NULL;
1155 		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1156 		wrfl();
1157 		msleep(100);
1158 		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1159 	}
1160 #endif
1161 	adapter->vfs_allocated_count = 0;
1162 	adapter->rss_queues = 1;
1163 	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1164 	adapter->num_rx_queues = 1;
1165 	adapter->num_tx_queues = 1;
1166 	adapter->num_q_vectors = 1;
1167 	if (!pci_enable_msi(adapter->pdev))
1168 		adapter->flags |= IGB_FLAG_HAS_MSI;
1169 }
1170 
1171 static void igb_add_ring(struct igb_ring *ring,
1172 			 struct igb_ring_container *head)
1173 {
1174 	head->ring = ring;
1175 	head->count++;
1176 }
1177 
1178 /**
1179  *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
1180  *  @adapter: board private structure to initialize
1181  *  @v_count: q_vectors allocated on adapter, used for ring interleaving
1182  *  @v_idx: index of vector in adapter struct
1183  *  @txr_count: total number of Tx rings to allocate
1184  *  @txr_idx: index of first Tx ring to allocate
1185  *  @rxr_count: total number of Rx rings to allocate
1186  *  @rxr_idx: index of first Rx ring to allocate
1187  *
1188  *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
1189  **/
1190 static int igb_alloc_q_vector(struct igb_adapter *adapter,
1191 			      int v_count, int v_idx,
1192 			      int txr_count, int txr_idx,
1193 			      int rxr_count, int rxr_idx)
1194 {
1195 	struct igb_q_vector *q_vector;
1196 	struct igb_ring *ring;
1197 	int ring_count, size;
1198 
1199 	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
1200 	if (txr_count > 1 || rxr_count > 1)
1201 		return -ENOMEM;
1202 
1203 	ring_count = txr_count + rxr_count;
1204 	size = sizeof(struct igb_q_vector) +
1205 	       (sizeof(struct igb_ring) * ring_count);
1206 
1207 	/* allocate q_vector and rings */
1208 	q_vector = adapter->q_vector[v_idx];
1209 	if (!q_vector)
1210 		q_vector = kzalloc(size, GFP_KERNEL);
1211 	if (!q_vector)
1212 		return -ENOMEM;
1213 
1214 	/* initialize NAPI */
1215 	netif_napi_add(adapter->netdev, &q_vector->napi,
1216 		       igb_poll, 64);
1217 
1218 	/* tie q_vector and adapter together */
1219 	adapter->q_vector[v_idx] = q_vector;
1220 	q_vector->adapter = adapter;
1221 
1222 	/* initialize work limits */
1223 	q_vector->tx.work_limit = adapter->tx_work_limit;
1224 
1225 	/* initialize ITR configuration */
1226 	q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
1227 	q_vector->itr_val = IGB_START_ITR;
1228 
1229 	/* initialize pointer to rings */
1230 	ring = q_vector->ring;
1231 
1232 	/* intialize ITR */
1233 	if (rxr_count) {
1234 		/* rx or rx/tx vector */
1235 		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1236 			q_vector->itr_val = adapter->rx_itr_setting;
1237 	} else {
1238 		/* tx only vector */
1239 		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1240 			q_vector->itr_val = adapter->tx_itr_setting;
1241 	}
1242 
1243 	if (txr_count) {
1244 		/* assign generic ring traits */
1245 		ring->dev = &adapter->pdev->dev;
1246 		ring->netdev = adapter->netdev;
1247 
1248 		/* configure backlink on ring */
1249 		ring->q_vector = q_vector;
1250 
1251 		/* update q_vector Tx values */
1252 		igb_add_ring(ring, &q_vector->tx);
1253 
1254 		/* For 82575, context index must be unique per ring. */
1255 		if (adapter->hw.mac.type == e1000_82575)
1256 			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1257 
1258 		/* apply Tx specific ring traits */
1259 		ring->count = adapter->tx_ring_count;
1260 		ring->queue_index = txr_idx;
1261 
1262 		u64_stats_init(&ring->tx_syncp);
1263 		u64_stats_init(&ring->tx_syncp2);
1264 
1265 		/* assign ring to adapter */
1266 		adapter->tx_ring[txr_idx] = ring;
1267 
1268 		/* push pointer to next ring */
1269 		ring++;
1270 	}
1271 
1272 	if (rxr_count) {
1273 		/* assign generic ring traits */
1274 		ring->dev = &adapter->pdev->dev;
1275 		ring->netdev = adapter->netdev;
1276 
1277 		/* configure backlink on ring */
1278 		ring->q_vector = q_vector;
1279 
1280 		/* update q_vector Rx values */
1281 		igb_add_ring(ring, &q_vector->rx);
1282 
1283 		/* set flag indicating ring supports SCTP checksum offload */
1284 		if (adapter->hw.mac.type >= e1000_82576)
1285 			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1286 
1287 		/* On i350, i354, i210, and i211, loopback VLAN packets
1288 		 * have the tag byte-swapped.
1289 		 */
1290 		if (adapter->hw.mac.type >= e1000_i350)
1291 			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1292 
1293 		/* apply Rx specific ring traits */
1294 		ring->count = adapter->rx_ring_count;
1295 		ring->queue_index = rxr_idx;
1296 
1297 		u64_stats_init(&ring->rx_syncp);
1298 
1299 		/* assign ring to adapter */
1300 		adapter->rx_ring[rxr_idx] = ring;
1301 	}
1302 
1303 	return 0;
1304 }
1305 
1306 
1307 /**
1308  *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
1309  *  @adapter: board private structure to initialize
1310  *
1311  *  We allocate one q_vector per queue interrupt.  If allocation fails we
1312  *  return -ENOMEM.
1313  **/
1314 static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1315 {
1316 	int q_vectors = adapter->num_q_vectors;
1317 	int rxr_remaining = adapter->num_rx_queues;
1318 	int txr_remaining = adapter->num_tx_queues;
1319 	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1320 	int err;
1321 
1322 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
1323 		for (; rxr_remaining; v_idx++) {
1324 			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1325 						 0, 0, 1, rxr_idx);
1326 
1327 			if (err)
1328 				goto err_out;
1329 
1330 			/* update counts and index */
1331 			rxr_remaining--;
1332 			rxr_idx++;
1333 		}
1334 	}
1335 
1336 	for (; v_idx < q_vectors; v_idx++) {
1337 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1338 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1339 
1340 		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1341 					 tqpv, txr_idx, rqpv, rxr_idx);
1342 
1343 		if (err)
1344 			goto err_out;
1345 
1346 		/* update counts and index */
1347 		rxr_remaining -= rqpv;
1348 		txr_remaining -= tqpv;
1349 		rxr_idx++;
1350 		txr_idx++;
1351 	}
1352 
1353 	return 0;
1354 
1355 err_out:
1356 	adapter->num_tx_queues = 0;
1357 	adapter->num_rx_queues = 0;
1358 	adapter->num_q_vectors = 0;
1359 
1360 	while (v_idx--)
1361 		igb_free_q_vector(adapter, v_idx);
1362 
1363 	return -ENOMEM;
1364 }
1365 
1366 /**
1367  *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1368  *  @adapter: board private structure to initialize
1369  *  @msix: boolean value of MSIX capability
1370  *
1371  *  This function initializes the interrupts and allocates all of the queues.
1372  **/
1373 static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1374 {
1375 	struct pci_dev *pdev = adapter->pdev;
1376 	int err;
1377 
1378 	igb_set_interrupt_capability(adapter, msix);
1379 
1380 	err = igb_alloc_q_vectors(adapter);
1381 	if (err) {
1382 		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1383 		goto err_alloc_q_vectors;
1384 	}
1385 
1386 	igb_cache_ring_register(adapter);
1387 
1388 	return 0;
1389 
1390 err_alloc_q_vectors:
1391 	igb_reset_interrupt_capability(adapter);
1392 	return err;
1393 }
1394 
1395 /**
1396  *  igb_request_irq - initialize interrupts
1397  *  @adapter: board private structure to initialize
1398  *
1399  *  Attempts to configure interrupts using the best available
1400  *  capabilities of the hardware and kernel.
1401  **/
1402 static int igb_request_irq(struct igb_adapter *adapter)
1403 {
1404 	struct net_device *netdev = adapter->netdev;
1405 	struct pci_dev *pdev = adapter->pdev;
1406 	int err = 0;
1407 
1408 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1409 		err = igb_request_msix(adapter);
1410 		if (!err)
1411 			goto request_done;
1412 		/* fall back to MSI */
1413 		igb_free_all_tx_resources(adapter);
1414 		igb_free_all_rx_resources(adapter);
1415 
1416 		igb_clear_interrupt_scheme(adapter);
1417 		err = igb_init_interrupt_scheme(adapter, false);
1418 		if (err)
1419 			goto request_done;
1420 
1421 		igb_setup_all_tx_resources(adapter);
1422 		igb_setup_all_rx_resources(adapter);
1423 		igb_configure(adapter);
1424 	}
1425 
1426 	igb_assign_vector(adapter->q_vector[0], 0);
1427 
1428 	if (adapter->flags & IGB_FLAG_HAS_MSI) {
1429 		err = request_irq(pdev->irq, igb_intr_msi, 0,
1430 				  netdev->name, adapter);
1431 		if (!err)
1432 			goto request_done;
1433 
1434 		/* fall back to legacy interrupts */
1435 		igb_reset_interrupt_capability(adapter);
1436 		adapter->flags &= ~IGB_FLAG_HAS_MSI;
1437 	}
1438 
1439 	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1440 			  netdev->name, adapter);
1441 
1442 	if (err)
1443 		dev_err(&pdev->dev, "Error %d getting interrupt\n",
1444 			err);
1445 
1446 request_done:
1447 	return err;
1448 }
1449 
1450 static void igb_free_irq(struct igb_adapter *adapter)
1451 {
1452 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1453 		int vector = 0, i;
1454 
1455 		free_irq(adapter->msix_entries[vector++].vector, adapter);
1456 
1457 		for (i = 0; i < adapter->num_q_vectors; i++)
1458 			free_irq(adapter->msix_entries[vector++].vector,
1459 				 adapter->q_vector[i]);
1460 	} else {
1461 		free_irq(adapter->pdev->irq, adapter);
1462 	}
1463 }
1464 
1465 /**
1466  *  igb_irq_disable - Mask off interrupt generation on the NIC
1467  *  @adapter: board private structure
1468  **/
1469 static void igb_irq_disable(struct igb_adapter *adapter)
1470 {
1471 	struct e1000_hw *hw = &adapter->hw;
1472 
1473 	/* we need to be careful when disabling interrupts.  The VFs are also
1474 	 * mapped into these registers and so clearing the bits can cause
1475 	 * issues on the VF drivers so we only need to clear what we set
1476 	 */
1477 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1478 		u32 regval = rd32(E1000_EIAM);
1479 
1480 		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1481 		wr32(E1000_EIMC, adapter->eims_enable_mask);
1482 		regval = rd32(E1000_EIAC);
1483 		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1484 	}
1485 
1486 	wr32(E1000_IAM, 0);
1487 	wr32(E1000_IMC, ~0);
1488 	wrfl();
1489 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1490 		int i;
1491 
1492 		for (i = 0; i < adapter->num_q_vectors; i++)
1493 			synchronize_irq(adapter->msix_entries[i].vector);
1494 	} else {
1495 		synchronize_irq(adapter->pdev->irq);
1496 	}
1497 }
1498 
1499 /**
1500  *  igb_irq_enable - Enable default interrupt generation settings
1501  *  @adapter: board private structure
1502  **/
1503 static void igb_irq_enable(struct igb_adapter *adapter)
1504 {
1505 	struct e1000_hw *hw = &adapter->hw;
1506 
1507 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1508 		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1509 		u32 regval = rd32(E1000_EIAC);
1510 
1511 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1512 		regval = rd32(E1000_EIAM);
1513 		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1514 		wr32(E1000_EIMS, adapter->eims_enable_mask);
1515 		if (adapter->vfs_allocated_count) {
1516 			wr32(E1000_MBVFIMR, 0xFF);
1517 			ims |= E1000_IMS_VMMB;
1518 		}
1519 		wr32(E1000_IMS, ims);
1520 	} else {
1521 		wr32(E1000_IMS, IMS_ENABLE_MASK |
1522 				E1000_IMS_DRSTA);
1523 		wr32(E1000_IAM, IMS_ENABLE_MASK |
1524 				E1000_IMS_DRSTA);
1525 	}
1526 }
1527 
1528 static void igb_update_mng_vlan(struct igb_adapter *adapter)
1529 {
1530 	struct e1000_hw *hw = &adapter->hw;
1531 	u16 vid = adapter->hw.mng_cookie.vlan_id;
1532 	u16 old_vid = adapter->mng_vlan_id;
1533 
1534 	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1535 		/* add VID to filter table */
1536 		igb_vfta_set(hw, vid, true);
1537 		adapter->mng_vlan_id = vid;
1538 	} else {
1539 		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1540 	}
1541 
1542 	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1543 	    (vid != old_vid) &&
1544 	    !test_bit(old_vid, adapter->active_vlans)) {
1545 		/* remove VID from filter table */
1546 		igb_vfta_set(hw, old_vid, false);
1547 	}
1548 }
1549 
1550 /**
1551  *  igb_release_hw_control - release control of the h/w to f/w
1552  *  @adapter: address of board private structure
1553  *
1554  *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1555  *  For ASF and Pass Through versions of f/w this means that the
1556  *  driver is no longer loaded.
1557  **/
1558 static void igb_release_hw_control(struct igb_adapter *adapter)
1559 {
1560 	struct e1000_hw *hw = &adapter->hw;
1561 	u32 ctrl_ext;
1562 
1563 	/* Let firmware take over control of h/w */
1564 	ctrl_ext = rd32(E1000_CTRL_EXT);
1565 	wr32(E1000_CTRL_EXT,
1566 			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1567 }
1568 
1569 /**
1570  *  igb_get_hw_control - get control of the h/w from f/w
1571  *  @adapter: address of board private structure
1572  *
1573  *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1574  *  For ASF and Pass Through versions of f/w this means that
1575  *  the driver is loaded.
1576  **/
1577 static void igb_get_hw_control(struct igb_adapter *adapter)
1578 {
1579 	struct e1000_hw *hw = &adapter->hw;
1580 	u32 ctrl_ext;
1581 
1582 	/* Let firmware know the driver has taken over */
1583 	ctrl_ext = rd32(E1000_CTRL_EXT);
1584 	wr32(E1000_CTRL_EXT,
1585 			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1586 }
1587 
1588 /**
1589  *  igb_configure - configure the hardware for RX and TX
1590  *  @adapter: private board structure
1591  **/
1592 static void igb_configure(struct igb_adapter *adapter)
1593 {
1594 	struct net_device *netdev = adapter->netdev;
1595 	int i;
1596 
1597 	igb_get_hw_control(adapter);
1598 	igb_set_rx_mode(netdev);
1599 
1600 	igb_restore_vlan(adapter);
1601 
1602 	igb_setup_tctl(adapter);
1603 	igb_setup_mrqc(adapter);
1604 	igb_setup_rctl(adapter);
1605 
1606 	igb_configure_tx(adapter);
1607 	igb_configure_rx(adapter);
1608 
1609 	igb_rx_fifo_flush_82575(&adapter->hw);
1610 
1611 	/* call igb_desc_unused which always leaves
1612 	 * at least 1 descriptor unused to make sure
1613 	 * next_to_use != next_to_clean
1614 	 */
1615 	for (i = 0; i < adapter->num_rx_queues; i++) {
1616 		struct igb_ring *ring = adapter->rx_ring[i];
1617 		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
1618 	}
1619 }
1620 
1621 /**
1622  *  igb_power_up_link - Power up the phy/serdes link
1623  *  @adapter: address of board private structure
1624  **/
1625 void igb_power_up_link(struct igb_adapter *adapter)
1626 {
1627 	igb_reset_phy(&adapter->hw);
1628 
1629 	if (adapter->hw.phy.media_type == e1000_media_type_copper)
1630 		igb_power_up_phy_copper(&adapter->hw);
1631 	else
1632 		igb_power_up_serdes_link_82575(&adapter->hw);
1633 
1634 	igb_setup_link(&adapter->hw);
1635 }
1636 
1637 /**
1638  *  igb_power_down_link - Power down the phy/serdes link
1639  *  @adapter: address of board private structure
1640  */
1641 static void igb_power_down_link(struct igb_adapter *adapter)
1642 {
1643 	if (adapter->hw.phy.media_type == e1000_media_type_copper)
1644 		igb_power_down_phy_copper_82575(&adapter->hw);
1645 	else
1646 		igb_shutdown_serdes_link_82575(&adapter->hw);
1647 }
1648 
1649 /**
1650  * Detect and switch function for Media Auto Sense
1651  * @adapter: address of the board private structure
1652  **/
1653 static void igb_check_swap_media(struct igb_adapter *adapter)
1654 {
1655 	struct e1000_hw *hw = &adapter->hw;
1656 	u32 ctrl_ext, connsw;
1657 	bool swap_now = false;
1658 
1659 	ctrl_ext = rd32(E1000_CTRL_EXT);
1660 	connsw = rd32(E1000_CONNSW);
1661 
1662 	/* need to live swap if current media is copper and we have fiber/serdes
1663 	 * to go to.
1664 	 */
1665 
1666 	if ((hw->phy.media_type == e1000_media_type_copper) &&
1667 	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
1668 		swap_now = true;
1669 	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
1670 		/* copper signal takes time to appear */
1671 		if (adapter->copper_tries < 4) {
1672 			adapter->copper_tries++;
1673 			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
1674 			wr32(E1000_CONNSW, connsw);
1675 			return;
1676 		} else {
1677 			adapter->copper_tries = 0;
1678 			if ((connsw & E1000_CONNSW_PHYSD) &&
1679 			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
1680 				swap_now = true;
1681 				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
1682 				wr32(E1000_CONNSW, connsw);
1683 			}
1684 		}
1685 	}
1686 
1687 	if (!swap_now)
1688 		return;
1689 
1690 	switch (hw->phy.media_type) {
1691 	case e1000_media_type_copper:
1692 		netdev_info(adapter->netdev,
1693 			"MAS: changing media to fiber/serdes\n");
1694 		ctrl_ext |=
1695 			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
1696 		adapter->flags |= IGB_FLAG_MEDIA_RESET;
1697 		adapter->copper_tries = 0;
1698 		break;
1699 	case e1000_media_type_internal_serdes:
1700 	case e1000_media_type_fiber:
1701 		netdev_info(adapter->netdev,
1702 			"MAS: changing media to copper\n");
1703 		ctrl_ext &=
1704 			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
1705 		adapter->flags |= IGB_FLAG_MEDIA_RESET;
1706 		break;
1707 	default:
1708 		/* shouldn't get here during regular operation */
1709 		netdev_err(adapter->netdev,
1710 			"AMS: Invalid media type found, returning\n");
1711 		break;
1712 	}
1713 	wr32(E1000_CTRL_EXT, ctrl_ext);
1714 }
1715 
1716 /**
1717  *  igb_up - Open the interface and prepare it to handle traffic
1718  *  @adapter: board private structure
1719  **/
1720 int igb_up(struct igb_adapter *adapter)
1721 {
1722 	struct e1000_hw *hw = &adapter->hw;
1723 	int i;
1724 
1725 	/* hardware has been reset, we need to reload some things */
1726 	igb_configure(adapter);
1727 
1728 	clear_bit(__IGB_DOWN, &adapter->state);
1729 
1730 	for (i = 0; i < adapter->num_q_vectors; i++)
1731 		napi_enable(&(adapter->q_vector[i]->napi));
1732 
1733 	if (adapter->flags & IGB_FLAG_HAS_MSIX)
1734 		igb_configure_msix(adapter);
1735 	else
1736 		igb_assign_vector(adapter->q_vector[0], 0);
1737 
1738 	/* Clear any pending interrupts. */
1739 	rd32(E1000_ICR);
1740 	igb_irq_enable(adapter);
1741 
1742 	/* notify VFs that reset has been completed */
1743 	if (adapter->vfs_allocated_count) {
1744 		u32 reg_data = rd32(E1000_CTRL_EXT);
1745 
1746 		reg_data |= E1000_CTRL_EXT_PFRSTD;
1747 		wr32(E1000_CTRL_EXT, reg_data);
1748 	}
1749 
1750 	netif_tx_start_all_queues(adapter->netdev);
1751 
1752 	/* start the watchdog. */
1753 	hw->mac.get_link_status = 1;
1754 	schedule_work(&adapter->watchdog_task);
1755 
1756 	if ((adapter->flags & IGB_FLAG_EEE) &&
1757 	    (!hw->dev_spec._82575.eee_disable))
1758 		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
1759 
1760 	return 0;
1761 }
1762 
1763 void igb_down(struct igb_adapter *adapter)
1764 {
1765 	struct net_device *netdev = adapter->netdev;
1766 	struct e1000_hw *hw = &adapter->hw;
1767 	u32 tctl, rctl;
1768 	int i;
1769 
1770 	/* signal that we're down so the interrupt handler does not
1771 	 * reschedule our watchdog timer
1772 	 */
1773 	set_bit(__IGB_DOWN, &adapter->state);
1774 
1775 	/* disable receives in the hardware */
1776 	rctl = rd32(E1000_RCTL);
1777 	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1778 	/* flush and sleep below */
1779 
1780 	netif_tx_stop_all_queues(netdev);
1781 
1782 	/* disable transmits in the hardware */
1783 	tctl = rd32(E1000_TCTL);
1784 	tctl &= ~E1000_TCTL_EN;
1785 	wr32(E1000_TCTL, tctl);
1786 	/* flush both disables and wait for them to finish */
1787 	wrfl();
1788 	usleep_range(10000, 11000);
1789 
1790 	igb_irq_disable(adapter);
1791 
1792 	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
1793 
1794 	for (i = 0; i < adapter->num_q_vectors; i++) {
1795 		napi_synchronize(&(adapter->q_vector[i]->napi));
1796 		napi_disable(&(adapter->q_vector[i]->napi));
1797 	}
1798 
1799 
1800 	del_timer_sync(&adapter->watchdog_timer);
1801 	del_timer_sync(&adapter->phy_info_timer);
1802 
1803 	netif_carrier_off(netdev);
1804 
1805 	/* record the stats before reset*/
1806 	spin_lock(&adapter->stats64_lock);
1807 	igb_update_stats(adapter, &adapter->stats64);
1808 	spin_unlock(&adapter->stats64_lock);
1809 
1810 	adapter->link_speed = 0;
1811 	adapter->link_duplex = 0;
1812 
1813 	if (!pci_channel_offline(adapter->pdev))
1814 		igb_reset(adapter);
1815 	igb_clean_all_tx_rings(adapter);
1816 	igb_clean_all_rx_rings(adapter);
1817 #ifdef CONFIG_IGB_DCA
1818 
1819 	/* since we reset the hardware DCA settings were cleared */
1820 	igb_setup_dca(adapter);
1821 #endif
1822 }
1823 
1824 void igb_reinit_locked(struct igb_adapter *adapter)
1825 {
1826 	WARN_ON(in_interrupt());
1827 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1828 		usleep_range(1000, 2000);
1829 	igb_down(adapter);
1830 	igb_up(adapter);
1831 	clear_bit(__IGB_RESETTING, &adapter->state);
1832 }
1833 
1834 /** igb_enable_mas - Media Autosense re-enable after swap
1835  *
1836  * @adapter: adapter struct
1837  **/
1838 static s32 igb_enable_mas(struct igb_adapter *adapter)
1839 {
1840 	struct e1000_hw *hw = &adapter->hw;
1841 	u32 connsw;
1842 	s32 ret_val = 0;
1843 
1844 	connsw = rd32(E1000_CONNSW);
1845 	if (!(hw->phy.media_type == e1000_media_type_copper))
1846 		return ret_val;
1847 
1848 	/* configure for SerDes media detect */
1849 	if (!(connsw & E1000_CONNSW_SERDESD)) {
1850 		connsw |= E1000_CONNSW_ENRGSRC;
1851 		connsw |= E1000_CONNSW_AUTOSENSE_EN;
1852 		wr32(E1000_CONNSW, connsw);
1853 		wrfl();
1854 	} else if (connsw & E1000_CONNSW_SERDESD) {
1855 		/* already SerDes, no need to enable anything */
1856 		return ret_val;
1857 	} else {
1858 		netdev_info(adapter->netdev,
1859 			"MAS: Unable to configure feature, disabling..\n");
1860 		adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
1861 	}
1862 	return ret_val;
1863 }
1864 
1865 void igb_reset(struct igb_adapter *adapter)
1866 {
1867 	struct pci_dev *pdev = adapter->pdev;
1868 	struct e1000_hw *hw = &adapter->hw;
1869 	struct e1000_mac_info *mac = &hw->mac;
1870 	struct e1000_fc_info *fc = &hw->fc;
1871 	u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
1872 
1873 	/* Repartition Pba for greater than 9k mtu
1874 	 * To take effect CTRL.RST is required.
1875 	 */
1876 	switch (mac->type) {
1877 	case e1000_i350:
1878 	case e1000_i354:
1879 	case e1000_82580:
1880 		pba = rd32(E1000_RXPBS);
1881 		pba = igb_rxpbs_adjust_82580(pba);
1882 		break;
1883 	case e1000_82576:
1884 		pba = rd32(E1000_RXPBS);
1885 		pba &= E1000_RXPBS_SIZE_MASK_82576;
1886 		break;
1887 	case e1000_82575:
1888 	case e1000_i210:
1889 	case e1000_i211:
1890 	default:
1891 		pba = E1000_PBA_34K;
1892 		break;
1893 	}
1894 
1895 	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1896 	    (mac->type < e1000_82576)) {
1897 		/* adjust PBA for jumbo frames */
1898 		wr32(E1000_PBA, pba);
1899 
1900 		/* To maintain wire speed transmits, the Tx FIFO should be
1901 		 * large enough to accommodate two full transmit packets,
1902 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
1903 		 * the Rx FIFO should be large enough to accommodate at least
1904 		 * one full receive packet and is similarly rounded up and
1905 		 * expressed in KB.
1906 		 */
1907 		pba = rd32(E1000_PBA);
1908 		/* upper 16 bits has Tx packet buffer allocation size in KB */
1909 		tx_space = pba >> 16;
1910 		/* lower 16 bits has Rx packet buffer allocation size in KB */
1911 		pba &= 0xffff;
1912 		/* the Tx fifo also stores 16 bytes of information about the Tx
1913 		 * but don't include ethernet FCS because hardware appends it
1914 		 */
1915 		min_tx_space = (adapter->max_frame_size +
1916 				sizeof(union e1000_adv_tx_desc) -
1917 				ETH_FCS_LEN) * 2;
1918 		min_tx_space = ALIGN(min_tx_space, 1024);
1919 		min_tx_space >>= 10;
1920 		/* software strips receive CRC, so leave room for it */
1921 		min_rx_space = adapter->max_frame_size;
1922 		min_rx_space = ALIGN(min_rx_space, 1024);
1923 		min_rx_space >>= 10;
1924 
1925 		/* If current Tx allocation is less than the min Tx FIFO size,
1926 		 * and the min Tx FIFO size is less than the current Rx FIFO
1927 		 * allocation, take space away from current Rx allocation
1928 		 */
1929 		if (tx_space < min_tx_space &&
1930 		    ((min_tx_space - tx_space) < pba)) {
1931 			pba = pba - (min_tx_space - tx_space);
1932 
1933 			/* if short on Rx space, Rx wins and must trump Tx
1934 			 * adjustment
1935 			 */
1936 			if (pba < min_rx_space)
1937 				pba = min_rx_space;
1938 		}
1939 		wr32(E1000_PBA, pba);
1940 	}
1941 
1942 	/* flow control settings */
1943 	/* The high water mark must be low enough to fit one full frame
1944 	 * (or the size used for early receive) above it in the Rx FIFO.
1945 	 * Set it to the lower of:
1946 	 * - 90% of the Rx FIFO size, or
1947 	 * - the full Rx FIFO size minus one full frame
1948 	 */
1949 	hwm = min(((pba << 10) * 9 / 10),
1950 			((pba << 10) - 2 * adapter->max_frame_size));
1951 
1952 	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
1953 	fc->low_water = fc->high_water - 16;
1954 	fc->pause_time = 0xFFFF;
1955 	fc->send_xon = 1;
1956 	fc->current_mode = fc->requested_mode;
1957 
1958 	/* disable receive for all VFs and wait one second */
1959 	if (adapter->vfs_allocated_count) {
1960 		int i;
1961 
1962 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1963 			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
1964 
1965 		/* ping all the active vfs to let them know we are going down */
1966 		igb_ping_all_vfs(adapter);
1967 
1968 		/* disable transmits and receives */
1969 		wr32(E1000_VFRE, 0);
1970 		wr32(E1000_VFTE, 0);
1971 	}
1972 
1973 	/* Allow time for pending master requests to run */
1974 	hw->mac.ops.reset_hw(hw);
1975 	wr32(E1000_WUC, 0);
1976 
1977 	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
1978 		/* need to resetup here after media swap */
1979 		adapter->ei.get_invariants(hw);
1980 		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
1981 	}
1982 	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
1983 		if (igb_enable_mas(adapter))
1984 			dev_err(&pdev->dev,
1985 				"Error enabling Media Auto Sense\n");
1986 	}
1987 	if (hw->mac.ops.init_hw(hw))
1988 		dev_err(&pdev->dev, "Hardware Error\n");
1989 
1990 	/* Flow control settings reset on hardware reset, so guarantee flow
1991 	 * control is off when forcing speed.
1992 	 */
1993 	if (!hw->mac.autoneg)
1994 		igb_force_mac_fc(hw);
1995 
1996 	igb_init_dmac(adapter, pba);
1997 #ifdef CONFIG_IGB_HWMON
1998 	/* Re-initialize the thermal sensor on i350 devices. */
1999 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
2000 		if (mac->type == e1000_i350 && hw->bus.func == 0) {
2001 			/* If present, re-initialize the external thermal sensor
2002 			 * interface.
2003 			 */
2004 			if (adapter->ets)
2005 				mac->ops.init_thermal_sensor_thresh(hw);
2006 		}
2007 	}
2008 #endif
2009 	/* Re-establish EEE setting */
2010 	if (hw->phy.media_type == e1000_media_type_copper) {
2011 		switch (mac->type) {
2012 		case e1000_i350:
2013 		case e1000_i210:
2014 		case e1000_i211:
2015 			igb_set_eee_i350(hw, true, true);
2016 			break;
2017 		case e1000_i354:
2018 			igb_set_eee_i354(hw, true, true);
2019 			break;
2020 		default:
2021 			break;
2022 		}
2023 	}
2024 	if (!netif_running(adapter->netdev))
2025 		igb_power_down_link(adapter);
2026 
2027 	igb_update_mng_vlan(adapter);
2028 
2029 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2030 	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2031 
2032 	/* Re-enable PTP, where applicable. */
2033 	igb_ptp_reset(adapter);
2034 
2035 	igb_get_phy_info(hw);
2036 }
2037 
2038 static netdev_features_t igb_fix_features(struct net_device *netdev,
2039 	netdev_features_t features)
2040 {
2041 	/* Since there is no support for separate Rx/Tx vlan accel
2042 	 * enable/disable make sure Tx flag is always in same state as Rx.
2043 	 */
2044 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2045 		features |= NETIF_F_HW_VLAN_CTAG_TX;
2046 	else
2047 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2048 
2049 	return features;
2050 }
2051 
2052 static int igb_set_features(struct net_device *netdev,
2053 	netdev_features_t features)
2054 {
2055 	netdev_features_t changed = netdev->features ^ features;
2056 	struct igb_adapter *adapter = netdev_priv(netdev);
2057 
2058 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2059 		igb_vlan_mode(netdev, features);
2060 
2061 	if (!(changed & NETIF_F_RXALL))
2062 		return 0;
2063 
2064 	netdev->features = features;
2065 
2066 	if (netif_running(netdev))
2067 		igb_reinit_locked(adapter);
2068 	else
2069 		igb_reset(adapter);
2070 
2071 	return 0;
2072 }
2073 
2074 static const struct net_device_ops igb_netdev_ops = {
2075 	.ndo_open		= igb_open,
2076 	.ndo_stop		= igb_close,
2077 	.ndo_start_xmit		= igb_xmit_frame,
2078 	.ndo_get_stats64	= igb_get_stats64,
2079 	.ndo_set_rx_mode	= igb_set_rx_mode,
2080 	.ndo_set_mac_address	= igb_set_mac,
2081 	.ndo_change_mtu		= igb_change_mtu,
2082 	.ndo_do_ioctl		= igb_ioctl,
2083 	.ndo_tx_timeout		= igb_tx_timeout,
2084 	.ndo_validate_addr	= eth_validate_addr,
2085 	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
2086 	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
2087 	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
2088 	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
2089 	.ndo_set_vf_rate	= igb_ndo_set_vf_bw,
2090 	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
2091 	.ndo_get_vf_config	= igb_ndo_get_vf_config,
2092 #ifdef CONFIG_NET_POLL_CONTROLLER
2093 	.ndo_poll_controller	= igb_netpoll,
2094 #endif
2095 	.ndo_fix_features	= igb_fix_features,
2096 	.ndo_set_features	= igb_set_features,
2097 };
2098 
2099 /**
2100  * igb_set_fw_version - Configure version string for ethtool
2101  * @adapter: adapter struct
2102  **/
2103 void igb_set_fw_version(struct igb_adapter *adapter)
2104 {
2105 	struct e1000_hw *hw = &adapter->hw;
2106 	struct e1000_fw_version fw;
2107 
2108 	igb_get_fw_version(hw, &fw);
2109 
2110 	switch (hw->mac.type) {
2111 	case e1000_i210:
2112 	case e1000_i211:
2113 		if (!(igb_get_flash_presence_i210(hw))) {
2114 			snprintf(adapter->fw_version,
2115 				 sizeof(adapter->fw_version),
2116 				 "%2d.%2d-%d",
2117 				 fw.invm_major, fw.invm_minor,
2118 				 fw.invm_img_type);
2119 			break;
2120 		}
2121 		/* fall through */
2122 	default:
2123 		/* if option is rom valid, display its version too */
2124 		if (fw.or_valid) {
2125 			snprintf(adapter->fw_version,
2126 				 sizeof(adapter->fw_version),
2127 				 "%d.%d, 0x%08x, %d.%d.%d",
2128 				 fw.eep_major, fw.eep_minor, fw.etrack_id,
2129 				 fw.or_major, fw.or_build, fw.or_patch);
2130 		/* no option rom */
2131 		} else if (fw.etrack_id != 0X0000) {
2132 			snprintf(adapter->fw_version,
2133 			    sizeof(adapter->fw_version),
2134 			    "%d.%d, 0x%08x",
2135 			    fw.eep_major, fw.eep_minor, fw.etrack_id);
2136 		} else {
2137 		snprintf(adapter->fw_version,
2138 		    sizeof(adapter->fw_version),
2139 		    "%d.%d.%d",
2140 		    fw.eep_major, fw.eep_minor, fw.eep_build);
2141 		}
2142 		break;
2143 	}
2144 }
2145 
2146 /**
2147  * igb_init_mas - init Media Autosense feature if enabled in the NVM
2148  *
2149  * @adapter: adapter struct
2150  **/
2151 static void igb_init_mas(struct igb_adapter *adapter)
2152 {
2153 	struct e1000_hw *hw = &adapter->hw;
2154 	u16 eeprom_data;
2155 
2156 	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2157 	switch (hw->bus.func) {
2158 	case E1000_FUNC_0:
2159 		if (eeprom_data & IGB_MAS_ENABLE_0) {
2160 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2161 			netdev_info(adapter->netdev,
2162 				"MAS: Enabling Media Autosense for port %d\n",
2163 				hw->bus.func);
2164 		}
2165 		break;
2166 	case E1000_FUNC_1:
2167 		if (eeprom_data & IGB_MAS_ENABLE_1) {
2168 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2169 			netdev_info(adapter->netdev,
2170 				"MAS: Enabling Media Autosense for port %d\n",
2171 				hw->bus.func);
2172 		}
2173 		break;
2174 	case E1000_FUNC_2:
2175 		if (eeprom_data & IGB_MAS_ENABLE_2) {
2176 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2177 			netdev_info(adapter->netdev,
2178 				"MAS: Enabling Media Autosense for port %d\n",
2179 				hw->bus.func);
2180 		}
2181 		break;
2182 	case E1000_FUNC_3:
2183 		if (eeprom_data & IGB_MAS_ENABLE_3) {
2184 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2185 			netdev_info(adapter->netdev,
2186 				"MAS: Enabling Media Autosense for port %d\n",
2187 				hw->bus.func);
2188 		}
2189 		break;
2190 	default:
2191 		/* Shouldn't get here */
2192 		netdev_err(adapter->netdev,
2193 			"MAS: Invalid port configuration, returning\n");
2194 		break;
2195 	}
2196 }
2197 
2198 /**
2199  *  igb_init_i2c - Init I2C interface
2200  *  @adapter: pointer to adapter structure
2201  **/
2202 static s32 igb_init_i2c(struct igb_adapter *adapter)
2203 {
2204 	s32 status = 0;
2205 
2206 	/* I2C interface supported on i350 devices */
2207 	if (adapter->hw.mac.type != e1000_i350)
2208 		return 0;
2209 
2210 	/* Initialize the i2c bus which is controlled by the registers.
2211 	 * This bus will use the i2c_algo_bit structue that implements
2212 	 * the protocol through toggling of the 4 bits in the register.
2213 	 */
2214 	adapter->i2c_adap.owner = THIS_MODULE;
2215 	adapter->i2c_algo = igb_i2c_algo;
2216 	adapter->i2c_algo.data = adapter;
2217 	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2218 	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2219 	strlcpy(adapter->i2c_adap.name, "igb BB",
2220 		sizeof(adapter->i2c_adap.name));
2221 	status = i2c_bit_add_bus(&adapter->i2c_adap);
2222 	return status;
2223 }
2224 
2225 /**
2226  *  igb_probe - Device Initialization Routine
2227  *  @pdev: PCI device information struct
2228  *  @ent: entry in igb_pci_tbl
2229  *
2230  *  Returns 0 on success, negative on failure
2231  *
2232  *  igb_probe initializes an adapter identified by a pci_dev structure.
2233  *  The OS initialization, configuring of the adapter private structure,
2234  *  and a hardware reset occur.
2235  **/
2236 static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2237 {
2238 	struct net_device *netdev;
2239 	struct igb_adapter *adapter;
2240 	struct e1000_hw *hw;
2241 	u16 eeprom_data = 0;
2242 	s32 ret_val;
2243 	static int global_quad_port_a; /* global quad port a indication */
2244 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
2245 	int err, pci_using_dac;
2246 	u8 part_str[E1000_PBANUM_LENGTH];
2247 
2248 	/* Catch broken hardware that put the wrong VF device ID in
2249 	 * the PCIe SR-IOV capability.
2250 	 */
2251 	if (pdev->is_virtfn) {
2252 		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
2253 			pci_name(pdev), pdev->vendor, pdev->device);
2254 		return -EINVAL;
2255 	}
2256 
2257 	err = pci_enable_device_mem(pdev);
2258 	if (err)
2259 		return err;
2260 
2261 	pci_using_dac = 0;
2262 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2263 	if (!err) {
2264 		pci_using_dac = 1;
2265 	} else {
2266 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2267 		if (err) {
2268 			dev_err(&pdev->dev,
2269 				"No usable DMA configuration, aborting\n");
2270 			goto err_dma;
2271 		}
2272 	}
2273 
2274 	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
2275 					   IORESOURCE_MEM),
2276 					   igb_driver_name);
2277 	if (err)
2278 		goto err_pci_reg;
2279 
2280 	pci_enable_pcie_error_reporting(pdev);
2281 
2282 	pci_set_master(pdev);
2283 	pci_save_state(pdev);
2284 
2285 	err = -ENOMEM;
2286 	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
2287 				   IGB_MAX_TX_QUEUES);
2288 	if (!netdev)
2289 		goto err_alloc_etherdev;
2290 
2291 	SET_NETDEV_DEV(netdev, &pdev->dev);
2292 
2293 	pci_set_drvdata(pdev, netdev);
2294 	adapter = netdev_priv(netdev);
2295 	adapter->netdev = netdev;
2296 	adapter->pdev = pdev;
2297 	hw = &adapter->hw;
2298 	hw->back = adapter;
2299 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2300 
2301 	err = -EIO;
2302 	hw->hw_addr = pci_iomap(pdev, 0, 0);
2303 	if (!hw->hw_addr)
2304 		goto err_ioremap;
2305 
2306 	netdev->netdev_ops = &igb_netdev_ops;
2307 	igb_set_ethtool_ops(netdev);
2308 	netdev->watchdog_timeo = 5 * HZ;
2309 
2310 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2311 
2312 	netdev->mem_start = pci_resource_start(pdev, 0);
2313 	netdev->mem_end = pci_resource_end(pdev, 0);
2314 
2315 	/* PCI config space info */
2316 	hw->vendor_id = pdev->vendor;
2317 	hw->device_id = pdev->device;
2318 	hw->revision_id = pdev->revision;
2319 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
2320 	hw->subsystem_device_id = pdev->subsystem_device;
2321 
2322 	/* Copy the default MAC, PHY and NVM function pointers */
2323 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
2324 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
2325 	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
2326 	/* Initialize skew-specific constants */
2327 	err = ei->get_invariants(hw);
2328 	if (err)
2329 		goto err_sw_init;
2330 
2331 	/* setup the private structure */
2332 	err = igb_sw_init(adapter);
2333 	if (err)
2334 		goto err_sw_init;
2335 
2336 	igb_get_bus_info_pcie(hw);
2337 
2338 	hw->phy.autoneg_wait_to_complete = false;
2339 
2340 	/* Copper options */
2341 	if (hw->phy.media_type == e1000_media_type_copper) {
2342 		hw->phy.mdix = AUTO_ALL_MODES;
2343 		hw->phy.disable_polarity_correction = false;
2344 		hw->phy.ms_type = e1000_ms_hw_default;
2345 	}
2346 
2347 	if (igb_check_reset_block(hw))
2348 		dev_info(&pdev->dev,
2349 			"PHY reset is blocked due to SOL/IDER session.\n");
2350 
2351 	/* features is initialized to 0 in allocation, it might have bits
2352 	 * set by igb_sw_init so we should use an or instead of an
2353 	 * assignment.
2354 	 */
2355 	netdev->features |= NETIF_F_SG |
2356 			    NETIF_F_IP_CSUM |
2357 			    NETIF_F_IPV6_CSUM |
2358 			    NETIF_F_TSO |
2359 			    NETIF_F_TSO6 |
2360 			    NETIF_F_RXHASH |
2361 			    NETIF_F_RXCSUM |
2362 			    NETIF_F_HW_VLAN_CTAG_RX |
2363 			    NETIF_F_HW_VLAN_CTAG_TX;
2364 
2365 	/* copy netdev features into list of user selectable features */
2366 	netdev->hw_features |= netdev->features;
2367 	netdev->hw_features |= NETIF_F_RXALL;
2368 
2369 	/* set this bit last since it cannot be part of hw_features */
2370 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2371 
2372 	netdev->vlan_features |= NETIF_F_TSO |
2373 				 NETIF_F_TSO6 |
2374 				 NETIF_F_IP_CSUM |
2375 				 NETIF_F_IPV6_CSUM |
2376 				 NETIF_F_SG;
2377 
2378 	netdev->priv_flags |= IFF_SUPP_NOFCS;
2379 
2380 	if (pci_using_dac) {
2381 		netdev->features |= NETIF_F_HIGHDMA;
2382 		netdev->vlan_features |= NETIF_F_HIGHDMA;
2383 	}
2384 
2385 	if (hw->mac.type >= e1000_82576) {
2386 		netdev->hw_features |= NETIF_F_SCTP_CSUM;
2387 		netdev->features |= NETIF_F_SCTP_CSUM;
2388 	}
2389 
2390 	netdev->priv_flags |= IFF_UNICAST_FLT;
2391 
2392 	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
2393 
2394 	/* before reading the NVM, reset the controller to put the device in a
2395 	 * known good starting state
2396 	 */
2397 	hw->mac.ops.reset_hw(hw);
2398 
2399 	/* make sure the NVM is good , i211/i210 parts can have special NVM
2400 	 * that doesn't contain a checksum
2401 	 */
2402 	switch (hw->mac.type) {
2403 	case e1000_i210:
2404 	case e1000_i211:
2405 		if (igb_get_flash_presence_i210(hw)) {
2406 			if (hw->nvm.ops.validate(hw) < 0) {
2407 				dev_err(&pdev->dev,
2408 					"The NVM Checksum Is Not Valid\n");
2409 				err = -EIO;
2410 				goto err_eeprom;
2411 			}
2412 		}
2413 		break;
2414 	default:
2415 		if (hw->nvm.ops.validate(hw) < 0) {
2416 			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2417 			err = -EIO;
2418 			goto err_eeprom;
2419 		}
2420 		break;
2421 	}
2422 
2423 	/* copy the MAC address out of the NVM */
2424 	if (hw->mac.ops.read_mac_addr(hw))
2425 		dev_err(&pdev->dev, "NVM Read Error\n");
2426 
2427 	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2428 
2429 	if (!is_valid_ether_addr(netdev->dev_addr)) {
2430 		dev_err(&pdev->dev, "Invalid MAC Address\n");
2431 		err = -EIO;
2432 		goto err_eeprom;
2433 	}
2434 
2435 	/* get firmware version for ethtool -i */
2436 	igb_set_fw_version(adapter);
2437 
2438 	/* configure RXPBSIZE and TXPBSIZE */
2439 	if (hw->mac.type == e1000_i210) {
2440 		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
2441 		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
2442 	}
2443 
2444 	setup_timer(&adapter->watchdog_timer, igb_watchdog,
2445 		    (unsigned long) adapter);
2446 	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
2447 		    (unsigned long) adapter);
2448 
2449 	INIT_WORK(&adapter->reset_task, igb_reset_task);
2450 	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2451 
2452 	/* Initialize link properties that are user-changeable */
2453 	adapter->fc_autoneg = true;
2454 	hw->mac.autoneg = true;
2455 	hw->phy.autoneg_advertised = 0x2f;
2456 
2457 	hw->fc.requested_mode = e1000_fc_default;
2458 	hw->fc.current_mode = e1000_fc_default;
2459 
2460 	igb_validate_mdi_setting(hw);
2461 
2462 	/* By default, support wake on port A */
2463 	if (hw->bus.func == 0)
2464 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2465 
2466 	/* Check the NVM for wake support on non-port A ports */
2467 	if (hw->mac.type >= e1000_82580)
2468 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2469 				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2470 				 &eeprom_data);
2471 	else if (hw->bus.func == 1)
2472 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2473 
2474 	if (eeprom_data & IGB_EEPROM_APME)
2475 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2476 
2477 	/* now that we have the eeprom settings, apply the special cases where
2478 	 * the eeprom may be wrong or the board simply won't support wake on
2479 	 * lan on a particular port
2480 	 */
2481 	switch (pdev->device) {
2482 	case E1000_DEV_ID_82575GB_QUAD_COPPER:
2483 		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2484 		break;
2485 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
2486 	case E1000_DEV_ID_82576_FIBER:
2487 	case E1000_DEV_ID_82576_SERDES:
2488 		/* Wake events only supported on port A for dual fiber
2489 		 * regardless of eeprom setting
2490 		 */
2491 		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2492 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2493 		break;
2494 	case E1000_DEV_ID_82576_QUAD_COPPER:
2495 	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
2496 		/* if quad port adapter, disable WoL on all but port A */
2497 		if (global_quad_port_a != 0)
2498 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2499 		else
2500 			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2501 		/* Reset for multiple quad port adapters */
2502 		if (++global_quad_port_a == 4)
2503 			global_quad_port_a = 0;
2504 		break;
2505 	default:
2506 		/* If the device can't wake, don't set software support */
2507 		if (!device_can_wakeup(&adapter->pdev->dev))
2508 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2509 	}
2510 
2511 	/* initialize the wol settings based on the eeprom settings */
2512 	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
2513 		adapter->wol |= E1000_WUFC_MAG;
2514 
2515 	/* Some vendors want WoL disabled by default, but still supported */
2516 	if ((hw->mac.type == e1000_i350) &&
2517 	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
2518 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2519 		adapter->wol = 0;
2520 	}
2521 
2522 	device_set_wakeup_enable(&adapter->pdev->dev,
2523 				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
2524 
2525 	/* reset the hardware with the new settings */
2526 	igb_reset(adapter);
2527 
2528 	/* Init the I2C interface */
2529 	err = igb_init_i2c(adapter);
2530 	if (err) {
2531 		dev_err(&pdev->dev, "failed to init i2c interface\n");
2532 		goto err_eeprom;
2533 	}
2534 
2535 	/* let the f/w know that the h/w is now under the control of the
2536 	 * driver.
2537 	 */
2538 	igb_get_hw_control(adapter);
2539 
2540 	strcpy(netdev->name, "eth%d");
2541 	err = register_netdev(netdev);
2542 	if (err)
2543 		goto err_register;
2544 
2545 	/* carrier off reporting is important to ethtool even BEFORE open */
2546 	netif_carrier_off(netdev);
2547 
2548 #ifdef CONFIG_IGB_DCA
2549 	if (dca_add_requester(&pdev->dev) == 0) {
2550 		adapter->flags |= IGB_FLAG_DCA_ENABLED;
2551 		dev_info(&pdev->dev, "DCA enabled\n");
2552 		igb_setup_dca(adapter);
2553 	}
2554 
2555 #endif
2556 #ifdef CONFIG_IGB_HWMON
2557 	/* Initialize the thermal sensor on i350 devices. */
2558 	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
2559 		u16 ets_word;
2560 
2561 		/* Read the NVM to determine if this i350 device supports an
2562 		 * external thermal sensor.
2563 		 */
2564 		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
2565 		if (ets_word != 0x0000 && ets_word != 0xFFFF)
2566 			adapter->ets = true;
2567 		else
2568 			adapter->ets = false;
2569 		if (igb_sysfs_init(adapter))
2570 			dev_err(&pdev->dev,
2571 				"failed to allocate sysfs resources\n");
2572 	} else {
2573 		adapter->ets = false;
2574 	}
2575 #endif
2576 	/* Check if Media Autosense is enabled */
2577 	adapter->ei = *ei;
2578 	if (hw->dev_spec._82575.mas_capable)
2579 		igb_init_mas(adapter);
2580 
2581 	/* do hw tstamp init after resetting */
2582 	igb_ptp_init(adapter);
2583 
2584 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2585 	/* print bus type/speed/width info, not applicable to i354 */
2586 	if (hw->mac.type != e1000_i354) {
2587 		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
2588 			 netdev->name,
2589 			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
2590 			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
2591 			   "unknown"),
2592 			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
2593 			  "Width x4" :
2594 			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
2595 			  "Width x2" :
2596 			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
2597 			  "Width x1" : "unknown"), netdev->dev_addr);
2598 	}
2599 
2600 	if ((hw->mac.type >= e1000_i210 ||
2601 	     igb_get_flash_presence_i210(hw))) {
2602 		ret_val = igb_read_part_string(hw, part_str,
2603 					       E1000_PBANUM_LENGTH);
2604 	} else {
2605 		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
2606 	}
2607 
2608 	if (ret_val)
2609 		strcpy(part_str, "Unknown");
2610 	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
2611 	dev_info(&pdev->dev,
2612 		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2613 		(adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
2614 		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
2615 		adapter->num_rx_queues, adapter->num_tx_queues);
2616 	if (hw->phy.media_type == e1000_media_type_copper) {
2617 		switch (hw->mac.type) {
2618 		case e1000_i350:
2619 		case e1000_i210:
2620 		case e1000_i211:
2621 			/* Enable EEE for internal copper PHY devices */
2622 			err = igb_set_eee_i350(hw, true, true);
2623 			if ((!err) &&
2624 			    (!hw->dev_spec._82575.eee_disable)) {
2625 				adapter->eee_advert =
2626 					MDIO_EEE_100TX | MDIO_EEE_1000T;
2627 				adapter->flags |= IGB_FLAG_EEE;
2628 			}
2629 			break;
2630 		case e1000_i354:
2631 			if ((rd32(E1000_CTRL_EXT) &
2632 			    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
2633 				err = igb_set_eee_i354(hw, true, true);
2634 				if ((!err) &&
2635 					(!hw->dev_spec._82575.eee_disable)) {
2636 					adapter->eee_advert =
2637 					   MDIO_EEE_100TX | MDIO_EEE_1000T;
2638 					adapter->flags |= IGB_FLAG_EEE;
2639 				}
2640 			}
2641 			break;
2642 		default:
2643 			break;
2644 		}
2645 	}
2646 	pm_runtime_put_noidle(&pdev->dev);
2647 	return 0;
2648 
2649 err_register:
2650 	igb_release_hw_control(adapter);
2651 	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
2652 err_eeprom:
2653 	if (!igb_check_reset_block(hw))
2654 		igb_reset_phy(hw);
2655 
2656 	if (hw->flash_address)
2657 		iounmap(hw->flash_address);
2658 err_sw_init:
2659 	igb_clear_interrupt_scheme(adapter);
2660 	pci_iounmap(pdev, hw->hw_addr);
2661 err_ioremap:
2662 	free_netdev(netdev);
2663 err_alloc_etherdev:
2664 	pci_release_selected_regions(pdev,
2665 				     pci_select_bars(pdev, IORESOURCE_MEM));
2666 err_pci_reg:
2667 err_dma:
2668 	pci_disable_device(pdev);
2669 	return err;
2670 }
2671 
2672 #ifdef CONFIG_PCI_IOV
2673 static int igb_disable_sriov(struct pci_dev *pdev)
2674 {
2675 	struct net_device *netdev = pci_get_drvdata(pdev);
2676 	struct igb_adapter *adapter = netdev_priv(netdev);
2677 	struct e1000_hw *hw = &adapter->hw;
2678 
2679 	/* reclaim resources allocated to VFs */
2680 	if (adapter->vf_data) {
2681 		/* disable iov and allow time for transactions to clear */
2682 		if (pci_vfs_assigned(pdev)) {
2683 			dev_warn(&pdev->dev,
2684 				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
2685 			return -EPERM;
2686 		} else {
2687 			pci_disable_sriov(pdev);
2688 			msleep(500);
2689 		}
2690 
2691 		kfree(adapter->vf_data);
2692 		adapter->vf_data = NULL;
2693 		adapter->vfs_allocated_count = 0;
2694 		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
2695 		wrfl();
2696 		msleep(100);
2697 		dev_info(&pdev->dev, "IOV Disabled\n");
2698 
2699 		/* Re-enable DMA Coalescing flag since IOV is turned off */
2700 		adapter->flags |= IGB_FLAG_DMAC;
2701 	}
2702 
2703 	return 0;
2704 }
2705 
2706 static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
2707 {
2708 	struct net_device *netdev = pci_get_drvdata(pdev);
2709 	struct igb_adapter *adapter = netdev_priv(netdev);
2710 	int old_vfs = pci_num_vf(pdev);
2711 	int err = 0;
2712 	int i;
2713 
2714 	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
2715 		err = -EPERM;
2716 		goto out;
2717 	}
2718 	if (!num_vfs)
2719 		goto out;
2720 
2721 	if (old_vfs) {
2722 		dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
2723 			 old_vfs, max_vfs);
2724 		adapter->vfs_allocated_count = old_vfs;
2725 	} else
2726 		adapter->vfs_allocated_count = num_vfs;
2727 
2728 	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2729 				sizeof(struct vf_data_storage), GFP_KERNEL);
2730 
2731 	/* if allocation failed then we do not support SR-IOV */
2732 	if (!adapter->vf_data) {
2733 		adapter->vfs_allocated_count = 0;
2734 		dev_err(&pdev->dev,
2735 			"Unable to allocate memory for VF Data Storage\n");
2736 		err = -ENOMEM;
2737 		goto out;
2738 	}
2739 
2740 	/* only call pci_enable_sriov() if no VFs are allocated already */
2741 	if (!old_vfs) {
2742 		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
2743 		if (err)
2744 			goto err_out;
2745 	}
2746 	dev_info(&pdev->dev, "%d VFs allocated\n",
2747 		 adapter->vfs_allocated_count);
2748 	for (i = 0; i < adapter->vfs_allocated_count; i++)
2749 		igb_vf_configure(adapter, i);
2750 
2751 	/* DMA Coalescing is not supported in IOV mode. */
2752 	adapter->flags &= ~IGB_FLAG_DMAC;
2753 	goto out;
2754 
2755 err_out:
2756 	kfree(adapter->vf_data);
2757 	adapter->vf_data = NULL;
2758 	adapter->vfs_allocated_count = 0;
2759 out:
2760 	return err;
2761 }
2762 
2763 #endif
2764 /**
2765  *  igb_remove_i2c - Cleanup  I2C interface
2766  *  @adapter: pointer to adapter structure
2767  **/
2768 static void igb_remove_i2c(struct igb_adapter *adapter)
2769 {
2770 	/* free the adapter bus structure */
2771 	i2c_del_adapter(&adapter->i2c_adap);
2772 }
2773 
2774 /**
2775  *  igb_remove - Device Removal Routine
2776  *  @pdev: PCI device information struct
2777  *
2778  *  igb_remove is called by the PCI subsystem to alert the driver
2779  *  that it should release a PCI device.  The could be caused by a
2780  *  Hot-Plug event, or because the driver is going to be removed from
2781  *  memory.
2782  **/
2783 static void igb_remove(struct pci_dev *pdev)
2784 {
2785 	struct net_device *netdev = pci_get_drvdata(pdev);
2786 	struct igb_adapter *adapter = netdev_priv(netdev);
2787 	struct e1000_hw *hw = &adapter->hw;
2788 
2789 	pm_runtime_get_noresume(&pdev->dev);
2790 #ifdef CONFIG_IGB_HWMON
2791 	igb_sysfs_exit(adapter);
2792 #endif
2793 	igb_remove_i2c(adapter);
2794 	igb_ptp_stop(adapter);
2795 	/* The watchdog timer may be rescheduled, so explicitly
2796 	 * disable watchdog from being rescheduled.
2797 	 */
2798 	set_bit(__IGB_DOWN, &adapter->state);
2799 	del_timer_sync(&adapter->watchdog_timer);
2800 	del_timer_sync(&adapter->phy_info_timer);
2801 
2802 	cancel_work_sync(&adapter->reset_task);
2803 	cancel_work_sync(&adapter->watchdog_task);
2804 
2805 #ifdef CONFIG_IGB_DCA
2806 	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
2807 		dev_info(&pdev->dev, "DCA disabled\n");
2808 		dca_remove_requester(&pdev->dev);
2809 		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
2810 		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
2811 	}
2812 #endif
2813 
2814 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
2815 	 * would have already happened in close and is redundant.
2816 	 */
2817 	igb_release_hw_control(adapter);
2818 
2819 	unregister_netdev(netdev);
2820 
2821 	igb_clear_interrupt_scheme(adapter);
2822 
2823 #ifdef CONFIG_PCI_IOV
2824 	igb_disable_sriov(pdev);
2825 #endif
2826 
2827 	pci_iounmap(pdev, hw->hw_addr);
2828 	if (hw->flash_address)
2829 		iounmap(hw->flash_address);
2830 	pci_release_selected_regions(pdev,
2831 				     pci_select_bars(pdev, IORESOURCE_MEM));
2832 
2833 	kfree(adapter->shadow_vfta);
2834 	free_netdev(netdev);
2835 
2836 	pci_disable_pcie_error_reporting(pdev);
2837 
2838 	pci_disable_device(pdev);
2839 }
2840 
2841 /**
2842  *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2843  *  @adapter: board private structure to initialize
2844  *
2845  *  This function initializes the vf specific data storage and then attempts to
2846  *  allocate the VFs.  The reason for ordering it this way is because it is much
2847  *  mor expensive time wise to disable SR-IOV than it is to allocate and free
2848  *  the memory for the VFs.
2849  **/
2850 static void igb_probe_vfs(struct igb_adapter *adapter)
2851 {
2852 #ifdef CONFIG_PCI_IOV
2853 	struct pci_dev *pdev = adapter->pdev;
2854 	struct e1000_hw *hw = &adapter->hw;
2855 
2856 	/* Virtualization features not supported on i210 family. */
2857 	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2858 		return;
2859 
2860 	pci_sriov_set_totalvfs(pdev, 7);
2861 	igb_pci_enable_sriov(pdev, max_vfs);
2862 
2863 #endif /* CONFIG_PCI_IOV */
2864 }
2865 
2866 static void igb_init_queue_configuration(struct igb_adapter *adapter)
2867 {
2868 	struct e1000_hw *hw = &adapter->hw;
2869 	u32 max_rss_queues;
2870 
2871 	/* Determine the maximum number of RSS queues supported. */
2872 	switch (hw->mac.type) {
2873 	case e1000_i211:
2874 		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
2875 		break;
2876 	case e1000_82575:
2877 	case e1000_i210:
2878 		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2879 		break;
2880 	case e1000_i350:
2881 		/* I350 cannot do RSS and SR-IOV at the same time */
2882 		if (!!adapter->vfs_allocated_count) {
2883 			max_rss_queues = 1;
2884 			break;
2885 		}
2886 		/* fall through */
2887 	case e1000_82576:
2888 		if (!!adapter->vfs_allocated_count) {
2889 			max_rss_queues = 2;
2890 			break;
2891 		}
2892 		/* fall through */
2893 	case e1000_82580:
2894 	case e1000_i354:
2895 	default:
2896 		max_rss_queues = IGB_MAX_RX_QUEUES;
2897 		break;
2898 	}
2899 
2900 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2901 
2902 	/* Determine if we need to pair queues. */
2903 	switch (hw->mac.type) {
2904 	case e1000_82575:
2905 	case e1000_i211:
2906 		/* Device supports enough interrupts without queue pairing. */
2907 		break;
2908 	case e1000_82576:
2909 		/* If VFs are going to be allocated with RSS queues then we
2910 		 * should pair the queues in order to conserve interrupts due
2911 		 * to limited supply.
2912 		 */
2913 		if ((adapter->rss_queues > 1) &&
2914 		    (adapter->vfs_allocated_count > 6))
2915 			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2916 		/* fall through */
2917 	case e1000_82580:
2918 	case e1000_i350:
2919 	case e1000_i354:
2920 	case e1000_i210:
2921 	default:
2922 		/* If rss_queues > half of max_rss_queues, pair the queues in
2923 		 * order to conserve interrupts due to limited supply.
2924 		 */
2925 		if (adapter->rss_queues > (max_rss_queues / 2))
2926 			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2927 		break;
2928 	}
2929 }
2930 
2931 /**
2932  *  igb_sw_init - Initialize general software structures (struct igb_adapter)
2933  *  @adapter: board private structure to initialize
2934  *
2935  *  igb_sw_init initializes the Adapter private data structure.
2936  *  Fields are initialized based on PCI device information and
2937  *  OS network device settings (MTU size).
2938  **/
2939 static int igb_sw_init(struct igb_adapter *adapter)
2940 {
2941 	struct e1000_hw *hw = &adapter->hw;
2942 	struct net_device *netdev = adapter->netdev;
2943 	struct pci_dev *pdev = adapter->pdev;
2944 
2945 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2946 
2947 	/* set default ring sizes */
2948 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
2949 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
2950 
2951 	/* set default ITR values */
2952 	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2953 	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2954 
2955 	/* set default work limits */
2956 	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2957 
2958 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2959 				  VLAN_HLEN;
2960 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2961 
2962 	spin_lock_init(&adapter->stats64_lock);
2963 #ifdef CONFIG_PCI_IOV
2964 	switch (hw->mac.type) {
2965 	case e1000_82576:
2966 	case e1000_i350:
2967 		if (max_vfs > 7) {
2968 			dev_warn(&pdev->dev,
2969 				 "Maximum of 7 VFs per PF, using max\n");
2970 			max_vfs = adapter->vfs_allocated_count = 7;
2971 		} else
2972 			adapter->vfs_allocated_count = max_vfs;
2973 		if (adapter->vfs_allocated_count)
2974 			dev_warn(&pdev->dev,
2975 				 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
2976 		break;
2977 	default:
2978 		break;
2979 	}
2980 #endif /* CONFIG_PCI_IOV */
2981 
2982 	igb_init_queue_configuration(adapter);
2983 
2984 	/* Setup and initialize a copy of the hw vlan table array */
2985 	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
2986 				       GFP_ATOMIC);
2987 
2988 	/* This call may decrease the number of queues */
2989 	if (igb_init_interrupt_scheme(adapter, true)) {
2990 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2991 		return -ENOMEM;
2992 	}
2993 
2994 	igb_probe_vfs(adapter);
2995 
2996 	/* Explicitly disable IRQ since the NIC can be in any state. */
2997 	igb_irq_disable(adapter);
2998 
2999 	if (hw->mac.type >= e1000_i350)
3000 		adapter->flags &= ~IGB_FLAG_DMAC;
3001 
3002 	set_bit(__IGB_DOWN, &adapter->state);
3003 	return 0;
3004 }
3005 
3006 /**
3007  *  igb_open - Called when a network interface is made active
3008  *  @netdev: network interface device structure
3009  *
3010  *  Returns 0 on success, negative value on failure
3011  *
3012  *  The open entry point is called when a network interface is made
3013  *  active by the system (IFF_UP).  At this point all resources needed
3014  *  for transmit and receive operations are allocated, the interrupt
3015  *  handler is registered with the OS, the watchdog timer is started,
3016  *  and the stack is notified that the interface is ready.
3017  **/
3018 static int __igb_open(struct net_device *netdev, bool resuming)
3019 {
3020 	struct igb_adapter *adapter = netdev_priv(netdev);
3021 	struct e1000_hw *hw = &adapter->hw;
3022 	struct pci_dev *pdev = adapter->pdev;
3023 	int err;
3024 	int i;
3025 
3026 	/* disallow open during test */
3027 	if (test_bit(__IGB_TESTING, &adapter->state)) {
3028 		WARN_ON(resuming);
3029 		return -EBUSY;
3030 	}
3031 
3032 	if (!resuming)
3033 		pm_runtime_get_sync(&pdev->dev);
3034 
3035 	netif_carrier_off(netdev);
3036 
3037 	/* allocate transmit descriptors */
3038 	err = igb_setup_all_tx_resources(adapter);
3039 	if (err)
3040 		goto err_setup_tx;
3041 
3042 	/* allocate receive descriptors */
3043 	err = igb_setup_all_rx_resources(adapter);
3044 	if (err)
3045 		goto err_setup_rx;
3046 
3047 	igb_power_up_link(adapter);
3048 
3049 	/* before we allocate an interrupt, we must be ready to handle it.
3050 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3051 	 * as soon as we call pci_request_irq, so we have to setup our
3052 	 * clean_rx handler before we do so.
3053 	 */
3054 	igb_configure(adapter);
3055 
3056 	err = igb_request_irq(adapter);
3057 	if (err)
3058 		goto err_req_irq;
3059 
3060 	/* Notify the stack of the actual queue counts. */
3061 	err = netif_set_real_num_tx_queues(adapter->netdev,
3062 					   adapter->num_tx_queues);
3063 	if (err)
3064 		goto err_set_queues;
3065 
3066 	err = netif_set_real_num_rx_queues(adapter->netdev,
3067 					   adapter->num_rx_queues);
3068 	if (err)
3069 		goto err_set_queues;
3070 
3071 	/* From here on the code is the same as igb_up() */
3072 	clear_bit(__IGB_DOWN, &adapter->state);
3073 
3074 	for (i = 0; i < adapter->num_q_vectors; i++)
3075 		napi_enable(&(adapter->q_vector[i]->napi));
3076 
3077 	/* Clear any pending interrupts. */
3078 	rd32(E1000_ICR);
3079 
3080 	igb_irq_enable(adapter);
3081 
3082 	/* notify VFs that reset has been completed */
3083 	if (adapter->vfs_allocated_count) {
3084 		u32 reg_data = rd32(E1000_CTRL_EXT);
3085 
3086 		reg_data |= E1000_CTRL_EXT_PFRSTD;
3087 		wr32(E1000_CTRL_EXT, reg_data);
3088 	}
3089 
3090 	netif_tx_start_all_queues(netdev);
3091 
3092 	if (!resuming)
3093 		pm_runtime_put(&pdev->dev);
3094 
3095 	/* start the watchdog. */
3096 	hw->mac.get_link_status = 1;
3097 	schedule_work(&adapter->watchdog_task);
3098 
3099 	return 0;
3100 
3101 err_set_queues:
3102 	igb_free_irq(adapter);
3103 err_req_irq:
3104 	igb_release_hw_control(adapter);
3105 	igb_power_down_link(adapter);
3106 	igb_free_all_rx_resources(adapter);
3107 err_setup_rx:
3108 	igb_free_all_tx_resources(adapter);
3109 err_setup_tx:
3110 	igb_reset(adapter);
3111 	if (!resuming)
3112 		pm_runtime_put(&pdev->dev);
3113 
3114 	return err;
3115 }
3116 
3117 static int igb_open(struct net_device *netdev)
3118 {
3119 	return __igb_open(netdev, false);
3120 }
3121 
3122 /**
3123  *  igb_close - Disables a network interface
3124  *  @netdev: network interface device structure
3125  *
3126  *  Returns 0, this is not allowed to fail
3127  *
3128  *  The close entry point is called when an interface is de-activated
3129  *  by the OS.  The hardware is still under the driver's control, but
3130  *  needs to be disabled.  A global MAC reset is issued to stop the
3131  *  hardware, and all transmit and receive resources are freed.
3132  **/
3133 static int __igb_close(struct net_device *netdev, bool suspending)
3134 {
3135 	struct igb_adapter *adapter = netdev_priv(netdev);
3136 	struct pci_dev *pdev = adapter->pdev;
3137 
3138 	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
3139 
3140 	if (!suspending)
3141 		pm_runtime_get_sync(&pdev->dev);
3142 
3143 	igb_down(adapter);
3144 	igb_free_irq(adapter);
3145 
3146 	igb_free_all_tx_resources(adapter);
3147 	igb_free_all_rx_resources(adapter);
3148 
3149 	if (!suspending)
3150 		pm_runtime_put_sync(&pdev->dev);
3151 	return 0;
3152 }
3153 
3154 static int igb_close(struct net_device *netdev)
3155 {
3156 	return __igb_close(netdev, false);
3157 }
3158 
3159 /**
3160  *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
3161  *  @tx_ring: tx descriptor ring (for a specific queue) to setup
3162  *
3163  *  Return 0 on success, negative on failure
3164  **/
3165 int igb_setup_tx_resources(struct igb_ring *tx_ring)
3166 {
3167 	struct device *dev = tx_ring->dev;
3168 	int size;
3169 
3170 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3171 
3172 	tx_ring->tx_buffer_info = vzalloc(size);
3173 	if (!tx_ring->tx_buffer_info)
3174 		goto err;
3175 
3176 	/* round up to nearest 4K */
3177 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
3178 	tx_ring->size = ALIGN(tx_ring->size, 4096);
3179 
3180 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
3181 					   &tx_ring->dma, GFP_KERNEL);
3182 	if (!tx_ring->desc)
3183 		goto err;
3184 
3185 	tx_ring->next_to_use = 0;
3186 	tx_ring->next_to_clean = 0;
3187 
3188 	return 0;
3189 
3190 err:
3191 	vfree(tx_ring->tx_buffer_info);
3192 	tx_ring->tx_buffer_info = NULL;
3193 	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
3194 	return -ENOMEM;
3195 }
3196 
3197 /**
3198  *  igb_setup_all_tx_resources - wrapper to allocate Tx resources
3199  *				 (Descriptors) for all queues
3200  *  @adapter: board private structure
3201  *
3202  *  Return 0 on success, negative on failure
3203  **/
3204 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
3205 {
3206 	struct pci_dev *pdev = adapter->pdev;
3207 	int i, err = 0;
3208 
3209 	for (i = 0; i < adapter->num_tx_queues; i++) {
3210 		err = igb_setup_tx_resources(adapter->tx_ring[i]);
3211 		if (err) {
3212 			dev_err(&pdev->dev,
3213 				"Allocation for Tx Queue %u failed\n", i);
3214 			for (i--; i >= 0; i--)
3215 				igb_free_tx_resources(adapter->tx_ring[i]);
3216 			break;
3217 		}
3218 	}
3219 
3220 	return err;
3221 }
3222 
3223 /**
3224  *  igb_setup_tctl - configure the transmit control registers
3225  *  @adapter: Board private structure
3226  **/
3227 void igb_setup_tctl(struct igb_adapter *adapter)
3228 {
3229 	struct e1000_hw *hw = &adapter->hw;
3230 	u32 tctl;
3231 
3232 	/* disable queue 0 which is enabled by default on 82575 and 82576 */
3233 	wr32(E1000_TXDCTL(0), 0);
3234 
3235 	/* Program the Transmit Control Register */
3236 	tctl = rd32(E1000_TCTL);
3237 	tctl &= ~E1000_TCTL_CT;
3238 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
3239 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
3240 
3241 	igb_config_collision_dist(hw);
3242 
3243 	/* Enable transmits */
3244 	tctl |= E1000_TCTL_EN;
3245 
3246 	wr32(E1000_TCTL, tctl);
3247 }
3248 
3249 /**
3250  *  igb_configure_tx_ring - Configure transmit ring after Reset
3251  *  @adapter: board private structure
3252  *  @ring: tx ring to configure
3253  *
3254  *  Configure a transmit ring after a reset.
3255  **/
3256 void igb_configure_tx_ring(struct igb_adapter *adapter,
3257 			   struct igb_ring *ring)
3258 {
3259 	struct e1000_hw *hw = &adapter->hw;
3260 	u32 txdctl = 0;
3261 	u64 tdba = ring->dma;
3262 	int reg_idx = ring->reg_idx;
3263 
3264 	/* disable the queue */
3265 	wr32(E1000_TXDCTL(reg_idx), 0);
3266 	wrfl();
3267 	mdelay(10);
3268 
3269 	wr32(E1000_TDLEN(reg_idx),
3270 	     ring->count * sizeof(union e1000_adv_tx_desc));
3271 	wr32(E1000_TDBAL(reg_idx),
3272 	     tdba & 0x00000000ffffffffULL);
3273 	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
3274 
3275 	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
3276 	wr32(E1000_TDH(reg_idx), 0);
3277 	writel(0, ring->tail);
3278 
3279 	txdctl |= IGB_TX_PTHRESH;
3280 	txdctl |= IGB_TX_HTHRESH << 8;
3281 	txdctl |= IGB_TX_WTHRESH << 16;
3282 
3283 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
3284 	wr32(E1000_TXDCTL(reg_idx), txdctl);
3285 }
3286 
3287 /**
3288  *  igb_configure_tx - Configure transmit Unit after Reset
3289  *  @adapter: board private structure
3290  *
3291  *  Configure the Tx unit of the MAC after a reset.
3292  **/
3293 static void igb_configure_tx(struct igb_adapter *adapter)
3294 {
3295 	int i;
3296 
3297 	for (i = 0; i < adapter->num_tx_queues; i++)
3298 		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
3299 }
3300 
3301 /**
3302  *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
3303  *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
3304  *
3305  *  Returns 0 on success, negative on failure
3306  **/
3307 int igb_setup_rx_resources(struct igb_ring *rx_ring)
3308 {
3309 	struct device *dev = rx_ring->dev;
3310 	int size;
3311 
3312 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3313 
3314 	rx_ring->rx_buffer_info = vzalloc(size);
3315 	if (!rx_ring->rx_buffer_info)
3316 		goto err;
3317 
3318 	/* Round up to nearest 4K */
3319 	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
3320 	rx_ring->size = ALIGN(rx_ring->size, 4096);
3321 
3322 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
3323 					   &rx_ring->dma, GFP_KERNEL);
3324 	if (!rx_ring->desc)
3325 		goto err;
3326 
3327 	rx_ring->next_to_alloc = 0;
3328 	rx_ring->next_to_clean = 0;
3329 	rx_ring->next_to_use = 0;
3330 
3331 	return 0;
3332 
3333 err:
3334 	vfree(rx_ring->rx_buffer_info);
3335 	rx_ring->rx_buffer_info = NULL;
3336 	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
3337 	return -ENOMEM;
3338 }
3339 
3340 /**
3341  *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
3342  *				 (Descriptors) for all queues
3343  *  @adapter: board private structure
3344  *
3345  *  Return 0 on success, negative on failure
3346  **/
3347 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3348 {
3349 	struct pci_dev *pdev = adapter->pdev;
3350 	int i, err = 0;
3351 
3352 	for (i = 0; i < adapter->num_rx_queues; i++) {
3353 		err = igb_setup_rx_resources(adapter->rx_ring[i]);
3354 		if (err) {
3355 			dev_err(&pdev->dev,
3356 				"Allocation for Rx Queue %u failed\n", i);
3357 			for (i--; i >= 0; i--)
3358 				igb_free_rx_resources(adapter->rx_ring[i]);
3359 			break;
3360 		}
3361 	}
3362 
3363 	return err;
3364 }
3365 
3366 /**
3367  *  igb_setup_mrqc - configure the multiple receive queue control registers
3368  *  @adapter: Board private structure
3369  **/
3370 static void igb_setup_mrqc(struct igb_adapter *adapter)
3371 {
3372 	struct e1000_hw *hw = &adapter->hw;
3373 	u32 mrqc, rxcsum;
3374 	u32 j, num_rx_queues;
3375 	static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
3376 					0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
3377 					0xA32DCB77, 0x0CF23080, 0x3BB7426A,
3378 					0xFA01ACBE };
3379 
3380 	/* Fill out hash function seeds */
3381 	for (j = 0; j < 10; j++)
3382 		wr32(E1000_RSSRK(j), rsskey[j]);
3383 
3384 	num_rx_queues = adapter->rss_queues;
3385 
3386 	switch (hw->mac.type) {
3387 	case e1000_82576:
3388 		/* 82576 supports 2 RSS queues for SR-IOV */
3389 		if (adapter->vfs_allocated_count)
3390 			num_rx_queues = 2;
3391 		break;
3392 	default:
3393 		break;
3394 	}
3395 
3396 	if (adapter->rss_indir_tbl_init != num_rx_queues) {
3397 		for (j = 0; j < IGB_RETA_SIZE; j++)
3398 			adapter->rss_indir_tbl[j] =
3399 			(j * num_rx_queues) / IGB_RETA_SIZE;
3400 		adapter->rss_indir_tbl_init = num_rx_queues;
3401 	}
3402 	igb_write_rss_indir_tbl(adapter);
3403 
3404 	/* Disable raw packet checksumming so that RSS hash is placed in
3405 	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
3406 	 * offloads as they are enabled by default
3407 	 */
3408 	rxcsum = rd32(E1000_RXCSUM);
3409 	rxcsum |= E1000_RXCSUM_PCSD;
3410 
3411 	if (adapter->hw.mac.type >= e1000_82576)
3412 		/* Enable Receive Checksum Offload for SCTP */
3413 		rxcsum |= E1000_RXCSUM_CRCOFL;
3414 
3415 	/* Don't need to set TUOFL or IPOFL, they default to 1 */
3416 	wr32(E1000_RXCSUM, rxcsum);
3417 
3418 	/* Generate RSS hash based on packet types, TCP/UDP
3419 	 * port numbers and/or IPv4/v6 src and dst addresses
3420 	 */
3421 	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
3422 	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
3423 	       E1000_MRQC_RSS_FIELD_IPV6 |
3424 	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
3425 	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
3426 
3427 	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
3428 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
3429 	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
3430 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
3431 
3432 	/* If VMDq is enabled then we set the appropriate mode for that, else
3433 	 * we default to RSS so that an RSS hash is calculated per packet even
3434 	 * if we are only using one queue
3435 	 */
3436 	if (adapter->vfs_allocated_count) {
3437 		if (hw->mac.type > e1000_82575) {
3438 			/* Set the default pool for the PF's first queue */
3439 			u32 vtctl = rd32(E1000_VT_CTL);
3440 
3441 			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3442 				   E1000_VT_CTL_DISABLE_DEF_POOL);
3443 			vtctl |= adapter->vfs_allocated_count <<
3444 				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
3445 			wr32(E1000_VT_CTL, vtctl);
3446 		}
3447 		if (adapter->rss_queues > 1)
3448 			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
3449 		else
3450 			mrqc |= E1000_MRQC_ENABLE_VMDQ;
3451 	} else {
3452 		if (hw->mac.type != e1000_i211)
3453 			mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
3454 	}
3455 	igb_vmm_control(adapter);
3456 
3457 	wr32(E1000_MRQC, mrqc);
3458 }
3459 
3460 /**
3461  *  igb_setup_rctl - configure the receive control registers
3462  *  @adapter: Board private structure
3463  **/
3464 void igb_setup_rctl(struct igb_adapter *adapter)
3465 {
3466 	struct e1000_hw *hw = &adapter->hw;
3467 	u32 rctl;
3468 
3469 	rctl = rd32(E1000_RCTL);
3470 
3471 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3472 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
3473 
3474 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
3475 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3476 
3477 	/* enable stripping of CRC. It's unlikely this will break BMC
3478 	 * redirection as it did with e1000. Newer features require
3479 	 * that the HW strips the CRC.
3480 	 */
3481 	rctl |= E1000_RCTL_SECRC;
3482 
3483 	/* disable store bad packets and clear size bits. */
3484 	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
3485 
3486 	/* enable LPE to prevent packets larger than max_frame_size */
3487 	rctl |= E1000_RCTL_LPE;
3488 
3489 	/* disable queue 0 to prevent tail write w/o re-config */
3490 	wr32(E1000_RXDCTL(0), 0);
3491 
3492 	/* Attention!!!  For SR-IOV PF driver operations you must enable
3493 	 * queue drop for all VF and PF queues to prevent head of line blocking
3494 	 * if an un-trusted VF does not provide descriptors to hardware.
3495 	 */
3496 	if (adapter->vfs_allocated_count) {
3497 		/* set all queue drop enable bits */
3498 		wr32(E1000_QDE, ALL_QUEUES);
3499 	}
3500 
3501 	/* This is useful for sniffing bad packets. */
3502 	if (adapter->netdev->features & NETIF_F_RXALL) {
3503 		/* UPE and MPE will be handled by normal PROMISC logic
3504 		 * in e1000e_set_rx_mode
3505 		 */
3506 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3507 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3508 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3509 
3510 		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3511 			  E1000_RCTL_DPF | /* Allow filtered pause */
3512 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3513 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3514 		 * and that breaks VLANs.
3515 		 */
3516 	}
3517 
3518 	wr32(E1000_RCTL, rctl);
3519 }
3520 
3521 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3522 				   int vfn)
3523 {
3524 	struct e1000_hw *hw = &adapter->hw;
3525 	u32 vmolr;
3526 
3527 	/* if it isn't the PF check to see if VFs are enabled and
3528 	 * increase the size to support vlan tags
3529 	 */
3530 	if (vfn < adapter->vfs_allocated_count &&
3531 	    adapter->vf_data[vfn].vlans_enabled)
3532 		size += VLAN_TAG_SIZE;
3533 
3534 	vmolr = rd32(E1000_VMOLR(vfn));
3535 	vmolr &= ~E1000_VMOLR_RLPML_MASK;
3536 	vmolr |= size | E1000_VMOLR_LPE;
3537 	wr32(E1000_VMOLR(vfn), vmolr);
3538 
3539 	return 0;
3540 }
3541 
3542 /**
3543  *  igb_rlpml_set - set maximum receive packet size
3544  *  @adapter: board private structure
3545  *
3546  *  Configure maximum receivable packet size.
3547  **/
3548 static void igb_rlpml_set(struct igb_adapter *adapter)
3549 {
3550 	u32 max_frame_size = adapter->max_frame_size;
3551 	struct e1000_hw *hw = &adapter->hw;
3552 	u16 pf_id = adapter->vfs_allocated_count;
3553 
3554 	if (pf_id) {
3555 		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
3556 		/* If we're in VMDQ or SR-IOV mode, then set global RLPML
3557 		 * to our max jumbo frame size, in case we need to enable
3558 		 * jumbo frames on one of the rings later.
3559 		 * This will not pass over-length frames into the default
3560 		 * queue because it's gated by the VMOLR.RLPML.
3561 		 */
3562 		max_frame_size = MAX_JUMBO_FRAME_SIZE;
3563 	}
3564 
3565 	wr32(E1000_RLPML, max_frame_size);
3566 }
3567 
3568 static inline void igb_set_vmolr(struct igb_adapter *adapter,
3569 				 int vfn, bool aupe)
3570 {
3571 	struct e1000_hw *hw = &adapter->hw;
3572 	u32 vmolr;
3573 
3574 	/* This register exists only on 82576 and newer so if we are older then
3575 	 * we should exit and do nothing
3576 	 */
3577 	if (hw->mac.type < e1000_82576)
3578 		return;
3579 
3580 	vmolr = rd32(E1000_VMOLR(vfn));
3581 	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3582 	if (hw->mac.type == e1000_i350) {
3583 		u32 dvmolr;
3584 
3585 		dvmolr = rd32(E1000_DVMOLR(vfn));
3586 		dvmolr |= E1000_DVMOLR_STRVLAN;
3587 		wr32(E1000_DVMOLR(vfn), dvmolr);
3588 	}
3589 	if (aupe)
3590 		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3591 	else
3592 		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
3593 
3594 	/* clear all bits that might not be set */
3595 	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3596 
3597 	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
3598 		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3599 	/* for VMDq only allow the VFs and pool 0 to accept broadcast and
3600 	 * multicast packets
3601 	 */
3602 	if (vfn <= adapter->vfs_allocated_count)
3603 		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3604 
3605 	wr32(E1000_VMOLR(vfn), vmolr);
3606 }
3607 
3608 /**
3609  *  igb_configure_rx_ring - Configure a receive ring after Reset
3610  *  @adapter: board private structure
3611  *  @ring: receive ring to be configured
3612  *
3613  *  Configure the Rx unit of the MAC after a reset.
3614  **/
3615 void igb_configure_rx_ring(struct igb_adapter *adapter,
3616 			   struct igb_ring *ring)
3617 {
3618 	struct e1000_hw *hw = &adapter->hw;
3619 	u64 rdba = ring->dma;
3620 	int reg_idx = ring->reg_idx;
3621 	u32 srrctl = 0, rxdctl = 0;
3622 
3623 	/* disable the queue */
3624 	wr32(E1000_RXDCTL(reg_idx), 0);
3625 
3626 	/* Set DMA base address registers */
3627 	wr32(E1000_RDBAL(reg_idx),
3628 	     rdba & 0x00000000ffffffffULL);
3629 	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3630 	wr32(E1000_RDLEN(reg_idx),
3631 	     ring->count * sizeof(union e1000_adv_rx_desc));
3632 
3633 	/* initialize head and tail */
3634 	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
3635 	wr32(E1000_RDH(reg_idx), 0);
3636 	writel(0, ring->tail);
3637 
3638 	/* set descriptor configuration */
3639 	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
3640 	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3641 	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3642 	if (hw->mac.type >= e1000_82580)
3643 		srrctl |= E1000_SRRCTL_TIMESTAMP;
3644 	/* Only set Drop Enable if we are supporting multiple queues */
3645 	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3646 		srrctl |= E1000_SRRCTL_DROP_EN;
3647 
3648 	wr32(E1000_SRRCTL(reg_idx), srrctl);
3649 
3650 	/* set filtering for VMDQ pools */
3651 	igb_set_vmolr(adapter, reg_idx & 0x7, true);
3652 
3653 	rxdctl |= IGB_RX_PTHRESH;
3654 	rxdctl |= IGB_RX_HTHRESH << 8;
3655 	rxdctl |= IGB_RX_WTHRESH << 16;
3656 
3657 	/* enable receive descriptor fetching */
3658 	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3659 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
3660 }
3661 
3662 /**
3663  *  igb_configure_rx - Configure receive Unit after Reset
3664  *  @adapter: board private structure
3665  *
3666  *  Configure the Rx unit of the MAC after a reset.
3667  **/
3668 static void igb_configure_rx(struct igb_adapter *adapter)
3669 {
3670 	int i;
3671 
3672 	/* set UTA to appropriate mode */
3673 	igb_set_uta(adapter);
3674 
3675 	/* set the correct pool for the PF default MAC address in entry 0 */
3676 	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3677 			 adapter->vfs_allocated_count);
3678 
3679 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
3680 	 * the Base and Length of the Rx Descriptor Ring
3681 	 */
3682 	for (i = 0; i < adapter->num_rx_queues; i++)
3683 		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
3684 }
3685 
3686 /**
3687  *  igb_free_tx_resources - Free Tx Resources per Queue
3688  *  @tx_ring: Tx descriptor ring for a specific queue
3689  *
3690  *  Free all transmit software resources
3691  **/
3692 void igb_free_tx_resources(struct igb_ring *tx_ring)
3693 {
3694 	igb_clean_tx_ring(tx_ring);
3695 
3696 	vfree(tx_ring->tx_buffer_info);
3697 	tx_ring->tx_buffer_info = NULL;
3698 
3699 	/* if not set, then don't free */
3700 	if (!tx_ring->desc)
3701 		return;
3702 
3703 	dma_free_coherent(tx_ring->dev, tx_ring->size,
3704 			  tx_ring->desc, tx_ring->dma);
3705 
3706 	tx_ring->desc = NULL;
3707 }
3708 
3709 /**
3710  *  igb_free_all_tx_resources - Free Tx Resources for All Queues
3711  *  @adapter: board private structure
3712  *
3713  *  Free all transmit software resources
3714  **/
3715 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3716 {
3717 	int i;
3718 
3719 	for (i = 0; i < adapter->num_tx_queues; i++)
3720 		igb_free_tx_resources(adapter->tx_ring[i]);
3721 }
3722 
3723 void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3724 				    struct igb_tx_buffer *tx_buffer)
3725 {
3726 	if (tx_buffer->skb) {
3727 		dev_kfree_skb_any(tx_buffer->skb);
3728 		if (dma_unmap_len(tx_buffer, len))
3729 			dma_unmap_single(ring->dev,
3730 					 dma_unmap_addr(tx_buffer, dma),
3731 					 dma_unmap_len(tx_buffer, len),
3732 					 DMA_TO_DEVICE);
3733 	} else if (dma_unmap_len(tx_buffer, len)) {
3734 		dma_unmap_page(ring->dev,
3735 			       dma_unmap_addr(tx_buffer, dma),
3736 			       dma_unmap_len(tx_buffer, len),
3737 			       DMA_TO_DEVICE);
3738 	}
3739 	tx_buffer->next_to_watch = NULL;
3740 	tx_buffer->skb = NULL;
3741 	dma_unmap_len_set(tx_buffer, len, 0);
3742 	/* buffer_info must be completely set up in the transmit path */
3743 }
3744 
3745 /**
3746  *  igb_clean_tx_ring - Free Tx Buffers
3747  *  @tx_ring: ring to be cleaned
3748  **/
3749 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3750 {
3751 	struct igb_tx_buffer *buffer_info;
3752 	unsigned long size;
3753 	u16 i;
3754 
3755 	if (!tx_ring->tx_buffer_info)
3756 		return;
3757 	/* Free all the Tx ring sk_buffs */
3758 
3759 	for (i = 0; i < tx_ring->count; i++) {
3760 		buffer_info = &tx_ring->tx_buffer_info[i];
3761 		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3762 	}
3763 
3764 	netdev_tx_reset_queue(txring_txq(tx_ring));
3765 
3766 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3767 	memset(tx_ring->tx_buffer_info, 0, size);
3768 
3769 	/* Zero out the descriptor ring */
3770 	memset(tx_ring->desc, 0, tx_ring->size);
3771 
3772 	tx_ring->next_to_use = 0;
3773 	tx_ring->next_to_clean = 0;
3774 }
3775 
3776 /**
3777  *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
3778  *  @adapter: board private structure
3779  **/
3780 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3781 {
3782 	int i;
3783 
3784 	for (i = 0; i < adapter->num_tx_queues; i++)
3785 		igb_clean_tx_ring(adapter->tx_ring[i]);
3786 }
3787 
3788 /**
3789  *  igb_free_rx_resources - Free Rx Resources
3790  *  @rx_ring: ring to clean the resources from
3791  *
3792  *  Free all receive software resources
3793  **/
3794 void igb_free_rx_resources(struct igb_ring *rx_ring)
3795 {
3796 	igb_clean_rx_ring(rx_ring);
3797 
3798 	vfree(rx_ring->rx_buffer_info);
3799 	rx_ring->rx_buffer_info = NULL;
3800 
3801 	/* if not set, then don't free */
3802 	if (!rx_ring->desc)
3803 		return;
3804 
3805 	dma_free_coherent(rx_ring->dev, rx_ring->size,
3806 			  rx_ring->desc, rx_ring->dma);
3807 
3808 	rx_ring->desc = NULL;
3809 }
3810 
3811 /**
3812  *  igb_free_all_rx_resources - Free Rx Resources for All Queues
3813  *  @adapter: board private structure
3814  *
3815  *  Free all receive software resources
3816  **/
3817 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3818 {
3819 	int i;
3820 
3821 	for (i = 0; i < adapter->num_rx_queues; i++)
3822 		igb_free_rx_resources(adapter->rx_ring[i]);
3823 }
3824 
3825 /**
3826  *  igb_clean_rx_ring - Free Rx Buffers per Queue
3827  *  @rx_ring: ring to free buffers from
3828  **/
3829 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3830 {
3831 	unsigned long size;
3832 	u16 i;
3833 
3834 	if (rx_ring->skb)
3835 		dev_kfree_skb(rx_ring->skb);
3836 	rx_ring->skb = NULL;
3837 
3838 	if (!rx_ring->rx_buffer_info)
3839 		return;
3840 
3841 	/* Free all the Rx ring sk_buffs */
3842 	for (i = 0; i < rx_ring->count; i++) {
3843 		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
3844 
3845 		if (!buffer_info->page)
3846 			continue;
3847 
3848 		dma_unmap_page(rx_ring->dev,
3849 			       buffer_info->dma,
3850 			       PAGE_SIZE,
3851 			       DMA_FROM_DEVICE);
3852 		__free_page(buffer_info->page);
3853 
3854 		buffer_info->page = NULL;
3855 	}
3856 
3857 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3858 	memset(rx_ring->rx_buffer_info, 0, size);
3859 
3860 	/* Zero out the descriptor ring */
3861 	memset(rx_ring->desc, 0, rx_ring->size);
3862 
3863 	rx_ring->next_to_alloc = 0;
3864 	rx_ring->next_to_clean = 0;
3865 	rx_ring->next_to_use = 0;
3866 }
3867 
3868 /**
3869  *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
3870  *  @adapter: board private structure
3871  **/
3872 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3873 {
3874 	int i;
3875 
3876 	for (i = 0; i < adapter->num_rx_queues; i++)
3877 		igb_clean_rx_ring(adapter->rx_ring[i]);
3878 }
3879 
3880 /**
3881  *  igb_set_mac - Change the Ethernet Address of the NIC
3882  *  @netdev: network interface device structure
3883  *  @p: pointer to an address structure
3884  *
3885  *  Returns 0 on success, negative on failure
3886  **/
3887 static int igb_set_mac(struct net_device *netdev, void *p)
3888 {
3889 	struct igb_adapter *adapter = netdev_priv(netdev);
3890 	struct e1000_hw *hw = &adapter->hw;
3891 	struct sockaddr *addr = p;
3892 
3893 	if (!is_valid_ether_addr(addr->sa_data))
3894 		return -EADDRNOTAVAIL;
3895 
3896 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3897 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3898 
3899 	/* set the correct pool for the new PF MAC address in entry 0 */
3900 	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3901 			 adapter->vfs_allocated_count);
3902 
3903 	return 0;
3904 }
3905 
3906 /**
3907  *  igb_write_mc_addr_list - write multicast addresses to MTA
3908  *  @netdev: network interface device structure
3909  *
3910  *  Writes multicast address list to the MTA hash table.
3911  *  Returns: -ENOMEM on failure
3912  *           0 on no addresses written
3913  *           X on writing X addresses to MTA
3914  **/
3915 static int igb_write_mc_addr_list(struct net_device *netdev)
3916 {
3917 	struct igb_adapter *adapter = netdev_priv(netdev);
3918 	struct e1000_hw *hw = &adapter->hw;
3919 	struct netdev_hw_addr *ha;
3920 	u8  *mta_list;
3921 	int i;
3922 
3923 	if (netdev_mc_empty(netdev)) {
3924 		/* nothing to program, so clear mc list */
3925 		igb_update_mc_addr_list(hw, NULL, 0);
3926 		igb_restore_vf_multicasts(adapter);
3927 		return 0;
3928 	}
3929 
3930 	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
3931 	if (!mta_list)
3932 		return -ENOMEM;
3933 
3934 	/* The shared function expects a packed array of only addresses. */
3935 	i = 0;
3936 	netdev_for_each_mc_addr(ha, netdev)
3937 		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3938 
3939 	igb_update_mc_addr_list(hw, mta_list, i);
3940 	kfree(mta_list);
3941 
3942 	return netdev_mc_count(netdev);
3943 }
3944 
3945 /**
3946  *  igb_write_uc_addr_list - write unicast addresses to RAR table
3947  *  @netdev: network interface device structure
3948  *
3949  *  Writes unicast address list to the RAR table.
3950  *  Returns: -ENOMEM on failure/insufficient address space
3951  *           0 on no addresses written
3952  *           X on writing X addresses to the RAR table
3953  **/
3954 static int igb_write_uc_addr_list(struct net_device *netdev)
3955 {
3956 	struct igb_adapter *adapter = netdev_priv(netdev);
3957 	struct e1000_hw *hw = &adapter->hw;
3958 	unsigned int vfn = adapter->vfs_allocated_count;
3959 	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3960 	int count = 0;
3961 
3962 	/* return ENOMEM indicating insufficient memory for addresses */
3963 	if (netdev_uc_count(netdev) > rar_entries)
3964 		return -ENOMEM;
3965 
3966 	if (!netdev_uc_empty(netdev) && rar_entries) {
3967 		struct netdev_hw_addr *ha;
3968 
3969 		netdev_for_each_uc_addr(ha, netdev) {
3970 			if (!rar_entries)
3971 				break;
3972 			igb_rar_set_qsel(adapter, ha->addr,
3973 					 rar_entries--,
3974 					 vfn);
3975 			count++;
3976 		}
3977 	}
3978 	/* write the addresses in reverse order to avoid write combining */
3979 	for (; rar_entries > 0 ; rar_entries--) {
3980 		wr32(E1000_RAH(rar_entries), 0);
3981 		wr32(E1000_RAL(rar_entries), 0);
3982 	}
3983 	wrfl();
3984 
3985 	return count;
3986 }
3987 
3988 /**
3989  *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3990  *  @netdev: network interface device structure
3991  *
3992  *  The set_rx_mode entry point is called whenever the unicast or multicast
3993  *  address lists or the network interface flags are updated.  This routine is
3994  *  responsible for configuring the hardware for proper unicast, multicast,
3995  *  promiscuous mode, and all-multi behavior.
3996  **/
3997 static void igb_set_rx_mode(struct net_device *netdev)
3998 {
3999 	struct igb_adapter *adapter = netdev_priv(netdev);
4000 	struct e1000_hw *hw = &adapter->hw;
4001 	unsigned int vfn = adapter->vfs_allocated_count;
4002 	u32 rctl, vmolr = 0;
4003 	int count;
4004 
4005 	/* Check for Promiscuous and All Multicast modes */
4006 	rctl = rd32(E1000_RCTL);
4007 
4008 	/* clear the effected bits */
4009 	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
4010 
4011 	if (netdev->flags & IFF_PROMISC) {
4012 		/* retain VLAN HW filtering if in VT mode */
4013 		if (adapter->vfs_allocated_count)
4014 			rctl |= E1000_RCTL_VFE;
4015 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
4016 		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
4017 	} else {
4018 		if (netdev->flags & IFF_ALLMULTI) {
4019 			rctl |= E1000_RCTL_MPE;
4020 			vmolr |= E1000_VMOLR_MPME;
4021 		} else {
4022 			/* Write addresses to the MTA, if the attempt fails
4023 			 * then we should just turn on promiscuous mode so
4024 			 * that we can at least receive multicast traffic
4025 			 */
4026 			count = igb_write_mc_addr_list(netdev);
4027 			if (count < 0) {
4028 				rctl |= E1000_RCTL_MPE;
4029 				vmolr |= E1000_VMOLR_MPME;
4030 			} else if (count) {
4031 				vmolr |= E1000_VMOLR_ROMPE;
4032 			}
4033 		}
4034 		/* Write addresses to available RAR registers, if there is not
4035 		 * sufficient space to store all the addresses then enable
4036 		 * unicast promiscuous mode
4037 		 */
4038 		count = igb_write_uc_addr_list(netdev);
4039 		if (count < 0) {
4040 			rctl |= E1000_RCTL_UPE;
4041 			vmolr |= E1000_VMOLR_ROPE;
4042 		}
4043 		rctl |= E1000_RCTL_VFE;
4044 	}
4045 	wr32(E1000_RCTL, rctl);
4046 
4047 	/* In order to support SR-IOV and eventually VMDq it is necessary to set
4048 	 * the VMOLR to enable the appropriate modes.  Without this workaround
4049 	 * we will have issues with VLAN tag stripping not being done for frames
4050 	 * that are only arriving because we are the default pool
4051 	 */
4052 	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
4053 		return;
4054 
4055 	vmolr |= rd32(E1000_VMOLR(vfn)) &
4056 		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
4057 	wr32(E1000_VMOLR(vfn), vmolr);
4058 	igb_restore_vf_multicasts(adapter);
4059 }
4060 
4061 static void igb_check_wvbr(struct igb_adapter *adapter)
4062 {
4063 	struct e1000_hw *hw = &adapter->hw;
4064 	u32 wvbr = 0;
4065 
4066 	switch (hw->mac.type) {
4067 	case e1000_82576:
4068 	case e1000_i350:
4069 		wvbr = rd32(E1000_WVBR);
4070 		if (!wvbr)
4071 			return;
4072 		break;
4073 	default:
4074 		break;
4075 	}
4076 
4077 	adapter->wvbr |= wvbr;
4078 }
4079 
4080 #define IGB_STAGGERED_QUEUE_OFFSET 8
4081 
4082 static void igb_spoof_check(struct igb_adapter *adapter)
4083 {
4084 	int j;
4085 
4086 	if (!adapter->wvbr)
4087 		return;
4088 
4089 	for (j = 0; j < adapter->vfs_allocated_count; j++) {
4090 		if (adapter->wvbr & (1 << j) ||
4091 		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
4092 			dev_warn(&adapter->pdev->dev,
4093 				"Spoof event(s) detected on VF %d\n", j);
4094 			adapter->wvbr &=
4095 				~((1 << j) |
4096 				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
4097 		}
4098 	}
4099 }
4100 
4101 /* Need to wait a few seconds after link up to get diagnostic information from
4102  * the phy
4103  */
4104 static void igb_update_phy_info(unsigned long data)
4105 {
4106 	struct igb_adapter *adapter = (struct igb_adapter *) data;
4107 	igb_get_phy_info(&adapter->hw);
4108 }
4109 
4110 /**
4111  *  igb_has_link - check shared code for link and determine up/down
4112  *  @adapter: pointer to driver private info
4113  **/
4114 bool igb_has_link(struct igb_adapter *adapter)
4115 {
4116 	struct e1000_hw *hw = &adapter->hw;
4117 	bool link_active = false;
4118 
4119 	/* get_link_status is set on LSC (link status) interrupt or
4120 	 * rx sequence error interrupt.  get_link_status will stay
4121 	 * false until the e1000_check_for_link establishes link
4122 	 * for copper adapters ONLY
4123 	 */
4124 	switch (hw->phy.media_type) {
4125 	case e1000_media_type_copper:
4126 		if (!hw->mac.get_link_status)
4127 			return true;
4128 	case e1000_media_type_internal_serdes:
4129 		hw->mac.ops.check_for_link(hw);
4130 		link_active = !hw->mac.get_link_status;
4131 		break;
4132 	default:
4133 	case e1000_media_type_unknown:
4134 		break;
4135 	}
4136 
4137 	if (((hw->mac.type == e1000_i210) ||
4138 	     (hw->mac.type == e1000_i211)) &&
4139 	     (hw->phy.id == I210_I_PHY_ID)) {
4140 		if (!netif_carrier_ok(adapter->netdev)) {
4141 			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
4142 		} else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
4143 			adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
4144 			adapter->link_check_timeout = jiffies;
4145 		}
4146 	}
4147 
4148 	return link_active;
4149 }
4150 
4151 static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
4152 {
4153 	bool ret = false;
4154 	u32 ctrl_ext, thstat;
4155 
4156 	/* check for thermal sensor event on i350 copper only */
4157 	if (hw->mac.type == e1000_i350) {
4158 		thstat = rd32(E1000_THSTAT);
4159 		ctrl_ext = rd32(E1000_CTRL_EXT);
4160 
4161 		if ((hw->phy.media_type == e1000_media_type_copper) &&
4162 		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
4163 			ret = !!(thstat & event);
4164 	}
4165 
4166 	return ret;
4167 }
4168 
4169 /**
4170  *  igb_check_lvmmc - check for malformed packets received
4171  *  and indicated in LVMMC register
4172  *  @adapter: pointer to adapter
4173  **/
4174 static void igb_check_lvmmc(struct igb_adapter *adapter)
4175 {
4176 	struct e1000_hw *hw = &adapter->hw;
4177 	u32 lvmmc;
4178 
4179 	lvmmc = rd32(E1000_LVMMC);
4180 	if (lvmmc) {
4181 		if (unlikely(net_ratelimit())) {
4182 			netdev_warn(adapter->netdev,
4183 				    "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
4184 				    lvmmc);
4185 		}
4186 	}
4187 }
4188 
4189 /**
4190  *  igb_watchdog - Timer Call-back
4191  *  @data: pointer to adapter cast into an unsigned long
4192  **/
4193 static void igb_watchdog(unsigned long data)
4194 {
4195 	struct igb_adapter *adapter = (struct igb_adapter *)data;
4196 	/* Do the rest outside of interrupt context */
4197 	schedule_work(&adapter->watchdog_task);
4198 }
4199 
4200 static void igb_watchdog_task(struct work_struct *work)
4201 {
4202 	struct igb_adapter *adapter = container_of(work,
4203 						   struct igb_adapter,
4204 						   watchdog_task);
4205 	struct e1000_hw *hw = &adapter->hw;
4206 	struct e1000_phy_info *phy = &hw->phy;
4207 	struct net_device *netdev = adapter->netdev;
4208 	u32 link;
4209 	int i;
4210 	u32 connsw;
4211 
4212 	link = igb_has_link(adapter);
4213 
4214 	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
4215 		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4216 			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
4217 		else
4218 			link = false;
4219 	}
4220 
4221 	/* Force link down if we have fiber to swap to */
4222 	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
4223 		if (hw->phy.media_type == e1000_media_type_copper) {
4224 			connsw = rd32(E1000_CONNSW);
4225 			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
4226 				link = 0;
4227 		}
4228 	}
4229 	if (link) {
4230 		/* Perform a reset if the media type changed. */
4231 		if (hw->dev_spec._82575.media_changed) {
4232 			hw->dev_spec._82575.media_changed = false;
4233 			adapter->flags |= IGB_FLAG_MEDIA_RESET;
4234 			igb_reset(adapter);
4235 		}
4236 		/* Cancel scheduled suspend requests. */
4237 		pm_runtime_resume(netdev->dev.parent);
4238 
4239 		if (!netif_carrier_ok(netdev)) {
4240 			u32 ctrl;
4241 
4242 			hw->mac.ops.get_speed_and_duplex(hw,
4243 							 &adapter->link_speed,
4244 							 &adapter->link_duplex);
4245 
4246 			ctrl = rd32(E1000_CTRL);
4247 			/* Links status message must follow this format */
4248 			netdev_info(netdev,
4249 			       "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4250 			       netdev->name,
4251 			       adapter->link_speed,
4252 			       adapter->link_duplex == FULL_DUPLEX ?
4253 			       "Full" : "Half",
4254 			       (ctrl & E1000_CTRL_TFCE) &&
4255 			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
4256 			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :
4257 			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
4258 
4259 			/* disable EEE if enabled */
4260 			if ((adapter->flags & IGB_FLAG_EEE) &&
4261 				(adapter->link_duplex == HALF_DUPLEX)) {
4262 				dev_info(&adapter->pdev->dev,
4263 				"EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
4264 				adapter->hw.dev_spec._82575.eee_disable = true;
4265 				adapter->flags &= ~IGB_FLAG_EEE;
4266 			}
4267 
4268 			/* check if SmartSpeed worked */
4269 			igb_check_downshift(hw);
4270 			if (phy->speed_downgraded)
4271 				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
4272 
4273 			/* check for thermal sensor event */
4274 			if (igb_thermal_sensor_event(hw,
4275 			    E1000_THSTAT_LINK_THROTTLE))
4276 				netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
4277 
4278 			/* adjust timeout factor according to speed/duplex */
4279 			adapter->tx_timeout_factor = 1;
4280 			switch (adapter->link_speed) {
4281 			case SPEED_10:
4282 				adapter->tx_timeout_factor = 14;
4283 				break;
4284 			case SPEED_100:
4285 				/* maybe add some timeout factor ? */
4286 				break;
4287 			}
4288 
4289 			netif_carrier_on(netdev);
4290 
4291 			igb_ping_all_vfs(adapter);
4292 			igb_check_vf_rate_limit(adapter);
4293 
4294 			/* link state has changed, schedule phy info update */
4295 			if (!test_bit(__IGB_DOWN, &adapter->state))
4296 				mod_timer(&adapter->phy_info_timer,
4297 					  round_jiffies(jiffies + 2 * HZ));
4298 		}
4299 	} else {
4300 		if (netif_carrier_ok(netdev)) {
4301 			adapter->link_speed = 0;
4302 			adapter->link_duplex = 0;
4303 
4304 			/* check for thermal sensor event */
4305 			if (igb_thermal_sensor_event(hw,
4306 			    E1000_THSTAT_PWR_DOWN)) {
4307 				netdev_err(netdev, "The network adapter was stopped because it overheated\n");
4308 			}
4309 
4310 			/* Links status message must follow this format */
4311 			netdev_info(netdev, "igb: %s NIC Link is Down\n",
4312 			       netdev->name);
4313 			netif_carrier_off(netdev);
4314 
4315 			igb_ping_all_vfs(adapter);
4316 
4317 			/* link state has changed, schedule phy info update */
4318 			if (!test_bit(__IGB_DOWN, &adapter->state))
4319 				mod_timer(&adapter->phy_info_timer,
4320 					  round_jiffies(jiffies + 2 * HZ));
4321 
4322 			/* link is down, time to check for alternate media */
4323 			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
4324 				igb_check_swap_media(adapter);
4325 				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
4326 					schedule_work(&adapter->reset_task);
4327 					/* return immediately */
4328 					return;
4329 				}
4330 			}
4331 			pm_schedule_suspend(netdev->dev.parent,
4332 					    MSEC_PER_SEC * 5);
4333 
4334 		/* also check for alternate media here */
4335 		} else if (!netif_carrier_ok(netdev) &&
4336 			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
4337 			igb_check_swap_media(adapter);
4338 			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
4339 				schedule_work(&adapter->reset_task);
4340 				/* return immediately */
4341 				return;
4342 			}
4343 		}
4344 	}
4345 
4346 	spin_lock(&adapter->stats64_lock);
4347 	igb_update_stats(adapter, &adapter->stats64);
4348 	spin_unlock(&adapter->stats64_lock);
4349 
4350 	for (i = 0; i < adapter->num_tx_queues; i++) {
4351 		struct igb_ring *tx_ring = adapter->tx_ring[i];
4352 		if (!netif_carrier_ok(netdev)) {
4353 			/* We've lost link, so the controller stops DMA,
4354 			 * but we've got queued Tx work that's never going
4355 			 * to get done, so reset controller to flush Tx.
4356 			 * (Do the reset outside of interrupt context).
4357 			 */
4358 			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
4359 				adapter->tx_timeout_count++;
4360 				schedule_work(&adapter->reset_task);
4361 				/* return immediately since reset is imminent */
4362 				return;
4363 			}
4364 		}
4365 
4366 		/* Force detection of hung controller every watchdog period */
4367 		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
4368 	}
4369 
4370 	/* Cause software interrupt to ensure Rx ring is cleaned */
4371 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
4372 		u32 eics = 0;
4373 
4374 		for (i = 0; i < adapter->num_q_vectors; i++)
4375 			eics |= adapter->q_vector[i]->eims_value;
4376 		wr32(E1000_EICS, eics);
4377 	} else {
4378 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
4379 	}
4380 
4381 	igb_spoof_check(adapter);
4382 	igb_ptp_rx_hang(adapter);
4383 
4384 	/* Check LVMMC register on i350/i354 only */
4385 	if ((adapter->hw.mac.type == e1000_i350) ||
4386 	    (adapter->hw.mac.type == e1000_i354))
4387 		igb_check_lvmmc(adapter);
4388 
4389 	/* Reset the timer */
4390 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
4391 		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
4392 			mod_timer(&adapter->watchdog_timer,
4393 				  round_jiffies(jiffies +  HZ));
4394 		else
4395 			mod_timer(&adapter->watchdog_timer,
4396 				  round_jiffies(jiffies + 2 * HZ));
4397 	}
4398 }
4399 
4400 enum latency_range {
4401 	lowest_latency = 0,
4402 	low_latency = 1,
4403 	bulk_latency = 2,
4404 	latency_invalid = 255
4405 };
4406 
4407 /**
4408  *  igb_update_ring_itr - update the dynamic ITR value based on packet size
4409  *  @q_vector: pointer to q_vector
4410  *
4411  *  Stores a new ITR value based on strictly on packet size.  This
4412  *  algorithm is less sophisticated than that used in igb_update_itr,
4413  *  due to the difficulty of synchronizing statistics across multiple
4414  *  receive rings.  The divisors and thresholds used by this function
4415  *  were determined based on theoretical maximum wire speed and testing
4416  *  data, in order to minimize response time while increasing bulk
4417  *  throughput.
4418  *  This functionality is controlled by ethtool's coalescing settings.
4419  *  NOTE:  This function is called only when operating in a multiqueue
4420  *         receive environment.
4421  **/
4422 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
4423 {
4424 	int new_val = q_vector->itr_val;
4425 	int avg_wire_size = 0;
4426 	struct igb_adapter *adapter = q_vector->adapter;
4427 	unsigned int packets;
4428 
4429 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
4430 	 * ints/sec - ITR timer value of 120 ticks.
4431 	 */
4432 	if (adapter->link_speed != SPEED_1000) {
4433 		new_val = IGB_4K_ITR;
4434 		goto set_itr_val;
4435 	}
4436 
4437 	packets = q_vector->rx.total_packets;
4438 	if (packets)
4439 		avg_wire_size = q_vector->rx.total_bytes / packets;
4440 
4441 	packets = q_vector->tx.total_packets;
4442 	if (packets)
4443 		avg_wire_size = max_t(u32, avg_wire_size,
4444 				      q_vector->tx.total_bytes / packets);
4445 
4446 	/* if avg_wire_size isn't set no work was done */
4447 	if (!avg_wire_size)
4448 		goto clear_counts;
4449 
4450 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
4451 	avg_wire_size += 24;
4452 
4453 	/* Don't starve jumbo frames */
4454 	avg_wire_size = min(avg_wire_size, 3000);
4455 
4456 	/* Give a little boost to mid-size frames */
4457 	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
4458 		new_val = avg_wire_size / 3;
4459 	else
4460 		new_val = avg_wire_size / 2;
4461 
4462 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
4463 	if (new_val < IGB_20K_ITR &&
4464 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4465 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4466 		new_val = IGB_20K_ITR;
4467 
4468 set_itr_val:
4469 	if (new_val != q_vector->itr_val) {
4470 		q_vector->itr_val = new_val;
4471 		q_vector->set_itr = 1;
4472 	}
4473 clear_counts:
4474 	q_vector->rx.total_bytes = 0;
4475 	q_vector->rx.total_packets = 0;
4476 	q_vector->tx.total_bytes = 0;
4477 	q_vector->tx.total_packets = 0;
4478 }
4479 
4480 /**
4481  *  igb_update_itr - update the dynamic ITR value based on statistics
4482  *  @q_vector: pointer to q_vector
4483  *  @ring_container: ring info to update the itr for
4484  *
4485  *  Stores a new ITR value based on packets and byte
4486  *  counts during the last interrupt.  The advantage of per interrupt
4487  *  computation is faster updates and more accurate ITR for the current
4488  *  traffic pattern.  Constants in this function were computed
4489  *  based on theoretical maximum wire speed and thresholds were set based
4490  *  on testing data as well as attempting to minimize response time
4491  *  while increasing bulk throughput.
4492  *  This functionality is controlled by ethtool's coalescing settings.
4493  *  NOTE:  These calculations are only valid when operating in a single-
4494  *         queue environment.
4495  **/
4496 static void igb_update_itr(struct igb_q_vector *q_vector,
4497 			   struct igb_ring_container *ring_container)
4498 {
4499 	unsigned int packets = ring_container->total_packets;
4500 	unsigned int bytes = ring_container->total_bytes;
4501 	u8 itrval = ring_container->itr;
4502 
4503 	/* no packets, exit with status unchanged */
4504 	if (packets == 0)
4505 		return;
4506 
4507 	switch (itrval) {
4508 	case lowest_latency:
4509 		/* handle TSO and jumbo frames */
4510 		if (bytes/packets > 8000)
4511 			itrval = bulk_latency;
4512 		else if ((packets < 5) && (bytes > 512))
4513 			itrval = low_latency;
4514 		break;
4515 	case low_latency:  /* 50 usec aka 20000 ints/s */
4516 		if (bytes > 10000) {
4517 			/* this if handles the TSO accounting */
4518 			if (bytes/packets > 8000)
4519 				itrval = bulk_latency;
4520 			else if ((packets < 10) || ((bytes/packets) > 1200))
4521 				itrval = bulk_latency;
4522 			else if ((packets > 35))
4523 				itrval = lowest_latency;
4524 		} else if (bytes/packets > 2000) {
4525 			itrval = bulk_latency;
4526 		} else if (packets <= 2 && bytes < 512) {
4527 			itrval = lowest_latency;
4528 		}
4529 		break;
4530 	case bulk_latency: /* 250 usec aka 4000 ints/s */
4531 		if (bytes > 25000) {
4532 			if (packets > 35)
4533 				itrval = low_latency;
4534 		} else if (bytes < 1500) {
4535 			itrval = low_latency;
4536 		}
4537 		break;
4538 	}
4539 
4540 	/* clear work counters since we have the values we need */
4541 	ring_container->total_bytes = 0;
4542 	ring_container->total_packets = 0;
4543 
4544 	/* write updated itr to ring container */
4545 	ring_container->itr = itrval;
4546 }
4547 
4548 static void igb_set_itr(struct igb_q_vector *q_vector)
4549 {
4550 	struct igb_adapter *adapter = q_vector->adapter;
4551 	u32 new_itr = q_vector->itr_val;
4552 	u8 current_itr = 0;
4553 
4554 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4555 	if (adapter->link_speed != SPEED_1000) {
4556 		current_itr = 0;
4557 		new_itr = IGB_4K_ITR;
4558 		goto set_itr_now;
4559 	}
4560 
4561 	igb_update_itr(q_vector, &q_vector->tx);
4562 	igb_update_itr(q_vector, &q_vector->rx);
4563 
4564 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
4565 
4566 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
4567 	if (current_itr == lowest_latency &&
4568 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4569 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4570 		current_itr = low_latency;
4571 
4572 	switch (current_itr) {
4573 	/* counts and packets in update_itr are dependent on these numbers */
4574 	case lowest_latency:
4575 		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
4576 		break;
4577 	case low_latency:
4578 		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
4579 		break;
4580 	case bulk_latency:
4581 		new_itr = IGB_4K_ITR;  /* 4,000 ints/sec */
4582 		break;
4583 	default:
4584 		break;
4585 	}
4586 
4587 set_itr_now:
4588 	if (new_itr != q_vector->itr_val) {
4589 		/* this attempts to bias the interrupt rate towards Bulk
4590 		 * by adding intermediate steps when interrupt rate is
4591 		 * increasing
4592 		 */
4593 		new_itr = new_itr > q_vector->itr_val ?
4594 			  max((new_itr * q_vector->itr_val) /
4595 			  (new_itr + (q_vector->itr_val >> 2)),
4596 			  new_itr) : new_itr;
4597 		/* Don't write the value here; it resets the adapter's
4598 		 * internal timer, and causes us to delay far longer than
4599 		 * we should between interrupts.  Instead, we write the ITR
4600 		 * value at the beginning of the next interrupt so the timing
4601 		 * ends up being correct.
4602 		 */
4603 		q_vector->itr_val = new_itr;
4604 		q_vector->set_itr = 1;
4605 	}
4606 }
4607 
4608 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4609 			    u32 type_tucmd, u32 mss_l4len_idx)
4610 {
4611 	struct e1000_adv_tx_context_desc *context_desc;
4612 	u16 i = tx_ring->next_to_use;
4613 
4614 	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4615 
4616 	i++;
4617 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4618 
4619 	/* set bits to identify this as an advanced context descriptor */
4620 	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4621 
4622 	/* For 82575, context index must be unique per ring. */
4623 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
4624 		mss_l4len_idx |= tx_ring->reg_idx << 4;
4625 
4626 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
4627 	context_desc->seqnum_seed	= 0;
4628 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
4629 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
4630 }
4631 
4632 static int igb_tso(struct igb_ring *tx_ring,
4633 		   struct igb_tx_buffer *first,
4634 		   u8 *hdr_len)
4635 {
4636 	struct sk_buff *skb = first->skb;
4637 	u32 vlan_macip_lens, type_tucmd;
4638 	u32 mss_l4len_idx, l4len;
4639 	int err;
4640 
4641 	if (skb->ip_summed != CHECKSUM_PARTIAL)
4642 		return 0;
4643 
4644 	if (!skb_is_gso(skb))
4645 		return 0;
4646 
4647 	err = skb_cow_head(skb, 0);
4648 	if (err < 0)
4649 		return err;
4650 
4651 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4652 	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
4653 
4654 	if (first->protocol == htons(ETH_P_IP)) {
4655 		struct iphdr *iph = ip_hdr(skb);
4656 		iph->tot_len = 0;
4657 		iph->check = 0;
4658 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4659 							 iph->daddr, 0,
4660 							 IPPROTO_TCP,
4661 							 0);
4662 		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4663 		first->tx_flags |= IGB_TX_FLAGS_TSO |
4664 				   IGB_TX_FLAGS_CSUM |
4665 				   IGB_TX_FLAGS_IPV4;
4666 	} else if (skb_is_gso_v6(skb)) {
4667 		ipv6_hdr(skb)->payload_len = 0;
4668 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4669 						       &ipv6_hdr(skb)->daddr,
4670 						       0, IPPROTO_TCP, 0);
4671 		first->tx_flags |= IGB_TX_FLAGS_TSO |
4672 				   IGB_TX_FLAGS_CSUM;
4673 	}
4674 
4675 	/* compute header lengths */
4676 	l4len = tcp_hdrlen(skb);
4677 	*hdr_len = skb_transport_offset(skb) + l4len;
4678 
4679 	/* update gso size and bytecount with header size */
4680 	first->gso_segs = skb_shinfo(skb)->gso_segs;
4681 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
4682 
4683 	/* MSS L4LEN IDX */
4684 	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4685 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
4686 
4687 	/* VLAN MACLEN IPLEN */
4688 	vlan_macip_lens = skb_network_header_len(skb);
4689 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
4690 	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
4691 
4692 	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
4693 
4694 	return 1;
4695 }
4696 
4697 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4698 {
4699 	struct sk_buff *skb = first->skb;
4700 	u32 vlan_macip_lens = 0;
4701 	u32 mss_l4len_idx = 0;
4702 	u32 type_tucmd = 0;
4703 
4704 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
4705 		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4706 			return;
4707 	} else {
4708 		u8 l4_hdr = 0;
4709 
4710 		switch (first->protocol) {
4711 		case htons(ETH_P_IP):
4712 			vlan_macip_lens |= skb_network_header_len(skb);
4713 			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4714 			l4_hdr = ip_hdr(skb)->protocol;
4715 			break;
4716 		case htons(ETH_P_IPV6):
4717 			vlan_macip_lens |= skb_network_header_len(skb);
4718 			l4_hdr = ipv6_hdr(skb)->nexthdr;
4719 			break;
4720 		default:
4721 			if (unlikely(net_ratelimit())) {
4722 				dev_warn(tx_ring->dev,
4723 					 "partial checksum but proto=%x!\n",
4724 					 first->protocol);
4725 			}
4726 			break;
4727 		}
4728 
4729 		switch (l4_hdr) {
4730 		case IPPROTO_TCP:
4731 			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4732 			mss_l4len_idx = tcp_hdrlen(skb) <<
4733 					E1000_ADVTXD_L4LEN_SHIFT;
4734 			break;
4735 		case IPPROTO_SCTP:
4736 			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4737 			mss_l4len_idx = sizeof(struct sctphdr) <<
4738 					E1000_ADVTXD_L4LEN_SHIFT;
4739 			break;
4740 		case IPPROTO_UDP:
4741 			mss_l4len_idx = sizeof(struct udphdr) <<
4742 					E1000_ADVTXD_L4LEN_SHIFT;
4743 			break;
4744 		default:
4745 			if (unlikely(net_ratelimit())) {
4746 				dev_warn(tx_ring->dev,
4747 					 "partial checksum but l4 proto=%x!\n",
4748 					 l4_hdr);
4749 			}
4750 			break;
4751 		}
4752 
4753 		/* update TX checksum flag */
4754 		first->tx_flags |= IGB_TX_FLAGS_CSUM;
4755 	}
4756 
4757 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
4758 	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
4759 
4760 	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
4761 }
4762 
4763 #define IGB_SET_FLAG(_input, _flag, _result) \
4764 	((_flag <= _result) ? \
4765 	 ((u32)(_input & _flag) * (_result / _flag)) : \
4766 	 ((u32)(_input & _flag) / (_flag / _result)))
4767 
4768 static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
4769 {
4770 	/* set type for advanced descriptor with frame checksum insertion */
4771 	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
4772 		       E1000_ADVTXD_DCMD_DEXT |
4773 		       E1000_ADVTXD_DCMD_IFCS;
4774 
4775 	/* set HW vlan bit if vlan is present */
4776 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
4777 				 (E1000_ADVTXD_DCMD_VLE));
4778 
4779 	/* set segmentation bits for TSO */
4780 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
4781 				 (E1000_ADVTXD_DCMD_TSE));
4782 
4783 	/* set timestamp bit if present */
4784 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
4785 				 (E1000_ADVTXD_MAC_TSTAMP));
4786 
4787 	/* insert frame checksum */
4788 	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
4789 
4790 	return cmd_type;
4791 }
4792 
4793 static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4794 				 union e1000_adv_tx_desc *tx_desc,
4795 				 u32 tx_flags, unsigned int paylen)
4796 {
4797 	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4798 
4799 	/* 82575 requires a unique index per ring */
4800 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
4801 		olinfo_status |= tx_ring->reg_idx << 4;
4802 
4803 	/* insert L4 checksum */
4804 	olinfo_status |= IGB_SET_FLAG(tx_flags,
4805 				      IGB_TX_FLAGS_CSUM,
4806 				      (E1000_TXD_POPTS_TXSM << 8));
4807 
4808 	/* insert IPv4 checksum */
4809 	olinfo_status |= IGB_SET_FLAG(tx_flags,
4810 				      IGB_TX_FLAGS_IPV4,
4811 				      (E1000_TXD_POPTS_IXSM << 8));
4812 
4813 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
4814 }
4815 
4816 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4817 {
4818 	struct net_device *netdev = tx_ring->netdev;
4819 
4820 	netif_stop_subqueue(netdev, tx_ring->queue_index);
4821 
4822 	/* Herbert's original patch had:
4823 	 *  smp_mb__after_netif_stop_queue();
4824 	 * but since that doesn't exist yet, just open code it.
4825 	 */
4826 	smp_mb();
4827 
4828 	/* We need to check again in a case another CPU has just
4829 	 * made room available.
4830 	 */
4831 	if (igb_desc_unused(tx_ring) < size)
4832 		return -EBUSY;
4833 
4834 	/* A reprieve! */
4835 	netif_wake_subqueue(netdev, tx_ring->queue_index);
4836 
4837 	u64_stats_update_begin(&tx_ring->tx_syncp2);
4838 	tx_ring->tx_stats.restart_queue2++;
4839 	u64_stats_update_end(&tx_ring->tx_syncp2);
4840 
4841 	return 0;
4842 }
4843 
4844 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4845 {
4846 	if (igb_desc_unused(tx_ring) >= size)
4847 		return 0;
4848 	return __igb_maybe_stop_tx(tx_ring, size);
4849 }
4850 
4851 static void igb_tx_map(struct igb_ring *tx_ring,
4852 		       struct igb_tx_buffer *first,
4853 		       const u8 hdr_len)
4854 {
4855 	struct sk_buff *skb = first->skb;
4856 	struct igb_tx_buffer *tx_buffer;
4857 	union e1000_adv_tx_desc *tx_desc;
4858 	struct skb_frag_struct *frag;
4859 	dma_addr_t dma;
4860 	unsigned int data_len, size;
4861 	u32 tx_flags = first->tx_flags;
4862 	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
4863 	u16 i = tx_ring->next_to_use;
4864 
4865 	tx_desc = IGB_TX_DESC(tx_ring, i);
4866 
4867 	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
4868 
4869 	size = skb_headlen(skb);
4870 	data_len = skb->data_len;
4871 
4872 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4873 
4874 	tx_buffer = first;
4875 
4876 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
4877 		if (dma_mapping_error(tx_ring->dev, dma))
4878 			goto dma_error;
4879 
4880 		/* record length, and DMA address */
4881 		dma_unmap_len_set(tx_buffer, len, size);
4882 		dma_unmap_addr_set(tx_buffer, dma, dma);
4883 
4884 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
4885 
4886 		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4887 			tx_desc->read.cmd_type_len =
4888 				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
4889 
4890 			i++;
4891 			tx_desc++;
4892 			if (i == tx_ring->count) {
4893 				tx_desc = IGB_TX_DESC(tx_ring, 0);
4894 				i = 0;
4895 			}
4896 			tx_desc->read.olinfo_status = 0;
4897 
4898 			dma += IGB_MAX_DATA_PER_TXD;
4899 			size -= IGB_MAX_DATA_PER_TXD;
4900 
4901 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
4902 		}
4903 
4904 		if (likely(!data_len))
4905 			break;
4906 
4907 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
4908 
4909 		i++;
4910 		tx_desc++;
4911 		if (i == tx_ring->count) {
4912 			tx_desc = IGB_TX_DESC(tx_ring, 0);
4913 			i = 0;
4914 		}
4915 		tx_desc->read.olinfo_status = 0;
4916 
4917 		size = skb_frag_size(frag);
4918 		data_len -= size;
4919 
4920 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4921 				       size, DMA_TO_DEVICE);
4922 
4923 		tx_buffer = &tx_ring->tx_buffer_info[i];
4924 	}
4925 
4926 	/* write last descriptor with RS and EOP bits */
4927 	cmd_type |= size | IGB_TXD_DCMD;
4928 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
4929 
4930 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4931 
4932 	/* set the timestamp */
4933 	first->time_stamp = jiffies;
4934 
4935 	/* Force memory writes to complete before letting h/w know there
4936 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
4937 	 * memory model archs, such as IA-64).
4938 	 *
4939 	 * We also need this memory barrier to make certain all of the
4940 	 * status bits have been updated before next_to_watch is written.
4941 	 */
4942 	wmb();
4943 
4944 	/* set next_to_watch value indicating a packet is present */
4945 	first->next_to_watch = tx_desc;
4946 
4947 	i++;
4948 	if (i == tx_ring->count)
4949 		i = 0;
4950 
4951 	tx_ring->next_to_use = i;
4952 
4953 	/* Make sure there is space in the ring for the next send. */
4954 	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
4955 
4956 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
4957 		writel(i, tx_ring->tail);
4958 
4959 		/* we need this if more than one processor can write to our tail
4960 		 * at a time, it synchronizes IO on IA64/Altix systems
4961 		 */
4962 		mmiowb();
4963 	}
4964 	return;
4965 
4966 dma_error:
4967 	dev_err(tx_ring->dev, "TX DMA map failed\n");
4968 
4969 	/* clear dma mappings for failed tx_buffer_info map */
4970 	for (;;) {
4971 		tx_buffer = &tx_ring->tx_buffer_info[i];
4972 		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
4973 		if (tx_buffer == first)
4974 			break;
4975 		if (i == 0)
4976 			i = tx_ring->count;
4977 		i--;
4978 	}
4979 
4980 	tx_ring->next_to_use = i;
4981 }
4982 
4983 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4984 				struct igb_ring *tx_ring)
4985 {
4986 	struct igb_tx_buffer *first;
4987 	int tso;
4988 	u32 tx_flags = 0;
4989 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
4990 	__be16 protocol = vlan_get_protocol(skb);
4991 	u8 hdr_len = 0;
4992 
4993 	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
4994 	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
4995 	 *       + 2 desc gap to keep tail from touching head,
4996 	 *       + 1 desc for context descriptor,
4997 	 * otherwise try next time
4998 	 */
4999 	if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
5000 		unsigned short f;
5001 
5002 		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
5003 			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
5004 	} else {
5005 		count += skb_shinfo(skb)->nr_frags;
5006 	}
5007 
5008 	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
5009 		/* this is a hard error */
5010 		return NETDEV_TX_BUSY;
5011 	}
5012 
5013 	/* record the location of the first descriptor for this packet */
5014 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
5015 	first->skb = skb;
5016 	first->bytecount = skb->len;
5017 	first->gso_segs = 1;
5018 
5019 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
5020 		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
5021 
5022 		if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
5023 					   &adapter->state)) {
5024 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5025 			tx_flags |= IGB_TX_FLAGS_TSTAMP;
5026 
5027 			adapter->ptp_tx_skb = skb_get(skb);
5028 			adapter->ptp_tx_start = jiffies;
5029 			if (adapter->hw.mac.type == e1000_82576)
5030 				schedule_work(&adapter->ptp_tx_work);
5031 		}
5032 	}
5033 
5034 	skb_tx_timestamp(skb);
5035 
5036 	if (vlan_tx_tag_present(skb)) {
5037 		tx_flags |= IGB_TX_FLAGS_VLAN;
5038 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
5039 	}
5040 
5041 	/* record initial flags and protocol */
5042 	first->tx_flags = tx_flags;
5043 	first->protocol = protocol;
5044 
5045 	tso = igb_tso(tx_ring, first, &hdr_len);
5046 	if (tso < 0)
5047 		goto out_drop;
5048 	else if (!tso)
5049 		igb_tx_csum(tx_ring, first);
5050 
5051 	igb_tx_map(tx_ring, first, hdr_len);
5052 
5053 	return NETDEV_TX_OK;
5054 
5055 out_drop:
5056 	igb_unmap_and_free_tx_resource(tx_ring, first);
5057 
5058 	return NETDEV_TX_OK;
5059 }
5060 
5061 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
5062 						    struct sk_buff *skb)
5063 {
5064 	unsigned int r_idx = skb->queue_mapping;
5065 
5066 	if (r_idx >= adapter->num_tx_queues)
5067 		r_idx = r_idx % adapter->num_tx_queues;
5068 
5069 	return adapter->tx_ring[r_idx];
5070 }
5071 
5072 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
5073 				  struct net_device *netdev)
5074 {
5075 	struct igb_adapter *adapter = netdev_priv(netdev);
5076 
5077 	if (test_bit(__IGB_DOWN, &adapter->state)) {
5078 		dev_kfree_skb_any(skb);
5079 		return NETDEV_TX_OK;
5080 	}
5081 
5082 	if (skb->len <= 0) {
5083 		dev_kfree_skb_any(skb);
5084 		return NETDEV_TX_OK;
5085 	}
5086 
5087 	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
5088 	 * in order to meet this minimum size requirement.
5089 	 */
5090 	if (unlikely(skb->len < 17)) {
5091 		if (skb_pad(skb, 17 - skb->len))
5092 			return NETDEV_TX_OK;
5093 		skb->len = 17;
5094 		skb_set_tail_pointer(skb, 17);
5095 	}
5096 
5097 	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
5098 }
5099 
5100 /**
5101  *  igb_tx_timeout - Respond to a Tx Hang
5102  *  @netdev: network interface device structure
5103  **/
5104 static void igb_tx_timeout(struct net_device *netdev)
5105 {
5106 	struct igb_adapter *adapter = netdev_priv(netdev);
5107 	struct e1000_hw *hw = &adapter->hw;
5108 
5109 	/* Do the reset outside of interrupt context */
5110 	adapter->tx_timeout_count++;
5111 
5112 	if (hw->mac.type >= e1000_82580)
5113 		hw->dev_spec._82575.global_device_reset = true;
5114 
5115 	schedule_work(&adapter->reset_task);
5116 	wr32(E1000_EICS,
5117 	     (adapter->eims_enable_mask & ~adapter->eims_other));
5118 }
5119 
5120 static void igb_reset_task(struct work_struct *work)
5121 {
5122 	struct igb_adapter *adapter;
5123 	adapter = container_of(work, struct igb_adapter, reset_task);
5124 
5125 	igb_dump(adapter);
5126 	netdev_err(adapter->netdev, "Reset adapter\n");
5127 	igb_reinit_locked(adapter);
5128 }
5129 
5130 /**
5131  *  igb_get_stats64 - Get System Network Statistics
5132  *  @netdev: network interface device structure
5133  *  @stats: rtnl_link_stats64 pointer
5134  **/
5135 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
5136 						struct rtnl_link_stats64 *stats)
5137 {
5138 	struct igb_adapter *adapter = netdev_priv(netdev);
5139 
5140 	spin_lock(&adapter->stats64_lock);
5141 	igb_update_stats(adapter, &adapter->stats64);
5142 	memcpy(stats, &adapter->stats64, sizeof(*stats));
5143 	spin_unlock(&adapter->stats64_lock);
5144 
5145 	return stats;
5146 }
5147 
5148 /**
5149  *  igb_change_mtu - Change the Maximum Transfer Unit
5150  *  @netdev: network interface device structure
5151  *  @new_mtu: new value for maximum frame size
5152  *
5153  *  Returns 0 on success, negative on failure
5154  **/
5155 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
5156 {
5157 	struct igb_adapter *adapter = netdev_priv(netdev);
5158 	struct pci_dev *pdev = adapter->pdev;
5159 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5160 
5161 	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
5162 		dev_err(&pdev->dev, "Invalid MTU setting\n");
5163 		return -EINVAL;
5164 	}
5165 
5166 #define MAX_STD_JUMBO_FRAME_SIZE 9238
5167 	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
5168 		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
5169 		return -EINVAL;
5170 	}
5171 
5172 	/* adjust max frame to be at least the size of a standard frame */
5173 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
5174 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5175 
5176 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
5177 		usleep_range(1000, 2000);
5178 
5179 	/* igb_down has a dependency on max_frame_size */
5180 	adapter->max_frame_size = max_frame;
5181 
5182 	if (netif_running(netdev))
5183 		igb_down(adapter);
5184 
5185 	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
5186 		 netdev->mtu, new_mtu);
5187 	netdev->mtu = new_mtu;
5188 
5189 	if (netif_running(netdev))
5190 		igb_up(adapter);
5191 	else
5192 		igb_reset(adapter);
5193 
5194 	clear_bit(__IGB_RESETTING, &adapter->state);
5195 
5196 	return 0;
5197 }
5198 
5199 /**
5200  *  igb_update_stats - Update the board statistics counters
5201  *  @adapter: board private structure
5202  **/
5203 void igb_update_stats(struct igb_adapter *adapter,
5204 		      struct rtnl_link_stats64 *net_stats)
5205 {
5206 	struct e1000_hw *hw = &adapter->hw;
5207 	struct pci_dev *pdev = adapter->pdev;
5208 	u32 reg, mpc;
5209 	int i;
5210 	u64 bytes, packets;
5211 	unsigned int start;
5212 	u64 _bytes, _packets;
5213 
5214 	/* Prevent stats update while adapter is being reset, or if the pci
5215 	 * connection is down.
5216 	 */
5217 	if (adapter->link_speed == 0)
5218 		return;
5219 	if (pci_channel_offline(pdev))
5220 		return;
5221 
5222 	bytes = 0;
5223 	packets = 0;
5224 
5225 	rcu_read_lock();
5226 	for (i = 0; i < adapter->num_rx_queues; i++) {
5227 		struct igb_ring *ring = adapter->rx_ring[i];
5228 		u32 rqdpc = rd32(E1000_RQDPC(i));
5229 		if (hw->mac.type >= e1000_i210)
5230 			wr32(E1000_RQDPC(i), 0);
5231 
5232 		if (rqdpc) {
5233 			ring->rx_stats.drops += rqdpc;
5234 			net_stats->rx_fifo_errors += rqdpc;
5235 		}
5236 
5237 		do {
5238 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
5239 			_bytes = ring->rx_stats.bytes;
5240 			_packets = ring->rx_stats.packets;
5241 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
5242 		bytes += _bytes;
5243 		packets += _packets;
5244 	}
5245 
5246 	net_stats->rx_bytes = bytes;
5247 	net_stats->rx_packets = packets;
5248 
5249 	bytes = 0;
5250 	packets = 0;
5251 	for (i = 0; i < adapter->num_tx_queues; i++) {
5252 		struct igb_ring *ring = adapter->tx_ring[i];
5253 		do {
5254 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
5255 			_bytes = ring->tx_stats.bytes;
5256 			_packets = ring->tx_stats.packets;
5257 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
5258 		bytes += _bytes;
5259 		packets += _packets;
5260 	}
5261 	net_stats->tx_bytes = bytes;
5262 	net_stats->tx_packets = packets;
5263 	rcu_read_unlock();
5264 
5265 	/* read stats registers */
5266 	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
5267 	adapter->stats.gprc += rd32(E1000_GPRC);
5268 	adapter->stats.gorc += rd32(E1000_GORCL);
5269 	rd32(E1000_GORCH); /* clear GORCL */
5270 	adapter->stats.bprc += rd32(E1000_BPRC);
5271 	adapter->stats.mprc += rd32(E1000_MPRC);
5272 	adapter->stats.roc += rd32(E1000_ROC);
5273 
5274 	adapter->stats.prc64 += rd32(E1000_PRC64);
5275 	adapter->stats.prc127 += rd32(E1000_PRC127);
5276 	adapter->stats.prc255 += rd32(E1000_PRC255);
5277 	adapter->stats.prc511 += rd32(E1000_PRC511);
5278 	adapter->stats.prc1023 += rd32(E1000_PRC1023);
5279 	adapter->stats.prc1522 += rd32(E1000_PRC1522);
5280 	adapter->stats.symerrs += rd32(E1000_SYMERRS);
5281 	adapter->stats.sec += rd32(E1000_SEC);
5282 
5283 	mpc = rd32(E1000_MPC);
5284 	adapter->stats.mpc += mpc;
5285 	net_stats->rx_fifo_errors += mpc;
5286 	adapter->stats.scc += rd32(E1000_SCC);
5287 	adapter->stats.ecol += rd32(E1000_ECOL);
5288 	adapter->stats.mcc += rd32(E1000_MCC);
5289 	adapter->stats.latecol += rd32(E1000_LATECOL);
5290 	adapter->stats.dc += rd32(E1000_DC);
5291 	adapter->stats.rlec += rd32(E1000_RLEC);
5292 	adapter->stats.xonrxc += rd32(E1000_XONRXC);
5293 	adapter->stats.xontxc += rd32(E1000_XONTXC);
5294 	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
5295 	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
5296 	adapter->stats.fcruc += rd32(E1000_FCRUC);
5297 	adapter->stats.gptc += rd32(E1000_GPTC);
5298 	adapter->stats.gotc += rd32(E1000_GOTCL);
5299 	rd32(E1000_GOTCH); /* clear GOTCL */
5300 	adapter->stats.rnbc += rd32(E1000_RNBC);
5301 	adapter->stats.ruc += rd32(E1000_RUC);
5302 	adapter->stats.rfc += rd32(E1000_RFC);
5303 	adapter->stats.rjc += rd32(E1000_RJC);
5304 	adapter->stats.tor += rd32(E1000_TORH);
5305 	adapter->stats.tot += rd32(E1000_TOTH);
5306 	adapter->stats.tpr += rd32(E1000_TPR);
5307 
5308 	adapter->stats.ptc64 += rd32(E1000_PTC64);
5309 	adapter->stats.ptc127 += rd32(E1000_PTC127);
5310 	adapter->stats.ptc255 += rd32(E1000_PTC255);
5311 	adapter->stats.ptc511 += rd32(E1000_PTC511);
5312 	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
5313 	adapter->stats.ptc1522 += rd32(E1000_PTC1522);
5314 
5315 	adapter->stats.mptc += rd32(E1000_MPTC);
5316 	adapter->stats.bptc += rd32(E1000_BPTC);
5317 
5318 	adapter->stats.tpt += rd32(E1000_TPT);
5319 	adapter->stats.colc += rd32(E1000_COLC);
5320 
5321 	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
5322 	/* read internal phy specific stats */
5323 	reg = rd32(E1000_CTRL_EXT);
5324 	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
5325 		adapter->stats.rxerrc += rd32(E1000_RXERRC);
5326 
5327 		/* this stat has invalid values on i210/i211 */
5328 		if ((hw->mac.type != e1000_i210) &&
5329 		    (hw->mac.type != e1000_i211))
5330 			adapter->stats.tncrs += rd32(E1000_TNCRS);
5331 	}
5332 
5333 	adapter->stats.tsctc += rd32(E1000_TSCTC);
5334 	adapter->stats.tsctfc += rd32(E1000_TSCTFC);
5335 
5336 	adapter->stats.iac += rd32(E1000_IAC);
5337 	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
5338 	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
5339 	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
5340 	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
5341 	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
5342 	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
5343 	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
5344 	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
5345 
5346 	/* Fill out the OS statistics structure */
5347 	net_stats->multicast = adapter->stats.mprc;
5348 	net_stats->collisions = adapter->stats.colc;
5349 
5350 	/* Rx Errors */
5351 
5352 	/* RLEC on some newer hardware can be incorrect so build
5353 	 * our own version based on RUC and ROC
5354 	 */
5355 	net_stats->rx_errors = adapter->stats.rxerrc +
5356 		adapter->stats.crcerrs + adapter->stats.algnerrc +
5357 		adapter->stats.ruc + adapter->stats.roc +
5358 		adapter->stats.cexterr;
5359 	net_stats->rx_length_errors = adapter->stats.ruc +
5360 				      adapter->stats.roc;
5361 	net_stats->rx_crc_errors = adapter->stats.crcerrs;
5362 	net_stats->rx_frame_errors = adapter->stats.algnerrc;
5363 	net_stats->rx_missed_errors = adapter->stats.mpc;
5364 
5365 	/* Tx Errors */
5366 	net_stats->tx_errors = adapter->stats.ecol +
5367 			       adapter->stats.latecol;
5368 	net_stats->tx_aborted_errors = adapter->stats.ecol;
5369 	net_stats->tx_window_errors = adapter->stats.latecol;
5370 	net_stats->tx_carrier_errors = adapter->stats.tncrs;
5371 
5372 	/* Tx Dropped needs to be maintained elsewhere */
5373 
5374 	/* Management Stats */
5375 	adapter->stats.mgptc += rd32(E1000_MGTPTC);
5376 	adapter->stats.mgprc += rd32(E1000_MGTPRC);
5377 	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
5378 
5379 	/* OS2BMC Stats */
5380 	reg = rd32(E1000_MANC);
5381 	if (reg & E1000_MANC_EN_BMC2OS) {
5382 		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
5383 		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
5384 		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
5385 		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
5386 	}
5387 }
5388 
5389 static irqreturn_t igb_msix_other(int irq, void *data)
5390 {
5391 	struct igb_adapter *adapter = data;
5392 	struct e1000_hw *hw = &adapter->hw;
5393 	u32 icr = rd32(E1000_ICR);
5394 	/* reading ICR causes bit 31 of EICR to be cleared */
5395 
5396 	if (icr & E1000_ICR_DRSTA)
5397 		schedule_work(&adapter->reset_task);
5398 
5399 	if (icr & E1000_ICR_DOUTSYNC) {
5400 		/* HW is reporting DMA is out of sync */
5401 		adapter->stats.doosync++;
5402 		/* The DMA Out of Sync is also indication of a spoof event
5403 		 * in IOV mode. Check the Wrong VM Behavior register to
5404 		 * see if it is really a spoof event.
5405 		 */
5406 		igb_check_wvbr(adapter);
5407 	}
5408 
5409 	/* Check for a mailbox event */
5410 	if (icr & E1000_ICR_VMMB)
5411 		igb_msg_task(adapter);
5412 
5413 	if (icr & E1000_ICR_LSC) {
5414 		hw->mac.get_link_status = 1;
5415 		/* guard against interrupt when we're going down */
5416 		if (!test_bit(__IGB_DOWN, &adapter->state))
5417 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
5418 	}
5419 
5420 	if (icr & E1000_ICR_TS) {
5421 		u32 tsicr = rd32(E1000_TSICR);
5422 
5423 		if (tsicr & E1000_TSICR_TXTS) {
5424 			/* acknowledge the interrupt */
5425 			wr32(E1000_TSICR, E1000_TSICR_TXTS);
5426 			/* retrieve hardware timestamp */
5427 			schedule_work(&adapter->ptp_tx_work);
5428 		}
5429 	}
5430 
5431 	wr32(E1000_EIMS, adapter->eims_other);
5432 
5433 	return IRQ_HANDLED;
5434 }
5435 
5436 static void igb_write_itr(struct igb_q_vector *q_vector)
5437 {
5438 	struct igb_adapter *adapter = q_vector->adapter;
5439 	u32 itr_val = q_vector->itr_val & 0x7FFC;
5440 
5441 	if (!q_vector->set_itr)
5442 		return;
5443 
5444 	if (!itr_val)
5445 		itr_val = 0x4;
5446 
5447 	if (adapter->hw.mac.type == e1000_82575)
5448 		itr_val |= itr_val << 16;
5449 	else
5450 		itr_val |= E1000_EITR_CNT_IGNR;
5451 
5452 	writel(itr_val, q_vector->itr_register);
5453 	q_vector->set_itr = 0;
5454 }
5455 
5456 static irqreturn_t igb_msix_ring(int irq, void *data)
5457 {
5458 	struct igb_q_vector *q_vector = data;
5459 
5460 	/* Write the ITR value calculated from the previous interrupt. */
5461 	igb_write_itr(q_vector);
5462 
5463 	napi_schedule(&q_vector->napi);
5464 
5465 	return IRQ_HANDLED;
5466 }
5467 
5468 #ifdef CONFIG_IGB_DCA
5469 static void igb_update_tx_dca(struct igb_adapter *adapter,
5470 			      struct igb_ring *tx_ring,
5471 			      int cpu)
5472 {
5473 	struct e1000_hw *hw = &adapter->hw;
5474 	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
5475 
5476 	if (hw->mac.type != e1000_82575)
5477 		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
5478 
5479 	/* We can enable relaxed ordering for reads, but not writes when
5480 	 * DCA is enabled.  This is due to a known issue in some chipsets
5481 	 * which will cause the DCA tag to be cleared.
5482 	 */
5483 	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
5484 		  E1000_DCA_TXCTRL_DATA_RRO_EN |
5485 		  E1000_DCA_TXCTRL_DESC_DCA_EN;
5486 
5487 	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
5488 }
5489 
5490 static void igb_update_rx_dca(struct igb_adapter *adapter,
5491 			      struct igb_ring *rx_ring,
5492 			      int cpu)
5493 {
5494 	struct e1000_hw *hw = &adapter->hw;
5495 	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
5496 
5497 	if (hw->mac.type != e1000_82575)
5498 		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
5499 
5500 	/* We can enable relaxed ordering for reads, but not writes when
5501 	 * DCA is enabled.  This is due to a known issue in some chipsets
5502 	 * which will cause the DCA tag to be cleared.
5503 	 */
5504 	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
5505 		  E1000_DCA_RXCTRL_DESC_DCA_EN;
5506 
5507 	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
5508 }
5509 
5510 static void igb_update_dca(struct igb_q_vector *q_vector)
5511 {
5512 	struct igb_adapter *adapter = q_vector->adapter;
5513 	int cpu = get_cpu();
5514 
5515 	if (q_vector->cpu == cpu)
5516 		goto out_no_update;
5517 
5518 	if (q_vector->tx.ring)
5519 		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
5520 
5521 	if (q_vector->rx.ring)
5522 		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
5523 
5524 	q_vector->cpu = cpu;
5525 out_no_update:
5526 	put_cpu();
5527 }
5528 
5529 static void igb_setup_dca(struct igb_adapter *adapter)
5530 {
5531 	struct e1000_hw *hw = &adapter->hw;
5532 	int i;
5533 
5534 	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
5535 		return;
5536 
5537 	/* Always use CB2 mode, difference is masked in the CB driver. */
5538 	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
5539 
5540 	for (i = 0; i < adapter->num_q_vectors; i++) {
5541 		adapter->q_vector[i]->cpu = -1;
5542 		igb_update_dca(adapter->q_vector[i]);
5543 	}
5544 }
5545 
5546 static int __igb_notify_dca(struct device *dev, void *data)
5547 {
5548 	struct net_device *netdev = dev_get_drvdata(dev);
5549 	struct igb_adapter *adapter = netdev_priv(netdev);
5550 	struct pci_dev *pdev = adapter->pdev;
5551 	struct e1000_hw *hw = &adapter->hw;
5552 	unsigned long event = *(unsigned long *)data;
5553 
5554 	switch (event) {
5555 	case DCA_PROVIDER_ADD:
5556 		/* if already enabled, don't do it again */
5557 		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
5558 			break;
5559 		if (dca_add_requester(dev) == 0) {
5560 			adapter->flags |= IGB_FLAG_DCA_ENABLED;
5561 			dev_info(&pdev->dev, "DCA enabled\n");
5562 			igb_setup_dca(adapter);
5563 			break;
5564 		}
5565 		/* Fall Through since DCA is disabled. */
5566 	case DCA_PROVIDER_REMOVE:
5567 		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
5568 			/* without this a class_device is left
5569 			 * hanging around in the sysfs model
5570 			 */
5571 			dca_remove_requester(dev);
5572 			dev_info(&pdev->dev, "DCA disabled\n");
5573 			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
5574 			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
5575 		}
5576 		break;
5577 	}
5578 
5579 	return 0;
5580 }
5581 
5582 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5583 			  void *p)
5584 {
5585 	int ret_val;
5586 
5587 	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
5588 					 __igb_notify_dca);
5589 
5590 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5591 }
5592 #endif /* CONFIG_IGB_DCA */
5593 
5594 #ifdef CONFIG_PCI_IOV
5595 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5596 {
5597 	unsigned char mac_addr[ETH_ALEN];
5598 
5599 	eth_zero_addr(mac_addr);
5600 	igb_set_vf_mac(adapter, vf, mac_addr);
5601 
5602 	/* By default spoof check is enabled for all VFs */
5603 	adapter->vf_data[vf].spoofchk_enabled = true;
5604 
5605 	return 0;
5606 }
5607 
5608 #endif
5609 static void igb_ping_all_vfs(struct igb_adapter *adapter)
5610 {
5611 	struct e1000_hw *hw = &adapter->hw;
5612 	u32 ping;
5613 	int i;
5614 
5615 	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5616 		ping = E1000_PF_CONTROL_MSG;
5617 		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
5618 			ping |= E1000_VT_MSGTYPE_CTS;
5619 		igb_write_mbx(hw, &ping, 1, i);
5620 	}
5621 }
5622 
5623 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5624 {
5625 	struct e1000_hw *hw = &adapter->hw;
5626 	u32 vmolr = rd32(E1000_VMOLR(vf));
5627 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5628 
5629 	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
5630 			    IGB_VF_FLAG_MULTI_PROMISC);
5631 	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5632 
5633 	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5634 		vmolr |= E1000_VMOLR_MPME;
5635 		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
5636 		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5637 	} else {
5638 		/* if we have hashes and we are clearing a multicast promisc
5639 		 * flag we need to write the hashes to the MTA as this step
5640 		 * was previously skipped
5641 		 */
5642 		if (vf_data->num_vf_mc_hashes > 30) {
5643 			vmolr |= E1000_VMOLR_MPME;
5644 		} else if (vf_data->num_vf_mc_hashes) {
5645 			int j;
5646 
5647 			vmolr |= E1000_VMOLR_ROMPE;
5648 			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5649 				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5650 		}
5651 	}
5652 
5653 	wr32(E1000_VMOLR(vf), vmolr);
5654 
5655 	/* there are flags left unprocessed, likely not supported */
5656 	if (*msgbuf & E1000_VT_MSGINFO_MASK)
5657 		return -EINVAL;
5658 
5659 	return 0;
5660 }
5661 
5662 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5663 				  u32 *msgbuf, u32 vf)
5664 {
5665 	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5666 	u16 *hash_list = (u16 *)&msgbuf[1];
5667 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5668 	int i;
5669 
5670 	/* salt away the number of multicast addresses assigned
5671 	 * to this VF for later use to restore when the PF multi cast
5672 	 * list changes
5673 	 */
5674 	vf_data->num_vf_mc_hashes = n;
5675 
5676 	/* only up to 30 hash values supported */
5677 	if (n > 30)
5678 		n = 30;
5679 
5680 	/* store the hashes for later use */
5681 	for (i = 0; i < n; i++)
5682 		vf_data->vf_mc_hashes[i] = hash_list[i];
5683 
5684 	/* Flush and reset the mta with the new values */
5685 	igb_set_rx_mode(adapter->netdev);
5686 
5687 	return 0;
5688 }
5689 
5690 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5691 {
5692 	struct e1000_hw *hw = &adapter->hw;
5693 	struct vf_data_storage *vf_data;
5694 	int i, j;
5695 
5696 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
5697 		u32 vmolr = rd32(E1000_VMOLR(i));
5698 
5699 		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5700 
5701 		vf_data = &adapter->vf_data[i];
5702 
5703 		if ((vf_data->num_vf_mc_hashes > 30) ||
5704 		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5705 			vmolr |= E1000_VMOLR_MPME;
5706 		} else if (vf_data->num_vf_mc_hashes) {
5707 			vmolr |= E1000_VMOLR_ROMPE;
5708 			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5709 				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5710 		}
5711 		wr32(E1000_VMOLR(i), vmolr);
5712 	}
5713 }
5714 
5715 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5716 {
5717 	struct e1000_hw *hw = &adapter->hw;
5718 	u32 pool_mask, reg, vid;
5719 	int i;
5720 
5721 	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5722 
5723 	/* Find the vlan filter for this id */
5724 	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5725 		reg = rd32(E1000_VLVF(i));
5726 
5727 		/* remove the vf from the pool */
5728 		reg &= ~pool_mask;
5729 
5730 		/* if pool is empty then remove entry from vfta */
5731 		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5732 		    (reg & E1000_VLVF_VLANID_ENABLE)) {
5733 			reg = 0;
5734 			vid = reg & E1000_VLVF_VLANID_MASK;
5735 			igb_vfta_set(hw, vid, false);
5736 		}
5737 
5738 		wr32(E1000_VLVF(i), reg);
5739 	}
5740 
5741 	adapter->vf_data[vf].vlans_enabled = 0;
5742 }
5743 
5744 static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5745 {
5746 	struct e1000_hw *hw = &adapter->hw;
5747 	u32 reg, i;
5748 
5749 	/* The vlvf table only exists on 82576 hardware and newer */
5750 	if (hw->mac.type < e1000_82576)
5751 		return -1;
5752 
5753 	/* we only need to do this if VMDq is enabled */
5754 	if (!adapter->vfs_allocated_count)
5755 		return -1;
5756 
5757 	/* Find the vlan filter for this id */
5758 	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5759 		reg = rd32(E1000_VLVF(i));
5760 		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5761 		    vid == (reg & E1000_VLVF_VLANID_MASK))
5762 			break;
5763 	}
5764 
5765 	if (add) {
5766 		if (i == E1000_VLVF_ARRAY_SIZE) {
5767 			/* Did not find a matching VLAN ID entry that was
5768 			 * enabled.  Search for a free filter entry, i.e.
5769 			 * one without the enable bit set
5770 			 */
5771 			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5772 				reg = rd32(E1000_VLVF(i));
5773 				if (!(reg & E1000_VLVF_VLANID_ENABLE))
5774 					break;
5775 			}
5776 		}
5777 		if (i < E1000_VLVF_ARRAY_SIZE) {
5778 			/* Found an enabled/available entry */
5779 			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5780 
5781 			/* if !enabled we need to set this up in vfta */
5782 			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
5783 				/* add VID to filter table */
5784 				igb_vfta_set(hw, vid, true);
5785 				reg |= E1000_VLVF_VLANID_ENABLE;
5786 			}
5787 			reg &= ~E1000_VLVF_VLANID_MASK;
5788 			reg |= vid;
5789 			wr32(E1000_VLVF(i), reg);
5790 
5791 			/* do not modify RLPML for PF devices */
5792 			if (vf >= adapter->vfs_allocated_count)
5793 				return 0;
5794 
5795 			if (!adapter->vf_data[vf].vlans_enabled) {
5796 				u32 size;
5797 
5798 				reg = rd32(E1000_VMOLR(vf));
5799 				size = reg & E1000_VMOLR_RLPML_MASK;
5800 				size += 4;
5801 				reg &= ~E1000_VMOLR_RLPML_MASK;
5802 				reg |= size;
5803 				wr32(E1000_VMOLR(vf), reg);
5804 			}
5805 
5806 			adapter->vf_data[vf].vlans_enabled++;
5807 		}
5808 	} else {
5809 		if (i < E1000_VLVF_ARRAY_SIZE) {
5810 			/* remove vf from the pool */
5811 			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5812 			/* if pool is empty then remove entry from vfta */
5813 			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5814 				reg = 0;
5815 				igb_vfta_set(hw, vid, false);
5816 			}
5817 			wr32(E1000_VLVF(i), reg);
5818 
5819 			/* do not modify RLPML for PF devices */
5820 			if (vf >= adapter->vfs_allocated_count)
5821 				return 0;
5822 
5823 			adapter->vf_data[vf].vlans_enabled--;
5824 			if (!adapter->vf_data[vf].vlans_enabled) {
5825 				u32 size;
5826 
5827 				reg = rd32(E1000_VMOLR(vf));
5828 				size = reg & E1000_VMOLR_RLPML_MASK;
5829 				size -= 4;
5830 				reg &= ~E1000_VMOLR_RLPML_MASK;
5831 				reg |= size;
5832 				wr32(E1000_VMOLR(vf), reg);
5833 			}
5834 		}
5835 	}
5836 	return 0;
5837 }
5838 
5839 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5840 {
5841 	struct e1000_hw *hw = &adapter->hw;
5842 
5843 	if (vid)
5844 		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5845 	else
5846 		wr32(E1000_VMVIR(vf), 0);
5847 }
5848 
5849 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5850 			       int vf, u16 vlan, u8 qos)
5851 {
5852 	int err = 0;
5853 	struct igb_adapter *adapter = netdev_priv(netdev);
5854 
5855 	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5856 		return -EINVAL;
5857 	if (vlan || qos) {
5858 		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5859 		if (err)
5860 			goto out;
5861 		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5862 		igb_set_vmolr(adapter, vf, !vlan);
5863 		adapter->vf_data[vf].pf_vlan = vlan;
5864 		adapter->vf_data[vf].pf_qos = qos;
5865 		dev_info(&adapter->pdev->dev,
5866 			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5867 		if (test_bit(__IGB_DOWN, &adapter->state)) {
5868 			dev_warn(&adapter->pdev->dev,
5869 				 "The VF VLAN has been set, but the PF device is not up.\n");
5870 			dev_warn(&adapter->pdev->dev,
5871 				 "Bring the PF device up before attempting to use the VF device.\n");
5872 		}
5873 	} else {
5874 		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5875 			     false, vf);
5876 		igb_set_vmvir(adapter, vlan, vf);
5877 		igb_set_vmolr(adapter, vf, true);
5878 		adapter->vf_data[vf].pf_vlan = 0;
5879 		adapter->vf_data[vf].pf_qos = 0;
5880 	}
5881 out:
5882 	return err;
5883 }
5884 
5885 static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
5886 {
5887 	struct e1000_hw *hw = &adapter->hw;
5888 	int i;
5889 	u32 reg;
5890 
5891 	/* Find the vlan filter for this id */
5892 	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5893 		reg = rd32(E1000_VLVF(i));
5894 		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5895 		    vid == (reg & E1000_VLVF_VLANID_MASK))
5896 			break;
5897 	}
5898 
5899 	if (i >= E1000_VLVF_ARRAY_SIZE)
5900 		i = -1;
5901 
5902 	return i;
5903 }
5904 
5905 static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5906 {
5907 	struct e1000_hw *hw = &adapter->hw;
5908 	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5909 	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5910 	int err = 0;
5911 
5912 	/* If in promiscuous mode we need to make sure the PF also has
5913 	 * the VLAN filter set.
5914 	 */
5915 	if (add && (adapter->netdev->flags & IFF_PROMISC))
5916 		err = igb_vlvf_set(adapter, vid, add,
5917 				   adapter->vfs_allocated_count);
5918 	if (err)
5919 		goto out;
5920 
5921 	err = igb_vlvf_set(adapter, vid, add, vf);
5922 
5923 	if (err)
5924 		goto out;
5925 
5926 	/* Go through all the checks to see if the VLAN filter should
5927 	 * be wiped completely.
5928 	 */
5929 	if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
5930 		u32 vlvf, bits;
5931 		int regndx = igb_find_vlvf_entry(adapter, vid);
5932 
5933 		if (regndx < 0)
5934 			goto out;
5935 		/* See if any other pools are set for this VLAN filter
5936 		 * entry other than the PF.
5937 		 */
5938 		vlvf = bits = rd32(E1000_VLVF(regndx));
5939 		bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
5940 			      adapter->vfs_allocated_count);
5941 		/* If the filter was removed then ensure PF pool bit
5942 		 * is cleared if the PF only added itself to the pool
5943 		 * because the PF is in promiscuous mode.
5944 		 */
5945 		if ((vlvf & VLAN_VID_MASK) == vid &&
5946 		    !test_bit(vid, adapter->active_vlans) &&
5947 		    !bits)
5948 			igb_vlvf_set(adapter, vid, add,
5949 				     adapter->vfs_allocated_count);
5950 	}
5951 
5952 out:
5953 	return err;
5954 }
5955 
5956 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
5957 {
5958 	/* clear flags - except flag that indicates PF has set the MAC */
5959 	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
5960 	adapter->vf_data[vf].last_nack = jiffies;
5961 
5962 	/* reset offloads to defaults */
5963 	igb_set_vmolr(adapter, vf, true);
5964 
5965 	/* reset vlans for device */
5966 	igb_clear_vf_vfta(adapter, vf);
5967 	if (adapter->vf_data[vf].pf_vlan)
5968 		igb_ndo_set_vf_vlan(adapter->netdev, vf,
5969 				    adapter->vf_data[vf].pf_vlan,
5970 				    adapter->vf_data[vf].pf_qos);
5971 	else
5972 		igb_clear_vf_vfta(adapter, vf);
5973 
5974 	/* reset multicast table array for vf */
5975 	adapter->vf_data[vf].num_vf_mc_hashes = 0;
5976 
5977 	/* Flush and reset the mta with the new values */
5978 	igb_set_rx_mode(adapter->netdev);
5979 }
5980 
5981 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5982 {
5983 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5984 
5985 	/* clear mac address as we were hotplug removed/added */
5986 	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5987 		eth_zero_addr(vf_mac);
5988 
5989 	/* process remaining reset events */
5990 	igb_vf_reset(adapter, vf);
5991 }
5992 
5993 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
5994 {
5995 	struct e1000_hw *hw = &adapter->hw;
5996 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5997 	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
5998 	u32 reg, msgbuf[3];
5999 	u8 *addr = (u8 *)(&msgbuf[1]);
6000 
6001 	/* process all the same items cleared in a function level reset */
6002 	igb_vf_reset(adapter, vf);
6003 
6004 	/* set vf mac address */
6005 	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
6006 
6007 	/* enable transmit and receive for vf */
6008 	reg = rd32(E1000_VFTE);
6009 	wr32(E1000_VFTE, reg | (1 << vf));
6010 	reg = rd32(E1000_VFRE);
6011 	wr32(E1000_VFRE, reg | (1 << vf));
6012 
6013 	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
6014 
6015 	/* reply to reset with ack and vf mac address */
6016 	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
6017 	memcpy(addr, vf_mac, ETH_ALEN);
6018 	igb_write_mbx(hw, msgbuf, 3, vf);
6019 }
6020 
6021 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
6022 {
6023 	/* The VF MAC Address is stored in a packed array of bytes
6024 	 * starting at the second 32 bit word of the msg array
6025 	 */
6026 	unsigned char *addr = (char *)&msg[1];
6027 	int err = -1;
6028 
6029 	if (is_valid_ether_addr(addr))
6030 		err = igb_set_vf_mac(adapter, vf, addr);
6031 
6032 	return err;
6033 }
6034 
6035 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
6036 {
6037 	struct e1000_hw *hw = &adapter->hw;
6038 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6039 	u32 msg = E1000_VT_MSGTYPE_NACK;
6040 
6041 	/* if device isn't clear to send it shouldn't be reading either */
6042 	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
6043 	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
6044 		igb_write_mbx(hw, &msg, 1, vf);
6045 		vf_data->last_nack = jiffies;
6046 	}
6047 }
6048 
6049 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
6050 {
6051 	struct pci_dev *pdev = adapter->pdev;
6052 	u32 msgbuf[E1000_VFMAILBOX_SIZE];
6053 	struct e1000_hw *hw = &adapter->hw;
6054 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6055 	s32 retval;
6056 
6057 	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
6058 
6059 	if (retval) {
6060 		/* if receive failed revoke VF CTS stats and restart init */
6061 		dev_err(&pdev->dev, "Error receiving message from VF\n");
6062 		vf_data->flags &= ~IGB_VF_FLAG_CTS;
6063 		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
6064 			return;
6065 		goto out;
6066 	}
6067 
6068 	/* this is a message we already processed, do nothing */
6069 	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
6070 		return;
6071 
6072 	/* until the vf completes a reset it should not be
6073 	 * allowed to start any configuration.
6074 	 */
6075 	if (msgbuf[0] == E1000_VF_RESET) {
6076 		igb_vf_reset_msg(adapter, vf);
6077 		return;
6078 	}
6079 
6080 	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
6081 		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
6082 			return;
6083 		retval = -1;
6084 		goto out;
6085 	}
6086 
6087 	switch ((msgbuf[0] & 0xFFFF)) {
6088 	case E1000_VF_SET_MAC_ADDR:
6089 		retval = -EINVAL;
6090 		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
6091 			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
6092 		else
6093 			dev_warn(&pdev->dev,
6094 				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
6095 				 vf);
6096 		break;
6097 	case E1000_VF_SET_PROMISC:
6098 		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
6099 		break;
6100 	case E1000_VF_SET_MULTICAST:
6101 		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
6102 		break;
6103 	case E1000_VF_SET_LPE:
6104 		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
6105 		break;
6106 	case E1000_VF_SET_VLAN:
6107 		retval = -1;
6108 		if (vf_data->pf_vlan)
6109 			dev_warn(&pdev->dev,
6110 				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
6111 				 vf);
6112 		else
6113 			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
6114 		break;
6115 	default:
6116 		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
6117 		retval = -1;
6118 		break;
6119 	}
6120 
6121 	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
6122 out:
6123 	/* notify the VF of the results of what it sent us */
6124 	if (retval)
6125 		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
6126 	else
6127 		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
6128 
6129 	igb_write_mbx(hw, msgbuf, 1, vf);
6130 }
6131 
6132 static void igb_msg_task(struct igb_adapter *adapter)
6133 {
6134 	struct e1000_hw *hw = &adapter->hw;
6135 	u32 vf;
6136 
6137 	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
6138 		/* process any reset requests */
6139 		if (!igb_check_for_rst(hw, vf))
6140 			igb_vf_reset_event(adapter, vf);
6141 
6142 		/* process any messages pending */
6143 		if (!igb_check_for_msg(hw, vf))
6144 			igb_rcv_msg_from_vf(adapter, vf);
6145 
6146 		/* process any acks */
6147 		if (!igb_check_for_ack(hw, vf))
6148 			igb_rcv_ack_from_vf(adapter, vf);
6149 	}
6150 }
6151 
6152 /**
6153  *  igb_set_uta - Set unicast filter table address
6154  *  @adapter: board private structure
6155  *
6156  *  The unicast table address is a register array of 32-bit registers.
6157  *  The table is meant to be used in a way similar to how the MTA is used
6158  *  however due to certain limitations in the hardware it is necessary to
6159  *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
6160  *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
6161  **/
6162 static void igb_set_uta(struct igb_adapter *adapter)
6163 {
6164 	struct e1000_hw *hw = &adapter->hw;
6165 	int i;
6166 
6167 	/* The UTA table only exists on 82576 hardware and newer */
6168 	if (hw->mac.type < e1000_82576)
6169 		return;
6170 
6171 	/* we only need to do this if VMDq is enabled */
6172 	if (!adapter->vfs_allocated_count)
6173 		return;
6174 
6175 	for (i = 0; i < hw->mac.uta_reg_count; i++)
6176 		array_wr32(E1000_UTA, i, ~0);
6177 }
6178 
6179 /**
6180  *  igb_intr_msi - Interrupt Handler
6181  *  @irq: interrupt number
6182  *  @data: pointer to a network interface device structure
6183  **/
6184 static irqreturn_t igb_intr_msi(int irq, void *data)
6185 {
6186 	struct igb_adapter *adapter = data;
6187 	struct igb_q_vector *q_vector = adapter->q_vector[0];
6188 	struct e1000_hw *hw = &adapter->hw;
6189 	/* read ICR disables interrupts using IAM */
6190 	u32 icr = rd32(E1000_ICR);
6191 
6192 	igb_write_itr(q_vector);
6193 
6194 	if (icr & E1000_ICR_DRSTA)
6195 		schedule_work(&adapter->reset_task);
6196 
6197 	if (icr & E1000_ICR_DOUTSYNC) {
6198 		/* HW is reporting DMA is out of sync */
6199 		adapter->stats.doosync++;
6200 	}
6201 
6202 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
6203 		hw->mac.get_link_status = 1;
6204 		if (!test_bit(__IGB_DOWN, &adapter->state))
6205 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
6206 	}
6207 
6208 	if (icr & E1000_ICR_TS) {
6209 		u32 tsicr = rd32(E1000_TSICR);
6210 
6211 		if (tsicr & E1000_TSICR_TXTS) {
6212 			/* acknowledge the interrupt */
6213 			wr32(E1000_TSICR, E1000_TSICR_TXTS);
6214 			/* retrieve hardware timestamp */
6215 			schedule_work(&adapter->ptp_tx_work);
6216 		}
6217 	}
6218 
6219 	napi_schedule(&q_vector->napi);
6220 
6221 	return IRQ_HANDLED;
6222 }
6223 
6224 /**
6225  *  igb_intr - Legacy Interrupt Handler
6226  *  @irq: interrupt number
6227  *  @data: pointer to a network interface device structure
6228  **/
6229 static irqreturn_t igb_intr(int irq, void *data)
6230 {
6231 	struct igb_adapter *adapter = data;
6232 	struct igb_q_vector *q_vector = adapter->q_vector[0];
6233 	struct e1000_hw *hw = &adapter->hw;
6234 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
6235 	 * need for the IMC write
6236 	 */
6237 	u32 icr = rd32(E1000_ICR);
6238 
6239 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
6240 	 * not set, then the adapter didn't send an interrupt
6241 	 */
6242 	if (!(icr & E1000_ICR_INT_ASSERTED))
6243 		return IRQ_NONE;
6244 
6245 	igb_write_itr(q_vector);
6246 
6247 	if (icr & E1000_ICR_DRSTA)
6248 		schedule_work(&adapter->reset_task);
6249 
6250 	if (icr & E1000_ICR_DOUTSYNC) {
6251 		/* HW is reporting DMA is out of sync */
6252 		adapter->stats.doosync++;
6253 	}
6254 
6255 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
6256 		hw->mac.get_link_status = 1;
6257 		/* guard against interrupt when we're going down */
6258 		if (!test_bit(__IGB_DOWN, &adapter->state))
6259 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
6260 	}
6261 
6262 	if (icr & E1000_ICR_TS) {
6263 		u32 tsicr = rd32(E1000_TSICR);
6264 
6265 		if (tsicr & E1000_TSICR_TXTS) {
6266 			/* acknowledge the interrupt */
6267 			wr32(E1000_TSICR, E1000_TSICR_TXTS);
6268 			/* retrieve hardware timestamp */
6269 			schedule_work(&adapter->ptp_tx_work);
6270 		}
6271 	}
6272 
6273 	napi_schedule(&q_vector->napi);
6274 
6275 	return IRQ_HANDLED;
6276 }
6277 
6278 static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
6279 {
6280 	struct igb_adapter *adapter = q_vector->adapter;
6281 	struct e1000_hw *hw = &adapter->hw;
6282 
6283 	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
6284 	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
6285 		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
6286 			igb_set_itr(q_vector);
6287 		else
6288 			igb_update_ring_itr(q_vector);
6289 	}
6290 
6291 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
6292 		if (adapter->flags & IGB_FLAG_HAS_MSIX)
6293 			wr32(E1000_EIMS, q_vector->eims_value);
6294 		else
6295 			igb_irq_enable(adapter);
6296 	}
6297 }
6298 
6299 /**
6300  *  igb_poll - NAPI Rx polling callback
6301  *  @napi: napi polling structure
6302  *  @budget: count of how many packets we should handle
6303  **/
6304 static int igb_poll(struct napi_struct *napi, int budget)
6305 {
6306 	struct igb_q_vector *q_vector = container_of(napi,
6307 						     struct igb_q_vector,
6308 						     napi);
6309 	bool clean_complete = true;
6310 
6311 #ifdef CONFIG_IGB_DCA
6312 	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
6313 		igb_update_dca(q_vector);
6314 #endif
6315 	if (q_vector->tx.ring)
6316 		clean_complete = igb_clean_tx_irq(q_vector);
6317 
6318 	if (q_vector->rx.ring)
6319 		clean_complete &= igb_clean_rx_irq(q_vector, budget);
6320 
6321 	/* If all work not completed, return budget and keep polling */
6322 	if (!clean_complete)
6323 		return budget;
6324 
6325 	/* If not enough Rx work done, exit the polling mode */
6326 	napi_complete(napi);
6327 	igb_ring_irq_enable(q_vector);
6328 
6329 	return 0;
6330 }
6331 
6332 /**
6333  *  igb_clean_tx_irq - Reclaim resources after transmit completes
6334  *  @q_vector: pointer to q_vector containing needed info
6335  *
6336  *  returns true if ring is completely cleaned
6337  **/
6338 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6339 {
6340 	struct igb_adapter *adapter = q_vector->adapter;
6341 	struct igb_ring *tx_ring = q_vector->tx.ring;
6342 	struct igb_tx_buffer *tx_buffer;
6343 	union e1000_adv_tx_desc *tx_desc;
6344 	unsigned int total_bytes = 0, total_packets = 0;
6345 	unsigned int budget = q_vector->tx.work_limit;
6346 	unsigned int i = tx_ring->next_to_clean;
6347 
6348 	if (test_bit(__IGB_DOWN, &adapter->state))
6349 		return true;
6350 
6351 	tx_buffer = &tx_ring->tx_buffer_info[i];
6352 	tx_desc = IGB_TX_DESC(tx_ring, i);
6353 	i -= tx_ring->count;
6354 
6355 	do {
6356 		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
6357 
6358 		/* if next_to_watch is not set then there is no work pending */
6359 		if (!eop_desc)
6360 			break;
6361 
6362 		/* prevent any other reads prior to eop_desc */
6363 		read_barrier_depends();
6364 
6365 		/* if DD is not set pending work has not been completed */
6366 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
6367 			break;
6368 
6369 		/* clear next_to_watch to prevent false hangs */
6370 		tx_buffer->next_to_watch = NULL;
6371 
6372 		/* update the statistics for this packet */
6373 		total_bytes += tx_buffer->bytecount;
6374 		total_packets += tx_buffer->gso_segs;
6375 
6376 		/* free the skb */
6377 		dev_consume_skb_any(tx_buffer->skb);
6378 
6379 		/* unmap skb header data */
6380 		dma_unmap_single(tx_ring->dev,
6381 				 dma_unmap_addr(tx_buffer, dma),
6382 				 dma_unmap_len(tx_buffer, len),
6383 				 DMA_TO_DEVICE);
6384 
6385 		/* clear tx_buffer data */
6386 		tx_buffer->skb = NULL;
6387 		dma_unmap_len_set(tx_buffer, len, 0);
6388 
6389 		/* clear last DMA location and unmap remaining buffers */
6390 		while (tx_desc != eop_desc) {
6391 			tx_buffer++;
6392 			tx_desc++;
6393 			i++;
6394 			if (unlikely(!i)) {
6395 				i -= tx_ring->count;
6396 				tx_buffer = tx_ring->tx_buffer_info;
6397 				tx_desc = IGB_TX_DESC(tx_ring, 0);
6398 			}
6399 
6400 			/* unmap any remaining paged data */
6401 			if (dma_unmap_len(tx_buffer, len)) {
6402 				dma_unmap_page(tx_ring->dev,
6403 					       dma_unmap_addr(tx_buffer, dma),
6404 					       dma_unmap_len(tx_buffer, len),
6405 					       DMA_TO_DEVICE);
6406 				dma_unmap_len_set(tx_buffer, len, 0);
6407 			}
6408 		}
6409 
6410 		/* move us one more past the eop_desc for start of next pkt */
6411 		tx_buffer++;
6412 		tx_desc++;
6413 		i++;
6414 		if (unlikely(!i)) {
6415 			i -= tx_ring->count;
6416 			tx_buffer = tx_ring->tx_buffer_info;
6417 			tx_desc = IGB_TX_DESC(tx_ring, 0);
6418 		}
6419 
6420 		/* issue prefetch for next Tx descriptor */
6421 		prefetch(tx_desc);
6422 
6423 		/* update budget accounting */
6424 		budget--;
6425 	} while (likely(budget));
6426 
6427 	netdev_tx_completed_queue(txring_txq(tx_ring),
6428 				  total_packets, total_bytes);
6429 	i += tx_ring->count;
6430 	tx_ring->next_to_clean = i;
6431 	u64_stats_update_begin(&tx_ring->tx_syncp);
6432 	tx_ring->tx_stats.bytes += total_bytes;
6433 	tx_ring->tx_stats.packets += total_packets;
6434 	u64_stats_update_end(&tx_ring->tx_syncp);
6435 	q_vector->tx.total_bytes += total_bytes;
6436 	q_vector->tx.total_packets += total_packets;
6437 
6438 	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
6439 		struct e1000_hw *hw = &adapter->hw;
6440 
6441 		/* Detect a transmit hang in hardware, this serializes the
6442 		 * check with the clearing of time_stamp and movement of i
6443 		 */
6444 		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
6445 		if (tx_buffer->next_to_watch &&
6446 		    time_after(jiffies, tx_buffer->time_stamp +
6447 			       (adapter->tx_timeout_factor * HZ)) &&
6448 		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
6449 
6450 			/* detected Tx unit hang */
6451 			dev_err(tx_ring->dev,
6452 				"Detected Tx Unit Hang\n"
6453 				"  Tx Queue             <%d>\n"
6454 				"  TDH                  <%x>\n"
6455 				"  TDT                  <%x>\n"
6456 				"  next_to_use          <%x>\n"
6457 				"  next_to_clean        <%x>\n"
6458 				"buffer_info[next_to_clean]\n"
6459 				"  time_stamp           <%lx>\n"
6460 				"  next_to_watch        <%p>\n"
6461 				"  jiffies              <%lx>\n"
6462 				"  desc.status          <%x>\n",
6463 				tx_ring->queue_index,
6464 				rd32(E1000_TDH(tx_ring->reg_idx)),
6465 				readl(tx_ring->tail),
6466 				tx_ring->next_to_use,
6467 				tx_ring->next_to_clean,
6468 				tx_buffer->time_stamp,
6469 				tx_buffer->next_to_watch,
6470 				jiffies,
6471 				tx_buffer->next_to_watch->wb.status);
6472 			netif_stop_subqueue(tx_ring->netdev,
6473 					    tx_ring->queue_index);
6474 
6475 			/* we are about to reset, no point in enabling stuff */
6476 			return true;
6477 		}
6478 	}
6479 
6480 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
6481 	if (unlikely(total_packets &&
6482 	    netif_carrier_ok(tx_ring->netdev) &&
6483 	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
6484 		/* Make sure that anybody stopping the queue after this
6485 		 * sees the new next_to_clean.
6486 		 */
6487 		smp_mb();
6488 		if (__netif_subqueue_stopped(tx_ring->netdev,
6489 					     tx_ring->queue_index) &&
6490 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
6491 			netif_wake_subqueue(tx_ring->netdev,
6492 					    tx_ring->queue_index);
6493 
6494 			u64_stats_update_begin(&tx_ring->tx_syncp);
6495 			tx_ring->tx_stats.restart_queue++;
6496 			u64_stats_update_end(&tx_ring->tx_syncp);
6497 		}
6498 	}
6499 
6500 	return !!budget;
6501 }
6502 
6503 /**
6504  *  igb_reuse_rx_page - page flip buffer and store it back on the ring
6505  *  @rx_ring: rx descriptor ring to store buffers on
6506  *  @old_buff: donor buffer to have page reused
6507  *
6508  *  Synchronizes page for reuse by the adapter
6509  **/
6510 static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6511 			      struct igb_rx_buffer *old_buff)
6512 {
6513 	struct igb_rx_buffer *new_buff;
6514 	u16 nta = rx_ring->next_to_alloc;
6515 
6516 	new_buff = &rx_ring->rx_buffer_info[nta];
6517 
6518 	/* update, and store next to alloc */
6519 	nta++;
6520 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
6521 
6522 	/* transfer page from old buffer to new buffer */
6523 	*new_buff = *old_buff;
6524 
6525 	/* sync the buffer for use by the device */
6526 	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
6527 					 old_buff->page_offset,
6528 					 IGB_RX_BUFSZ,
6529 					 DMA_FROM_DEVICE);
6530 }
6531 
6532 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6533 				  struct page *page,
6534 				  unsigned int truesize)
6535 {
6536 	/* avoid re-using remote pages */
6537 	if (unlikely(page_to_nid(page) != numa_node_id()))
6538 		return false;
6539 
6540 	if (unlikely(page->pfmemalloc))
6541 		return false;
6542 
6543 #if (PAGE_SIZE < 8192)
6544 	/* if we are only owner of page we can reuse it */
6545 	if (unlikely(page_count(page) != 1))
6546 		return false;
6547 
6548 	/* flip page offset to other buffer */
6549 	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
6550 
6551 	/* Even if we own the page, we are not allowed to use atomic_set()
6552 	 * This would break get_page_unless_zero() users.
6553 	 */
6554 	atomic_inc(&page->_count);
6555 #else
6556 	/* move offset up to the next cache line */
6557 	rx_buffer->page_offset += truesize;
6558 
6559 	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
6560 		return false;
6561 
6562 	/* bump ref count on page before it is given to the stack */
6563 	get_page(page);
6564 #endif
6565 
6566 	return true;
6567 }
6568 
6569 /**
6570  *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
6571  *  @rx_ring: rx descriptor ring to transact packets on
6572  *  @rx_buffer: buffer containing page to add
6573  *  @rx_desc: descriptor containing length of buffer written by hardware
6574  *  @skb: sk_buff to place the data into
6575  *
6576  *  This function will add the data contained in rx_buffer->page to the skb.
6577  *  This is done either through a direct copy if the data in the buffer is
6578  *  less than the skb header size, otherwise it will just attach the page as
6579  *  a frag to the skb.
6580  *
6581  *  The function will then update the page offset if necessary and return
6582  *  true if the buffer can be reused by the adapter.
6583  **/
6584 static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6585 			    struct igb_rx_buffer *rx_buffer,
6586 			    union e1000_adv_rx_desc *rx_desc,
6587 			    struct sk_buff *skb)
6588 {
6589 	struct page *page = rx_buffer->page;
6590 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
6591 #if (PAGE_SIZE < 8192)
6592 	unsigned int truesize = IGB_RX_BUFSZ;
6593 #else
6594 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
6595 #endif
6596 
6597 	if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
6598 		unsigned char *va = page_address(page) + rx_buffer->page_offset;
6599 
6600 		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6601 			igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
6602 			va += IGB_TS_HDR_LEN;
6603 			size -= IGB_TS_HDR_LEN;
6604 		}
6605 
6606 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6607 
6608 		/* we can reuse buffer as-is, just make sure it is local */
6609 		if (likely((page_to_nid(page) == numa_node_id()) &&
6610 			   !page->pfmemalloc))
6611 			return true;
6612 
6613 		/* this page cannot be reused so discard it */
6614 		put_page(page);
6615 		return false;
6616 	}
6617 
6618 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
6619 			rx_buffer->page_offset, size, truesize);
6620 
6621 	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
6622 }
6623 
6624 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6625 					   union e1000_adv_rx_desc *rx_desc,
6626 					   struct sk_buff *skb)
6627 {
6628 	struct igb_rx_buffer *rx_buffer;
6629 	struct page *page;
6630 
6631 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6632 
6633 	page = rx_buffer->page;
6634 	prefetchw(page);
6635 
6636 	if (likely(!skb)) {
6637 		void *page_addr = page_address(page) +
6638 				  rx_buffer->page_offset;
6639 
6640 		/* prefetch first cache line of first page */
6641 		prefetch(page_addr);
6642 #if L1_CACHE_BYTES < 128
6643 		prefetch(page_addr + L1_CACHE_BYTES);
6644 #endif
6645 
6646 		/* allocate a skb to store the frags */
6647 		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6648 						IGB_RX_HDR_LEN);
6649 		if (unlikely(!skb)) {
6650 			rx_ring->rx_stats.alloc_failed++;
6651 			return NULL;
6652 		}
6653 
6654 		/* we will be copying header into skb->data in
6655 		 * pskb_may_pull so it is in our interest to prefetch
6656 		 * it now to avoid a possible cache miss
6657 		 */
6658 		prefetchw(skb->data);
6659 	}
6660 
6661 	/* we are reusing so sync this buffer for CPU use */
6662 	dma_sync_single_range_for_cpu(rx_ring->dev,
6663 				      rx_buffer->dma,
6664 				      rx_buffer->page_offset,
6665 				      IGB_RX_BUFSZ,
6666 				      DMA_FROM_DEVICE);
6667 
6668 	/* pull page into skb */
6669 	if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
6670 		/* hand second half of page back to the ring */
6671 		igb_reuse_rx_page(rx_ring, rx_buffer);
6672 	} else {
6673 		/* we are not reusing the buffer so unmap it */
6674 		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
6675 			       PAGE_SIZE, DMA_FROM_DEVICE);
6676 	}
6677 
6678 	/* clear contents of rx_buffer */
6679 	rx_buffer->page = NULL;
6680 
6681 	return skb;
6682 }
6683 
6684 static inline void igb_rx_checksum(struct igb_ring *ring,
6685 				   union e1000_adv_rx_desc *rx_desc,
6686 				   struct sk_buff *skb)
6687 {
6688 	skb_checksum_none_assert(skb);
6689 
6690 	/* Ignore Checksum bit is set */
6691 	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
6692 		return;
6693 
6694 	/* Rx checksum disabled via ethtool */
6695 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
6696 		return;
6697 
6698 	/* TCP/UDP checksum error bit is set */
6699 	if (igb_test_staterr(rx_desc,
6700 			     E1000_RXDEXT_STATERR_TCPE |
6701 			     E1000_RXDEXT_STATERR_IPE)) {
6702 		/* work around errata with sctp packets where the TCPE aka
6703 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
6704 		 * packets, (aka let the stack check the crc32c)
6705 		 */
6706 		if (!((skb->len == 60) &&
6707 		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
6708 			u64_stats_update_begin(&ring->rx_syncp);
6709 			ring->rx_stats.csum_err++;
6710 			u64_stats_update_end(&ring->rx_syncp);
6711 		}
6712 		/* let the stack verify checksum errors */
6713 		return;
6714 	}
6715 	/* It must be a TCP or UDP packet with a valid checksum */
6716 	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
6717 				      E1000_RXD_STAT_UDPCS))
6718 		skb->ip_summed = CHECKSUM_UNNECESSARY;
6719 
6720 	dev_dbg(ring->dev, "cksum success: bits %08X\n",
6721 		le32_to_cpu(rx_desc->wb.upper.status_error));
6722 }
6723 
6724 static inline void igb_rx_hash(struct igb_ring *ring,
6725 			       union e1000_adv_rx_desc *rx_desc,
6726 			       struct sk_buff *skb)
6727 {
6728 	if (ring->netdev->features & NETIF_F_RXHASH)
6729 		skb_set_hash(skb,
6730 			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
6731 			     PKT_HASH_TYPE_L3);
6732 }
6733 
6734 /**
6735  *  igb_is_non_eop - process handling of non-EOP buffers
6736  *  @rx_ring: Rx ring being processed
6737  *  @rx_desc: Rx descriptor for current buffer
6738  *  @skb: current socket buffer containing buffer in progress
6739  *
6740  *  This function updates next to clean.  If the buffer is an EOP buffer
6741  *  this function exits returning false, otherwise it will place the
6742  *  sk_buff in the next buffer to be chained and return true indicating
6743  *  that this is in fact a non-EOP buffer.
6744  **/
6745 static bool igb_is_non_eop(struct igb_ring *rx_ring,
6746 			   union e1000_adv_rx_desc *rx_desc)
6747 {
6748 	u32 ntc = rx_ring->next_to_clean + 1;
6749 
6750 	/* fetch, update, and store next to clean */
6751 	ntc = (ntc < rx_ring->count) ? ntc : 0;
6752 	rx_ring->next_to_clean = ntc;
6753 
6754 	prefetch(IGB_RX_DESC(rx_ring, ntc));
6755 
6756 	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
6757 		return false;
6758 
6759 	return true;
6760 }
6761 
6762 /**
6763  *  igb_pull_tail - igb specific version of skb_pull_tail
6764  *  @rx_ring: rx descriptor ring packet is being transacted on
6765  *  @rx_desc: pointer to the EOP Rx descriptor
6766  *  @skb: pointer to current skb being adjusted
6767  *
6768  *  This function is an igb specific version of __pskb_pull_tail.  The
6769  *  main difference between this version and the original function is that
6770  *  this function can make several assumptions about the state of things
6771  *  that allow for significant optimizations versus the standard function.
6772  *  As a result we can do things like drop a frag and maintain an accurate
6773  *  truesize for the skb.
6774  */
6775 static void igb_pull_tail(struct igb_ring *rx_ring,
6776 			  union e1000_adv_rx_desc *rx_desc,
6777 			  struct sk_buff *skb)
6778 {
6779 	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
6780 	unsigned char *va;
6781 	unsigned int pull_len;
6782 
6783 	/* it is valid to use page_address instead of kmap since we are
6784 	 * working with pages allocated out of the lomem pool per
6785 	 * alloc_page(GFP_ATOMIC)
6786 	 */
6787 	va = skb_frag_address(frag);
6788 
6789 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6790 		/* retrieve timestamp from buffer */
6791 		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
6792 
6793 		/* update pointers to remove timestamp header */
6794 		skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
6795 		frag->page_offset += IGB_TS_HDR_LEN;
6796 		skb->data_len -= IGB_TS_HDR_LEN;
6797 		skb->len -= IGB_TS_HDR_LEN;
6798 
6799 		/* move va to start of packet data */
6800 		va += IGB_TS_HDR_LEN;
6801 	}
6802 
6803 	/* we need the header to contain the greater of either ETH_HLEN or
6804 	 * 60 bytes if the skb->len is less than 60 for skb_pad.
6805 	 */
6806 	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
6807 
6808 	/* align pull length to size of long to optimize memcpy performance */
6809 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
6810 
6811 	/* update all of the pointers */
6812 	skb_frag_size_sub(frag, pull_len);
6813 	frag->page_offset += pull_len;
6814 	skb->data_len -= pull_len;
6815 	skb->tail += pull_len;
6816 }
6817 
6818 /**
6819  *  igb_cleanup_headers - Correct corrupted or empty headers
6820  *  @rx_ring: rx descriptor ring packet is being transacted on
6821  *  @rx_desc: pointer to the EOP Rx descriptor
6822  *  @skb: pointer to current skb being fixed
6823  *
6824  *  Address the case where we are pulling data in on pages only
6825  *  and as such no data is present in the skb header.
6826  *
6827  *  In addition if skb is not at least 60 bytes we need to pad it so that
6828  *  it is large enough to qualify as a valid Ethernet frame.
6829  *
6830  *  Returns true if an error was encountered and skb was freed.
6831  **/
6832 static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6833 				union e1000_adv_rx_desc *rx_desc,
6834 				struct sk_buff *skb)
6835 {
6836 	if (unlikely((igb_test_staterr(rx_desc,
6837 				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
6838 		struct net_device *netdev = rx_ring->netdev;
6839 		if (!(netdev->features & NETIF_F_RXALL)) {
6840 			dev_kfree_skb_any(skb);
6841 			return true;
6842 		}
6843 	}
6844 
6845 	/* place header in linear portion of buffer */
6846 	if (skb_is_nonlinear(skb))
6847 		igb_pull_tail(rx_ring, rx_desc, skb);
6848 
6849 	/* if skb_pad returns an error the skb was freed */
6850 	if (unlikely(skb->len < 60)) {
6851 		int pad_len = 60 - skb->len;
6852 
6853 		if (skb_pad(skb, pad_len))
6854 			return true;
6855 		__skb_put(skb, pad_len);
6856 	}
6857 
6858 	return false;
6859 }
6860 
6861 /**
6862  *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
6863  *  @rx_ring: rx descriptor ring packet is being transacted on
6864  *  @rx_desc: pointer to the EOP Rx descriptor
6865  *  @skb: pointer to current skb being populated
6866  *
6867  *  This function checks the ring, descriptor, and packet information in
6868  *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
6869  *  other fields within the skb.
6870  **/
6871 static void igb_process_skb_fields(struct igb_ring *rx_ring,
6872 				   union e1000_adv_rx_desc *rx_desc,
6873 				   struct sk_buff *skb)
6874 {
6875 	struct net_device *dev = rx_ring->netdev;
6876 
6877 	igb_rx_hash(rx_ring, rx_desc, skb);
6878 
6879 	igb_rx_checksum(rx_ring, rx_desc, skb);
6880 
6881 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
6882 	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
6883 		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
6884 
6885 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
6886 	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6887 		u16 vid;
6888 
6889 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6890 		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
6891 			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
6892 		else
6893 			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6894 
6895 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
6896 	}
6897 
6898 	skb_record_rx_queue(skb, rx_ring->queue_index);
6899 
6900 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6901 }
6902 
6903 static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
6904 {
6905 	struct igb_ring *rx_ring = q_vector->rx.ring;
6906 	struct sk_buff *skb = rx_ring->skb;
6907 	unsigned int total_bytes = 0, total_packets = 0;
6908 	u16 cleaned_count = igb_desc_unused(rx_ring);
6909 
6910 	while (likely(total_packets < budget)) {
6911 		union e1000_adv_rx_desc *rx_desc;
6912 
6913 		/* return some buffers to hardware, one at a time is too slow */
6914 		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
6915 			igb_alloc_rx_buffers(rx_ring, cleaned_count);
6916 			cleaned_count = 0;
6917 		}
6918 
6919 		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
6920 
6921 		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
6922 			break;
6923 
6924 		/* This memory barrier is needed to keep us from reading
6925 		 * any other fields out of the rx_desc until we know the
6926 		 * RXD_STAT_DD bit is set
6927 		 */
6928 		rmb();
6929 
6930 		/* retrieve a buffer from the ring */
6931 		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
6932 
6933 		/* exit if we failed to retrieve a buffer */
6934 		if (!skb)
6935 			break;
6936 
6937 		cleaned_count++;
6938 
6939 		/* fetch next buffer in frame if non-eop */
6940 		if (igb_is_non_eop(rx_ring, rx_desc))
6941 			continue;
6942 
6943 		/* verify the packet layout is correct */
6944 		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
6945 			skb = NULL;
6946 			continue;
6947 		}
6948 
6949 		/* probably a little skewed due to removing CRC */
6950 		total_bytes += skb->len;
6951 
6952 		/* populate checksum, timestamp, VLAN, and protocol */
6953 		igb_process_skb_fields(rx_ring, rx_desc, skb);
6954 
6955 		napi_gro_receive(&q_vector->napi, skb);
6956 
6957 		/* reset skb pointer */
6958 		skb = NULL;
6959 
6960 		/* update budget accounting */
6961 		total_packets++;
6962 	}
6963 
6964 	/* place incomplete frames back on ring for completion */
6965 	rx_ring->skb = skb;
6966 
6967 	u64_stats_update_begin(&rx_ring->rx_syncp);
6968 	rx_ring->rx_stats.packets += total_packets;
6969 	rx_ring->rx_stats.bytes += total_bytes;
6970 	u64_stats_update_end(&rx_ring->rx_syncp);
6971 	q_vector->rx.total_packets += total_packets;
6972 	q_vector->rx.total_bytes += total_bytes;
6973 
6974 	if (cleaned_count)
6975 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
6976 
6977 	return total_packets < budget;
6978 }
6979 
6980 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6981 				  struct igb_rx_buffer *bi)
6982 {
6983 	struct page *page = bi->page;
6984 	dma_addr_t dma;
6985 
6986 	/* since we are recycling buffers we should seldom need to alloc */
6987 	if (likely(page))
6988 		return true;
6989 
6990 	/* alloc new page for storage */
6991 	page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
6992 	if (unlikely(!page)) {
6993 		rx_ring->rx_stats.alloc_failed++;
6994 		return false;
6995 	}
6996 
6997 	/* map page for use */
6998 	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
6999 
7000 	/* if mapping failed free memory back to system since
7001 	 * there isn't much point in holding memory we can't use
7002 	 */
7003 	if (dma_mapping_error(rx_ring->dev, dma)) {
7004 		__free_page(page);
7005 
7006 		rx_ring->rx_stats.alloc_failed++;
7007 		return false;
7008 	}
7009 
7010 	bi->dma = dma;
7011 	bi->page = page;
7012 	bi->page_offset = 0;
7013 
7014 	return true;
7015 }
7016 
7017 /**
7018  *  igb_alloc_rx_buffers - Replace used receive buffers; packet split
7019  *  @adapter: address of board private structure
7020  **/
7021 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
7022 {
7023 	union e1000_adv_rx_desc *rx_desc;
7024 	struct igb_rx_buffer *bi;
7025 	u16 i = rx_ring->next_to_use;
7026 
7027 	/* nothing to do */
7028 	if (!cleaned_count)
7029 		return;
7030 
7031 	rx_desc = IGB_RX_DESC(rx_ring, i);
7032 	bi = &rx_ring->rx_buffer_info[i];
7033 	i -= rx_ring->count;
7034 
7035 	do {
7036 		if (!igb_alloc_mapped_page(rx_ring, bi))
7037 			break;
7038 
7039 		/* Refresh the desc even if buffer_addrs didn't change
7040 		 * because each write-back erases this info.
7041 		 */
7042 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
7043 
7044 		rx_desc++;
7045 		bi++;
7046 		i++;
7047 		if (unlikely(!i)) {
7048 			rx_desc = IGB_RX_DESC(rx_ring, 0);
7049 			bi = rx_ring->rx_buffer_info;
7050 			i -= rx_ring->count;
7051 		}
7052 
7053 		/* clear the hdr_addr for the next_to_use descriptor */
7054 		rx_desc->read.hdr_addr = 0;
7055 
7056 		cleaned_count--;
7057 	} while (cleaned_count);
7058 
7059 	i += rx_ring->count;
7060 
7061 	if (rx_ring->next_to_use != i) {
7062 		/* record the next descriptor to use */
7063 		rx_ring->next_to_use = i;
7064 
7065 		/* update next to alloc since we have filled the ring */
7066 		rx_ring->next_to_alloc = i;
7067 
7068 		/* Force memory writes to complete before letting h/w
7069 		 * know there are new descriptors to fetch.  (Only
7070 		 * applicable for weak-ordered memory model archs,
7071 		 * such as IA-64).
7072 		 */
7073 		wmb();
7074 		writel(i, rx_ring->tail);
7075 	}
7076 }
7077 
7078 /**
7079  * igb_mii_ioctl -
7080  * @netdev:
7081  * @ifreq:
7082  * @cmd:
7083  **/
7084 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7085 {
7086 	struct igb_adapter *adapter = netdev_priv(netdev);
7087 	struct mii_ioctl_data *data = if_mii(ifr);
7088 
7089 	if (adapter->hw.phy.media_type != e1000_media_type_copper)
7090 		return -EOPNOTSUPP;
7091 
7092 	switch (cmd) {
7093 	case SIOCGMIIPHY:
7094 		data->phy_id = adapter->hw.phy.addr;
7095 		break;
7096 	case SIOCGMIIREG:
7097 		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
7098 				     &data->val_out))
7099 			return -EIO;
7100 		break;
7101 	case SIOCSMIIREG:
7102 	default:
7103 		return -EOPNOTSUPP;
7104 	}
7105 	return 0;
7106 }
7107 
7108 /**
7109  * igb_ioctl -
7110  * @netdev:
7111  * @ifreq:
7112  * @cmd:
7113  **/
7114 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7115 {
7116 	switch (cmd) {
7117 	case SIOCGMIIPHY:
7118 	case SIOCGMIIREG:
7119 	case SIOCSMIIREG:
7120 		return igb_mii_ioctl(netdev, ifr, cmd);
7121 	case SIOCGHWTSTAMP:
7122 		return igb_ptp_get_ts_config(netdev, ifr);
7123 	case SIOCSHWTSTAMP:
7124 		return igb_ptp_set_ts_config(netdev, ifr);
7125 	default:
7126 		return -EOPNOTSUPP;
7127 	}
7128 }
7129 
7130 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7131 {
7132 	struct igb_adapter *adapter = hw->back;
7133 
7134 	pci_read_config_word(adapter->pdev, reg, value);
7135 }
7136 
7137 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7138 {
7139 	struct igb_adapter *adapter = hw->back;
7140 
7141 	pci_write_config_word(adapter->pdev, reg, *value);
7142 }
7143 
7144 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
7145 {
7146 	struct igb_adapter *adapter = hw->back;
7147 
7148 	if (pcie_capability_read_word(adapter->pdev, reg, value))
7149 		return -E1000_ERR_CONFIG;
7150 
7151 	return 0;
7152 }
7153 
7154 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
7155 {
7156 	struct igb_adapter *adapter = hw->back;
7157 
7158 	if (pcie_capability_write_word(adapter->pdev, reg, *value))
7159 		return -E1000_ERR_CONFIG;
7160 
7161 	return 0;
7162 }
7163 
7164 static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
7165 {
7166 	struct igb_adapter *adapter = netdev_priv(netdev);
7167 	struct e1000_hw *hw = &adapter->hw;
7168 	u32 ctrl, rctl;
7169 	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
7170 
7171 	if (enable) {
7172 		/* enable VLAN tag insert/strip */
7173 		ctrl = rd32(E1000_CTRL);
7174 		ctrl |= E1000_CTRL_VME;
7175 		wr32(E1000_CTRL, ctrl);
7176 
7177 		/* Disable CFI check */
7178 		rctl = rd32(E1000_RCTL);
7179 		rctl &= ~E1000_RCTL_CFIEN;
7180 		wr32(E1000_RCTL, rctl);
7181 	} else {
7182 		/* disable VLAN tag insert/strip */
7183 		ctrl = rd32(E1000_CTRL);
7184 		ctrl &= ~E1000_CTRL_VME;
7185 		wr32(E1000_CTRL, ctrl);
7186 	}
7187 
7188 	igb_rlpml_set(adapter);
7189 }
7190 
7191 static int igb_vlan_rx_add_vid(struct net_device *netdev,
7192 			       __be16 proto, u16 vid)
7193 {
7194 	struct igb_adapter *adapter = netdev_priv(netdev);
7195 	struct e1000_hw *hw = &adapter->hw;
7196 	int pf_id = adapter->vfs_allocated_count;
7197 
7198 	/* attempt to add filter to vlvf array */
7199 	igb_vlvf_set(adapter, vid, true, pf_id);
7200 
7201 	/* add the filter since PF can receive vlans w/o entry in vlvf */
7202 	igb_vfta_set(hw, vid, true);
7203 
7204 	set_bit(vid, adapter->active_vlans);
7205 
7206 	return 0;
7207 }
7208 
7209 static int igb_vlan_rx_kill_vid(struct net_device *netdev,
7210 				__be16 proto, u16 vid)
7211 {
7212 	struct igb_adapter *adapter = netdev_priv(netdev);
7213 	struct e1000_hw *hw = &adapter->hw;
7214 	int pf_id = adapter->vfs_allocated_count;
7215 	s32 err;
7216 
7217 	/* remove vlan from VLVF table array */
7218 	err = igb_vlvf_set(adapter, vid, false, pf_id);
7219 
7220 	/* if vid was not present in VLVF just remove it from table */
7221 	if (err)
7222 		igb_vfta_set(hw, vid, false);
7223 
7224 	clear_bit(vid, adapter->active_vlans);
7225 
7226 	return 0;
7227 }
7228 
7229 static void igb_restore_vlan(struct igb_adapter *adapter)
7230 {
7231 	u16 vid;
7232 
7233 	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
7234 
7235 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
7236 		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
7237 }
7238 
7239 int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
7240 {
7241 	struct pci_dev *pdev = adapter->pdev;
7242 	struct e1000_mac_info *mac = &adapter->hw.mac;
7243 
7244 	mac->autoneg = 0;
7245 
7246 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
7247 	 * for the switch() below to work
7248 	 */
7249 	if ((spd & 1) || (dplx & ~1))
7250 		goto err_inval;
7251 
7252 	/* Fiber NIC's only allow 1000 gbps Full duplex
7253 	 * and 100Mbps Full duplex for 100baseFx sfp
7254 	 */
7255 	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
7256 		switch (spd + dplx) {
7257 		case SPEED_10 + DUPLEX_HALF:
7258 		case SPEED_10 + DUPLEX_FULL:
7259 		case SPEED_100 + DUPLEX_HALF:
7260 			goto err_inval;
7261 		default:
7262 			break;
7263 		}
7264 	}
7265 
7266 	switch (spd + dplx) {
7267 	case SPEED_10 + DUPLEX_HALF:
7268 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
7269 		break;
7270 	case SPEED_10 + DUPLEX_FULL:
7271 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
7272 		break;
7273 	case SPEED_100 + DUPLEX_HALF:
7274 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
7275 		break;
7276 	case SPEED_100 + DUPLEX_FULL:
7277 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
7278 		break;
7279 	case SPEED_1000 + DUPLEX_FULL:
7280 		mac->autoneg = 1;
7281 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
7282 		break;
7283 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
7284 	default:
7285 		goto err_inval;
7286 	}
7287 
7288 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
7289 	adapter->hw.phy.mdix = AUTO_ALL_MODES;
7290 
7291 	return 0;
7292 
7293 err_inval:
7294 	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
7295 	return -EINVAL;
7296 }
7297 
7298 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
7299 			  bool runtime)
7300 {
7301 	struct net_device *netdev = pci_get_drvdata(pdev);
7302 	struct igb_adapter *adapter = netdev_priv(netdev);
7303 	struct e1000_hw *hw = &adapter->hw;
7304 	u32 ctrl, rctl, status;
7305 	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
7306 #ifdef CONFIG_PM
7307 	int retval = 0;
7308 #endif
7309 
7310 	netif_device_detach(netdev);
7311 
7312 	if (netif_running(netdev))
7313 		__igb_close(netdev, true);
7314 
7315 	igb_clear_interrupt_scheme(adapter);
7316 
7317 #ifdef CONFIG_PM
7318 	retval = pci_save_state(pdev);
7319 	if (retval)
7320 		return retval;
7321 #endif
7322 
7323 	status = rd32(E1000_STATUS);
7324 	if (status & E1000_STATUS_LU)
7325 		wufc &= ~E1000_WUFC_LNKC;
7326 
7327 	if (wufc) {
7328 		igb_setup_rctl(adapter);
7329 		igb_set_rx_mode(netdev);
7330 
7331 		/* turn on all-multi mode if wake on multicast is enabled */
7332 		if (wufc & E1000_WUFC_MC) {
7333 			rctl = rd32(E1000_RCTL);
7334 			rctl |= E1000_RCTL_MPE;
7335 			wr32(E1000_RCTL, rctl);
7336 		}
7337 
7338 		ctrl = rd32(E1000_CTRL);
7339 		/* advertise wake from D3Cold */
7340 		#define E1000_CTRL_ADVD3WUC 0x00100000
7341 		/* phy power management enable */
7342 		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
7343 		ctrl |= E1000_CTRL_ADVD3WUC;
7344 		wr32(E1000_CTRL, ctrl);
7345 
7346 		/* Allow time for pending master requests to run */
7347 		igb_disable_pcie_master(hw);
7348 
7349 		wr32(E1000_WUC, E1000_WUC_PME_EN);
7350 		wr32(E1000_WUFC, wufc);
7351 	} else {
7352 		wr32(E1000_WUC, 0);
7353 		wr32(E1000_WUFC, 0);
7354 	}
7355 
7356 	*enable_wake = wufc || adapter->en_mng_pt;
7357 	if (!*enable_wake)
7358 		igb_power_down_link(adapter);
7359 	else
7360 		igb_power_up_link(adapter);
7361 
7362 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
7363 	 * would have already happened in close and is redundant.
7364 	 */
7365 	igb_release_hw_control(adapter);
7366 
7367 	pci_disable_device(pdev);
7368 
7369 	return 0;
7370 }
7371 
7372 #ifdef CONFIG_PM
7373 #ifdef CONFIG_PM_SLEEP
7374 static int igb_suspend(struct device *dev)
7375 {
7376 	int retval;
7377 	bool wake;
7378 	struct pci_dev *pdev = to_pci_dev(dev);
7379 
7380 	retval = __igb_shutdown(pdev, &wake, 0);
7381 	if (retval)
7382 		return retval;
7383 
7384 	if (wake) {
7385 		pci_prepare_to_sleep(pdev);
7386 	} else {
7387 		pci_wake_from_d3(pdev, false);
7388 		pci_set_power_state(pdev, PCI_D3hot);
7389 	}
7390 
7391 	return 0;
7392 }
7393 #endif /* CONFIG_PM_SLEEP */
7394 
7395 static int igb_resume(struct device *dev)
7396 {
7397 	struct pci_dev *pdev = to_pci_dev(dev);
7398 	struct net_device *netdev = pci_get_drvdata(pdev);
7399 	struct igb_adapter *adapter = netdev_priv(netdev);
7400 	struct e1000_hw *hw = &adapter->hw;
7401 	u32 err;
7402 
7403 	pci_set_power_state(pdev, PCI_D0);
7404 	pci_restore_state(pdev);
7405 	pci_save_state(pdev);
7406 
7407 	err = pci_enable_device_mem(pdev);
7408 	if (err) {
7409 		dev_err(&pdev->dev,
7410 			"igb: Cannot enable PCI device from suspend\n");
7411 		return err;
7412 	}
7413 	pci_set_master(pdev);
7414 
7415 	pci_enable_wake(pdev, PCI_D3hot, 0);
7416 	pci_enable_wake(pdev, PCI_D3cold, 0);
7417 
7418 	if (igb_init_interrupt_scheme(adapter, true)) {
7419 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7420 		return -ENOMEM;
7421 	}
7422 
7423 	igb_reset(adapter);
7424 
7425 	/* let the f/w know that the h/w is now under the control of the
7426 	 * driver.
7427 	 */
7428 	igb_get_hw_control(adapter);
7429 
7430 	wr32(E1000_WUS, ~0);
7431 
7432 	if (netdev->flags & IFF_UP) {
7433 		rtnl_lock();
7434 		err = __igb_open(netdev, true);
7435 		rtnl_unlock();
7436 		if (err)
7437 			return err;
7438 	}
7439 
7440 	netif_device_attach(netdev);
7441 	return 0;
7442 }
7443 
7444 #ifdef CONFIG_PM_RUNTIME
7445 static int igb_runtime_idle(struct device *dev)
7446 {
7447 	struct pci_dev *pdev = to_pci_dev(dev);
7448 	struct net_device *netdev = pci_get_drvdata(pdev);
7449 	struct igb_adapter *adapter = netdev_priv(netdev);
7450 
7451 	if (!igb_has_link(adapter))
7452 		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
7453 
7454 	return -EBUSY;
7455 }
7456 
7457 static int igb_runtime_suspend(struct device *dev)
7458 {
7459 	struct pci_dev *pdev = to_pci_dev(dev);
7460 	int retval;
7461 	bool wake;
7462 
7463 	retval = __igb_shutdown(pdev, &wake, 1);
7464 	if (retval)
7465 		return retval;
7466 
7467 	if (wake) {
7468 		pci_prepare_to_sleep(pdev);
7469 	} else {
7470 		pci_wake_from_d3(pdev, false);
7471 		pci_set_power_state(pdev, PCI_D3hot);
7472 	}
7473 
7474 	return 0;
7475 }
7476 
7477 static int igb_runtime_resume(struct device *dev)
7478 {
7479 	return igb_resume(dev);
7480 }
7481 #endif /* CONFIG_PM_RUNTIME */
7482 #endif
7483 
7484 static void igb_shutdown(struct pci_dev *pdev)
7485 {
7486 	bool wake;
7487 
7488 	__igb_shutdown(pdev, &wake, 0);
7489 
7490 	if (system_state == SYSTEM_POWER_OFF) {
7491 		pci_wake_from_d3(pdev, wake);
7492 		pci_set_power_state(pdev, PCI_D3hot);
7493 	}
7494 }
7495 
7496 #ifdef CONFIG_PCI_IOV
7497 static int igb_sriov_reinit(struct pci_dev *dev)
7498 {
7499 	struct net_device *netdev = pci_get_drvdata(dev);
7500 	struct igb_adapter *adapter = netdev_priv(netdev);
7501 	struct pci_dev *pdev = adapter->pdev;
7502 
7503 	rtnl_lock();
7504 
7505 	if (netif_running(netdev))
7506 		igb_close(netdev);
7507 	else
7508 		igb_reset(adapter);
7509 
7510 	igb_clear_interrupt_scheme(adapter);
7511 
7512 	igb_init_queue_configuration(adapter);
7513 
7514 	if (igb_init_interrupt_scheme(adapter, true)) {
7515 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7516 		return -ENOMEM;
7517 	}
7518 
7519 	if (netif_running(netdev))
7520 		igb_open(netdev);
7521 
7522 	rtnl_unlock();
7523 
7524 	return 0;
7525 }
7526 
7527 static int igb_pci_disable_sriov(struct pci_dev *dev)
7528 {
7529 	int err = igb_disable_sriov(dev);
7530 
7531 	if (!err)
7532 		err = igb_sriov_reinit(dev);
7533 
7534 	return err;
7535 }
7536 
7537 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
7538 {
7539 	int err = igb_enable_sriov(dev, num_vfs);
7540 
7541 	if (err)
7542 		goto out;
7543 
7544 	err = igb_sriov_reinit(dev);
7545 	if (!err)
7546 		return num_vfs;
7547 
7548 out:
7549 	return err;
7550 }
7551 
7552 #endif
7553 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
7554 {
7555 #ifdef CONFIG_PCI_IOV
7556 	if (num_vfs == 0)
7557 		return igb_pci_disable_sriov(dev);
7558 	else
7559 		return igb_pci_enable_sriov(dev, num_vfs);
7560 #endif
7561 	return 0;
7562 }
7563 
7564 #ifdef CONFIG_NET_POLL_CONTROLLER
7565 /* Polling 'interrupt' - used by things like netconsole to send skbs
7566  * without having to re-enable interrupts. It's not called while
7567  * the interrupt routine is executing.
7568  */
7569 static void igb_netpoll(struct net_device *netdev)
7570 {
7571 	struct igb_adapter *adapter = netdev_priv(netdev);
7572 	struct e1000_hw *hw = &adapter->hw;
7573 	struct igb_q_vector *q_vector;
7574 	int i;
7575 
7576 	for (i = 0; i < adapter->num_q_vectors; i++) {
7577 		q_vector = adapter->q_vector[i];
7578 		if (adapter->flags & IGB_FLAG_HAS_MSIX)
7579 			wr32(E1000_EIMC, q_vector->eims_value);
7580 		else
7581 			igb_irq_disable(adapter);
7582 		napi_schedule(&q_vector->napi);
7583 	}
7584 }
7585 #endif /* CONFIG_NET_POLL_CONTROLLER */
7586 
7587 /**
7588  *  igb_io_error_detected - called when PCI error is detected
7589  *  @pdev: Pointer to PCI device
7590  *  @state: The current pci connection state
7591  *
7592  *  This function is called after a PCI bus error affecting
7593  *  this device has been detected.
7594  **/
7595 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7596 					      pci_channel_state_t state)
7597 {
7598 	struct net_device *netdev = pci_get_drvdata(pdev);
7599 	struct igb_adapter *adapter = netdev_priv(netdev);
7600 
7601 	netif_device_detach(netdev);
7602 
7603 	if (state == pci_channel_io_perm_failure)
7604 		return PCI_ERS_RESULT_DISCONNECT;
7605 
7606 	if (netif_running(netdev))
7607 		igb_down(adapter);
7608 	pci_disable_device(pdev);
7609 
7610 	/* Request a slot slot reset. */
7611 	return PCI_ERS_RESULT_NEED_RESET;
7612 }
7613 
7614 /**
7615  *  igb_io_slot_reset - called after the pci bus has been reset.
7616  *  @pdev: Pointer to PCI device
7617  *
7618  *  Restart the card from scratch, as if from a cold-boot. Implementation
7619  *  resembles the first-half of the igb_resume routine.
7620  **/
7621 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7622 {
7623 	struct net_device *netdev = pci_get_drvdata(pdev);
7624 	struct igb_adapter *adapter = netdev_priv(netdev);
7625 	struct e1000_hw *hw = &adapter->hw;
7626 	pci_ers_result_t result;
7627 	int err;
7628 
7629 	if (pci_enable_device_mem(pdev)) {
7630 		dev_err(&pdev->dev,
7631 			"Cannot re-enable PCI device after reset.\n");
7632 		result = PCI_ERS_RESULT_DISCONNECT;
7633 	} else {
7634 		pci_set_master(pdev);
7635 		pci_restore_state(pdev);
7636 		pci_save_state(pdev);
7637 
7638 		pci_enable_wake(pdev, PCI_D3hot, 0);
7639 		pci_enable_wake(pdev, PCI_D3cold, 0);
7640 
7641 		igb_reset(adapter);
7642 		wr32(E1000_WUS, ~0);
7643 		result = PCI_ERS_RESULT_RECOVERED;
7644 	}
7645 
7646 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
7647 	if (err) {
7648 		dev_err(&pdev->dev,
7649 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7650 			err);
7651 		/* non-fatal, continue */
7652 	}
7653 
7654 	return result;
7655 }
7656 
7657 /**
7658  *  igb_io_resume - called when traffic can start flowing again.
7659  *  @pdev: Pointer to PCI device
7660  *
7661  *  This callback is called when the error recovery driver tells us that
7662  *  its OK to resume normal operation. Implementation resembles the
7663  *  second-half of the igb_resume routine.
7664  */
7665 static void igb_io_resume(struct pci_dev *pdev)
7666 {
7667 	struct net_device *netdev = pci_get_drvdata(pdev);
7668 	struct igb_adapter *adapter = netdev_priv(netdev);
7669 
7670 	if (netif_running(netdev)) {
7671 		if (igb_up(adapter)) {
7672 			dev_err(&pdev->dev, "igb_up failed after reset\n");
7673 			return;
7674 		}
7675 	}
7676 
7677 	netif_device_attach(netdev);
7678 
7679 	/* let the f/w know that the h/w is now under the control of the
7680 	 * driver.
7681 	 */
7682 	igb_get_hw_control(adapter);
7683 }
7684 
7685 static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7686 			     u8 qsel)
7687 {
7688 	u32 rar_low, rar_high;
7689 	struct e1000_hw *hw = &adapter->hw;
7690 
7691 	/* HW expects these in little endian so we reverse the byte order
7692 	 * from network order (big endian) to little endian
7693 	 */
7694 	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
7695 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
7696 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
7697 
7698 	/* Indicate to hardware the Address is Valid. */
7699 	rar_high |= E1000_RAH_AV;
7700 
7701 	if (hw->mac.type == e1000_82575)
7702 		rar_high |= E1000_RAH_POOL_1 * qsel;
7703 	else
7704 		rar_high |= E1000_RAH_POOL_1 << qsel;
7705 
7706 	wr32(E1000_RAL(index), rar_low);
7707 	wrfl();
7708 	wr32(E1000_RAH(index), rar_high);
7709 	wrfl();
7710 }
7711 
7712 static int igb_set_vf_mac(struct igb_adapter *adapter,
7713 			  int vf, unsigned char *mac_addr)
7714 {
7715 	struct e1000_hw *hw = &adapter->hw;
7716 	/* VF MAC addresses start at end of receive addresses and moves
7717 	 * towards the first, as a result a collision should not be possible
7718 	 */
7719 	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
7720 
7721 	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
7722 
7723 	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
7724 
7725 	return 0;
7726 }
7727 
7728 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
7729 {
7730 	struct igb_adapter *adapter = netdev_priv(netdev);
7731 	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
7732 		return -EINVAL;
7733 	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
7734 	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
7735 	dev_info(&adapter->pdev->dev,
7736 		 "Reload the VF driver to make this change effective.");
7737 	if (test_bit(__IGB_DOWN, &adapter->state)) {
7738 		dev_warn(&adapter->pdev->dev,
7739 			 "The VF MAC address has been set, but the PF device is not up.\n");
7740 		dev_warn(&adapter->pdev->dev,
7741 			 "Bring the PF device up before attempting to use the VF device.\n");
7742 	}
7743 	return igb_set_vf_mac(adapter, vf, mac);
7744 }
7745 
7746 static int igb_link_mbps(int internal_link_speed)
7747 {
7748 	switch (internal_link_speed) {
7749 	case SPEED_100:
7750 		return 100;
7751 	case SPEED_1000:
7752 		return 1000;
7753 	default:
7754 		return 0;
7755 	}
7756 }
7757 
7758 static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
7759 				  int link_speed)
7760 {
7761 	int rf_dec, rf_int;
7762 	u32 bcnrc_val;
7763 
7764 	if (tx_rate != 0) {
7765 		/* Calculate the rate factor values to set */
7766 		rf_int = link_speed / tx_rate;
7767 		rf_dec = (link_speed - (rf_int * tx_rate));
7768 		rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
7769 			 tx_rate;
7770 
7771 		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
7772 		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
7773 			      E1000_RTTBCNRC_RF_INT_MASK);
7774 		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
7775 	} else {
7776 		bcnrc_val = 0;
7777 	}
7778 
7779 	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
7780 	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7781 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
7782 	 */
7783 	wr32(E1000_RTTBCNRM, 0x14);
7784 	wr32(E1000_RTTBCNRC, bcnrc_val);
7785 }
7786 
7787 static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7788 {
7789 	int actual_link_speed, i;
7790 	bool reset_rate = false;
7791 
7792 	/* VF TX rate limit was not set or not supported */
7793 	if ((adapter->vf_rate_link_speed == 0) ||
7794 	    (adapter->hw.mac.type != e1000_82576))
7795 		return;
7796 
7797 	actual_link_speed = igb_link_mbps(adapter->link_speed);
7798 	if (actual_link_speed != adapter->vf_rate_link_speed) {
7799 		reset_rate = true;
7800 		adapter->vf_rate_link_speed = 0;
7801 		dev_info(&adapter->pdev->dev,
7802 			 "Link speed has been changed. VF Transmit rate is disabled\n");
7803 	}
7804 
7805 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
7806 		if (reset_rate)
7807 			adapter->vf_data[i].tx_rate = 0;
7808 
7809 		igb_set_vf_rate_limit(&adapter->hw, i,
7810 				      adapter->vf_data[i].tx_rate,
7811 				      actual_link_speed);
7812 	}
7813 }
7814 
7815 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
7816 			     int min_tx_rate, int max_tx_rate)
7817 {
7818 	struct igb_adapter *adapter = netdev_priv(netdev);
7819 	struct e1000_hw *hw = &adapter->hw;
7820 	int actual_link_speed;
7821 
7822 	if (hw->mac.type != e1000_82576)
7823 		return -EOPNOTSUPP;
7824 
7825 	if (min_tx_rate)
7826 		return -EINVAL;
7827 
7828 	actual_link_speed = igb_link_mbps(adapter->link_speed);
7829 	if ((vf >= adapter->vfs_allocated_count) ||
7830 	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
7831 	    (max_tx_rate < 0) ||
7832 	    (max_tx_rate > actual_link_speed))
7833 		return -EINVAL;
7834 
7835 	adapter->vf_rate_link_speed = actual_link_speed;
7836 	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
7837 	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
7838 
7839 	return 0;
7840 }
7841 
7842 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
7843 				   bool setting)
7844 {
7845 	struct igb_adapter *adapter = netdev_priv(netdev);
7846 	struct e1000_hw *hw = &adapter->hw;
7847 	u32 reg_val, reg_offset;
7848 
7849 	if (!adapter->vfs_allocated_count)
7850 		return -EOPNOTSUPP;
7851 
7852 	if (vf >= adapter->vfs_allocated_count)
7853 		return -EINVAL;
7854 
7855 	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
7856 	reg_val = rd32(reg_offset);
7857 	if (setting)
7858 		reg_val |= ((1 << vf) |
7859 			    (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
7860 	else
7861 		reg_val &= ~((1 << vf) |
7862 			     (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
7863 	wr32(reg_offset, reg_val);
7864 
7865 	adapter->vf_data[vf].spoofchk_enabled = setting;
7866 	return 0;
7867 }
7868 
7869 static int igb_ndo_get_vf_config(struct net_device *netdev,
7870 				 int vf, struct ifla_vf_info *ivi)
7871 {
7872 	struct igb_adapter *adapter = netdev_priv(netdev);
7873 	if (vf >= adapter->vfs_allocated_count)
7874 		return -EINVAL;
7875 	ivi->vf = vf;
7876 	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
7877 	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
7878 	ivi->min_tx_rate = 0;
7879 	ivi->vlan = adapter->vf_data[vf].pf_vlan;
7880 	ivi->qos = adapter->vf_data[vf].pf_qos;
7881 	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
7882 	return 0;
7883 }
7884 
7885 static void igb_vmm_control(struct igb_adapter *adapter)
7886 {
7887 	struct e1000_hw *hw = &adapter->hw;
7888 	u32 reg;
7889 
7890 	switch (hw->mac.type) {
7891 	case e1000_82575:
7892 	case e1000_i210:
7893 	case e1000_i211:
7894 	case e1000_i354:
7895 	default:
7896 		/* replication is not supported for 82575 */
7897 		return;
7898 	case e1000_82576:
7899 		/* notify HW that the MAC is adding vlan tags */
7900 		reg = rd32(E1000_DTXCTL);
7901 		reg |= E1000_DTXCTL_VLAN_ADDED;
7902 		wr32(E1000_DTXCTL, reg);
7903 		/* Fall through */
7904 	case e1000_82580:
7905 		/* enable replication vlan tag stripping */
7906 		reg = rd32(E1000_RPLOLR);
7907 		reg |= E1000_RPLOLR_STRVLAN;
7908 		wr32(E1000_RPLOLR, reg);
7909 		/* Fall through */
7910 	case e1000_i350:
7911 		/* none of the above registers are supported by i350 */
7912 		break;
7913 	}
7914 
7915 	if (adapter->vfs_allocated_count) {
7916 		igb_vmdq_set_loopback_pf(hw, true);
7917 		igb_vmdq_set_replication_pf(hw, true);
7918 		igb_vmdq_set_anti_spoofing_pf(hw, true,
7919 					      adapter->vfs_allocated_count);
7920 	} else {
7921 		igb_vmdq_set_loopback_pf(hw, false);
7922 		igb_vmdq_set_replication_pf(hw, false);
7923 	}
7924 }
7925 
7926 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7927 {
7928 	struct e1000_hw *hw = &adapter->hw;
7929 	u32 dmac_thr;
7930 	u16 hwm;
7931 
7932 	if (hw->mac.type > e1000_82580) {
7933 		if (adapter->flags & IGB_FLAG_DMAC) {
7934 			u32 reg;
7935 
7936 			/* force threshold to 0. */
7937 			wr32(E1000_DMCTXTH, 0);
7938 
7939 			/* DMA Coalescing high water mark needs to be greater
7940 			 * than the Rx threshold. Set hwm to PBA - max frame
7941 			 * size in 16B units, capping it at PBA - 6KB.
7942 			 */
7943 			hwm = 64 * pba - adapter->max_frame_size / 16;
7944 			if (hwm < 64 * (pba - 6))
7945 				hwm = 64 * (pba - 6);
7946 			reg = rd32(E1000_FCRTC);
7947 			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
7948 			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
7949 				& E1000_FCRTC_RTH_COAL_MASK);
7950 			wr32(E1000_FCRTC, reg);
7951 
7952 			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
7953 			 * frame size, capping it at PBA - 10KB.
7954 			 */
7955 			dmac_thr = pba - adapter->max_frame_size / 512;
7956 			if (dmac_thr < pba - 10)
7957 				dmac_thr = pba - 10;
7958 			reg = rd32(E1000_DMACR);
7959 			reg &= ~E1000_DMACR_DMACTHR_MASK;
7960 			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
7961 				& E1000_DMACR_DMACTHR_MASK);
7962 
7963 			/* transition to L0x or L1 if available..*/
7964 			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
7965 
7966 			/* watchdog timer= +-1000 usec in 32usec intervals */
7967 			reg |= (1000 >> 5);
7968 
7969 			/* Disable BMC-to-OS Watchdog Enable */
7970 			if (hw->mac.type != e1000_i354)
7971 				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
7972 
7973 			wr32(E1000_DMACR, reg);
7974 
7975 			/* no lower threshold to disable
7976 			 * coalescing(smart fifb)-UTRESH=0
7977 			 */
7978 			wr32(E1000_DMCRTRH, 0);
7979 
7980 			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
7981 
7982 			wr32(E1000_DMCTLX, reg);
7983 
7984 			/* free space in tx packet buffer to wake from
7985 			 * DMA coal
7986 			 */
7987 			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7988 			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
7989 
7990 			/* make low power state decision controlled
7991 			 * by DMA coal
7992 			 */
7993 			reg = rd32(E1000_PCIEMISC);
7994 			reg &= ~E1000_PCIEMISC_LX_DECISION;
7995 			wr32(E1000_PCIEMISC, reg);
7996 		} /* endif adapter->dmac is not disabled */
7997 	} else if (hw->mac.type == e1000_82580) {
7998 		u32 reg = rd32(E1000_PCIEMISC);
7999 
8000 		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
8001 		wr32(E1000_DMACR, 0);
8002 	}
8003 }
8004 
8005 /**
8006  *  igb_read_i2c_byte - Reads 8 bit word over I2C
8007  *  @hw: pointer to hardware structure
8008  *  @byte_offset: byte offset to read
8009  *  @dev_addr: device address
8010  *  @data: value read
8011  *
8012  *  Performs byte read operation over I2C interface at
8013  *  a specified device address.
8014  **/
8015 s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
8016 		      u8 dev_addr, u8 *data)
8017 {
8018 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
8019 	struct i2c_client *this_client = adapter->i2c_client;
8020 	s32 status;
8021 	u16 swfw_mask = 0;
8022 
8023 	if (!this_client)
8024 		return E1000_ERR_I2C;
8025 
8026 	swfw_mask = E1000_SWFW_PHY0_SM;
8027 
8028 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
8029 		return E1000_ERR_SWFW_SYNC;
8030 
8031 	status = i2c_smbus_read_byte_data(this_client, byte_offset);
8032 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
8033 
8034 	if (status < 0)
8035 		return E1000_ERR_I2C;
8036 	else {
8037 		*data = status;
8038 		return 0;
8039 	}
8040 }
8041 
8042 /**
8043  *  igb_write_i2c_byte - Writes 8 bit word over I2C
8044  *  @hw: pointer to hardware structure
8045  *  @byte_offset: byte offset to write
8046  *  @dev_addr: device address
8047  *  @data: value to write
8048  *
8049  *  Performs byte write operation over I2C interface at
8050  *  a specified device address.
8051  **/
8052 s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
8053 		       u8 dev_addr, u8 data)
8054 {
8055 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
8056 	struct i2c_client *this_client = adapter->i2c_client;
8057 	s32 status;
8058 	u16 swfw_mask = E1000_SWFW_PHY0_SM;
8059 
8060 	if (!this_client)
8061 		return E1000_ERR_I2C;
8062 
8063 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
8064 		return E1000_ERR_SWFW_SYNC;
8065 	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
8066 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
8067 
8068 	if (status)
8069 		return E1000_ERR_I2C;
8070 	else
8071 		return 0;
8072 
8073 }
8074 
8075 int igb_reinit_queues(struct igb_adapter *adapter)
8076 {
8077 	struct net_device *netdev = adapter->netdev;
8078 	struct pci_dev *pdev = adapter->pdev;
8079 	int err = 0;
8080 
8081 	if (netif_running(netdev))
8082 		igb_close(netdev);
8083 
8084 	igb_reset_interrupt_capability(adapter);
8085 
8086 	if (igb_init_interrupt_scheme(adapter, true)) {
8087 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8088 		return -ENOMEM;
8089 	}
8090 
8091 	if (netif_running(netdev))
8092 		err = igb_open(netdev);
8093 
8094 	return err;
8095 }
8096 /* igb_main.c */
8097