1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2007 - 2018 Intel Corporation. */
3 
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 
6 #include <linux/module.h>
7 #include <linux/types.h>
8 #include <linux/init.h>
9 #include <linux/bitops.h>
10 #include <linux/vmalloc.h>
11 #include <linux/pagemap.h>
12 #include <linux/netdevice.h>
13 #include <linux/ipv6.h>
14 #include <linux/slab.h>
15 #include <net/checksum.h>
16 #include <net/ip6_checksum.h>
17 #include <net/pkt_sched.h>
18 #include <net/pkt_cls.h>
19 #include <linux/net_tstamp.h>
20 #include <linux/mii.h>
21 #include <linux/ethtool.h>
22 #include <linux/if.h>
23 #include <linux/if_vlan.h>
24 #include <linux/pci.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/ip.h>
28 #include <linux/tcp.h>
29 #include <linux/sctp.h>
30 #include <linux/if_ether.h>
31 #include <linux/aer.h>
32 #include <linux/prefetch.h>
33 #include <linux/pm_runtime.h>
34 #include <linux/etherdevice.h>
35 #ifdef CONFIG_IGB_DCA
36 #include <linux/dca.h>
37 #endif
38 #include <linux/i2c.h>
39 #include "igb.h"
40 
41 #define MAJ 5
42 #define MIN 6
43 #define BUILD 0
44 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
45 __stringify(BUILD) "-k"
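
/* With the values above, DRV_VERSION expands to "5.6.0-k". */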
46 
47 enum queue_mode {
48 	QUEUE_MODE_STRICT_PRIORITY,
49 	QUEUE_MODE_STREAM_RESERVATION,
50 };
51 
52 enum tx_queue_prio {
53 	TX_QUEUE_PRIO_HIGH,
54 	TX_QUEUE_PRIO_LOW,
55 };
56 
57 char igb_driver_name[] = "igb";
58 char igb_driver_version[] = DRV_VERSION;
59 static const char igb_driver_string[] =
60 				"Intel(R) Gigabit Ethernet Network Driver";
61 static const char igb_copyright[] =
62 				"Copyright (c) 2007-2014 Intel Corporation.";
63 
64 static const struct e1000_info *igb_info_tbl[] = {
65 	[board_82575] = &e1000_82575_info,
66 };
67 
68 static const struct pci_device_id igb_pci_tbl[] = {
69 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
70 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
71 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
72 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
73 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
74 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
75 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
76 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
77 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
78 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
79 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
80 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
81 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
82 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
83 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
84 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
85 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
86 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
87 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
88 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
89 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
90 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
91 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
92 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
93 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
94 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
95 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
96 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
97 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
98 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
99 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
100 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
101 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
102 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
103 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
104 	/* required last entry */
105 	{0, }
106 };
107 
108 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
109 
110 static int igb_setup_all_tx_resources(struct igb_adapter *);
111 static int igb_setup_all_rx_resources(struct igb_adapter *);
112 static void igb_free_all_tx_resources(struct igb_adapter *);
113 static void igb_free_all_rx_resources(struct igb_adapter *);
114 static void igb_setup_mrqc(struct igb_adapter *);
115 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
116 static void igb_remove(struct pci_dev *pdev);
117 static int igb_sw_init(struct igb_adapter *);
118 int igb_open(struct net_device *);
119 int igb_close(struct net_device *);
120 static void igb_configure(struct igb_adapter *);
121 static void igb_configure_tx(struct igb_adapter *);
122 static void igb_configure_rx(struct igb_adapter *);
123 static void igb_clean_all_tx_rings(struct igb_adapter *);
124 static void igb_clean_all_rx_rings(struct igb_adapter *);
125 static void igb_clean_tx_ring(struct igb_ring *);
126 static void igb_clean_rx_ring(struct igb_ring *);
127 static void igb_set_rx_mode(struct net_device *);
128 static void igb_update_phy_info(struct timer_list *);
129 static void igb_watchdog(struct timer_list *);
130 static void igb_watchdog_task(struct work_struct *);
131 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
132 static void igb_get_stats64(struct net_device *dev,
133 			    struct rtnl_link_stats64 *stats);
134 static int igb_change_mtu(struct net_device *, int);
135 static int igb_set_mac(struct net_device *, void *);
136 static void igb_set_uta(struct igb_adapter *adapter, bool set);
137 static irqreturn_t igb_intr(int irq, void *);
138 static irqreturn_t igb_intr_msi(int irq, void *);
139 static irqreturn_t igb_msix_other(int irq, void *);
140 static irqreturn_t igb_msix_ring(int irq, void *);
141 #ifdef CONFIG_IGB_DCA
142 static void igb_update_dca(struct igb_q_vector *);
143 static void igb_setup_dca(struct igb_adapter *);
144 #endif /* CONFIG_IGB_DCA */
145 static int igb_poll(struct napi_struct *, int);
146 static bool igb_clean_tx_irq(struct igb_q_vector *, int);
147 static int igb_clean_rx_irq(struct igb_q_vector *, int);
148 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
149 static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
150 static void igb_reset_task(struct work_struct *);
151 static void igb_vlan_mode(struct net_device *netdev,
152 			  netdev_features_t features);
153 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
154 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
155 static void igb_restore_vlan(struct igb_adapter *);
156 static void igb_rar_set_index(struct igb_adapter *, u32);
157 static void igb_ping_all_vfs(struct igb_adapter *);
158 static void igb_msg_task(struct igb_adapter *);
159 static void igb_vmm_control(struct igb_adapter *);
160 static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
161 static void igb_flush_mac_table(struct igb_adapter *);
162 static int igb_available_rars(struct igb_adapter *, u8);
163 static void igb_set_default_mac_filter(struct igb_adapter *);
164 static int igb_uc_sync(struct net_device *, const unsigned char *);
165 static int igb_uc_unsync(struct net_device *, const unsigned char *);
166 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
167 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
168 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
169 			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
170 static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
171 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
172 				   bool setting);
173 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
174 				bool setting);
175 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
176 				 struct ifla_vf_info *ivi);
177 static void igb_check_vf_rate_limit(struct igb_adapter *);
178 static void igb_nfc_filter_exit(struct igb_adapter *adapter);
179 static void igb_nfc_filter_restore(struct igb_adapter *adapter);
180 
181 #ifdef CONFIG_PCI_IOV
182 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
183 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
184 static int igb_disable_sriov(struct pci_dev *dev);
185 static int igb_pci_disable_sriov(struct pci_dev *dev);
186 #endif
187 
188 static int igb_suspend(struct device *);
189 static int igb_resume(struct device *);
190 static int igb_runtime_suspend(struct device *dev);
191 static int igb_runtime_resume(struct device *dev);
192 static int igb_runtime_idle(struct device *dev);
193 static const struct dev_pm_ops igb_pm_ops = {
194 	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
195 	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
196 			igb_runtime_idle)
197 };
198 static void igb_shutdown(struct pci_dev *);
199 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
200 #ifdef CONFIG_IGB_DCA
201 static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
202 static struct notifier_block dca_notifier = {
203 	.notifier_call	= igb_notify_dca,
204 	.next		= NULL,
205 	.priority	= 0
206 };
207 #endif
208 #ifdef CONFIG_PCI_IOV
209 static unsigned int max_vfs;
210 module_param(max_vfs, uint, 0);
211 MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
212 #endif /* CONFIG_PCI_IOV */
213 
214 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
215 		     pci_channel_state_t);
216 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
217 static void igb_io_resume(struct pci_dev *);
218 
219 static const struct pci_error_handlers igb_err_handler = {
220 	.error_detected = igb_io_error_detected,
221 	.slot_reset = igb_io_slot_reset,
222 	.resume = igb_io_resume,
223 };
224 
225 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
226 
227 static struct pci_driver igb_driver = {
228 	.name     = igb_driver_name,
229 	.id_table = igb_pci_tbl,
230 	.probe    = igb_probe,
231 	.remove   = igb_remove,
232 #ifdef CONFIG_PM
233 	.driver.pm = &igb_pm_ops,
234 #endif
235 	.shutdown = igb_shutdown,
236 	.sriov_configure = igb_pci_sriov_configure,
237 	.err_handler = &igb_err_handler
238 };
239 
240 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
241 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
242 MODULE_LICENSE("GPL v2");
243 MODULE_VERSION(DRV_VERSION);
244 
245 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
246 static int debug = -1;
247 module_param(debug, int, 0);
248 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
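
/* A debug value of -1 (the default) makes the driver fall back to
 * DEFAULT_MSG_ENABLE above (via netif_msg_init() in igb_probe(), outside
 * this excerpt). As a usage sketch, loading with "modprobe igb debug=16"
 * enables all message types, matching the parameter description.
 */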
249 
250 struct igb_reg_info {
251 	u32 ofs;
252 	char *name;
253 };
254 
255 static const struct igb_reg_info igb_reg_info_tbl[] = {
256 
257 	/* General Registers */
258 	{E1000_CTRL, "CTRL"},
259 	{E1000_STATUS, "STATUS"},
260 	{E1000_CTRL_EXT, "CTRL_EXT"},
261 
262 	/* Interrupt Registers */
263 	{E1000_ICR, "ICR"},
264 
265 	/* RX Registers */
266 	{E1000_RCTL, "RCTL"},
267 	{E1000_RDLEN(0), "RDLEN"},
268 	{E1000_RDH(0), "RDH"},
269 	{E1000_RDT(0), "RDT"},
270 	{E1000_RXDCTL(0), "RXDCTL"},
271 	{E1000_RDBAL(0), "RDBAL"},
272 	{E1000_RDBAH(0), "RDBAH"},
273 
274 	/* TX Registers */
275 	{E1000_TCTL, "TCTL"},
276 	{E1000_TDBAL(0), "TDBAL"},
277 	{E1000_TDBAH(0), "TDBAH"},
278 	{E1000_TDLEN(0), "TDLEN"},
279 	{E1000_TDH(0), "TDH"},
280 	{E1000_TDT(0), "TDT"},
281 	{E1000_TXDCTL(0), "TXDCTL"},
282 	{E1000_TDFH, "TDFH"},
283 	{E1000_TDFT, "TDFT"},
284 	{E1000_TDFHS, "TDFHS"},
285 	{E1000_TDFPC, "TDFPC"},
286 
287 	/* List Terminator */
288 	{}
289 };
290 
291 /* igb_regdump - register printout routine */
292 static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
293 {
294 	int n = 0;
295 	char rname[16];
296 	u32 regs[8];
297 
298 	switch (reginfo->ofs) {
299 	case E1000_RDLEN(0):
300 		for (n = 0; n < 4; n++)
301 			regs[n] = rd32(E1000_RDLEN(n));
302 		break;
303 	case E1000_RDH(0):
304 		for (n = 0; n < 4; n++)
305 			regs[n] = rd32(E1000_RDH(n));
306 		break;
307 	case E1000_RDT(0):
308 		for (n = 0; n < 4; n++)
309 			regs[n] = rd32(E1000_RDT(n));
310 		break;
311 	case E1000_RXDCTL(0):
312 		for (n = 0; n < 4; n++)
313 			regs[n] = rd32(E1000_RXDCTL(n));
314 		break;
315 	case E1000_RDBAL(0):
316 		for (n = 0; n < 4; n++)
317 			regs[n] = rd32(E1000_RDBAL(n));
318 		break;
319 	case E1000_RDBAH(0):
320 		for (n = 0; n < 4; n++)
321 			regs[n] = rd32(E1000_RDBAH(n));
322 		break;
323 	case E1000_TDBAL(0):
324 		for (n = 0; n < 4; n++)
325 			regs[n] = rd32(E1000_TDBAL(n));
326 		break;
327 	case E1000_TDBAH(0):
328 		for (n = 0; n < 4; n++)
329 			regs[n] = rd32(E1000_TDBAH(n));
330 		break;
331 	case E1000_TDLEN(0):
332 		for (n = 0; n < 4; n++)
333 			regs[n] = rd32(E1000_TDLEN(n));
334 		break;
335 	case E1000_TDH(0):
336 		for (n = 0; n < 4; n++)
337 			regs[n] = rd32(E1000_TDH(n));
338 		break;
339 	case E1000_TDT(0):
340 		for (n = 0; n < 4; n++)
341 			regs[n] = rd32(E1000_TDT(n));
342 		break;
343 	case E1000_TXDCTL(0):
344 		for (n = 0; n < 4; n++)
345 			regs[n] = rd32(E1000_TXDCTL(n));
346 		break;
347 	default:
348 		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
349 		return;
350 	}
351 
352 	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
353 	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
354 		regs[2], regs[3]);
355 }
356 
357 /* igb_dump - Print registers, Tx-rings and Rx-rings */
358 static void igb_dump(struct igb_adapter *adapter)
359 {
360 	struct net_device *netdev = adapter->netdev;
361 	struct e1000_hw *hw = &adapter->hw;
362 	struct igb_reg_info *reginfo;
363 	struct igb_ring *tx_ring;
364 	union e1000_adv_tx_desc *tx_desc;
365 	struct my_u0 { u64 a; u64 b; } *u0;
366 	struct igb_ring *rx_ring;
367 	union e1000_adv_rx_desc *rx_desc;
368 	u32 staterr;
369 	u16 i, n;
370 
371 	if (!netif_msg_hw(adapter))
372 		return;
373 
374 	/* Print netdevice Info */
375 	if (netdev) {
376 		dev_info(&adapter->pdev->dev, "Net device Info\n");
377 		pr_info("Device Name     state            trans_start\n");
378 		pr_info("%-15s %016lX %016lX\n", netdev->name,
379 			netdev->state, dev_trans_start(netdev));
380 	}
381 
382 	/* Print Registers */
383 	dev_info(&adapter->pdev->dev, "Register Dump\n");
384 	pr_info(" Register Name   Value\n");
385 	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
386 	     reginfo->name; reginfo++) {
387 		igb_regdump(hw, reginfo);
388 	}
389 
390 	/* Print TX Ring Summary */
391 	if (!netdev || !netif_running(netdev))
392 		goto exit;
393 
394 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
395 	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
396 	for (n = 0; n < adapter->num_tx_queues; n++) {
397 		struct igb_tx_buffer *buffer_info;
398 		tx_ring = adapter->tx_ring[n];
399 		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
400 		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
401 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
402 			(u64)dma_unmap_addr(buffer_info, dma),
403 			dma_unmap_len(buffer_info, len),
404 			buffer_info->next_to_watch,
405 			(u64)buffer_info->time_stamp);
406 	}
407 
408 	/* Print TX Rings */
409 	if (!netif_msg_tx_done(adapter))
410 		goto rx_ring_summary;
411 
412 	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
413 
414 	/* Transmit Descriptor Formats
415 	 *
416 	 * Advanced Transmit Descriptor
417 	 *   +--------------------------------------------------------------+
418 	 * 0 |         Buffer Address [63:0]                                |
419 	 *   +--------------------------------------------------------------+
420 	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
421 	 *   +--------------------------------------------------------------+
422 	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
423 	 */
424 
425 	for (n = 0; n < adapter->num_tx_queues; n++) {
426 		tx_ring = adapter->tx_ring[n];
427 		pr_info("------------------------------------\n");
428 		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
429 		pr_info("------------------------------------\n");
430 		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");
431 
432 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
433 			const char *next_desc;
434 			struct igb_tx_buffer *buffer_info;
435 			tx_desc = IGB_TX_DESC(tx_ring, i);
436 			buffer_info = &tx_ring->tx_buffer_info[i];
437 			u0 = (struct my_u0 *)tx_desc;
438 			if (i == tx_ring->next_to_use &&
439 			    i == tx_ring->next_to_clean)
440 				next_desc = " NTC/U";
441 			else if (i == tx_ring->next_to_use)
442 				next_desc = " NTU";
443 			else if (i == tx_ring->next_to_clean)
444 				next_desc = " NTC";
445 			else
446 				next_desc = "";
447 
448 			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
449 				i, le64_to_cpu(u0->a),
450 				le64_to_cpu(u0->b),
451 				(u64)dma_unmap_addr(buffer_info, dma),
452 				dma_unmap_len(buffer_info, len),
453 				buffer_info->next_to_watch,
454 				(u64)buffer_info->time_stamp,
455 				buffer_info->skb, next_desc);
456 
457 			if (netif_msg_pktdata(adapter) && buffer_info->skb)
458 				print_hex_dump(KERN_INFO, "",
459 					DUMP_PREFIX_ADDRESS,
460 					16, 1, buffer_info->skb->data,
461 					dma_unmap_len(buffer_info, len),
462 					true);
463 		}
464 	}
465 
466 	/* Print RX Rings Summary */
467 rx_ring_summary:
468 	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
469 	pr_info("Queue [NTU] [NTC]\n");
470 	for (n = 0; n < adapter->num_rx_queues; n++) {
471 		rx_ring = adapter->rx_ring[n];
472 		pr_info(" %5d %5X %5X\n",
473 			n, rx_ring->next_to_use, rx_ring->next_to_clean);
474 	}
475 
476 	/* Print RX Rings */
477 	if (!netif_msg_rx_status(adapter))
478 		goto exit;
479 
480 	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
481 
482 	/* Advanced Receive Descriptor (Read) Format
483 	 *    63                                           1        0
484 	 *    +-----------------------------------------------------+
485 	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
486 	 *    +----------------------------------------------+------+
487 	 *  8 |       Header Buffer Address [63:1]           |  DD  |
488 	 *    +-----------------------------------------------------+
489 	 *
490 	 *
491 	 * Advanced Receive Descriptor (Write-Back) Format
492 	 *
493 	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
494 	 *   +------------------------------------------------------+
495 	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
496 	 *   | Checksum   Ident  |   |           |    | Type | Type |
497 	 *   +------------------------------------------------------+
498 	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
499 	 *   +------------------------------------------------------+
500 	 *   63       48 47    32 31            20 19               0
501 	 */
502 
503 	for (n = 0; n < adapter->num_rx_queues; n++) {
504 		rx_ring = adapter->rx_ring[n];
505 		pr_info("------------------------------------\n");
506 		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
507 		pr_info("------------------------------------\n");
508 		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
509 		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
510 
511 		for (i = 0; i < rx_ring->count; i++) {
512 			const char *next_desc;
513 			struct igb_rx_buffer *buffer_info;
514 			buffer_info = &rx_ring->rx_buffer_info[i];
515 			rx_desc = IGB_RX_DESC(rx_ring, i);
516 			u0 = (struct my_u0 *)rx_desc;
517 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
518 
519 			if (i == rx_ring->next_to_use)
520 				next_desc = " NTU";
521 			else if (i == rx_ring->next_to_clean)
522 				next_desc = " NTC";
523 			else
524 				next_desc = "";
525 
526 			if (staterr & E1000_RXD_STAT_DD) {
527 				/* Descriptor Done */
528 				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
529 					"RWB", i,
530 					le64_to_cpu(u0->a),
531 					le64_to_cpu(u0->b),
532 					next_desc);
533 			} else {
534 				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
535 					"R  ", i,
536 					le64_to_cpu(u0->a),
537 					le64_to_cpu(u0->b),
538 					(u64)buffer_info->dma,
539 					next_desc);
540 
541 				if (netif_msg_pktdata(adapter) &&
542 				    buffer_info->dma && buffer_info->page) {
543 					print_hex_dump(KERN_INFO, "",
544 					  DUMP_PREFIX_ADDRESS,
545 					  16, 1,
546 					  page_address(buffer_info->page) +
547 						      buffer_info->page_offset,
548 					  igb_rx_bufsz(rx_ring), true);
549 				}
550 			}
551 		}
552 	}
553 
554 exit:
555 	return;
556 }
557 
558 /**
559  *  igb_get_i2c_data - Reads the I2C SDA data bit
560  *  @data: pointer to hardware structure
562  *
563  *  Returns the I2C data bit value
564  **/
565 static int igb_get_i2c_data(void *data)
566 {
567 	struct igb_adapter *adapter = (struct igb_adapter *)data;
568 	struct e1000_hw *hw = &adapter->hw;
569 	s32 i2cctl = rd32(E1000_I2CPARAMS);
570 
571 	return !!(i2cctl & E1000_I2C_DATA_IN);
572 }
573 
574 /**
575  *  igb_set_i2c_data - Sets the I2C data bit
576  *  @data: pointer to hardware structure
577  *  @state: I2C data value (0 or 1) to set
578  *
579  *  Sets the I2C data bit
580  **/
581 static void igb_set_i2c_data(void *data, int state)
582 {
583 	struct igb_adapter *adapter = (struct igb_adapter *)data;
584 	struct e1000_hw *hw = &adapter->hw;
585 	s32 i2cctl = rd32(E1000_I2CPARAMS);
586 
587 	if (state)
588 		i2cctl |= E1000_I2C_DATA_OUT;
589 	else
590 		i2cctl &= ~E1000_I2C_DATA_OUT;
591 
592 	i2cctl &= ~E1000_I2C_DATA_OE_N;
593 	i2cctl |= E1000_I2C_CLK_OE_N;
594 	wr32(E1000_I2CPARAMS, i2cctl);
595 	wrfl();
597 }
598 
599 /**
600  *  igb_set_i2c_clk - Sets the I2C SCL clock
601  *  @data: pointer to hardware structure
602  *  @state: state to set the clock line to
603  *
604  *  Sets the I2C clock line to state
605  **/
606 static void igb_set_i2c_clk(void *data, int state)
607 {
608 	struct igb_adapter *adapter = (struct igb_adapter *)data;
609 	struct e1000_hw *hw = &adapter->hw;
610 	s32 i2cctl = rd32(E1000_I2CPARAMS);
611 
612 	if (state) {
613 		i2cctl |= E1000_I2C_CLK_OUT;
614 		i2cctl &= ~E1000_I2C_CLK_OE_N;
615 	} else {
616 		i2cctl &= ~E1000_I2C_CLK_OUT;
617 		i2cctl &= ~E1000_I2C_CLK_OE_N;
618 	}
619 	wr32(E1000_I2CPARAMS, i2cctl);
620 	wrfl();
621 }
622 
623 /**
624  *  igb_get_i2c_clk - Gets the I2C SCL clock state
625  *  @data: pointer to hardware structure
626  *
627  *  Gets the I2C clock state
628  **/
629 static int igb_get_i2c_clk(void *data)
630 {
631 	struct igb_adapter *adapter = (struct igb_adapter *)data;
632 	struct e1000_hw *hw = &adapter->hw;
633 	s32 i2cctl = rd32(E1000_I2CPARAMS);
634 
635 	return !!(i2cctl & E1000_I2C_CLK_IN);
636 }
637 
638 static const struct i2c_algo_bit_data igb_i2c_algo = {
639 	.setsda		= igb_set_i2c_data,
640 	.setscl		= igb_set_i2c_clk,
641 	.getsda		= igb_get_i2c_data,
642 	.getscl		= igb_get_i2c_clk,
643 	.udelay		= 5,
644 	.timeout	= 20,
645 };
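
/* Usage sketch (assuming the igb_init_i2c() wiring later in this file):
 * the adapter copies this template, points its .data member at the
 * igb_adapter, and registers the copy with the i2c-algo-bit layer via
 * i2c_bit_add_bus(), which then bit-bangs SDA/SCL through the four
 * callbacks above.
 */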
646 
647 /**
648  *  igb_get_hw_dev - return device
649  *  @hw: pointer to hardware structure
650  *
651  *  used by hardware layer to print debugging information
652  **/
653 struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
654 {
655 	struct igb_adapter *adapter = hw->back;
656 	return adapter->netdev;
657 }
658 
659 /**
660  *  igb_init_module - Driver Registration Routine
661  *
662  *  igb_init_module is the first routine called when the driver is
663  *  loaded. All it does is register with the PCI subsystem.
664  **/
665 static int __init igb_init_module(void)
666 {
667 	int ret;
668 
669 	pr_info("%s - version %s\n",
670 	       igb_driver_string, igb_driver_version);
671 	pr_info("%s\n", igb_copyright);
672 
673 #ifdef CONFIG_IGB_DCA
674 	dca_register_notify(&dca_notifier);
675 #endif
676 	ret = pci_register_driver(&igb_driver);
677 	return ret;
678 }
679 
680 module_init(igb_init_module);
681 
682 /**
683  *  igb_exit_module - Driver Exit Cleanup Routine
684  *
685  *  igb_exit_module is called just before the driver is removed
686  *  from memory.
687  **/
688 static void __exit igb_exit_module(void)
689 {
690 #ifdef CONFIG_IGB_DCA
691 	dca_unregister_notify(&dca_notifier);
692 #endif
693 	pci_unregister_driver(&igb_driver);
694 }
695 
696 module_exit(igb_exit_module);
697 
698 #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
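
/* Worked example: Q_IDX_82576(i) produces the interleaved sequence
 * 0, 8, 1, 9, 2, 10, ... for i = 0, 1, 2, 3, 4, 5, ... so that, once
 * rbase_offset is added in igb_cache_ring_register() below, the PF
 * continues the VF 0/8, 1/9, ... queue pairing from the first free queue.
 */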
699 /**
700  *  igb_cache_ring_register - Descriptor ring to register mapping
701  *  @adapter: board private structure to initialize
702  *
703  *  Once we know the feature-set enabled for the device, we'll cache
704  *  the register offset the descriptor ring is assigned to.
705  **/
706 static void igb_cache_ring_register(struct igb_adapter *adapter)
707 {
708 	int i = 0, j = 0;
709 	u32 rbase_offset = adapter->vfs_allocated_count;
710 
711 	switch (adapter->hw.mac.type) {
712 	case e1000_82576:
713 		/* The queues are allocated for virtualization such that VF 0
714 		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
715 		 * In order to avoid collision we start at the first free queue
716 		 * and continue consuming queues in the same sequence
717 		 */
718 		if (adapter->vfs_allocated_count) {
719 			for (; i < adapter->rss_queues; i++)
720 				adapter->rx_ring[i]->reg_idx = rbase_offset +
721 							       Q_IDX_82576(i);
722 		}
723 		/* Fall through */
724 	case e1000_82575:
725 	case e1000_82580:
726 	case e1000_i350:
727 	case e1000_i354:
728 	case e1000_i210:
729 	case e1000_i211:
730 		/* Fall through */
731 	default:
732 		for (; i < adapter->num_rx_queues; i++)
733 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
734 		for (; j < adapter->num_tx_queues; j++)
735 			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
736 		break;
737 	}
738 }
739 
740 u32 igb_rd32(struct e1000_hw *hw, u32 reg)
741 {
742 	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
743 	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
744 	u32 value = 0;
745 
746 	if (E1000_REMOVED(hw_addr))
747 		return ~value;
748 
749 	value = readl(&hw_addr[reg]);
750 
751 	/* reads should not return all F's */
752 	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
753 		struct net_device *netdev = igb->netdev;
754 		hw->hw_addr = NULL;
755 		netdev_err(netdev, "PCIe link lost\n");
756 		WARN(pci_device_is_present(igb->pdev),
757 		     "igb: Failed to read reg 0x%x!\n", reg);
758 	}
759 
760 	return value;
761 }
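
/* Note: the rd32(reg) helper used throughout this file is, in the driver's
 * register headers, a macro that expands to igb_rd32(hw, reg), so every
 * register read goes through the surprise-removal check above.
 */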
762 
763 /**
764  *  igb_write_ivar - configure ivar for given MSI-X vector
765  *  @hw: pointer to the HW structure
766  *  @msix_vector: vector number we are allocating to a given ring
767  *  @index: row index of IVAR register to write within IVAR table
768  *  @offset: column offset in IVAR, should be a multiple of 8
769  *
770  *  This function is intended to handle the writing of the IVAR register
771  *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
772  *  each containing a cause allocation for an Rx and a Tx ring, and a
773  *  variable number of rows depending on the number of queues supported.
774  **/
775 static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
776 			   int index, int offset)
777 {
778 	u32 ivar = array_rd32(E1000_IVAR0, index);
779 
780 	/* clear any bits that are currently set */
781 	ivar &= ~((u32)0xFF << offset);
782 
783 	/* write vector and valid bit */
784 	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
785 
786 	array_wr32(E1000_IVAR0, index, ivar);
787 }
788 
789 #define IGB_N0_QUEUE -1
790 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
791 {
792 	struct igb_adapter *adapter = q_vector->adapter;
793 	struct e1000_hw *hw = &adapter->hw;
794 	int rx_queue = IGB_N0_QUEUE;
795 	int tx_queue = IGB_N0_QUEUE;
796 	u32 msixbm = 0;
797 
798 	if (q_vector->rx.ring)
799 		rx_queue = q_vector->rx.ring->reg_idx;
800 	if (q_vector->tx.ring)
801 		tx_queue = q_vector->tx.ring->reg_idx;
802 
803 	switch (hw->mac.type) {
804 	case e1000_82575:
805 		/* The 82575 assigns vectors using a bitmask, which matches the
806 		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
807 		 * or more queues to a vector, we write the appropriate bits
808 		 * into the MSIXBM register for that vector.
809 		 */
810 		if (rx_queue > IGB_N0_QUEUE)
811 			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
812 		if (tx_queue > IGB_N0_QUEUE)
813 			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
814 		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
815 			msixbm |= E1000_EIMS_OTHER;
816 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
817 		q_vector->eims_value = msixbm;
818 		break;
819 	case e1000_82576:
820 		/* 82576 uses a table that essentially consists of 2 columns
821 		 * with 8 rows.  The ordering is column-major so we use the
822 		 * lower 3 bits as the row index, and the 4th bit as the
823 		 * column offset.
824 		 */
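		/* Worked example (a sketch): rx_queue = 10 selects row
		 * 10 & 0x7 = 2 and byte offset (10 & 0x8) << 1 = 16, i.e.
		 * the second Rx column; the matching Tx entry would land
		 * at offset 16 + 8 = 24.
		 */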
825 		if (rx_queue > IGB_N0_QUEUE)
826 			igb_write_ivar(hw, msix_vector,
827 				       rx_queue & 0x7,
828 				       (rx_queue & 0x8) << 1);
829 		if (tx_queue > IGB_N0_QUEUE)
830 			igb_write_ivar(hw, msix_vector,
831 				       tx_queue & 0x7,
832 				       ((tx_queue & 0x8) << 1) + 8);
833 		q_vector->eims_value = BIT(msix_vector);
834 		break;
835 	case e1000_82580:
836 	case e1000_i350:
837 	case e1000_i354:
838 	case e1000_i210:
839 	case e1000_i211:
840 		/* On 82580 and newer adapters the scheme is similar to 82576
841 		 * however instead of ordering column-major we have things
842 		 * ordered row-major.  So we traverse the table by using
843 		 * bit 0 as the column offset, and the remaining bits as the
844 		 * row index.
845 		 */
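		/* Worked example (a sketch): rx_queue = 5 selects row
		 * 5 >> 1 = 2 and byte offset (5 & 0x1) << 4 = 16; the
		 * matching Tx entry would land at offset 16 + 8 = 24.
		 */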
846 		if (rx_queue > IGB_N0_QUEUE)
847 			igb_write_ivar(hw, msix_vector,
848 				       rx_queue >> 1,
849 				       (rx_queue & 0x1) << 4);
850 		if (tx_queue > IGB_N0_QUEUE)
851 			igb_write_ivar(hw, msix_vector,
852 				       tx_queue >> 1,
853 				       ((tx_queue & 0x1) << 4) + 8);
854 		q_vector->eims_value = BIT(msix_vector);
855 		break;
856 	default:
857 		BUG();
858 		break;
859 	}
860 
861 	/* add q_vector eims value to global eims_enable_mask */
862 	adapter->eims_enable_mask |= q_vector->eims_value;
863 
864 	/* configure q_vector to set itr on first interrupt */
865 	q_vector->set_itr = 1;
866 }
867 
868 /**
869  *  igb_configure_msix - Configure MSI-X hardware
870  *  @adapter: board private structure to initialize
871  *
872  *  igb_configure_msix sets up the hardware to properly
873  *  generate MSI-X interrupts.
874  **/
875 static void igb_configure_msix(struct igb_adapter *adapter)
876 {
877 	u32 tmp;
878 	int i, vector = 0;
879 	struct e1000_hw *hw = &adapter->hw;
880 
881 	adapter->eims_enable_mask = 0;
882 
883 	/* set vector for other causes, i.e. link changes */
884 	switch (hw->mac.type) {
885 	case e1000_82575:
886 		tmp = rd32(E1000_CTRL_EXT);
887 		/* enable MSI-X PBA support */
888 		tmp |= E1000_CTRL_EXT_PBA_CLR;
889 
890 		/* Auto-Mask interrupts upon ICR read. */
891 		tmp |= E1000_CTRL_EXT_EIAME;
892 		tmp |= E1000_CTRL_EXT_IRCA;
893 
894 		wr32(E1000_CTRL_EXT, tmp);
895 
896 		/* enable msix_other interrupt */
897 		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
898 		adapter->eims_other = E1000_EIMS_OTHER;
899 
900 		break;
901 
902 	case e1000_82576:
903 	case e1000_82580:
904 	case e1000_i350:
905 	case e1000_i354:
906 	case e1000_i210:
907 	case e1000_i211:
908 		/* Turn on MSI-X capability first, or our settings
909 		 * won't stick.  And it will take days to debug.
910 		 */
911 		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
912 		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
913 		     E1000_GPIE_NSICR);
914 
915 		/* enable msix_other interrupt */
916 		adapter->eims_other = BIT(vector);
917 		tmp = (vector++ | E1000_IVAR_VALID) << 8;
918 
919 		wr32(E1000_IVAR_MISC, tmp);
920 		break;
921 	default:
922 		/* do nothing, since nothing else supports MSI-X */
923 		break;
924 	} /* switch (hw->mac.type) */
925 
926 	adapter->eims_enable_mask |= adapter->eims_other;
927 
928 	for (i = 0; i < adapter->num_q_vectors; i++)
929 		igb_assign_vector(adapter->q_vector[i], vector++);
930 
931 	wrfl();
932 }
933 
934 /**
935  *  igb_request_msix - Initialize MSI-X interrupts
936  *  @adapter: board private structure to initialize
937  *
938  *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
939  *  kernel.
940  **/
941 static int igb_request_msix(struct igb_adapter *adapter)
942 {
943 	struct net_device *netdev = adapter->netdev;
944 	int i, err = 0, vector = 0, free_vector = 0;
945 
946 	err = request_irq(adapter->msix_entries[vector].vector,
947 			  igb_msix_other, 0, netdev->name, adapter);
948 	if (err)
949 		goto err_out;
950 
951 	for (i = 0; i < adapter->num_q_vectors; i++) {
952 		struct igb_q_vector *q_vector = adapter->q_vector[i];
953 
954 		vector++;
955 
956 		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
957 
958 		if (q_vector->rx.ring && q_vector->tx.ring)
959 			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
960 				q_vector->rx.ring->queue_index);
961 		else if (q_vector->tx.ring)
962 			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
963 				q_vector->tx.ring->queue_index);
964 		else if (q_vector->rx.ring)
965 			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
966 				q_vector->rx.ring->queue_index);
967 		else
968 			sprintf(q_vector->name, "%s-unused", netdev->name);
969 
970 		err = request_irq(adapter->msix_entries[vector].vector,
971 				  igb_msix_ring, 0, q_vector->name,
972 				  q_vector);
973 		if (err)
974 			goto err_free;
975 	}
976 
977 	igb_configure_msix(adapter);
978 	return 0;
979 
980 err_free:
981 	/* free already assigned IRQs */
982 	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
983 
984 	vector--;
985 	for (i = 0; i < vector; i++) {
986 		free_irq(adapter->msix_entries[free_vector++].vector,
987 			 adapter->q_vector[i]);
988 	}
989 err_out:
990 	return err;
991 }
992 
993 /**
994  *  igb_free_q_vector - Free memory allocated for specific interrupt vector
995  *  @adapter: board private structure to initialize
996  *  @v_idx: Index of vector to be freed
997  *
998  *  This function frees the memory allocated to the q_vector.
999  **/
1000 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1001 {
1002 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1003 
1004 	adapter->q_vector[v_idx] = NULL;
1005 
1006 	/* igb_get_stats64() might access the rings on this vector,
1007 	 * we must wait a grace period before freeing it.
1008 	 */
1009 	if (q_vector)
1010 		kfree_rcu(q_vector, rcu);
1011 }
1012 
1013 /**
1014  *  igb_reset_q_vector - Reset config for interrupt vector
1015  *  @adapter: board private structure to initialize
1016  *  @v_idx: Index of vector to be reset
1017  *
1018  *  If NAPI is enabled it will delete any references to the
1019  *  NAPI struct. This is preparation for igb_free_q_vector.
1020  **/
1021 static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1022 {
1023 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1024 
1025 	/* Coming from igb_set_interrupt_capability, the vectors are not yet
1026 	 * allocated, so q_vector is NULL and we should stop here.
1027 	 */
1028 	if (!q_vector)
1029 		return;
1030 
1031 	if (q_vector->tx.ring)
1032 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1033 
1034 	if (q_vector->rx.ring)
1035 		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1036 
1037 	netif_napi_del(&q_vector->napi);
1039 }
1040 
1041 static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1042 {
1043 	int v_idx = adapter->num_q_vectors;
1044 
1045 	if (adapter->flags & IGB_FLAG_HAS_MSIX)
1046 		pci_disable_msix(adapter->pdev);
1047 	else if (adapter->flags & IGB_FLAG_HAS_MSI)
1048 		pci_disable_msi(adapter->pdev);
1049 
1050 	while (v_idx--)
1051 		igb_reset_q_vector(adapter, v_idx);
1052 }
1053 
1054 /**
1055  *  igb_free_q_vectors - Free memory allocated for interrupt vectors
1056  *  @adapter: board private structure to initialize
1057  *
1058  *  This function frees the memory allocated to the q_vectors.  In addition if
1059  *  NAPI is enabled it will delete any references to the NAPI struct prior
1060  *  to freeing the q_vector.
1061  **/
1062 static void igb_free_q_vectors(struct igb_adapter *adapter)
1063 {
1064 	int v_idx = adapter->num_q_vectors;
1065 
1066 	adapter->num_tx_queues = 0;
1067 	adapter->num_rx_queues = 0;
1068 	adapter->num_q_vectors = 0;
1069 
1070 	while (v_idx--) {
1071 		igb_reset_q_vector(adapter, v_idx);
1072 		igb_free_q_vector(adapter, v_idx);
1073 	}
1074 }
1075 
1076 /**
1077  *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1078  *  @adapter: board private structure to initialize
1079  *
1080  *  This function resets the device so that it has 0 Rx queues, Tx queues, and
1081  *  MSI-X interrupts allocated.
1082  */
1083 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1084 {
1085 	igb_free_q_vectors(adapter);
1086 	igb_reset_interrupt_capability(adapter);
1087 }
1088 
1089 /**
1090  *  igb_set_interrupt_capability - set MSI or MSI-X if supported
1091  *  @adapter: board private structure to initialize
1092  *  @msix: boolean value of MSIX capability
1093  *
1094  *  Attempt to configure interrupts using the best available
1095  *  capabilities of the hardware and kernel.
1096  **/
1097 static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1098 {
1099 	int err;
1100 	int numvecs, i;
1101 
1102 	if (!msix)
1103 		goto msi_only;
1104 	adapter->flags |= IGB_FLAG_HAS_MSIX;
1105 
1106 	/* Number of supported queues. */
1107 	adapter->num_rx_queues = adapter->rss_queues;
1108 	if (adapter->vfs_allocated_count)
1109 		adapter->num_tx_queues = 1;
1110 	else
1111 		adapter->num_tx_queues = adapter->rss_queues;
1112 
1113 	/* start with one vector for every Rx queue */
1114 	numvecs = adapter->num_rx_queues;
1115 
1116 	/* if Tx handler is separate add 1 for every Tx queue */
1117 	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1118 		numvecs += adapter->num_tx_queues;
1119 
1120 	/* store the number of vectors reserved for queues */
1121 	adapter->num_q_vectors = numvecs;
1122 
1123 	/* add 1 vector for link status interrupts */
1124 	numvecs++;
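	/* Worked example: rss_queues = 4 with no VFs and queue pairing
	 * disabled requests 4 Rx + 4 Tx + 1 link vector = 9 MSI-X vectors;
	 * with pairing enabled it would be 4 + 1 = 5.
	 */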
1125 	for (i = 0; i < numvecs; i++)
1126 		adapter->msix_entries[i].entry = i;
1127 
1128 	err = pci_enable_msix_range(adapter->pdev,
1129 				    adapter->msix_entries,
1130 				    numvecs,
1131 				    numvecs);
1132 	if (err > 0)
1133 		return;
1134 
1135 	igb_reset_interrupt_capability(adapter);
1136 
1137 	/* If we can't do MSI-X, try MSI */
1138 msi_only:
1139 	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1140 #ifdef CONFIG_PCI_IOV
1141 	/* disable SR-IOV for non MSI-X configurations */
1142 	if (adapter->vf_data) {
1143 		struct e1000_hw *hw = &adapter->hw;
1144 		/* disable iov and allow time for transactions to clear */
1145 		pci_disable_sriov(adapter->pdev);
1146 		msleep(500);
1147 
1148 		kfree(adapter->vf_mac_list);
1149 		adapter->vf_mac_list = NULL;
1150 		kfree(adapter->vf_data);
1151 		adapter->vf_data = NULL;
1152 		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1153 		wrfl();
1154 		msleep(100);
1155 		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1156 	}
1157 #endif
1158 	adapter->vfs_allocated_count = 0;
1159 	adapter->rss_queues = 1;
1160 	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1161 	adapter->num_rx_queues = 1;
1162 	adapter->num_tx_queues = 1;
1163 	adapter->num_q_vectors = 1;
1164 	if (!pci_enable_msi(adapter->pdev))
1165 		adapter->flags |= IGB_FLAG_HAS_MSI;
1166 }
1167 
1168 static void igb_add_ring(struct igb_ring *ring,
1169 			 struct igb_ring_container *head)
1170 {
1171 	head->ring = ring;
1172 	head->count++;
1173 }
1174 
1175 /**
1176  *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
1177  *  @adapter: board private structure to initialize
1178  *  @v_count: q_vectors allocated on adapter, used for ring interleaving
1179  *  @v_idx: index of vector in adapter struct
1180  *  @txr_count: total number of Tx rings to allocate
1181  *  @txr_idx: index of first Tx ring to allocate
1182  *  @rxr_count: total number of Rx rings to allocate
1183  *  @rxr_idx: index of first Rx ring to allocate
1184  *
1185  *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
1186  **/
1187 static int igb_alloc_q_vector(struct igb_adapter *adapter,
1188 			      int v_count, int v_idx,
1189 			      int txr_count, int txr_idx,
1190 			      int rxr_count, int rxr_idx)
1191 {
1192 	struct igb_q_vector *q_vector;
1193 	struct igb_ring *ring;
1194 	int ring_count;
1195 	size_t size;
1196 
1197 	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
1198 	if (txr_count > 1 || rxr_count > 1)
1199 		return -ENOMEM;
1200 
1201 	ring_count = txr_count + rxr_count;
1202 	size = struct_size(q_vector, ring, ring_count);
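	/* struct_size() computes sizeof(*q_vector) plus ring_count trailing
	 * struct igb_ring entries with overflow checking, e.g. two embedded
	 * rings for a paired Tx/Rx vector.
	 */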
1203 
1204 	/* allocate q_vector and rings */
1205 	q_vector = adapter->q_vector[v_idx];
1206 	if (!q_vector) {
1207 		q_vector = kzalloc(size, GFP_KERNEL);
1208 	} else if (size > ksize(q_vector)) {
1209 		kfree_rcu(q_vector, rcu);
1210 		q_vector = kzalloc(size, GFP_KERNEL);
1211 	} else {
1212 		memset(q_vector, 0, size);
1213 	}
1214 	if (!q_vector)
1215 		return -ENOMEM;
1216 
1217 	/* initialize NAPI */
1218 	netif_napi_add(adapter->netdev, &q_vector->napi,
1219 		       igb_poll, 64);
1220 
1221 	/* tie q_vector and adapter together */
1222 	adapter->q_vector[v_idx] = q_vector;
1223 	q_vector->adapter = adapter;
1224 
1225 	/* initialize work limits */
1226 	q_vector->tx.work_limit = adapter->tx_work_limit;
1227 
1228 	/* initialize ITR configuration */
1229 	q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1230 	q_vector->itr_val = IGB_START_ITR;
1231 
1232 	/* initialize pointer to rings */
1233 	ring = q_vector->ring;
1234 
1235 	/* initialize ITR */
1236 	if (rxr_count) {
1237 		/* rx or rx/tx vector */
1238 		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1239 			q_vector->itr_val = adapter->rx_itr_setting;
1240 	} else {
1241 		/* tx only vector */
1242 		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1243 			q_vector->itr_val = adapter->tx_itr_setting;
1244 	}
1245 
1246 	if (txr_count) {
1247 		/* assign generic ring traits */
1248 		ring->dev = &adapter->pdev->dev;
1249 		ring->netdev = adapter->netdev;
1250 
1251 		/* configure backlink on ring */
1252 		ring->q_vector = q_vector;
1253 
1254 		/* update q_vector Tx values */
1255 		igb_add_ring(ring, &q_vector->tx);
1256 
1257 		/* For 82575, context index must be unique per ring. */
1258 		if (adapter->hw.mac.type == e1000_82575)
1259 			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1260 
1261 		/* apply Tx specific ring traits */
1262 		ring->count = adapter->tx_ring_count;
1263 		ring->queue_index = txr_idx;
1264 
1265 		ring->cbs_enable = false;
1266 		ring->idleslope = 0;
1267 		ring->sendslope = 0;
1268 		ring->hicredit = 0;
1269 		ring->locredit = 0;
1270 
1271 		u64_stats_init(&ring->tx_syncp);
1272 		u64_stats_init(&ring->tx_syncp2);
1273 
1274 		/* assign ring to adapter */
1275 		adapter->tx_ring[txr_idx] = ring;
1276 
1277 		/* push pointer to next ring */
1278 		ring++;
1279 	}
1280 
1281 	if (rxr_count) {
1282 		/* assign generic ring traits */
1283 		ring->dev = &adapter->pdev->dev;
1284 		ring->netdev = adapter->netdev;
1285 
1286 		/* configure backlink on ring */
1287 		ring->q_vector = q_vector;
1288 
1289 		/* update q_vector Rx values */
1290 		igb_add_ring(ring, &q_vector->rx);
1291 
1292 		/* set flag indicating ring supports SCTP checksum offload */
1293 		if (adapter->hw.mac.type >= e1000_82576)
1294 			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1295 
1296 		/* On i350, i354, i210, and i211, loopback VLAN packets
1297 		 * have the tag byte-swapped.
1298 		 */
1299 		if (adapter->hw.mac.type >= e1000_i350)
1300 			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1301 
1302 		/* apply Rx specific ring traits */
1303 		ring->count = adapter->rx_ring_count;
1304 		ring->queue_index = rxr_idx;
1305 
1306 		u64_stats_init(&ring->rx_syncp);
1307 
1308 		/* assign ring to adapter */
1309 		adapter->rx_ring[rxr_idx] = ring;
1310 	}
1311 
1312 	return 0;
1313 }
1314 
1316 /**
1317  *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
1318  *  @adapter: board private structure to initialize
1319  *
1320  *  We allocate one q_vector per queue interrupt.  If allocation fails we
1321  *  return -ENOMEM.
1322  **/
1323 static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1324 {
1325 	int q_vectors = adapter->num_q_vectors;
1326 	int rxr_remaining = adapter->num_rx_queues;
1327 	int txr_remaining = adapter->num_tx_queues;
1328 	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1329 	int err;
1330 
1331 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
1332 		for (; rxr_remaining; v_idx++) {
1333 			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1334 						 0, 0, 1, rxr_idx);
1335 
1336 			if (err)
1337 				goto err_out;
1338 
1339 			/* update counts and index */
1340 			rxr_remaining--;
1341 			rxr_idx++;
1342 		}
1343 	}
1344 
1345 	for (; v_idx < q_vectors; v_idx++) {
1346 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1347 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
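
		/* With queue pairing, e.g. 4 Rx + 4 Tx rings over 4 vectors,
		 * this yields rqpv = tqpv = DIV_ROUND_UP(4, 4) = 1, i.e. one
		 * Rx/Tx pair per vector (igb_alloc_q_vector() rejects counts
		 * above one).
		 */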
1348 
1349 		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1350 					 tqpv, txr_idx, rqpv, rxr_idx);
1351 
1352 		if (err)
1353 			goto err_out;
1354 
1355 		/* update counts and index */
1356 		rxr_remaining -= rqpv;
1357 		txr_remaining -= tqpv;
1358 		rxr_idx++;
1359 		txr_idx++;
1360 	}
1361 
1362 	return 0;
1363 
1364 err_out:
1365 	adapter->num_tx_queues = 0;
1366 	adapter->num_rx_queues = 0;
1367 	adapter->num_q_vectors = 0;
1368 
1369 	while (v_idx--)
1370 		igb_free_q_vector(adapter, v_idx);
1371 
1372 	return -ENOMEM;
1373 }
1374 
1375 /**
1376  *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1377  *  @adapter: board private structure to initialize
1378  *  @msix: boolean value of MSIX capability
1379  *
1380  *  This function initializes the interrupts and allocates all of the queues.
1381  **/
1382 static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1383 {
1384 	struct pci_dev *pdev = adapter->pdev;
1385 	int err;
1386 
1387 	igb_set_interrupt_capability(adapter, msix);
1388 
1389 	err = igb_alloc_q_vectors(adapter);
1390 	if (err) {
1391 		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1392 		goto err_alloc_q_vectors;
1393 	}
1394 
1395 	igb_cache_ring_register(adapter);
1396 
1397 	return 0;
1398 
1399 err_alloc_q_vectors:
1400 	igb_reset_interrupt_capability(adapter);
1401 	return err;
1402 }
1403 
1404 /**
1405  *  igb_request_irq - initialize interrupts
1406  *  @adapter: board private structure to initialize
1407  *
1408  *  Attempts to configure interrupts using the best available
1409  *  capabilities of the hardware and kernel.
1410  **/
1411 static int igb_request_irq(struct igb_adapter *adapter)
1412 {
1413 	struct net_device *netdev = adapter->netdev;
1414 	struct pci_dev *pdev = adapter->pdev;
1415 	int err = 0;
1416 
1417 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1418 		err = igb_request_msix(adapter);
1419 		if (!err)
1420 			goto request_done;
1421 		/* fall back to MSI */
1422 		igb_free_all_tx_resources(adapter);
1423 		igb_free_all_rx_resources(adapter);
1424 
1425 		igb_clear_interrupt_scheme(adapter);
1426 		err = igb_init_interrupt_scheme(adapter, false);
1427 		if (err)
1428 			goto request_done;
1429 
1430 		igb_setup_all_tx_resources(adapter);
1431 		igb_setup_all_rx_resources(adapter);
1432 		igb_configure(adapter);
1433 	}
1434 
1435 	igb_assign_vector(adapter->q_vector[0], 0);
1436 
1437 	if (adapter->flags & IGB_FLAG_HAS_MSI) {
1438 		err = request_irq(pdev->irq, igb_intr_msi, 0,
1439 				  netdev->name, adapter);
1440 		if (!err)
1441 			goto request_done;
1442 
1443 		/* fall back to legacy interrupts */
1444 		igb_reset_interrupt_capability(adapter);
1445 		adapter->flags &= ~IGB_FLAG_HAS_MSI;
1446 	}
1447 
1448 	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1449 			  netdev->name, adapter);
1450 
1451 	if (err)
1452 		dev_err(&pdev->dev, "Error %d getting interrupt\n",
1453 			err);
1454 
1455 request_done:
1456 	return err;
1457 }
1458 
1459 static void igb_free_irq(struct igb_adapter *adapter)
1460 {
1461 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1462 		int vector = 0, i;
1463 
1464 		free_irq(adapter->msix_entries[vector++].vector, adapter);
1465 
1466 		for (i = 0; i < adapter->num_q_vectors; i++)
1467 			free_irq(adapter->msix_entries[vector++].vector,
1468 				 adapter->q_vector[i]);
1469 	} else {
1470 		free_irq(adapter->pdev->irq, adapter);
1471 	}
1472 }
1473 
1474 /**
1475  *  igb_irq_disable - Mask off interrupt generation on the NIC
1476  *  @adapter: board private structure
1477  **/
1478 static void igb_irq_disable(struct igb_adapter *adapter)
1479 {
1480 	struct e1000_hw *hw = &adapter->hw;
1481 
1482 	/* we need to be careful when disabling interrupts.  The VFs are also
1483 	 * mapped into these registers and so clearing the bits can cause
1484 	 * issues on the VF drivers so we only need to clear what we set
1485 	 */
1486 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1487 		u32 regval = rd32(E1000_EIAM);
1488 
1489 		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1490 		wr32(E1000_EIMC, adapter->eims_enable_mask);
1491 		regval = rd32(E1000_EIAC);
1492 		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1493 	}
1494 
1495 	wr32(E1000_IAM, 0);
1496 	wr32(E1000_IMC, ~0);
1497 	wrfl();
1498 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1499 		int i;
1500 
1501 		for (i = 0; i < adapter->num_q_vectors; i++)
1502 			synchronize_irq(adapter->msix_entries[i].vector);
1503 	} else {
1504 		synchronize_irq(adapter->pdev->irq);
1505 	}
1506 }
1507 
1508 /**
1509  *  igb_irq_enable - Enable default interrupt generation settings
1510  *  @adapter: board private structure
1511  **/
1512 static void igb_irq_enable(struct igb_adapter *adapter)
1513 {
1514 	struct e1000_hw *hw = &adapter->hw;
1515 
1516 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1517 		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1518 		u32 regval = rd32(E1000_EIAC);
1519 
1520 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1521 		regval = rd32(E1000_EIAM);
1522 		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1523 		wr32(E1000_EIMS, adapter->eims_enable_mask);
1524 		if (adapter->vfs_allocated_count) {
1525 			wr32(E1000_MBVFIMR, 0xFF);
1526 			ims |= E1000_IMS_VMMB;
1527 		}
1528 		wr32(E1000_IMS, ims);
1529 	} else {
1530 		wr32(E1000_IMS, IMS_ENABLE_MASK |
1531 				E1000_IMS_DRSTA);
1532 		wr32(E1000_IAM, IMS_ENABLE_MASK |
1533 				E1000_IMS_DRSTA);
1534 	}
1535 }
1536 
1537 static void igb_update_mng_vlan(struct igb_adapter *adapter)
1538 {
1539 	struct e1000_hw *hw = &adapter->hw;
1540 	u16 pf_id = adapter->vfs_allocated_count;
1541 	u16 vid = adapter->hw.mng_cookie.vlan_id;
1542 	u16 old_vid = adapter->mng_vlan_id;
1543 
1544 	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1545 		/* add VID to filter table */
1546 		igb_vfta_set(hw, vid, pf_id, true, true);
1547 		adapter->mng_vlan_id = vid;
1548 	} else {
1549 		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1550 	}
1551 
1552 	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1553 	    (vid != old_vid) &&
1554 	    !test_bit(old_vid, adapter->active_vlans)) {
1555 		/* remove VID from filter table */
1556 		igb_vfta_set(hw, old_vid, pf_id, false, true);
1557 	}
1558 }
1559 
1560 /**
1561  *  igb_release_hw_control - release control of the h/w to f/w
1562  *  @adapter: address of board private structure
1563  *
1564  *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1565  *  For ASF and Pass Through versions of f/w this means that the
1566  *  driver is no longer loaded.
1567  **/
1568 static void igb_release_hw_control(struct igb_adapter *adapter)
1569 {
1570 	struct e1000_hw *hw = &adapter->hw;
1571 	u32 ctrl_ext;
1572 
1573 	/* Let firmware take over control of h/w */
1574 	ctrl_ext = rd32(E1000_CTRL_EXT);
1575 	wr32(E1000_CTRL_EXT,
1576 			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1577 }
1578 
1579 /**
1580  *  igb_get_hw_control - get control of the h/w from f/w
1581  *  @adapter: address of board private structure
1582  *
1583  *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1584  *  For ASF and Pass Through versions of f/w this means that
1585  *  the driver is loaded.
1586  **/
1587 static void igb_get_hw_control(struct igb_adapter *adapter)
1588 {
1589 	struct e1000_hw *hw = &adapter->hw;
1590 	u32 ctrl_ext;
1591 
1592 	/* Let firmware know the driver has taken over */
1593 	ctrl_ext = rd32(E1000_CTRL_EXT);
1594 	wr32(E1000_CTRL_EXT,
1595 			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1596 }
1597 
1598 static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1599 {
1600 	struct net_device *netdev = adapter->netdev;
1601 	struct e1000_hw *hw = &adapter->hw;
1602 
1603 	WARN_ON(hw->mac.type != e1000_i210);
1604 
1605 	if (enable)
1606 		adapter->flags |= IGB_FLAG_FQTSS;
1607 	else
1608 		adapter->flags &= ~IGB_FLAG_FQTSS;
1609 
1610 	if (netif_running(netdev))
1611 		schedule_work(&adapter->reset_task);
1612 }
1613 
1614 static bool is_fqtss_enabled(struct igb_adapter *adapter)
1615 {
1616 	return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1617 }
1618 
1619 static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1620 				   enum tx_queue_prio prio)
1621 {
1622 	u32 val;
1623 
1624 	WARN_ON(hw->mac.type != e1000_i210);
1625 	WARN_ON(queue < 0 || queue > 3);
1626 
1627 	val = rd32(E1000_I210_TXDCTL(queue));
1628 
1629 	if (prio == TX_QUEUE_PRIO_HIGH)
1630 		val |= E1000_TXDCTL_PRIORITY;
1631 	else
1632 		val &= ~E1000_TXDCTL_PRIORITY;
1633 
1634 	wr32(E1000_I210_TXDCTL(queue), val);
1635 }
1636 
1637 static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1638 {
1639 	u32 val;
1640 
1641 	WARN_ON(hw->mac.type != e1000_i210);
1642 	WARN_ON(queue < 0 || queue > 1);
1643 
1644 	val = rd32(E1000_I210_TQAVCC(queue));
1645 
1646 	if (mode == QUEUE_MODE_STREAM_RESERVATION)
1647 		val |= E1000_TQAVCC_QUEUEMODE;
1648 	else
1649 		val &= ~E1000_TQAVCC_QUEUEMODE;
1650 
1651 	wr32(E1000_I210_TQAVCC(queue), val);
1652 }
1653 
1654 static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1655 {
1656 	int i;
1657 
1658 	for (i = 0; i < adapter->num_tx_queues; i++) {
1659 		if (adapter->tx_ring[i]->cbs_enable)
1660 			return true;
1661 	}
1662 
1663 	return false;
1664 }
1665 
1666 static bool is_any_txtime_enabled(struct igb_adapter *adapter)
1667 {
1668 	int i;
1669 
1670 	for (i = 0; i < adapter->num_tx_queues; i++) {
1671 		if (adapter->tx_ring[i]->launchtime_enable)
1672 			return true;
1673 	}
1674 
1675 	return false;
1676 }
1677 
1678 /**
1679  *  igb_config_tx_modes - Configure "Qav Tx mode" features on igb
1680  *  @adapter: pointer to adapter struct
1681  *  @queue: queue number
1682  *
1683  *  Configure CBS and Launchtime for a given hardware queue.
1684  *  Parameters are retrieved from the correct Tx ring, so
1685  *  igb_save_cbs_params() and igb_save_txtime_params() should be used
1686  *  for setting those correctly prior to this function being called.
1687  **/
1688 static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
1689 {
1690 	struct igb_ring *ring = adapter->tx_ring[queue];
1691 	struct net_device *netdev = adapter->netdev;
1692 	struct e1000_hw *hw = &adapter->hw;
1693 	u32 tqavcc, tqavctrl;
1694 	u16 value;
1695 
1696 	WARN_ON(hw->mac.type != e1000_i210);
1697 	WARN_ON(queue < 0 || queue > 1);
1698 
1699 	/* If any of the Qav features is enabled, configure queues as SR and
1700 	 * with HIGH PRIO. If none is enabled, configure them with LOW PRIO
1701 	 * as SP.
1702 	 */
1703 	if (ring->cbs_enable || ring->launchtime_enable) {
1704 		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1705 		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1706 	} else {
1707 		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1708 		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1709 	}
1710 
1711 	/* If CBS is enabled, set DataTranARB and config its parameters. */
1712 	if (ring->cbs_enable || queue == 0) {
1713 		/* i210 does not allow the queue 0 to be in the Strict
1714 		 * Priority mode while the Qav mode is enabled, so,
1715 		 * instead of disabling strict priority mode, we give
1716 		 * queue 0 the maximum of credits possible.
1717 		 *
1718 		 * See section 8.12.19 of the i210 datasheet, "Note:
1719 		 * Queue0 QueueMode must be set to 1b when
1720 		 * TransmitMode is set to Qav."
1721 		 */
1722 		if (queue == 0 && !ring->cbs_enable) {
1723 			/* max "linkspeed" idleslope in kbps */
1724 			ring->idleslope = 1000000;
1725 			ring->hicredit = ETH_FRAME_LEN;
1726 		}
1727 
1728 		/* Always set data transfer arbitration to credit-based
1729 		 * shaper algorithm on TQAVCTRL if CBS is enabled for any of
1730 		 * the queues.
1731 		 */
1732 		tqavctrl = rd32(E1000_I210_TQAVCTRL);
1733 		tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
1734 		wr32(E1000_I210_TQAVCTRL, tqavctrl);
1735 
1736 		/* According to i210 datasheet section 7.2.7.7, we should set
1737 		 * the 'idleSlope' field from TQAVCC register following the
1738 		 * equation:
1739 		 *
1740 		 * For 100 Mbps link speed:
1741 		 *
1742 		 *     value = BW * 0x7735 * 0.2                          (E1)
1743 		 *
1744 		 * For 1000Mbps link speed:
1745 		 *
1746 		 *     value = BW * 0x7735 * 2                            (E2)
1747 		 *
1748 		 * E1 and E2 can be merged into one equation as shown below.
1749 		 * Note that 'link-speed' is in Mbps.
1750 		 *
1751 		 *     value = BW * 0x7735 * 2 * link-speed
1752 		 *                           --------------               (E3)
1753 		 *                                1000
1754 		 *
1755 		 * 'BW' is the percentage bandwidth out of full link speed
1756 		 * which can be found with the following equation. Note that
1757 		 * idleSlope here is the parameter from this function which
1758 		 * is in kbps.
1759 		 *
1760 		 *     BW =     idleSlope
1761 		 *          -----------------                             (E4)
1762 		 *          link-speed * 1000
1763 		 *
		 * That said, we can come up with a generic equation to
		 * calculate the value we should write to the TQAVCC register
		 * by replacing 'BW' in E3 by E4. The resulting equation is:
1767 		 *
1768 		 * value =     idleSlope     * 0x7735 * 2 * link-speed
1769 		 *         -----------------            --------------    (E5)
1770 		 *         link-speed * 1000                 1000
1771 		 *
1772 		 * 'link-speed' is present in both sides of the fraction so
1773 		 * it is canceled out. The final equation is the following:
1774 		 *
1775 		 *     value = idleSlope * 61034
1776 		 *             -----------------                          (E6)
1777 		 *                  1000000
1778 		 *
1779 		 * NOTE: For i210, given the above, we can see that idleslope
1780 		 *       is represented in 16.38431 kbps units by the value at
1781 		 *       the TQAVCC register (1Gbps / 61034), which reduces
1782 		 *       the granularity for idleslope increments.
1783 		 *       For instance, if you want to configure a 2576kbps
1784 		 *       idleslope, the value to be written on the register
1785 		 *       would have to be 157.23. If rounded down, you end
1786 		 *       up with less bandwidth available than originally
1787 		 *       required (~2572 kbps). If rounded up, you end up
1788 		 *       with a higher bandwidth (~2589 kbps). Below the
1789 		 *       approach we take is to always round up the
1790 		 *       calculated value, so the resulting bandwidth might
1791 		 *       be slightly higher for some configurations.
1792 		 */
1793 		value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
1794 
1795 		tqavcc = rd32(E1000_I210_TQAVCC(queue));
1796 		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1797 		tqavcc |= value;
1798 		wr32(E1000_I210_TQAVCC(queue), tqavcc);
1799 
1800 		wr32(E1000_I210_TQAVHC(queue),
1801 		     0x80000000 + ring->hicredit * 0x7735);
	} else {
1804 		/* Set idleSlope to zero. */
1805 		tqavcc = rd32(E1000_I210_TQAVCC(queue));
1806 		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1807 		wr32(E1000_I210_TQAVCC(queue), tqavcc);
1808 
1809 		/* Set hiCredit to zero. */
1810 		wr32(E1000_I210_TQAVHC(queue), 0);
1811 
1812 		/* If CBS is not enabled for any queues anymore, then return to
1813 		 * the default state of Data Transmission Arbitration on
1814 		 * TQAVCTRL.
1815 		 */
1816 		if (!is_any_cbs_enabled(adapter)) {
1817 			tqavctrl = rd32(E1000_I210_TQAVCTRL);
1818 			tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1819 			wr32(E1000_I210_TQAVCTRL, tqavctrl);
1820 		}
1821 	}
1822 
1823 	/* If LaunchTime is enabled, set DataTranTIM. */
1824 	if (ring->launchtime_enable) {
1825 		/* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
1826 		 * for any of the SR queues, and configure fetchtime delta.
1827 		 * XXX NOTE:
1828 		 *     - LaunchTime will be enabled for all SR queues.
1829 		 *     - A fixed offset can be added relative to the launch
1830 		 *       time of all packets if configured at reg LAUNCH_OS0.
1831 		 *       We are keeping it as 0 for now (default value).
1832 		 */
1833 		tqavctrl = rd32(E1000_I210_TQAVCTRL);
1834 		tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1835 		       E1000_TQAVCTRL_FETCHTIME_DELTA;
1836 		wr32(E1000_I210_TQAVCTRL, tqavctrl);
1837 	} else {
1838 		/* If Launchtime is not enabled for any SR queues anymore,
1839 		 * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta,
1840 		 * effectively disabling Launchtime.
1841 		 */
1842 		if (!is_any_txtime_enabled(adapter)) {
1843 			tqavctrl = rd32(E1000_I210_TQAVCTRL);
1844 			tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1845 			tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1846 			wr32(E1000_I210_TQAVCTRL, tqavctrl);
1847 		}
1848 	}
1849 
1850 	/* XXX: In i210 controller the sendSlope and loCredit parameters from
1851 	 * CBS are not configurable by software so we don't do any 'controller
1852 	 * configuration' in respect to these parameters.
1853 	 */
1854 
1855 	netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1856 		   ring->cbs_enable ? "enabled" : "disabled",
1857 		   ring->launchtime_enable ? "enabled" : "disabled",
1858 		   queue,
1859 		   ring->idleslope, ring->sendslope,
1860 		   ring->hicredit, ring->locredit);
1861 }
1862 
1863 static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1864 				  bool enable)
1865 {
1866 	struct igb_ring *ring;
1867 
	if (queue < 0 || queue >= adapter->num_tx_queues)
1869 		return -EINVAL;
1870 
1871 	ring = adapter->tx_ring[queue];
1872 	ring->launchtime_enable = enable;
1873 
1874 	return 0;
1875 }
1876 
1877 static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1878 			       bool enable, int idleslope, int sendslope,
1879 			       int hicredit, int locredit)
1880 {
1881 	struct igb_ring *ring;
1882 
	if (queue < 0 || queue >= adapter->num_tx_queues)
1884 		return -EINVAL;
1885 
1886 	ring = adapter->tx_ring[queue];
1887 
1888 	ring->cbs_enable = enable;
1889 	ring->idleslope = idleslope;
1890 	ring->sendslope = sendslope;
1891 	ring->hicredit = hicredit;
1892 	ring->locredit = locredit;
1893 
1894 	return 0;
1895 }
1896 
1897 /**
1898  *  igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
1899  *  @adapter: pointer to adapter struct
1900  *
1901  *  Configure TQAVCTRL register switching the controller's Tx mode
1902  *  if FQTSS mode is enabled or disabled. Additionally, will issue
1903  *  a call to igb_config_tx_modes() per queue so any previously saved
1904  *  Tx parameters are applied.
1905  **/
1906 static void igb_setup_tx_mode(struct igb_adapter *adapter)
1907 {
1908 	struct net_device *netdev = adapter->netdev;
1909 	struct e1000_hw *hw = &adapter->hw;
1910 	u32 val;
1911 
1912 	/* Only i210 controller supports changing the transmission mode. */
1913 	if (hw->mac.type != e1000_i210)
1914 		return;
1915 
1916 	if (is_fqtss_enabled(adapter)) {
1917 		int i, max_queue;
1918 
1919 		/* Configure TQAVCTRL register: set transmit mode to 'Qav',
1920 		 * set data fetch arbitration to 'round robin', set SP_WAIT_SR
1921 		 * so SP queues wait for SR ones.
1922 		 */
1923 		val = rd32(E1000_I210_TQAVCTRL);
1924 		val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1925 		val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1926 		wr32(E1000_I210_TQAVCTRL, val);
1927 
1928 		/* Configure Tx and Rx packet buffers sizes as described in
1929 		 * i210 datasheet section 7.2.7.7.
1930 		 */
1931 		val = rd32(E1000_TXPBS);
1932 		val &= ~I210_TXPBSIZE_MASK;
1933 		val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1934 			I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1935 		wr32(E1000_TXPBS, val);
1936 
1937 		val = rd32(E1000_RXPBS);
1938 		val &= ~I210_RXPBSIZE_MASK;
1939 		val |= I210_RXPBSIZE_PB_30KB;
1940 		wr32(E1000_RXPBS, val);
1941 
1942 		/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
1943 		 * register should not exceed the buffer size programmed in
1944 		 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
1945 		 * so according to the datasheet we should set MAX_TPKT_SIZE to
1946 		 * 4kB / 64.
1947 		 *
		 * However, when we do so, no frames from queues 2 and 3 are
		 * transmitted.  It seems MAX_TPKT_SIZE must not be greater
		 * than or _equal_ to the buffer size programmed in TXPBS.
		 * For this reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64.
1952 		 */
1953 		val = (4096 - 1) / 64;
1954 		wr32(E1000_I210_DTXMXPKTSZ, val);
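		/* Illustrative arithmetic: (4096 - 1) / 64 is 63 in integer
		 * division, so MAX_TPKT_SIZE permits packets of up to
		 * 63 * 64 = 4032 bytes, safely below the 4kB packet buffers
		 * programmed above.
		 */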
1955 
1956 		/* Since FQTSS mode is enabled, apply any CBS configuration
1957 		 * previously set. If no previous CBS configuration has been
1958 		 * done, then the initial configuration is applied, which means
1959 		 * CBS is disabled.
1960 		 */
1961 		max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1962 			    adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1963 
		for (i = 0; i < max_queue; i++)
			igb_config_tx_modes(adapter, i);
1967 	} else {
1968 		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1969 		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1970 		wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1971 
1972 		val = rd32(E1000_I210_TQAVCTRL);
		/* According to Section 8.12.21, the other flags we've set
		 * when enabling FQTSS are not relevant when disabling FQTSS,
		 * so we don't need to clear them here.
		 */
1977 		val &= ~E1000_TQAVCTRL_XMIT_MODE;
1978 		wr32(E1000_I210_TQAVCTRL, val);
1979 	}
1980 
1981 	netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1982 		   "enabled" : "disabled");
1983 }
1984 
1985 /**
1986  *  igb_configure - configure the hardware for RX and TX
1987  *  @adapter: private board structure
1988  **/
1989 static void igb_configure(struct igb_adapter *adapter)
1990 {
1991 	struct net_device *netdev = adapter->netdev;
1992 	int i;
1993 
1994 	igb_get_hw_control(adapter);
1995 	igb_set_rx_mode(netdev);
1996 	igb_setup_tx_mode(adapter);
1997 
1998 	igb_restore_vlan(adapter);
1999 
2000 	igb_setup_tctl(adapter);
2001 	igb_setup_mrqc(adapter);
2002 	igb_setup_rctl(adapter);
2003 
2004 	igb_nfc_filter_restore(adapter);
2005 	igb_configure_tx(adapter);
2006 	igb_configure_rx(adapter);
2007 
2008 	igb_rx_fifo_flush_82575(&adapter->hw);
2009 
2010 	/* call igb_desc_unused which always leaves
2011 	 * at least 1 descriptor unused to make sure
2012 	 * next_to_use != next_to_clean
2013 	 */
2014 	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];

		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
2017 	}
2018 }
2019 
2020 /**
2021  *  igb_power_up_link - Power up the phy/serdes link
2022  *  @adapter: address of board private structure
2023  **/
2024 void igb_power_up_link(struct igb_adapter *adapter)
2025 {
2026 	igb_reset_phy(&adapter->hw);
2027 
2028 	if (adapter->hw.phy.media_type == e1000_media_type_copper)
2029 		igb_power_up_phy_copper(&adapter->hw);
2030 	else
2031 		igb_power_up_serdes_link_82575(&adapter->hw);
2032 
2033 	igb_setup_link(&adapter->hw);
2034 }
2035 
2036 /**
2037  *  igb_power_down_link - Power down the phy/serdes link
2038  *  @adapter: address of board private structure
2039  */
2040 static void igb_power_down_link(struct igb_adapter *adapter)
2041 {
2042 	if (adapter->hw.phy.media_type == e1000_media_type_copper)
2043 		igb_power_down_phy_copper_82575(&adapter->hw);
2044 	else
2045 		igb_shutdown_serdes_link_82575(&adapter->hw);
2046 }
2047 
2048 /**
2049  * Detect and switch function for Media Auto Sense
2050  * @adapter: address of the board private structure
2051  **/
2052 static void igb_check_swap_media(struct igb_adapter *adapter)
2053 {
2054 	struct e1000_hw *hw = &adapter->hw;
2055 	u32 ctrl_ext, connsw;
2056 	bool swap_now = false;
2057 
2058 	ctrl_ext = rd32(E1000_CTRL_EXT);
2059 	connsw = rd32(E1000_CONNSW);
2060 
	/* need to live swap if current media is copper and we have fiber/serdes
	 * to go to.
	 */
2065 	if ((hw->phy.media_type == e1000_media_type_copper) &&
2066 	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2067 		swap_now = true;
2068 	} else if ((hw->phy.media_type != e1000_media_type_copper) &&
2069 		   !(connsw & E1000_CONNSW_SERDESD)) {
2070 		/* copper signal takes time to appear */
		if (adapter->copper_tries < 4) {
			adapter->copper_tries++;
			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
			wr32(E1000_CONNSW, connsw);
			return;
		}

		adapter->copper_tries = 0;
		if ((connsw & E1000_CONNSW_PHYSD) &&
		    !(connsw & E1000_CONNSW_PHY_PDN)) {
			swap_now = true;
			connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
			wr32(E1000_CONNSW, connsw);
		}
2085 	}
2086 
2087 	if (!swap_now)
2088 		return;
2089 
2090 	switch (hw->phy.media_type) {
2091 	case e1000_media_type_copper:
2092 		netdev_info(adapter->netdev,
2093 			"MAS: changing media to fiber/serdes\n");
2094 		ctrl_ext |=
2095 			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2096 		adapter->flags |= IGB_FLAG_MEDIA_RESET;
2097 		adapter->copper_tries = 0;
2098 		break;
2099 	case e1000_media_type_internal_serdes:
2100 	case e1000_media_type_fiber:
2101 		netdev_info(adapter->netdev,
2102 			"MAS: changing media to copper\n");
2103 		ctrl_ext &=
2104 			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2105 		adapter->flags |= IGB_FLAG_MEDIA_RESET;
2106 		break;
2107 	default:
2108 		/* shouldn't get here during regular operation */
		netdev_err(adapter->netdev,
			"MAS: Invalid media type found, returning\n");
2111 		break;
2112 	}
2113 	wr32(E1000_CTRL_EXT, ctrl_ext);
2114 }
2115 
2116 /**
2117  *  igb_up - Open the interface and prepare it to handle traffic
2118  *  @adapter: board private structure
2119  **/
2120 int igb_up(struct igb_adapter *adapter)
2121 {
2122 	struct e1000_hw *hw = &adapter->hw;
2123 	int i;
2124 
2125 	/* hardware has been reset, we need to reload some things */
2126 	igb_configure(adapter);
2127 
2128 	clear_bit(__IGB_DOWN, &adapter->state);
2129 
2130 	for (i = 0; i < adapter->num_q_vectors; i++)
2131 		napi_enable(&(adapter->q_vector[i]->napi));
2132 
2133 	if (adapter->flags & IGB_FLAG_HAS_MSIX)
2134 		igb_configure_msix(adapter);
2135 	else
2136 		igb_assign_vector(adapter->q_vector[0], 0);
2137 
2138 	/* Clear any pending interrupts. */
2139 	rd32(E1000_TSICR);
2140 	rd32(E1000_ICR);
2141 	igb_irq_enable(adapter);
2142 
2143 	/* notify VFs that reset has been completed */
2144 	if (adapter->vfs_allocated_count) {
2145 		u32 reg_data = rd32(E1000_CTRL_EXT);
2146 
2147 		reg_data |= E1000_CTRL_EXT_PFRSTD;
2148 		wr32(E1000_CTRL_EXT, reg_data);
2149 	}
2150 
2151 	netif_tx_start_all_queues(adapter->netdev);
2152 
2153 	/* start the watchdog. */
2154 	hw->mac.get_link_status = 1;
2155 	schedule_work(&adapter->watchdog_task);
2156 
2157 	if ((adapter->flags & IGB_FLAG_EEE) &&
2158 	    (!hw->dev_spec._82575.eee_disable))
2159 		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2160 
2161 	return 0;
2162 }
2163 
2164 void igb_down(struct igb_adapter *adapter)
2165 {
2166 	struct net_device *netdev = adapter->netdev;
2167 	struct e1000_hw *hw = &adapter->hw;
2168 	u32 tctl, rctl;
2169 	int i;
2170 
2171 	/* signal that we're down so the interrupt handler does not
2172 	 * reschedule our watchdog timer
2173 	 */
2174 	set_bit(__IGB_DOWN, &adapter->state);
2175 
2176 	/* disable receives in the hardware */
2177 	rctl = rd32(E1000_RCTL);
2178 	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2179 	/* flush and sleep below */
2180 
2181 	igb_nfc_filter_exit(adapter);
2182 
2183 	netif_carrier_off(netdev);
2184 	netif_tx_stop_all_queues(netdev);
2185 
2186 	/* disable transmits in the hardware */
2187 	tctl = rd32(E1000_TCTL);
2188 	tctl &= ~E1000_TCTL_EN;
2189 	wr32(E1000_TCTL, tctl);
2190 	/* flush both disables and wait for them to finish */
2191 	wrfl();
2192 	usleep_range(10000, 11000);
2193 
2194 	igb_irq_disable(adapter);
2195 
2196 	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2197 
2198 	for (i = 0; i < adapter->num_q_vectors; i++) {
2199 		if (adapter->q_vector[i]) {
2200 			napi_synchronize(&adapter->q_vector[i]->napi);
2201 			napi_disable(&adapter->q_vector[i]->napi);
2202 		}
2203 	}
2204 
2205 	del_timer_sync(&adapter->watchdog_timer);
2206 	del_timer_sync(&adapter->phy_info_timer);
2207 
	/* record the stats before reset */
2209 	spin_lock(&adapter->stats64_lock);
2210 	igb_update_stats(adapter);
2211 	spin_unlock(&adapter->stats64_lock);
2212 
2213 	adapter->link_speed = 0;
2214 	adapter->link_duplex = 0;
2215 
2216 	if (!pci_channel_offline(adapter->pdev))
2217 		igb_reset(adapter);
2218 
2219 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
2220 	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2221 
2222 	igb_clean_all_tx_rings(adapter);
2223 	igb_clean_all_rx_rings(adapter);
2224 #ifdef CONFIG_IGB_DCA
2225 
2226 	/* since we reset the hardware DCA settings were cleared */
2227 	igb_setup_dca(adapter);
2228 #endif
2229 }
2230 
2231 void igb_reinit_locked(struct igb_adapter *adapter)
2232 {
2233 	WARN_ON(in_interrupt());
2234 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2235 		usleep_range(1000, 2000);
2236 	igb_down(adapter);
2237 	igb_up(adapter);
2238 	clear_bit(__IGB_RESETTING, &adapter->state);
2239 }
2240 
/**
 *  igb_enable_mas - Media Autosense re-enable after swap
 *  @adapter: adapter struct
 **/
2245 static void igb_enable_mas(struct igb_adapter *adapter)
2246 {
2247 	struct e1000_hw *hw = &adapter->hw;
2248 	u32 connsw = rd32(E1000_CONNSW);
2249 
2250 	/* configure for SerDes media detect */
2251 	if ((hw->phy.media_type == e1000_media_type_copper) &&
2252 	    (!(connsw & E1000_CONNSW_SERDESD))) {
2253 		connsw |= E1000_CONNSW_ENRGSRC;
2254 		connsw |= E1000_CONNSW_AUTOSENSE_EN;
2255 		wr32(E1000_CONNSW, connsw);
2256 		wrfl();
2257 	}
2258 }
2259 
2260 void igb_reset(struct igb_adapter *adapter)
2261 {
2262 	struct pci_dev *pdev = adapter->pdev;
2263 	struct e1000_hw *hw = &adapter->hw;
2264 	struct e1000_mac_info *mac = &hw->mac;
2265 	struct e1000_fc_info *fc = &hw->fc;
2266 	u32 pba, hwm;
2267 
	/* Repartition PBA for MTUs greater than 9K.
	 * CTRL.RST is required for the change to take effect.
	 */
2271 	switch (mac->type) {
2272 	case e1000_i350:
2273 	case e1000_i354:
2274 	case e1000_82580:
2275 		pba = rd32(E1000_RXPBS);
2276 		pba = igb_rxpbs_adjust_82580(pba);
2277 		break;
2278 	case e1000_82576:
2279 		pba = rd32(E1000_RXPBS);
2280 		pba &= E1000_RXPBS_SIZE_MASK_82576;
2281 		break;
2282 	case e1000_82575:
2283 	case e1000_i210:
2284 	case e1000_i211:
2285 	default:
2286 		pba = E1000_PBA_34K;
2287 		break;
2288 	}
2289 
2290 	if (mac->type == e1000_82575) {
2291 		u32 min_rx_space, min_tx_space, needed_tx_space;
2292 
2293 		/* write Rx PBA so that hardware can report correct Tx PBA */
2294 		wr32(E1000_PBA, pba);
2295 
2296 		/* To maintain wire speed transmits, the Tx FIFO should be
2297 		 * large enough to accommodate two full transmit packets,
2298 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
2299 		 * the Rx FIFO should be large enough to accommodate at least
2300 		 * one full receive packet and is similarly rounded up and
2301 		 * expressed in KB.
2302 		 */
2303 		min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2304 
2305 		/* The Tx FIFO also stores 16 bytes of information about the Tx
2306 		 * but don't include Ethernet FCS because hardware appends it.
2307 		 * We only need to round down to the nearest 512 byte block
2308 		 * count since the value we care about is 2 frames, not 1.
2309 		 */
2310 		min_tx_space = adapter->max_frame_size;
2311 		min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2312 		min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2313 
2314 		/* upper 16 bits has Tx packet buffer allocation size in KB */
2315 		needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
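		/* Illustrative example, assuming a standard 1522-byte max
		 * frame: min_tx_space = DIV_ROUND_UP(1522 + 16 - 4, 512) = 3.
		 * One frame counted in 512-byte units is numerically the
		 * same as two frames rounded to whole KB (2 * 1534 bytes is
		 * ~3KB), which is why this value can be compared directly
		 * against the KB-based Tx allocation read from E1000_PBA.
		 */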
2316 
2317 		/* If current Tx allocation is less than the min Tx FIFO size,
2318 		 * and the min Tx FIFO size is less than the current Rx FIFO
2319 		 * allocation, take space away from current Rx allocation.
2320 		 */
2321 		if (needed_tx_space < pba) {
2322 			pba -= needed_tx_space;
2323 
2324 			/* if short on Rx space, Rx wins and must trump Tx
2325 			 * adjustment
2326 			 */
2327 			if (pba < min_rx_space)
2328 				pba = min_rx_space;
2329 		}
2330 
2331 		/* adjust PBA for jumbo frames */
2332 		wr32(E1000_PBA, pba);
2333 	}
2334 
2335 	/* flow control settings
2336 	 * The high water mark must be low enough to fit one full frame
2337 	 * after transmitting the pause frame.  As such we must have enough
2338 	 * space to allow for us to complete our current transmit and then
2339 	 * receive the frame that is in progress from the link partner.
2340 	 * Set it to:
2341 	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
2342 	 */
2343 	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2344 
2345 	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
2346 	fc->low_water = fc->high_water - 16;
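	/* Illustrative example (the numbers are assumptions, not
	 * requirements): with a 34KB Rx PBA (pba = 34) and a 1522-byte max
	 * frame, hwm = 34 * 1024 - (1522 + MAX_JUMBO_FRAME_SIZE); the mask
	 * above then trims high_water to 16-byte granularity, with low_water
	 * trailing it by 16 bytes.
	 */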
2347 	fc->pause_time = 0xFFFF;
2348 	fc->send_xon = 1;
2349 	fc->current_mode = fc->requested_mode;
2350 
	/* reset VF state and disable VF transmits and receives */
2352 	if (adapter->vfs_allocated_count) {
2353 		int i;
2354 
2355 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2356 			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2357 
2358 		/* ping all the active vfs to let them know we are going down */
2359 		igb_ping_all_vfs(adapter);
2360 
2361 		/* disable transmits and receives */
2362 		wr32(E1000_VFRE, 0);
2363 		wr32(E1000_VFTE, 0);
2364 	}
2365 
2366 	/* Allow time for pending master requests to run */
2367 	hw->mac.ops.reset_hw(hw);
2368 	wr32(E1000_WUC, 0);
2369 
2370 	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2371 		/* need to resetup here after media swap */
2372 		adapter->ei.get_invariants(hw);
2373 		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2374 	}
2375 	if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
2376 	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2377 		igb_enable_mas(adapter);
2378 	}
2379 	if (hw->mac.ops.init_hw(hw))
2380 		dev_err(&pdev->dev, "Hardware Error\n");
2381 
2382 	/* RAR registers were cleared during init_hw, clear mac table */
2383 	igb_flush_mac_table(adapter);
2384 	__dev_uc_unsync(adapter->netdev, NULL);
2385 
2386 	/* Recover default RAR entry */
2387 	igb_set_default_mac_filter(adapter);
2388 
2389 	/* Flow control settings reset on hardware reset, so guarantee flow
2390 	 * control is off when forcing speed.
2391 	 */
2392 	if (!hw->mac.autoneg)
2393 		igb_force_mac_fc(hw);
2394 
2395 	igb_init_dmac(adapter, pba);
2396 #ifdef CONFIG_IGB_HWMON
2397 	/* Re-initialize the thermal sensor on i350 devices. */
2398 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
2399 		if (mac->type == e1000_i350 && hw->bus.func == 0) {
2400 			/* If present, re-initialize the external thermal sensor
2401 			 * interface.
2402 			 */
2403 			if (adapter->ets)
2404 				mac->ops.init_thermal_sensor_thresh(hw);
2405 		}
2406 	}
2407 #endif
2408 	/* Re-establish EEE setting */
2409 	if (hw->phy.media_type == e1000_media_type_copper) {
2410 		switch (mac->type) {
2411 		case e1000_i350:
2412 		case e1000_i210:
2413 		case e1000_i211:
2414 			igb_set_eee_i350(hw, true, true);
2415 			break;
2416 		case e1000_i354:
2417 			igb_set_eee_i354(hw, true, true);
2418 			break;
2419 		default:
2420 			break;
2421 		}
2422 	}
2423 	if (!netif_running(adapter->netdev))
2424 		igb_power_down_link(adapter);
2425 
2426 	igb_update_mng_vlan(adapter);
2427 
2428 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2429 	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2430 
2431 	/* Re-enable PTP, where applicable. */
2432 	if (adapter->ptp_flags & IGB_PTP_ENABLED)
2433 		igb_ptp_reset(adapter);
2434 
2435 	igb_get_phy_info(hw);
2436 }
2437 
2438 static netdev_features_t igb_fix_features(struct net_device *netdev,
2439 	netdev_features_t features)
2440 {
2441 	/* Since there is no support for separate Rx/Tx vlan accel
2442 	 * enable/disable make sure Tx flag is always in same state as Rx.
2443 	 */
2444 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2445 		features |= NETIF_F_HW_VLAN_CTAG_TX;
2446 	else
2447 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2448 
2449 	return features;
2450 }
2451 
2452 static int igb_set_features(struct net_device *netdev,
2453 	netdev_features_t features)
2454 {
2455 	netdev_features_t changed = netdev->features ^ features;
2456 	struct igb_adapter *adapter = netdev_priv(netdev);
2457 
2458 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2459 		igb_vlan_mode(netdev, features);
2460 
2461 	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2462 		return 0;
2463 
2464 	if (!(features & NETIF_F_NTUPLE)) {
2465 		struct hlist_node *node2;
2466 		struct igb_nfc_filter *rule;
2467 
2468 		spin_lock(&adapter->nfc_lock);
2469 		hlist_for_each_entry_safe(rule, node2,
2470 					  &adapter->nfc_filter_list, nfc_node) {
2471 			igb_erase_filter(adapter, rule);
2472 			hlist_del(&rule->nfc_node);
2473 			kfree(rule);
2474 		}
2475 		spin_unlock(&adapter->nfc_lock);
2476 		adapter->nfc_filter_count = 0;
2477 	}
2478 
2479 	netdev->features = features;
2480 
2481 	if (netif_running(netdev))
2482 		igb_reinit_locked(adapter);
2483 	else
2484 		igb_reset(adapter);
2485 
2486 	return 1;
2487 }
2488 
2489 static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2490 			   struct net_device *dev,
2491 			   const unsigned char *addr, u16 vid,
2492 			   u16 flags,
2493 			   struct netlink_ext_ack *extack)
2494 {
2495 	/* guarantee we can provide a unique filter for the unicast address */
2496 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2497 		struct igb_adapter *adapter = netdev_priv(dev);
2498 		int vfn = adapter->vfs_allocated_count;
2499 
2500 		if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2501 			return -ENOMEM;
2502 	}
2503 
2504 	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2505 }
2506 
2507 #define IGB_MAX_MAC_HDR_LEN	127
2508 #define IGB_MAX_NETWORK_HDR_LEN	511
2509 
2510 static netdev_features_t
2511 igb_features_check(struct sk_buff *skb, struct net_device *dev,
2512 		   netdev_features_t features)
2513 {
2514 	unsigned int network_hdr_len, mac_hdr_len;
2515 
2516 	/* Make certain the headers can be described by a context descriptor */
2517 	mac_hdr_len = skb_network_header(skb) - skb->data;
2518 	if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2519 		return features & ~(NETIF_F_HW_CSUM |
2520 				    NETIF_F_SCTP_CRC |
2521 				    NETIF_F_GSO_UDP_L4 |
2522 				    NETIF_F_HW_VLAN_CTAG_TX |
2523 				    NETIF_F_TSO |
2524 				    NETIF_F_TSO6);
2525 
2526 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2528 		return features & ~(NETIF_F_HW_CSUM |
2529 				    NETIF_F_SCTP_CRC |
2530 				    NETIF_F_GSO_UDP_L4 |
2531 				    NETIF_F_TSO |
2532 				    NETIF_F_TSO6);
2533 
2534 	/* We can only support IPV4 TSO in tunnels if we can mangle the
2535 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2536 	 */
2537 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2538 		features &= ~NETIF_F_TSO;
2539 
2540 	return features;
2541 }
2542 
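/* FQTSS (the i210 Qav transmit mode) is enabled lazily when the first CBS
 * or ETF offload is installed on a queue, and disabled again once the last
 * such offload is removed, returning the controller to its default
 * transmit mode.
 */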
2543 static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2544 {
2545 	if (!is_fqtss_enabled(adapter)) {
2546 		enable_fqtss(adapter, true);
2547 		return;
2548 	}
2549 
2550 	igb_config_tx_modes(adapter, queue);
2551 
2552 	if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2553 		enable_fqtss(adapter, false);
2554 }
2555 
2556 static int igb_offload_cbs(struct igb_adapter *adapter,
2557 			   struct tc_cbs_qopt_offload *qopt)
2558 {
2559 	struct e1000_hw *hw = &adapter->hw;
2560 	int err;
2561 
2562 	/* CBS offloading is only supported by i210 controller. */
2563 	if (hw->mac.type != e1000_i210)
2564 		return -EOPNOTSUPP;
2565 
2566 	/* CBS offloading is only supported by queue 0 and queue 1. */
2567 	if (qopt->queue < 0 || qopt->queue > 1)
2568 		return -EINVAL;
2569 
2570 	err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2571 				  qopt->idleslope, qopt->sendslope,
2572 				  qopt->hicredit, qopt->locredit);
2573 	if (err)
2574 		return err;
2575 
2576 	igb_offload_apply(adapter, qopt->queue);
2577 
2578 	return 0;
2579 }
2580 
2581 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2582 #define VLAN_PRIO_FULL_MASK (0x07)
2583 
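/* Sketch of a filter this parser accepts (illustrative command; flower
 * supports many more keys than the BASIC/CONTROL/ETH_ADDRS/VLAN subset
 * handled here):
 *
 *   tc filter add dev eth0 ingress protocol 802.1q flower \
 *       vlan_prio 3 hw_tc 1
 *
 * Fully-masked dst_mac/src_mac, EtherType and VLAN priority matches are
 * translated into an igb_nfc_filter that steers matching traffic to the
 * given traffic class.
 */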
2584 static int igb_parse_cls_flower(struct igb_adapter *adapter,
2585 				struct flow_cls_offload *f,
2586 				int traffic_class,
2587 				struct igb_nfc_filter *input)
2588 {
2589 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2590 	struct flow_dissector *dissector = rule->match.dissector;
2591 	struct netlink_ext_ack *extack = f->common.extack;
2592 
2593 	if (dissector->used_keys &
2594 	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2595 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2596 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2597 	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2598 		NL_SET_ERR_MSG_MOD(extack,
2599 				   "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2600 		return -EOPNOTSUPP;
2601 	}
2602 
2603 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2604 		struct flow_match_eth_addrs match;
2605 
2606 		flow_rule_match_eth_addrs(rule, &match);
2607 		if (!is_zero_ether_addr(match.mask->dst)) {
2608 			if (!is_broadcast_ether_addr(match.mask->dst)) {
2609 				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2610 				return -EINVAL;
2611 			}
2612 
2613 			input->filter.match_flags |=
2614 				IGB_FILTER_FLAG_DST_MAC_ADDR;
2615 			ether_addr_copy(input->filter.dst_addr, match.key->dst);
2616 		}
2617 
2618 		if (!is_zero_ether_addr(match.mask->src)) {
2619 			if (!is_broadcast_ether_addr(match.mask->src)) {
2620 				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2621 				return -EINVAL;
2622 			}
2623 
2624 			input->filter.match_flags |=
2625 				IGB_FILTER_FLAG_SRC_MAC_ADDR;
2626 			ether_addr_copy(input->filter.src_addr, match.key->src);
2627 		}
2628 	}
2629 
2630 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2631 		struct flow_match_basic match;
2632 
2633 		flow_rule_match_basic(rule, &match);
2634 		if (match.mask->n_proto) {
2635 			if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2636 				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2637 				return -EINVAL;
2638 			}
2639 
2640 			input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2641 			input->filter.etype = match.key->n_proto;
2642 		}
2643 	}
2644 
2645 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2646 		struct flow_match_vlan match;
2647 
2648 		flow_rule_match_vlan(rule, &match);
2649 		if (match.mask->vlan_priority) {
2650 			if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2651 				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2652 				return -EINVAL;
2653 			}
2654 
2655 			input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2656 			input->filter.vlan_tci = match.key->vlan_priority;
2657 		}
2658 	}
2659 
2660 	input->action = traffic_class;
2661 	input->cookie = f->cookie;
2662 
2663 	return 0;
2664 }
2665 
2666 static int igb_configure_clsflower(struct igb_adapter *adapter,
2667 				   struct flow_cls_offload *cls_flower)
2668 {
2669 	struct netlink_ext_ack *extack = cls_flower->common.extack;
2670 	struct igb_nfc_filter *filter, *f;
2671 	int err, tc;
2672 
2673 	tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2674 	if (tc < 0) {
2675 		NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2676 		return -EINVAL;
2677 	}
2678 
2679 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2680 	if (!filter)
2681 		return -ENOMEM;
2682 
2683 	err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2684 	if (err < 0)
2685 		goto err_parse;
2686 
2687 	spin_lock(&adapter->nfc_lock);
2688 
2689 	hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2690 		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2691 			err = -EEXIST;
2692 			NL_SET_ERR_MSG_MOD(extack,
2693 					   "This filter is already set in ethtool");
2694 			goto err_locked;
2695 		}
2696 	}
2697 
2698 	hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2699 		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2700 			err = -EEXIST;
2701 			NL_SET_ERR_MSG_MOD(extack,
2702 					   "This filter is already set in cls_flower");
2703 			goto err_locked;
2704 		}
2705 	}
2706 
2707 	err = igb_add_filter(adapter, filter);
2708 	if (err < 0) {
2709 		NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2710 		goto err_locked;
2711 	}
2712 
2713 	hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2714 
2715 	spin_unlock(&adapter->nfc_lock);
2716 
2717 	return 0;
2718 
2719 err_locked:
2720 	spin_unlock(&adapter->nfc_lock);
2721 
2722 err_parse:
2723 	kfree(filter);
2724 
2725 	return err;
2726 }
2727 
2728 static int igb_delete_clsflower(struct igb_adapter *adapter,
2729 				struct flow_cls_offload *cls_flower)
2730 {
2731 	struct igb_nfc_filter *filter;
2732 	int err;
2733 
2734 	spin_lock(&adapter->nfc_lock);
2735 
2736 	hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2737 		if (filter->cookie == cls_flower->cookie)
2738 			break;
2739 
2740 	if (!filter) {
2741 		err = -ENOENT;
2742 		goto out;
2743 	}
2744 
2745 	err = igb_erase_filter(adapter, filter);
2746 	if (err < 0)
2747 		goto out;
2748 
2749 	hlist_del(&filter->nfc_node);
2750 	kfree(filter);
2751 
2752 out:
2753 	spin_unlock(&adapter->nfc_lock);
2754 
2755 	return err;
2756 }
2757 
2758 static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2759 				   struct flow_cls_offload *cls_flower)
2760 {
2761 	switch (cls_flower->command) {
2762 	case FLOW_CLS_REPLACE:
2763 		return igb_configure_clsflower(adapter, cls_flower);
2764 	case FLOW_CLS_DESTROY:
2765 		return igb_delete_clsflower(adapter, cls_flower);
2766 	case FLOW_CLS_STATS:
2767 		return -EOPNOTSUPP;
2768 	default:
2769 		return -EOPNOTSUPP;
2770 	}
2771 }
2772 
2773 static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2774 				 void *cb_priv)
2775 {
2776 	struct igb_adapter *adapter = cb_priv;
2777 
2778 	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2779 		return -EOPNOTSUPP;
2780 
2781 	switch (type) {
2782 	case TC_SETUP_CLSFLOWER:
2783 		return igb_setup_tc_cls_flower(adapter, type_data);
2784 
2785 	default:
2786 		return -EOPNOTSUPP;
2787 	}
2788 }
2789 
2790 static int igb_offload_txtime(struct igb_adapter *adapter,
2791 			      struct tc_etf_qopt_offload *qopt)
2792 {
2793 	struct e1000_hw *hw = &adapter->hw;
2794 	int err;
2795 
2796 	/* Launchtime offloading is only supported by i210 controller. */
2797 	if (hw->mac.type != e1000_i210)
2798 		return -EOPNOTSUPP;
2799 
2800 	/* Launchtime offloading is only supported by queues 0 and 1. */
2801 	if (qopt->queue < 0 || qopt->queue > 1)
2802 		return -EINVAL;
2803 
2804 	err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2805 	if (err)
2806 		return err;
2807 
2808 	igb_offload_apply(adapter, qopt->queue);
2809 
2810 	return 0;
2811 }
2812 
2813 static LIST_HEAD(igb_block_cb_list);
2814 
2815 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2816 			void *type_data)
2817 {
2818 	struct igb_adapter *adapter = netdev_priv(dev);
2819 
2820 	switch (type) {
2821 	case TC_SETUP_QDISC_CBS:
2822 		return igb_offload_cbs(adapter, type_data);
2823 	case TC_SETUP_BLOCK:
2824 		return flow_block_cb_setup_simple(type_data,
2825 						  &igb_block_cb_list,
2826 						  igb_setup_tc_block_cb,
2827 						  adapter, adapter, true);
2828 
2829 	case TC_SETUP_QDISC_ETF:
2830 		return igb_offload_txtime(adapter, type_data);
2831 
2832 	default:
2833 		return -EOPNOTSUPP;
2834 	}
2835 }
2836 
2837 static const struct net_device_ops igb_netdev_ops = {
2838 	.ndo_open		= igb_open,
2839 	.ndo_stop		= igb_close,
2840 	.ndo_start_xmit		= igb_xmit_frame,
2841 	.ndo_get_stats64	= igb_get_stats64,
2842 	.ndo_set_rx_mode	= igb_set_rx_mode,
2843 	.ndo_set_mac_address	= igb_set_mac,
2844 	.ndo_change_mtu		= igb_change_mtu,
2845 	.ndo_do_ioctl		= igb_ioctl,
2846 	.ndo_tx_timeout		= igb_tx_timeout,
2847 	.ndo_validate_addr	= eth_validate_addr,
2848 	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
2849 	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
2850 	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
2851 	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
2852 	.ndo_set_vf_rate	= igb_ndo_set_vf_bw,
2853 	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
2854 	.ndo_set_vf_trust	= igb_ndo_set_vf_trust,
2855 	.ndo_get_vf_config	= igb_ndo_get_vf_config,
2856 	.ndo_fix_features	= igb_fix_features,
2857 	.ndo_set_features	= igb_set_features,
2858 	.ndo_fdb_add		= igb_ndo_fdb_add,
2859 	.ndo_features_check	= igb_features_check,
2860 	.ndo_setup_tc		= igb_setup_tc,
2861 };
2862 
2863 /**
2864  * igb_set_fw_version - Configure version string for ethtool
2865  * @adapter: adapter struct
2866  **/
2867 void igb_set_fw_version(struct igb_adapter *adapter)
2868 {
2869 	struct e1000_hw *hw = &adapter->hw;
2870 	struct e1000_fw_version fw;
2871 
2872 	igb_get_fw_version(hw, &fw);
2873 
2874 	switch (hw->mac.type) {
2875 	case e1000_i210:
2876 	case e1000_i211:
		if (!igb_get_flash_presence_i210(hw)) {
2878 			snprintf(adapter->fw_version,
2879 				 sizeof(adapter->fw_version),
2880 				 "%2d.%2d-%d",
2881 				 fw.invm_major, fw.invm_minor,
2882 				 fw.invm_img_type);
2883 			break;
2884 		}
2885 		/* fall through */
2886 	default:
2887 		/* if option is rom valid, display its version too */
2888 		if (fw.or_valid) {
2889 			snprintf(adapter->fw_version,
2890 				 sizeof(adapter->fw_version),
2891 				 "%d.%d, 0x%08x, %d.%d.%d",
2892 				 fw.eep_major, fw.eep_minor, fw.etrack_id,
2893 				 fw.or_major, fw.or_build, fw.or_patch);
2894 		/* no option rom */
		} else if (fw.etrack_id != 0x0000) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d, 0x%08x",
				 fw.eep_major, fw.eep_minor, fw.etrack_id);
		} else {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.eep_build);
		}
2906 		break;
2907 	}
2908 }
2909 
2910 /**
2911  * igb_init_mas - init Media Autosense feature if enabled in the NVM
2912  *
2913  * @adapter: adapter struct
2914  **/
2915 static void igb_init_mas(struct igb_adapter *adapter)
2916 {
2917 	struct e1000_hw *hw = &adapter->hw;
2918 	u16 eeprom_data;
2919 
2920 	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2921 	switch (hw->bus.func) {
2922 	case E1000_FUNC_0:
2923 		if (eeprom_data & IGB_MAS_ENABLE_0) {
2924 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2925 			netdev_info(adapter->netdev,
2926 				"MAS: Enabling Media Autosense for port %d\n",
2927 				hw->bus.func);
2928 		}
2929 		break;
2930 	case E1000_FUNC_1:
2931 		if (eeprom_data & IGB_MAS_ENABLE_1) {
2932 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2933 			netdev_info(adapter->netdev,
2934 				"MAS: Enabling Media Autosense for port %d\n",
2935 				hw->bus.func);
2936 		}
2937 		break;
2938 	case E1000_FUNC_2:
2939 		if (eeprom_data & IGB_MAS_ENABLE_2) {
2940 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2941 			netdev_info(adapter->netdev,
2942 				"MAS: Enabling Media Autosense for port %d\n",
2943 				hw->bus.func);
2944 		}
2945 		break;
2946 	case E1000_FUNC_3:
2947 		if (eeprom_data & IGB_MAS_ENABLE_3) {
2948 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2949 			netdev_info(adapter->netdev,
2950 				"MAS: Enabling Media Autosense for port %d\n",
2951 				hw->bus.func);
2952 		}
2953 		break;
2954 	default:
2955 		/* Shouldn't get here */
2956 		netdev_err(adapter->netdev,
2957 			"MAS: Invalid port configuration, returning\n");
2958 		break;
2959 	}
2960 }
2961 
2962 /**
2963  *  igb_init_i2c - Init I2C interface
2964  *  @adapter: pointer to adapter structure
2965  **/
2966 static s32 igb_init_i2c(struct igb_adapter *adapter)
2967 {
2968 	s32 status = 0;
2969 
	/* The I2C interface is only supported on i350 devices */
2971 	if (adapter->hw.mac.type != e1000_i350)
2972 		return 0;
2973 
	/* Initialize the i2c bus which is controlled by the registers.
	 * This bus will use the i2c_algo_bit structure that implements
	 * the protocol through toggling of the 4 bits in the register.
	 */
2978 	adapter->i2c_adap.owner = THIS_MODULE;
2979 	adapter->i2c_algo = igb_i2c_algo;
2980 	adapter->i2c_algo.data = adapter;
2981 	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2982 	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2983 	strlcpy(adapter->i2c_adap.name, "igb BB",
2984 		sizeof(adapter->i2c_adap.name));
2985 	status = i2c_bit_add_bus(&adapter->i2c_adap);
2986 	return status;
2987 }
2988 
2989 /**
2990  *  igb_probe - Device Initialization Routine
2991  *  @pdev: PCI device information struct
2992  *  @ent: entry in igb_pci_tbl
2993  *
2994  *  Returns 0 on success, negative on failure
2995  *
2996  *  igb_probe initializes an adapter identified by a pci_dev structure.
2997  *  The OS initialization, configuring of the adapter private structure,
2998  *  and a hardware reset occur.
2999  **/
3000 static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3001 {
3002 	struct net_device *netdev;
3003 	struct igb_adapter *adapter;
3004 	struct e1000_hw *hw;
3005 	u16 eeprom_data = 0;
3006 	s32 ret_val;
3007 	static int global_quad_port_a; /* global quad port a indication */
3008 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3009 	int err, pci_using_dac;
3010 	u8 part_str[E1000_PBANUM_LENGTH];
3011 
3012 	/* Catch broken hardware that put the wrong VF device ID in
3013 	 * the PCIe SR-IOV capability.
3014 	 */
3015 	if (pdev->is_virtfn) {
		WARN(1, "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
3018 		return -EINVAL;
3019 	}
3020 
3021 	err = pci_enable_device_mem(pdev);
3022 	if (err)
3023 		return err;
3024 
3025 	pci_using_dac = 0;
3026 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3027 	if (!err) {
3028 		pci_using_dac = 1;
3029 	} else {
3030 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3031 		if (err) {
3032 			dev_err(&pdev->dev,
3033 				"No usable DMA configuration, aborting\n");
3034 			goto err_dma;
3035 		}
3036 	}
3037 
3038 	err = pci_request_mem_regions(pdev, igb_driver_name);
3039 	if (err)
3040 		goto err_pci_reg;
3041 
3042 	pci_enable_pcie_error_reporting(pdev);
3043 
3044 	pci_set_master(pdev);
3045 	pci_save_state(pdev);
3046 
3047 	err = -ENOMEM;
3048 	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3049 				   IGB_MAX_TX_QUEUES);
3050 	if (!netdev)
3051 		goto err_alloc_etherdev;
3052 
3053 	SET_NETDEV_DEV(netdev, &pdev->dev);
3054 
3055 	pci_set_drvdata(pdev, netdev);
3056 	adapter = netdev_priv(netdev);
3057 	adapter->netdev = netdev;
3058 	adapter->pdev = pdev;
3059 	hw = &adapter->hw;
3060 	hw->back = adapter;
3061 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3062 
3063 	err = -EIO;
3064 	adapter->io_addr = pci_iomap(pdev, 0, 0);
3065 	if (!adapter->io_addr)
3066 		goto err_ioremap;
3067 	/* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
3068 	hw->hw_addr = adapter->io_addr;
3069 
3070 	netdev->netdev_ops = &igb_netdev_ops;
3071 	igb_set_ethtool_ops(netdev);
3072 	netdev->watchdog_timeo = 5 * HZ;
3073 
3074 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3075 
3076 	netdev->mem_start = pci_resource_start(pdev, 0);
3077 	netdev->mem_end = pci_resource_end(pdev, 0);
3078 
3079 	/* PCI config space info */
3080 	hw->vendor_id = pdev->vendor;
3081 	hw->device_id = pdev->device;
3082 	hw->revision_id = pdev->revision;
3083 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
3084 	hw->subsystem_device_id = pdev->subsystem_device;
3085 
3086 	/* Copy the default MAC, PHY and NVM function pointers */
3087 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3088 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3089 	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3090 	/* Initialize skew-specific constants */
3091 	err = ei->get_invariants(hw);
3092 	if (err)
3093 		goto err_sw_init;
3094 
3095 	/* setup the private structure */
3096 	err = igb_sw_init(adapter);
3097 	if (err)
3098 		goto err_sw_init;
3099 
3100 	igb_get_bus_info_pcie(hw);
3101 
3102 	hw->phy.autoneg_wait_to_complete = false;
3103 
3104 	/* Copper options */
3105 	if (hw->phy.media_type == e1000_media_type_copper) {
3106 		hw->phy.mdix = AUTO_ALL_MODES;
3107 		hw->phy.disable_polarity_correction = false;
3108 		hw->phy.ms_type = e1000_ms_hw_default;
3109 	}
3110 
3111 	if (igb_check_reset_block(hw))
3112 		dev_info(&pdev->dev,
3113 			"PHY reset is blocked due to SOL/IDER session.\n");
3114 
	/* The features field is initialized to 0 at allocation, but it might
	 * already have bits set by igb_sw_init, so use a bitwise OR instead
	 * of an assignment.
	 */
3119 	netdev->features |= NETIF_F_SG |
3120 			    NETIF_F_TSO |
3121 			    NETIF_F_TSO6 |
3122 			    NETIF_F_RXHASH |
3123 			    NETIF_F_RXCSUM |
3124 			    NETIF_F_HW_CSUM;
3125 
3126 	if (hw->mac.type >= e1000_82576)
3127 		netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
3128 
3129 	if (hw->mac.type >= e1000_i350)
3130 		netdev->features |= NETIF_F_HW_TC;
3131 
3132 #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3133 				  NETIF_F_GSO_GRE_CSUM | \
3134 				  NETIF_F_GSO_IPXIP4 | \
3135 				  NETIF_F_GSO_IPXIP6 | \
3136 				  NETIF_F_GSO_UDP_TUNNEL | \
3137 				  NETIF_F_GSO_UDP_TUNNEL_CSUM)
3138 
3139 	netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3140 	netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3141 
3142 	/* copy netdev features into list of user selectable features */
3143 	netdev->hw_features |= netdev->features |
3144 			       NETIF_F_HW_VLAN_CTAG_RX |
3145 			       NETIF_F_HW_VLAN_CTAG_TX |
3146 			       NETIF_F_RXALL;
3147 
3148 	if (hw->mac.type >= e1000_i350)
3149 		netdev->hw_features |= NETIF_F_NTUPLE;
3150 
3151 	if (pci_using_dac)
3152 		netdev->features |= NETIF_F_HIGHDMA;
3153 
3154 	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3155 	netdev->mpls_features |= NETIF_F_HW_CSUM;
3156 	netdev->hw_enc_features |= netdev->vlan_features;
3157 
3158 	/* set this bit last since it cannot be part of vlan_features */
3159 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3160 			    NETIF_F_HW_VLAN_CTAG_RX |
3161 			    NETIF_F_HW_VLAN_CTAG_TX;
3162 
3163 	netdev->priv_flags |= IFF_SUPP_NOFCS;
3164 
3165 	netdev->priv_flags |= IFF_UNICAST_FLT;
3166 
3167 	/* MTU range: 68 - 9216 */
3168 	netdev->min_mtu = ETH_MIN_MTU;
3169 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3170 
3171 	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3172 
3173 	/* before reading the NVM, reset the controller to put the device in a
3174 	 * known good starting state
3175 	 */
3176 	hw->mac.ops.reset_hw(hw);
3177 
	/* make sure the NVM is good; i210/i211 parts can have special NVM
	 * that doesn't contain a checksum
	 */
3181 	switch (hw->mac.type) {
3182 	case e1000_i210:
3183 	case e1000_i211:
3184 		if (igb_get_flash_presence_i210(hw)) {
3185 			if (hw->nvm.ops.validate(hw) < 0) {
3186 				dev_err(&pdev->dev,
3187 					"The NVM Checksum Is Not Valid\n");
3188 				err = -EIO;
3189 				goto err_eeprom;
3190 			}
3191 		}
3192 		break;
3193 	default:
3194 		if (hw->nvm.ops.validate(hw) < 0) {
3195 			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3196 			err = -EIO;
3197 			goto err_eeprom;
3198 		}
3199 		break;
3200 	}
3201 
3202 	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3203 		/* copy the MAC address out of the NVM */
3204 		if (hw->mac.ops.read_mac_addr(hw))
3205 			dev_err(&pdev->dev, "NVM Read Error\n");
3206 	}
3207 
3208 	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
3209 
3210 	if (!is_valid_ether_addr(netdev->dev_addr)) {
3211 		dev_err(&pdev->dev, "Invalid MAC Address\n");
3212 		err = -EIO;
3213 		goto err_eeprom;
3214 	}
3215 
3216 	igb_set_default_mac_filter(adapter);
3217 
3218 	/* get firmware version for ethtool -i */
3219 	igb_set_fw_version(adapter);
3220 
3221 	/* configure RXPBSIZE and TXPBSIZE */
3222 	if (hw->mac.type == e1000_i210) {
3223 		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3224 		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3225 	}
3226 
3227 	timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3228 	timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3229 
3230 	INIT_WORK(&adapter->reset_task, igb_reset_task);
3231 	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3232 
3233 	/* Initialize link properties that are user-changeable */
3234 	adapter->fc_autoneg = true;
3235 	hw->mac.autoneg = true;
3236 	hw->phy.autoneg_advertised = 0x2f;
3237 
3238 	hw->fc.requested_mode = e1000_fc_default;
3239 	hw->fc.current_mode = e1000_fc_default;
3240 
3241 	igb_validate_mdi_setting(hw);
3242 
3243 	/* By default, support wake on port A */
3244 	if (hw->bus.func == 0)
3245 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3246 
3247 	/* Check the NVM for wake support on non-port A ports */
3248 	if (hw->mac.type >= e1000_82580)
3249 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3250 				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3251 				 &eeprom_data);
3252 	else if (hw->bus.func == 1)
3253 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3254 
3255 	if (eeprom_data & IGB_EEPROM_APME)
3256 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3257 
3258 	/* now that we have the eeprom settings, apply the special cases where
3259 	 * the eeprom may be wrong or the board simply won't support wake on
3260 	 * lan on a particular port
3261 	 */
3262 	switch (pdev->device) {
3263 	case E1000_DEV_ID_82575GB_QUAD_COPPER:
3264 		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3265 		break;
3266 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
3267 	case E1000_DEV_ID_82576_FIBER:
3268 	case E1000_DEV_ID_82576_SERDES:
3269 		/* Wake events only supported on port A for dual fiber
3270 		 * regardless of eeprom setting
3271 		 */
3272 		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3273 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3274 		break;
3275 	case E1000_DEV_ID_82576_QUAD_COPPER:
3276 	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3277 		/* if quad port adapter, disable WoL on all but port A */
3278 		if (global_quad_port_a != 0)
3279 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3280 		else
3281 			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3282 		/* Reset for multiple quad port adapters */
3283 		if (++global_quad_port_a == 4)
3284 			global_quad_port_a = 0;
3285 		break;
3286 	default:
3287 		/* If the device can't wake, don't set software support */
3288 		if (!device_can_wakeup(&adapter->pdev->dev))
3289 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3290 	}
3291 
3292 	/* initialize the wol settings based on the eeprom settings */
3293 	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3294 		adapter->wol |= E1000_WUFC_MAG;
3295 
3296 	/* Some vendors want WoL disabled by default, but still supported */
3297 	if ((hw->mac.type == e1000_i350) &&
3298 	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3299 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3300 		adapter->wol = 0;
3301 	}
3302 
	/* Some vendors want the ability to use the EEPROM setting as
	 * enable/disable only, and not for capability
	 */
3306 	if (((hw->mac.type == e1000_i350) ||
3307 	     (hw->mac.type == e1000_i354)) &&
3308 	    (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3309 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3310 		adapter->wol = 0;
3311 	}
3312 	if (hw->mac.type == e1000_i350) {
3313 		if (((pdev->subsystem_device == 0x5001) ||
3314 		     (pdev->subsystem_device == 0x5002)) &&
3315 				(hw->bus.func == 0)) {
3316 			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3317 			adapter->wol = 0;
3318 		}
3319 		if (pdev->subsystem_device == 0x1F52)
3320 			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3321 	}
3322 
3323 	device_set_wakeup_enable(&adapter->pdev->dev,
3324 				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3325 
3326 	/* reset the hardware with the new settings */
3327 	igb_reset(adapter);
3328 
3329 	/* Init the I2C interface */
3330 	err = igb_init_i2c(adapter);
3331 	if (err) {
3332 		dev_err(&pdev->dev, "failed to init i2c interface\n");
3333 		goto err_eeprom;
3334 	}
3335 
3336 	/* let the f/w know that the h/w is now under the control of the
3337 	 * driver.
3338 	 */
3339 	igb_get_hw_control(adapter);
3340 
3341 	strcpy(netdev->name, "eth%d");
3342 	err = register_netdev(netdev);
3343 	if (err)
3344 		goto err_register;
3345 
3346 	/* carrier off reporting is important to ethtool even BEFORE open */
3347 	netif_carrier_off(netdev);
3348 
3349 #ifdef CONFIG_IGB_DCA
3350 	if (dca_add_requester(&pdev->dev) == 0) {
3351 		adapter->flags |= IGB_FLAG_DCA_ENABLED;
3352 		dev_info(&pdev->dev, "DCA enabled\n");
3353 		igb_setup_dca(adapter);
3354 	}
3355 
3356 #endif
3357 #ifdef CONFIG_IGB_HWMON
3358 	/* Initialize the thermal sensor on i350 devices. */
3359 	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3360 		u16 ets_word;
3361 
3362 		/* Read the NVM to determine if this i350 device supports an
3363 		 * external thermal sensor.
3364 		 */
3365 		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3366 		if (ets_word != 0x0000 && ets_word != 0xFFFF)
3367 			adapter->ets = true;
3368 		else
3369 			adapter->ets = false;
3370 		if (igb_sysfs_init(adapter))
3371 			dev_err(&pdev->dev,
3372 				"failed to allocate sysfs resources\n");
3373 	} else {
3374 		adapter->ets = false;
3375 	}
3376 #endif
3377 	/* Check if Media Autosense is enabled */
3378 	adapter->ei = *ei;
3379 	if (hw->dev_spec._82575.mas_capable)
3380 		igb_init_mas(adapter);
3381 
3382 	/* do hw tstamp init after resetting */
3383 	igb_ptp_init(adapter);
3384 
3385 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3386 	/* print bus type/speed/width info, not applicable to i354 */
3387 	if (hw->mac.type != e1000_i354) {
3388 		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3389 			 netdev->name,
3390 			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3391 			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3392 			   "unknown"),
3393 			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3394 			  "Width x4" :
3395 			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
3396 			  "Width x2" :
3397 			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
3398 			  "Width x1" : "unknown"), netdev->dev_addr);
3399 	}
3400 
	if (hw->mac.type < e1000_i210 ||
	    igb_get_flash_presence_i210(hw)) {
3403 		ret_val = igb_read_part_string(hw, part_str,
3404 					       E1000_PBANUM_LENGTH);
3405 	} else {
3406 		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3407 	}
3408 
3409 	if (ret_val)
3410 		strcpy(part_str, "Unknown");
3411 	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3412 	dev_info(&pdev->dev,
3413 		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3414 		(adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3415 		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3416 		adapter->num_rx_queues, adapter->num_tx_queues);
3417 	if (hw->phy.media_type == e1000_media_type_copper) {
3418 		switch (hw->mac.type) {
3419 		case e1000_i350:
3420 		case e1000_i210:
3421 		case e1000_i211:
3422 			/* Enable EEE for internal copper PHY devices */
3423 			err = igb_set_eee_i350(hw, true, true);
3424 			if ((!err) &&
3425 			    (!hw->dev_spec._82575.eee_disable)) {
3426 				adapter->eee_advert =
3427 					MDIO_EEE_100TX | MDIO_EEE_1000T;
3428 				adapter->flags |= IGB_FLAG_EEE;
3429 			}
3430 			break;
3431 		case e1000_i354:
3432 			if ((rd32(E1000_CTRL_EXT) &
3433 			    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3434 				err = igb_set_eee_i354(hw, true, true);
3435 				if ((!err) &&
3436 					(!hw->dev_spec._82575.eee_disable)) {
3437 					adapter->eee_advert =
3438 					   MDIO_EEE_100TX | MDIO_EEE_1000T;
3439 					adapter->flags |= IGB_FLAG_EEE;
3440 				}
3441 			}
3442 			break;
3443 		default:
3444 			break;
3445 		}
3446 	}
3447 
3448 	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
3449 
3450 	pm_runtime_put_noidle(&pdev->dev);
3451 	return 0;
3452 
3453 err_register:
3454 	igb_release_hw_control(adapter);
3455 	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3456 err_eeprom:
3457 	if (!igb_check_reset_block(hw))
3458 		igb_reset_phy(hw);
3459 
3460 	if (hw->flash_address)
3461 		iounmap(hw->flash_address);
3462 err_sw_init:
3463 	kfree(adapter->mac_table);
3464 	kfree(adapter->shadow_vfta);
3465 	igb_clear_interrupt_scheme(adapter);
3466 #ifdef CONFIG_PCI_IOV
3467 	igb_disable_sriov(pdev);
3468 #endif
3469 	pci_iounmap(pdev, adapter->io_addr);
3470 err_ioremap:
3471 	free_netdev(netdev);
3472 err_alloc_etherdev:
3473 	pci_release_mem_regions(pdev);
3474 err_pci_reg:
3475 err_dma:
3476 	pci_disable_device(pdev);
3477 	return err;
3478 }
3479 
3480 #ifdef CONFIG_PCI_IOV
3481 static int igb_disable_sriov(struct pci_dev *pdev)
3482 {
3483 	struct net_device *netdev = pci_get_drvdata(pdev);
3484 	struct igb_adapter *adapter = netdev_priv(netdev);
3485 	struct e1000_hw *hw = &adapter->hw;
3486 
3487 	/* reclaim resources allocated to VFs */
3488 	if (adapter->vf_data) {
3489 		/* disable iov and allow time for transactions to clear */
3490 		if (pci_vfs_assigned(pdev)) {
3491 			dev_warn(&pdev->dev,
3492 				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3493 			return -EPERM;
3494 		} else {
3495 			pci_disable_sriov(pdev);
3496 			msleep(500);
3497 		}
3498 
3499 		kfree(adapter->vf_mac_list);
3500 		adapter->vf_mac_list = NULL;
3501 		kfree(adapter->vf_data);
3502 		adapter->vf_data = NULL;
3503 		adapter->vfs_allocated_count = 0;
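		/* E1000_IOVCTL_REUSE_VFQ lets the hardware reuse the queue
		 * resources that were previously assigned to the VFs
		 */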
3504 		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3505 		wrfl();
3506 		msleep(100);
3507 		dev_info(&pdev->dev, "IOV Disabled\n");
3508 
3509 		/* Re-enable DMA Coalescing flag since IOV is turned off */
3510 		adapter->flags |= IGB_FLAG_DMAC;
3511 	}
3512 
3513 	return 0;
3514 }
3515 
3516 static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3517 {
3518 	struct net_device *netdev = pci_get_drvdata(pdev);
3519 	struct igb_adapter *adapter = netdev_priv(netdev);
3520 	int old_vfs = pci_num_vf(pdev);
3521 	struct vf_mac_filter *mac_list;
3522 	int err = 0;
3523 	int num_vf_mac_filters, i;
3524 
3525 	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3526 		err = -EPERM;
3527 		goto out;
3528 	}
3529 	if (!num_vfs)
3530 		goto out;
3531 
3532 	if (old_vfs) {
3533 		dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3534 			 old_vfs, max_vfs);
3535 		adapter->vfs_allocated_count = old_vfs;
	} else {
		adapter->vfs_allocated_count = num_vfs;
	}
3538 
3539 	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3540 				sizeof(struct vf_data_storage), GFP_KERNEL);
3541 
3542 	/* if allocation failed then we do not support SR-IOV */
3543 	if (!adapter->vf_data) {
3544 		adapter->vfs_allocated_count = 0;
3545 		err = -ENOMEM;
3546 		goto out;
3547 	}
3548 
	/* Due to the limited number of RAR entries, calculate the potential
3550 	 * number of MAC filters available for the VFs. Reserve entries
3551 	 * for PF default MAC, PF MAC filters and at least one RAR entry
3552 	 * for each VF for VF MAC.
3553 	 */
3554 	num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3555 			     (1 + IGB_PF_MAC_FILTERS_RESERVED +
3556 			      adapter->vfs_allocated_count);
3557 
3558 	adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3559 				       sizeof(struct vf_mac_filter),
3560 				       GFP_KERNEL);
3561 
3562 	mac_list = adapter->vf_mac_list;
3563 	INIT_LIST_HEAD(&adapter->vf_macs.l);
3564 
3565 	if (adapter->vf_mac_list) {
3566 		/* Initialize list of VF MAC filters */
3567 		for (i = 0; i < num_vf_mac_filters; i++) {
3568 			mac_list->vf = -1;
3569 			mac_list->free = true;
3570 			list_add(&mac_list->l, &adapter->vf_macs.l);
3571 			mac_list++;
3572 		}
3573 	} else {
3574 		/* If we could not allocate memory for the VF MAC filters
3575 		 * we can continue without this feature but warn user.
3576 		 */
3577 		dev_err(&pdev->dev,
3578 			"Unable to allocate memory for VF MAC filter list\n");
3579 	}
3580 
3581 	/* only call pci_enable_sriov() if no VFs are allocated already */
3582 	if (!old_vfs) {
3583 		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3584 		if (err)
3585 			goto err_out;
3586 	}
3587 	dev_info(&pdev->dev, "%d VFs allocated\n",
3588 		 adapter->vfs_allocated_count);
3589 	for (i = 0; i < adapter->vfs_allocated_count; i++)
3590 		igb_vf_configure(adapter, i);
3591 
3592 	/* DMA Coalescing is not supported in IOV mode. */
3593 	adapter->flags &= ~IGB_FLAG_DMAC;
3594 	goto out;
3595 
3596 err_out:
3597 	kfree(adapter->vf_mac_list);
3598 	adapter->vf_mac_list = NULL;
3599 	kfree(adapter->vf_data);
3600 	adapter->vf_data = NULL;
3601 	adapter->vfs_allocated_count = 0;
3602 out:
3603 	return err;
3604 }
3605 
3606 #endif
3607 /**
 *  igb_remove_i2c - Cleanup I2C interface
3609  *  @adapter: pointer to adapter structure
3610  **/
3611 static void igb_remove_i2c(struct igb_adapter *adapter)
3612 {
3613 	/* free the adapter bus structure */
3614 	i2c_del_adapter(&adapter->i2c_adap);
3615 }
3616 
3617 /**
3618  *  igb_remove - Device Removal Routine
3619  *  @pdev: PCI device information struct
3620  *
3621  *  igb_remove is called by the PCI subsystem to alert the driver
 *  that it should release a PCI device.  This could be caused by a
3623  *  Hot-Plug event, or because the driver is going to be removed from
3624  *  memory.
3625  **/
3626 static void igb_remove(struct pci_dev *pdev)
3627 {
3628 	struct net_device *netdev = pci_get_drvdata(pdev);
3629 	struct igb_adapter *adapter = netdev_priv(netdev);
3630 	struct e1000_hw *hw = &adapter->hw;
3631 
3632 	pm_runtime_get_noresume(&pdev->dev);
3633 #ifdef CONFIG_IGB_HWMON
3634 	igb_sysfs_exit(adapter);
3635 #endif
3636 	igb_remove_i2c(adapter);
3637 	igb_ptp_stop(adapter);
3638 	/* The watchdog timer may be rescheduled, so explicitly
3639 	 * disable watchdog from being rescheduled.
3640 	 */
3641 	set_bit(__IGB_DOWN, &adapter->state);
3642 	del_timer_sync(&adapter->watchdog_timer);
3643 	del_timer_sync(&adapter->phy_info_timer);
3644 
3645 	cancel_work_sync(&adapter->reset_task);
3646 	cancel_work_sync(&adapter->watchdog_task);
3647 
3648 #ifdef CONFIG_IGB_DCA
3649 	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3650 		dev_info(&pdev->dev, "DCA disabled\n");
3651 		dca_remove_requester(&pdev->dev);
3652 		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3653 		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3654 	}
3655 #endif
3656 
3657 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
3658 	 * would have already happened in close and is redundant.
3659 	 */
3660 	igb_release_hw_control(adapter);
3661 
3662 #ifdef CONFIG_PCI_IOV
3663 	igb_disable_sriov(pdev);
3664 #endif
3665 
3666 	unregister_netdev(netdev);
3667 
3668 	igb_clear_interrupt_scheme(adapter);
3669 
3670 	pci_iounmap(pdev, adapter->io_addr);
3671 	if (hw->flash_address)
3672 		iounmap(hw->flash_address);
3673 	pci_release_mem_regions(pdev);
3674 
3675 	kfree(adapter->mac_table);
3676 	kfree(adapter->shadow_vfta);
3677 	free_netdev(netdev);
3678 
3679 	pci_disable_pcie_error_reporting(pdev);
3680 
3681 	pci_disable_device(pdev);
3682 }
3683 
3684 /**
3685  *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
3686  *  @adapter: board private structure to initialize
3687  *
3688  *  This function initializes the vf specific data storage and then attempts to
 *  allocate the VFs.  The reason for this ordering is that it is much
 *  more expensive time wise to disable SR-IOV than it is to allocate and free
3691  *  the memory for the VFs.
3692  **/
3693 static void igb_probe_vfs(struct igb_adapter *adapter)
3694 {
3695 #ifdef CONFIG_PCI_IOV
3696 	struct pci_dev *pdev = adapter->pdev;
3697 	struct e1000_hw *hw = &adapter->hw;
3698 
3699 	/* Virtualization features not supported on i210 family. */
3700 	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3701 		return;
3702 
3703 	/* Of the below we really only want the effect of getting
3704 	 * IGB_FLAG_HAS_MSIX set (if available), without which
3705 	 * igb_enable_sriov() has no effect.
3706 	 */
3707 	igb_set_interrupt_capability(adapter, true);
3708 	igb_reset_interrupt_capability(adapter);
3709 
3710 	pci_sriov_set_totalvfs(pdev, 7);
3711 	igb_enable_sriov(pdev, max_vfs);
3712 
3713 #endif /* CONFIG_PCI_IOV */
3714 }
3715 
3716 unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3717 {
3718 	struct e1000_hw *hw = &adapter->hw;
3719 	unsigned int max_rss_queues;
3720 
3721 	/* Determine the maximum number of RSS queues supported. */
3722 	switch (hw->mac.type) {
3723 	case e1000_i211:
3724 		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3725 		break;
3726 	case e1000_82575:
3727 	case e1000_i210:
3728 		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3729 		break;
3730 	case e1000_i350:
3731 		/* I350 cannot do RSS and SR-IOV at the same time */
3732 		if (!!adapter->vfs_allocated_count) {
3733 			max_rss_queues = 1;
3734 			break;
3735 		}
3736 		/* fall through */
3737 	case e1000_82576:
3738 		if (!!adapter->vfs_allocated_count) {
3739 			max_rss_queues = 2;
3740 			break;
3741 		}
3742 		/* fall through */
3743 	case e1000_82580:
3744 	case e1000_i354:
3745 	default:
3746 		max_rss_queues = IGB_MAX_RX_QUEUES;
3747 		break;
3748 	}
3749 
3750 	return max_rss_queues;
3751 }
3752 
3753 static void igb_init_queue_configuration(struct igb_adapter *adapter)
3754 {
3755 	u32 max_rss_queues;
3756 
3757 	max_rss_queues = igb_get_max_rss_queues(adapter);
3758 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3759 
3760 	igb_set_flag_queue_pairs(adapter, max_rss_queues);
3761 }
3762 
3763 void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3764 			      const u32 max_rss_queues)
3765 {
3766 	struct e1000_hw *hw = &adapter->hw;
3767 
3768 	/* Determine if we need to pair queues. */
3769 	switch (hw->mac.type) {
3770 	case e1000_82575:
3771 	case e1000_i211:
3772 		/* Device supports enough interrupts without queue pairing. */
3773 		break;
3774 	case e1000_82576:
3775 	case e1000_82580:
3776 	case e1000_i350:
3777 	case e1000_i354:
3778 	case e1000_i210:
3779 	default:
3780 		/* If rss_queues > half of max_rss_queues, pair the queues in
3781 		 * order to conserve interrupts due to limited supply.
3782 		 */
3783 		if (adapter->rss_queues > (max_rss_queues / 2))
3784 			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3785 		else
3786 			adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3787 		break;
3788 	}
3789 }
3790 
3791 /**
3792  *  igb_sw_init - Initialize general software structures (struct igb_adapter)
3793  *  @adapter: board private structure to initialize
3794  *
3795  *  igb_sw_init initializes the Adapter private data structure.
3796  *  Fields are initialized based on PCI device information and
3797  *  OS network device settings (MTU size).
3798  **/
3799 static int igb_sw_init(struct igb_adapter *adapter)
3800 {
3801 	struct e1000_hw *hw = &adapter->hw;
3802 	struct net_device *netdev = adapter->netdev;
3803 	struct pci_dev *pdev = adapter->pdev;
3804 
3805 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3806 
3807 	/* set default ring sizes */
3808 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
3809 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
3810 
3811 	/* set default ITR values */
3812 	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3813 	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3814 
3815 	/* set default work limits */
3816 	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3817 
3818 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3819 				  VLAN_HLEN;
3820 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3821 
3822 	spin_lock_init(&adapter->nfc_lock);
3823 	spin_lock_init(&adapter->stats64_lock);
3824 #ifdef CONFIG_PCI_IOV
3825 	switch (hw->mac.type) {
3826 	case e1000_82576:
3827 	case e1000_i350:
3828 		if (max_vfs > 7) {
3829 			dev_warn(&pdev->dev,
3830 				 "Maximum of 7 VFs per PF, using max\n");
3831 			max_vfs = adapter->vfs_allocated_count = 7;
		} else {
			adapter->vfs_allocated_count = max_vfs;
		}
3834 		if (adapter->vfs_allocated_count)
3835 			dev_warn(&pdev->dev,
3836 				 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3837 		break;
3838 	default:
3839 		break;
3840 	}
3841 #endif /* CONFIG_PCI_IOV */
3842 
3843 	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
3844 	adapter->flags |= IGB_FLAG_HAS_MSIX;
3845 
3846 	adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3847 				     sizeof(struct igb_mac_addr),
3848 				     GFP_KERNEL);
3849 	if (!adapter->mac_table)
3850 		return -ENOMEM;
3851 
3852 	igb_probe_vfs(adapter);
3853 
3854 	igb_init_queue_configuration(adapter);
3855 
3856 	/* Setup and initialize a copy of the hw vlan table array */
3857 	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3858 				       GFP_KERNEL);
3859 	if (!adapter->shadow_vfta)
3860 		return -ENOMEM;
3861 
3862 	/* This call may decrease the number of queues */
3863 	if (igb_init_interrupt_scheme(adapter, true)) {
3864 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3865 		return -ENOMEM;
3866 	}
3867 
3868 	/* Explicitly disable IRQ since the NIC can be in any state. */
3869 	igb_irq_disable(adapter);
3870 
3871 	if (hw->mac.type >= e1000_i350)
3872 		adapter->flags &= ~IGB_FLAG_DMAC;
3873 
3874 	set_bit(__IGB_DOWN, &adapter->state);
3875 	return 0;
3876 }
3877 
3878 /**
3879  *  igb_open - Called when a network interface is made active
3880  *  @netdev: network interface device structure
3881  *
3882  *  Returns 0 on success, negative value on failure
3883  *
3884  *  The open entry point is called when a network interface is made
3885  *  active by the system (IFF_UP).  At this point all resources needed
3886  *  for transmit and receive operations are allocated, the interrupt
3887  *  handler is registered with the OS, the watchdog timer is started,
3888  *  and the stack is notified that the interface is ready.
3889  **/
3890 static int __igb_open(struct net_device *netdev, bool resuming)
3891 {
3892 	struct igb_adapter *adapter = netdev_priv(netdev);
3893 	struct e1000_hw *hw = &adapter->hw;
3894 	struct pci_dev *pdev = adapter->pdev;
3895 	int err;
3896 	int i;
3897 
3898 	/* disallow open during test */
3899 	if (test_bit(__IGB_TESTING, &adapter->state)) {
3900 		WARN_ON(resuming);
3901 		return -EBUSY;
3902 	}
3903 
3904 	if (!resuming)
3905 		pm_runtime_get_sync(&pdev->dev);
3906 
3907 	netif_carrier_off(netdev);
3908 
3909 	/* allocate transmit descriptors */
3910 	err = igb_setup_all_tx_resources(adapter);
3911 	if (err)
3912 		goto err_setup_tx;
3913 
3914 	/* allocate receive descriptors */
3915 	err = igb_setup_all_rx_resources(adapter);
3916 	if (err)
3917 		goto err_setup_rx;
3918 
3919 	igb_power_up_link(adapter);
3920 
3921 	/* before we allocate an interrupt, we must be ready to handle it.
3922 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3923 	 * as soon as we call pci_request_irq, so we have to setup our
3924 	 * clean_rx handler before we do so.
3925 	 */
3926 	igb_configure(adapter);
3927 
3928 	err = igb_request_irq(adapter);
3929 	if (err)
3930 		goto err_req_irq;
3931 
3932 	/* Notify the stack of the actual queue counts. */
3933 	err = netif_set_real_num_tx_queues(adapter->netdev,
3934 					   adapter->num_tx_queues);
3935 	if (err)
3936 		goto err_set_queues;
3937 
3938 	err = netif_set_real_num_rx_queues(adapter->netdev,
3939 					   adapter->num_rx_queues);
3940 	if (err)
3941 		goto err_set_queues;
3942 
3943 	/* From here on the code is the same as igb_up() */
3944 	clear_bit(__IGB_DOWN, &adapter->state);
3945 
3946 	for (i = 0; i < adapter->num_q_vectors; i++)
3947 		napi_enable(&(adapter->q_vector[i]->napi));
3948 
3949 	/* Clear any pending interrupts. */
3950 	rd32(E1000_TSICR);
3951 	rd32(E1000_ICR);
3952 
3953 	igb_irq_enable(adapter);
3954 
3955 	/* notify VFs that reset has been completed */
3956 	if (adapter->vfs_allocated_count) {
3957 		u32 reg_data = rd32(E1000_CTRL_EXT);
3958 
3959 		reg_data |= E1000_CTRL_EXT_PFRSTD;
3960 		wr32(E1000_CTRL_EXT, reg_data);
3961 	}
3962 
3963 	netif_tx_start_all_queues(netdev);
3964 
3965 	if (!resuming)
3966 		pm_runtime_put(&pdev->dev);
3967 
3968 	/* start the watchdog. */
3969 	hw->mac.get_link_status = 1;
3970 	schedule_work(&adapter->watchdog_task);
3971 
3972 	return 0;
3973 
3974 err_set_queues:
3975 	igb_free_irq(adapter);
3976 err_req_irq:
3977 	igb_release_hw_control(adapter);
3978 	igb_power_down_link(adapter);
3979 	igb_free_all_rx_resources(adapter);
3980 err_setup_rx:
3981 	igb_free_all_tx_resources(adapter);
3982 err_setup_tx:
3983 	igb_reset(adapter);
3984 	if (!resuming)
3985 		pm_runtime_put(&pdev->dev);
3986 
3987 	return err;
3988 }
3989 
3990 int igb_open(struct net_device *netdev)
3991 {
3992 	return __igb_open(netdev, false);
3993 }
3994 
3995 /**
3996  *  igb_close - Disables a network interface
3997  *  @netdev: network interface device structure
3998  *
3999  *  Returns 0, this is not allowed to fail
4000  *
4001  *  The close entry point is called when an interface is de-activated
4002  *  by the OS.  The hardware is still under the driver's control, but
4003  *  needs to be disabled.  A global MAC reset is issued to stop the
4004  *  hardware, and all transmit and receive resources are freed.
4005  **/
4006 static int __igb_close(struct net_device *netdev, bool suspending)
4007 {
4008 	struct igb_adapter *adapter = netdev_priv(netdev);
4009 	struct pci_dev *pdev = adapter->pdev;
4010 
4011 	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4012 
4013 	if (!suspending)
4014 		pm_runtime_get_sync(&pdev->dev);
4015 
4016 	igb_down(adapter);
4017 	igb_free_irq(adapter);
4018 
4019 	igb_free_all_tx_resources(adapter);
4020 	igb_free_all_rx_resources(adapter);
4021 
4022 	if (!suspending)
4023 		pm_runtime_put_sync(&pdev->dev);
4024 	return 0;
4025 }
4026 
4027 int igb_close(struct net_device *netdev)
4028 {
4029 	if (netif_device_present(netdev) || netdev->dismantle)
4030 		return __igb_close(netdev, false);
4031 	return 0;
4032 }
4033 
4034 /**
4035  *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
4036  *  @tx_ring: tx descriptor ring (for a specific queue) to setup
4037  *
4038  *  Return 0 on success, negative on failure
4039  **/
4040 int igb_setup_tx_resources(struct igb_ring *tx_ring)
4041 {
4042 	struct device *dev = tx_ring->dev;
4043 	int size;
4044 
4045 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4046 
4047 	tx_ring->tx_buffer_info = vmalloc(size);
4048 	if (!tx_ring->tx_buffer_info)
4049 		goto err;
4050 
4051 	/* round up to nearest 4K */
4052 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4053 	tx_ring->size = ALIGN(tx_ring->size, 4096);
4054 
4055 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4056 					   &tx_ring->dma, GFP_KERNEL);
4057 	if (!tx_ring->desc)
4058 		goto err;
4059 
4060 	tx_ring->next_to_use = 0;
4061 	tx_ring->next_to_clean = 0;
4062 
4063 	return 0;
4064 
4065 err:
4066 	vfree(tx_ring->tx_buffer_info);
4067 	tx_ring->tx_buffer_info = NULL;
4068 	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4069 	return -ENOMEM;
4070 }
4071 
4072 /**
4073  *  igb_setup_all_tx_resources - wrapper to allocate Tx resources
4074  *				 (Descriptors) for all queues
4075  *  @adapter: board private structure
4076  *
4077  *  Return 0 on success, negative on failure
4078  **/
4079 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4080 {
4081 	struct pci_dev *pdev = adapter->pdev;
4082 	int i, err = 0;
4083 
4084 	for (i = 0; i < adapter->num_tx_queues; i++) {
4085 		err = igb_setup_tx_resources(adapter->tx_ring[i]);
4086 		if (err) {
4087 			dev_err(&pdev->dev,
4088 				"Allocation for Tx Queue %u failed\n", i);
4089 			for (i--; i >= 0; i--)
4090 				igb_free_tx_resources(adapter->tx_ring[i]);
4091 			break;
4092 		}
4093 	}
4094 
4095 	return err;
4096 }
4097 
4098 /**
4099  *  igb_setup_tctl - configure the transmit control registers
4100  *  @adapter: Board private structure
4101  **/
4102 void igb_setup_tctl(struct igb_adapter *adapter)
4103 {
4104 	struct e1000_hw *hw = &adapter->hw;
4105 	u32 tctl;
4106 
4107 	/* disable queue 0 which is enabled by default on 82575 and 82576 */
4108 	wr32(E1000_TXDCTL(0), 0);
4109 
4110 	/* Program the Transmit Control Register */
4111 	tctl = rd32(E1000_TCTL);
4112 	tctl &= ~E1000_TCTL_CT;
4113 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4114 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4115 
4116 	igb_config_collision_dist(hw);
4117 
4118 	/* Enable transmits */
4119 	tctl |= E1000_TCTL_EN;
4120 
4121 	wr32(E1000_TCTL, tctl);
4122 }
4123 
4124 /**
4125  *  igb_configure_tx_ring - Configure transmit ring after Reset
4126  *  @adapter: board private structure
4127  *  @ring: tx ring to configure
4128  *
4129  *  Configure a transmit ring after a reset.
4130  **/
4131 void igb_configure_tx_ring(struct igb_adapter *adapter,
4132 			   struct igb_ring *ring)
4133 {
4134 	struct e1000_hw *hw = &adapter->hw;
4135 	u32 txdctl = 0;
4136 	u64 tdba = ring->dma;
4137 	int reg_idx = ring->reg_idx;
4138 
4139 	wr32(E1000_TDLEN(reg_idx),
4140 	     ring->count * sizeof(union e1000_adv_tx_desc));
4141 	wr32(E1000_TDBAL(reg_idx),
4142 	     tdba & 0x00000000ffffffffULL);
4143 	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4144 
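	/* cache the MMIO address of this queue's tail register so the
	 * transmit hot path can bump TDT without recomputing the offset
	 */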
4145 	ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4146 	wr32(E1000_TDH(reg_idx), 0);
4147 	writel(0, ring->tail);
4148 
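	/* assemble TXDCTL: prefetch threshold in the low bits, host and
	 * writeback thresholds shifted into the next fields
	 */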
4149 	txdctl |= IGB_TX_PTHRESH;
4150 	txdctl |= IGB_TX_HTHRESH << 8;
4151 	txdctl |= IGB_TX_WTHRESH << 16;
4152 
4153 	/* reinitialize tx_buffer_info */
4154 	memset(ring->tx_buffer_info, 0,
4155 	       sizeof(struct igb_tx_buffer) * ring->count);
4156 
4157 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4158 	wr32(E1000_TXDCTL(reg_idx), txdctl);
4159 }
4160 
4161 /**
4162  *  igb_configure_tx - Configure transmit Unit after Reset
4163  *  @adapter: board private structure
4164  *
4165  *  Configure the Tx unit of the MAC after a reset.
4166  **/
4167 static void igb_configure_tx(struct igb_adapter *adapter)
4168 {
4169 	struct e1000_hw *hw = &adapter->hw;
4170 	int i;
4171 
4172 	/* disable the queues */
4173 	for (i = 0; i < adapter->num_tx_queues; i++)
4174 		wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4175 
4176 	wrfl();
4177 	usleep_range(10000, 20000);
4178 
4179 	for (i = 0; i < adapter->num_tx_queues; i++)
4180 		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4181 }
4182 
4183 /**
4184  *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
4185  *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
4186  *
4187  *  Returns 0 on success, negative on failure
4188  **/
4189 int igb_setup_rx_resources(struct igb_ring *rx_ring)
4190 {
4191 	struct device *dev = rx_ring->dev;
4192 	int size;
4193 
4194 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4195 
4196 	rx_ring->rx_buffer_info = vmalloc(size);
4197 	if (!rx_ring->rx_buffer_info)
4198 		goto err;
4199 
4200 	/* Round up to nearest 4K */
4201 	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4202 	rx_ring->size = ALIGN(rx_ring->size, 4096);
4203 
4204 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4205 					   &rx_ring->dma, GFP_KERNEL);
4206 	if (!rx_ring->desc)
4207 		goto err;
4208 
4209 	rx_ring->next_to_alloc = 0;
4210 	rx_ring->next_to_clean = 0;
4211 	rx_ring->next_to_use = 0;
4212 
4213 	return 0;
4214 
4215 err:
4216 	vfree(rx_ring->rx_buffer_info);
4217 	rx_ring->rx_buffer_info = NULL;
4218 	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4219 	return -ENOMEM;
4220 }
4221 
4222 /**
4223  *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
4224  *				 (Descriptors) for all queues
4225  *  @adapter: board private structure
4226  *
4227  *  Return 0 on success, negative on failure
4228  **/
4229 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4230 {
4231 	struct pci_dev *pdev = adapter->pdev;
4232 	int i, err = 0;
4233 
4234 	for (i = 0; i < adapter->num_rx_queues; i++) {
4235 		err = igb_setup_rx_resources(adapter->rx_ring[i]);
4236 		if (err) {
4237 			dev_err(&pdev->dev,
4238 				"Allocation for Rx Queue %u failed\n", i);
4239 			for (i--; i >= 0; i--)
4240 				igb_free_rx_resources(adapter->rx_ring[i]);
4241 			break;
4242 		}
4243 	}
4244 
4245 	return err;
4246 }
4247 
4248 /**
4249  *  igb_setup_mrqc - configure the multiple receive queue control registers
4250  *  @adapter: Board private structure
4251  **/
4252 static void igb_setup_mrqc(struct igb_adapter *adapter)
4253 {
4254 	struct e1000_hw *hw = &adapter->hw;
4255 	u32 mrqc, rxcsum;
4256 	u32 j, num_rx_queues;
4257 	u32 rss_key[10];
4258 
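	/* program a random 40-byte RSS hash key into the ten 32-bit
	 * RSSRK registers
	 */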
4259 	netdev_rss_key_fill(rss_key, sizeof(rss_key));
4260 	for (j = 0; j < 10; j++)
4261 		wr32(E1000_RSSRK(j), rss_key[j]);
4262 
4263 	num_rx_queues = adapter->rss_queues;
4264 
4265 	switch (hw->mac.type) {
4266 	case e1000_82576:
4267 		/* 82576 supports 2 RSS queues for SR-IOV */
4268 		if (adapter->vfs_allocated_count)
4269 			num_rx_queues = 2;
4270 		break;
4271 	default:
4272 		break;
4273 	}
4274 
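	/* (re)build the RSS indirection table so its entries are spread
	 * evenly across the active Rx queues, then push it to hardware
	 */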
4275 	if (adapter->rss_indir_tbl_init != num_rx_queues) {
4276 		for (j = 0; j < IGB_RETA_SIZE; j++)
4277 			adapter->rss_indir_tbl[j] =
4278 			(j * num_rx_queues) / IGB_RETA_SIZE;
4279 		adapter->rss_indir_tbl_init = num_rx_queues;
4280 	}
4281 	igb_write_rss_indir_tbl(adapter);
4282 
4283 	/* Disable raw packet checksumming so that RSS hash is placed in
4284 	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
4285 	 * offloads as they are enabled by default
4286 	 */
4287 	rxcsum = rd32(E1000_RXCSUM);
4288 	rxcsum |= E1000_RXCSUM_PCSD;
4289 
4290 	if (adapter->hw.mac.type >= e1000_82576)
4291 		/* Enable Receive Checksum Offload for SCTP */
4292 		rxcsum |= E1000_RXCSUM_CRCOFL;
4293 
4294 	/* Don't need to set TUOFL or IPOFL, they default to 1 */
4295 	wr32(E1000_RXCSUM, rxcsum);
4296 
4297 	/* Generate RSS hash based on packet types, TCP/UDP
4298 	 * port numbers and/or IPv4/v6 src and dst addresses
4299 	 */
4300 	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4301 	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
4302 	       E1000_MRQC_RSS_FIELD_IPV6 |
4303 	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
4304 	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4305 
4306 	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4307 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4308 	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4309 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4310 
4311 	/* If VMDq is enabled then we set the appropriate mode for that, else
4312 	 * we default to RSS so that an RSS hash is calculated per packet even
4313 	 * if we are only using one queue
4314 	 */
4315 	if (adapter->vfs_allocated_count) {
4316 		if (hw->mac.type > e1000_82575) {
4317 			/* Set the default pool for the PF's first queue */
4318 			u32 vtctl = rd32(E1000_VT_CTL);
4319 
4320 			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4321 				   E1000_VT_CTL_DISABLE_DEF_POOL);
4322 			vtctl |= adapter->vfs_allocated_count <<
4323 				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4324 			wr32(E1000_VT_CTL, vtctl);
4325 		}
4326 		if (adapter->rss_queues > 1)
4327 			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4328 		else
4329 			mrqc |= E1000_MRQC_ENABLE_VMDQ;
4330 	} else {
4331 		if (hw->mac.type != e1000_i211)
4332 			mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4333 	}
4334 	igb_vmm_control(adapter);
4335 
4336 	wr32(E1000_MRQC, mrqc);
4337 }
4338 
4339 /**
4340  *  igb_setup_rctl - configure the receive control registers
4341  *  @adapter: Board private structure
4342  **/
4343 void igb_setup_rctl(struct igb_adapter *adapter)
4344 {
4345 	struct e1000_hw *hw = &adapter->hw;
4346 	u32 rctl;
4347 
4348 	rctl = rd32(E1000_RCTL);
4349 
4350 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4351 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4352 
4353 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4354 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4355 
4356 	/* enable stripping of CRC. It's unlikely this will break BMC
4357 	 * redirection as it did with e1000. Newer features require
4358 	 * that the HW strips the CRC.
4359 	 */
4360 	rctl |= E1000_RCTL_SECRC;
4361 
4362 	/* disable store bad packets and clear size bits. */
4363 	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4364 
4365 	/* enable LPE to allow for reception of jumbo frames */
4366 	rctl |= E1000_RCTL_LPE;
4367 
4368 	/* disable queue 0 to prevent tail write w/o re-config */
4369 	wr32(E1000_RXDCTL(0), 0);
4370 
4371 	/* Attention!!!  For SR-IOV PF driver operations you must enable
4372 	 * queue drop for all VF and PF queues to prevent head of line blocking
4373 	 * if an un-trusted VF does not provide descriptors to hardware.
4374 	 */
4375 	if (adapter->vfs_allocated_count) {
4376 		/* set all queue drop enable bits */
4377 		wr32(E1000_QDE, ALL_QUEUES);
4378 	}
4379 
4380 	/* This is useful for sniffing bad packets. */
4381 	if (adapter->netdev->features & NETIF_F_RXALL) {
4382 		/* UPE and MPE will be handled by normal PROMISC logic
		 * in igb_set_rx_mode
4384 		 */
4385 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
4386 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
4387 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
4388 
4389 		rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
4390 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
4391 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
4392 		 * and that breaks VLANs.
4393 		 */
4394 	}
4395 
4396 	wr32(E1000_RCTL, rctl);
4397 }
4398 
4399 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4400 				   int vfn)
4401 {
4402 	struct e1000_hw *hw = &adapter->hw;
4403 	u32 vmolr;
4404 
4405 	if (size > MAX_JUMBO_FRAME_SIZE)
4406 		size = MAX_JUMBO_FRAME_SIZE;
4407 
4408 	vmolr = rd32(E1000_VMOLR(vfn));
4409 	vmolr &= ~E1000_VMOLR_RLPML_MASK;
4410 	vmolr |= size | E1000_VMOLR_LPE;
4411 	wr32(E1000_VMOLR(vfn), vmolr);
4412 
4413 	return 0;
4414 }
4415 
4416 static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4417 					 int vfn, bool enable)
4418 {
4419 	struct e1000_hw *hw = &adapter->hw;
4420 	u32 val, reg;
4421 
4422 	if (hw->mac.type < e1000_82576)
4423 		return;
4424 
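	/* i350 controls per-pool VLAN stripping through DVMOLR; the older
	 * parts use the STRVLAN bit in VMOLR instead
	 */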
4425 	if (hw->mac.type == e1000_i350)
4426 		reg = E1000_DVMOLR(vfn);
4427 	else
4428 		reg = E1000_VMOLR(vfn);
4429 
4430 	val = rd32(reg);
4431 	if (enable)
4432 		val |= E1000_VMOLR_STRVLAN;
4433 	else
4434 		val &= ~(E1000_VMOLR_STRVLAN);
4435 	wr32(reg, val);
4436 }
4437 
4438 static inline void igb_set_vmolr(struct igb_adapter *adapter,
4439 				 int vfn, bool aupe)
4440 {
4441 	struct e1000_hw *hw = &adapter->hw;
4442 	u32 vmolr;
4443 
	/* This register exists only on 82576 and newer, so on older devices
	 * we should exit and do nothing
	 */
4447 	if (hw->mac.type < e1000_82576)
4448 		return;
4449 
4450 	vmolr = rd32(E1000_VMOLR(vfn));
4451 	if (aupe)
4452 		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
4453 	else
4454 		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
4455 
	/* clear the BAM and RSSE bits we may set below, so we start clean */
4457 	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4458 
4459 	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4460 		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
4461 	/* for VMDq only allow the VFs and pool 0 to accept broadcast and
4462 	 * multicast packets
4463 	 */
4464 	if (vfn <= adapter->vfs_allocated_count)
4465 		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
4466 
4467 	wr32(E1000_VMOLR(vfn), vmolr);
4468 }
4469 
4470 /**
4471  *  igb_setup_srrctl - configure the split and replication receive control
4472  *                     registers
4473  *  @adapter: Board private structure
4474  *  @ring: receive ring to be configured
4475  **/
4476 void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
4477 {
4478 	struct e1000_hw *hw = &adapter->hw;
4479 	int reg_idx = ring->reg_idx;
4480 	u32 srrctl = 0;
4481 
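	/* SRRCTL packs the header buffer size, packet buffer size and
	 * descriptor type into a single per-queue register
	 */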
4482 	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4483 	if (ring_uses_large_buffer(ring))
4484 		srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4485 	else
4486 		srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4487 	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4488 	if (hw->mac.type >= e1000_82580)
4489 		srrctl |= E1000_SRRCTL_TIMESTAMP;
4490 	/* Only set Drop Enable if VFs allocated, or we are supporting multiple
4491 	 * queues and rx flow control is disabled
4492 	 */
4493 	if (adapter->vfs_allocated_count ||
4494 	    (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
4495 	     adapter->num_rx_queues > 1))
4496 		srrctl |= E1000_SRRCTL_DROP_EN;
4497 
4498 	wr32(E1000_SRRCTL(reg_idx), srrctl);
4499 }
4500 
4501 /**
4502  *  igb_configure_rx_ring - Configure a receive ring after Reset
4503  *  @adapter: board private structure
4504  *  @ring: receive ring to be configured
4505  *
4506  *  Configure the Rx unit of the MAC after a reset.
4507  **/
4508 void igb_configure_rx_ring(struct igb_adapter *adapter,
4509 			   struct igb_ring *ring)
4510 {
4511 	struct e1000_hw *hw = &adapter->hw;
4512 	union e1000_adv_rx_desc *rx_desc;
4513 	u64 rdba = ring->dma;
4514 	int reg_idx = ring->reg_idx;
4515 	u32 rxdctl = 0;
4516 
4517 	/* disable the queue */
4518 	wr32(E1000_RXDCTL(reg_idx), 0);
4519 
4520 	/* Set DMA base address registers */
4521 	wr32(E1000_RDBAL(reg_idx),
4522 	     rdba & 0x00000000ffffffffULL);
4523 	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4524 	wr32(E1000_RDLEN(reg_idx),
4525 	     ring->count * sizeof(union e1000_adv_rx_desc));
4526 
4527 	/* initialize head and tail */
4528 	ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4529 	wr32(E1000_RDH(reg_idx), 0);
4530 	writel(0, ring->tail);
4531 
4532 	/* set descriptor configuration */
4533 	igb_setup_srrctl(adapter, ring);
4534 
4535 	/* set filtering for VMDQ pools */
4536 	igb_set_vmolr(adapter, reg_idx & 0x7, true);
4537 
4538 	rxdctl |= IGB_RX_PTHRESH;
4539 	rxdctl |= IGB_RX_HTHRESH << 8;
4540 	rxdctl |= IGB_RX_WTHRESH << 16;
4541 
4542 	/* initialize rx_buffer_info */
4543 	memset(ring->rx_buffer_info, 0,
4544 	       sizeof(struct igb_rx_buffer) * ring->count);
4545 
4546 	/* initialize Rx descriptor 0 */
4547 	rx_desc = IGB_RX_DESC(ring, 0);
4548 	rx_desc->wb.upper.length = 0;
4549 
4550 	/* enable receive descriptor fetching */
4551 	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4552 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
4553 }
4554 
4555 static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4556 				  struct igb_ring *rx_ring)
4557 {
4558 	/* set build_skb and buffer size flags */
4559 	clear_ring_build_skb_enabled(rx_ring);
4560 	clear_ring_uses_large_buffer(rx_ring);
4561 
4562 	if (adapter->flags & IGB_FLAG_RX_LEGACY)
4563 		return;
4564 
4565 	set_ring_build_skb_enabled(rx_ring);
4566 
4567 #if (PAGE_SIZE < 8192)
4568 	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4569 		return;
4570 
4571 	set_ring_uses_large_buffer(rx_ring);
4572 #endif
4573 }
4574 
4575 /**
4576  *  igb_configure_rx - Configure receive Unit after Reset
4577  *  @adapter: board private structure
4578  *
4579  *  Configure the Rx unit of the MAC after a reset.
4580  **/
4581 static void igb_configure_rx(struct igb_adapter *adapter)
4582 {
4583 	int i;
4584 
4585 	/* set the correct pool for the PF default MAC address in entry 0 */
4586 	igb_set_default_mac_filter(adapter);
4587 
4588 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
4589 	 * the Base and Length of the Rx Descriptor Ring
4590 	 */
4591 	for (i = 0; i < adapter->num_rx_queues; i++) {
4592 		struct igb_ring *rx_ring = adapter->rx_ring[i];
4593 
4594 		igb_set_rx_buffer_len(adapter, rx_ring);
4595 		igb_configure_rx_ring(adapter, rx_ring);
4596 	}
4597 }
4598 
4599 /**
4600  *  igb_free_tx_resources - Free Tx Resources per Queue
4601  *  @tx_ring: Tx descriptor ring for a specific queue
4602  *
4603  *  Free all transmit software resources
4604  **/
4605 void igb_free_tx_resources(struct igb_ring *tx_ring)
4606 {
4607 	igb_clean_tx_ring(tx_ring);
4608 
4609 	vfree(tx_ring->tx_buffer_info);
4610 	tx_ring->tx_buffer_info = NULL;
4611 
4612 	/* if not set, then don't free */
4613 	if (!tx_ring->desc)
4614 		return;
4615 
4616 	dma_free_coherent(tx_ring->dev, tx_ring->size,
4617 			  tx_ring->desc, tx_ring->dma);
4618 
4619 	tx_ring->desc = NULL;
4620 }
4621 
4622 /**
4623  *  igb_free_all_tx_resources - Free Tx Resources for All Queues
4624  *  @adapter: board private structure
4625  *
4626  *  Free all transmit software resources
4627  **/
4628 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4629 {
4630 	int i;
4631 
4632 	for (i = 0; i < adapter->num_tx_queues; i++)
4633 		if (adapter->tx_ring[i])
4634 			igb_free_tx_resources(adapter->tx_ring[i]);
4635 }
4636 
4637 /**
4638  *  igb_clean_tx_ring - Free Tx Buffers
4639  *  @tx_ring: ring to be cleaned
4640  **/
4641 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4642 {
4643 	u16 i = tx_ring->next_to_clean;
4644 	struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4645 
4646 	while (i != tx_ring->next_to_use) {
4647 		union e1000_adv_tx_desc *eop_desc, *tx_desc;
4648 
4649 		/* Free all the Tx ring sk_buffs */
4650 		dev_kfree_skb_any(tx_buffer->skb);
4651 
4652 		/* unmap skb header data */
4653 		dma_unmap_single(tx_ring->dev,
4654 				 dma_unmap_addr(tx_buffer, dma),
4655 				 dma_unmap_len(tx_buffer, len),
4656 				 DMA_TO_DEVICE);
4657 
4658 		/* check for eop_desc to determine the end of the packet */
4659 		eop_desc = tx_buffer->next_to_watch;
4660 		tx_desc = IGB_TX_DESC(tx_ring, i);
4661 
4662 		/* unmap remaining buffers */
4663 		while (tx_desc != eop_desc) {
4664 			tx_buffer++;
4665 			tx_desc++;
4666 			i++;
4667 			if (unlikely(i == tx_ring->count)) {
4668 				i = 0;
4669 				tx_buffer = tx_ring->tx_buffer_info;
4670 				tx_desc = IGB_TX_DESC(tx_ring, 0);
4671 			}
4672 
4673 			/* unmap any remaining paged data */
4674 			if (dma_unmap_len(tx_buffer, len))
4675 				dma_unmap_page(tx_ring->dev,
4676 					       dma_unmap_addr(tx_buffer, dma),
4677 					       dma_unmap_len(tx_buffer, len),
4678 					       DMA_TO_DEVICE);
4679 		}
4680 
4681 		/* move us one more past the eop_desc for start of next pkt */
4682 		tx_buffer++;
4683 		i++;
4684 		if (unlikely(i == tx_ring->count)) {
4685 			i = 0;
4686 			tx_buffer = tx_ring->tx_buffer_info;
4687 		}
4688 	}
4689 
4690 	/* reset BQL for queue */
4691 	netdev_tx_reset_queue(txring_txq(tx_ring));
4692 
4693 	/* reset next_to_use and next_to_clean */
4694 	tx_ring->next_to_use = 0;
4695 	tx_ring->next_to_clean = 0;
4696 }
4697 
4698 /**
4699  *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
4700  *  @adapter: board private structure
4701  **/
4702 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4703 {
4704 	int i;
4705 
4706 	for (i = 0; i < adapter->num_tx_queues; i++)
4707 		if (adapter->tx_ring[i])
4708 			igb_clean_tx_ring(adapter->tx_ring[i]);
4709 }
4710 
4711 /**
4712  *  igb_free_rx_resources - Free Rx Resources
4713  *  @rx_ring: ring to clean the resources from
4714  *
4715  *  Free all receive software resources
4716  **/
4717 void igb_free_rx_resources(struct igb_ring *rx_ring)
4718 {
4719 	igb_clean_rx_ring(rx_ring);
4720 
4721 	vfree(rx_ring->rx_buffer_info);
4722 	rx_ring->rx_buffer_info = NULL;
4723 
4724 	/* if not set, then don't free */
4725 	if (!rx_ring->desc)
4726 		return;
4727 
4728 	dma_free_coherent(rx_ring->dev, rx_ring->size,
4729 			  rx_ring->desc, rx_ring->dma);
4730 
4731 	rx_ring->desc = NULL;
4732 }
4733 
4734 /**
4735  *  igb_free_all_rx_resources - Free Rx Resources for All Queues
4736  *  @adapter: board private structure
4737  *
4738  *  Free all receive software resources
4739  **/
4740 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4741 {
4742 	int i;
4743 
4744 	for (i = 0; i < adapter->num_rx_queues; i++)
4745 		if (adapter->rx_ring[i])
4746 			igb_free_rx_resources(adapter->rx_ring[i]);
4747 }
4748 
4749 /**
4750  *  igb_clean_rx_ring - Free Rx Buffers per Queue
4751  *  @rx_ring: ring to free buffers from
4752  **/
4753 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4754 {
4755 	u16 i = rx_ring->next_to_clean;
4756 
4757 	dev_kfree_skb(rx_ring->skb);
4758 	rx_ring->skb = NULL;
4759 
4760 	/* Free all the Rx ring sk_buffs */
4761 	while (i != rx_ring->next_to_alloc) {
4762 		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4763 
4764 		/* Invalidate cache lines that may have been written to by
4765 		 * device so that we avoid corrupting memory.
4766 		 */
4767 		dma_sync_single_range_for_cpu(rx_ring->dev,
4768 					      buffer_info->dma,
4769 					      buffer_info->page_offset,
4770 					      igb_rx_bufsz(rx_ring),
4771 					      DMA_FROM_DEVICE);
4772 
4773 		/* free resources associated with mapping */
4774 		dma_unmap_page_attrs(rx_ring->dev,
4775 				     buffer_info->dma,
4776 				     igb_rx_pg_size(rx_ring),
4777 				     DMA_FROM_DEVICE,
4778 				     IGB_RX_DMA_ATTR);
4779 		__page_frag_cache_drain(buffer_info->page,
4780 					buffer_info->pagecnt_bias);
4781 
4782 		i++;
4783 		if (i == rx_ring->count)
4784 			i = 0;
4785 	}
4786 
4787 	rx_ring->next_to_alloc = 0;
4788 	rx_ring->next_to_clean = 0;
4789 	rx_ring->next_to_use = 0;
4790 }
4791 
4792 /**
4793  *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
4794  *  @adapter: board private structure
4795  **/
4796 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4797 {
4798 	int i;
4799 
4800 	for (i = 0; i < adapter->num_rx_queues; i++)
4801 		if (adapter->rx_ring[i])
4802 			igb_clean_rx_ring(adapter->rx_ring[i]);
4803 }
4804 
4805 /**
4806  *  igb_set_mac - Change the Ethernet Address of the NIC
4807  *  @netdev: network interface device structure
4808  *  @p: pointer to an address structure
4809  *
4810  *  Returns 0 on success, negative on failure
4811  **/
4812 static int igb_set_mac(struct net_device *netdev, void *p)
4813 {
4814 	struct igb_adapter *adapter = netdev_priv(netdev);
4815 	struct e1000_hw *hw = &adapter->hw;
4816 	struct sockaddr *addr = p;
4817 
4818 	if (!is_valid_ether_addr(addr->sa_data))
4819 		return -EADDRNOTAVAIL;
4820 
4821 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4822 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4823 
4824 	/* set the correct pool for the new PF MAC address in entry 0 */
4825 	igb_set_default_mac_filter(adapter);
4826 
4827 	return 0;
4828 }
4829 
4830 /**
4831  *  igb_write_mc_addr_list - write multicast addresses to MTA
4832  *  @netdev: network interface device structure
4833  *
4834  *  Writes multicast address list to the MTA hash table.
4835  *  Returns: -ENOMEM on failure
4836  *           0 on no addresses written
4837  *           X on writing X addresses to MTA
4838  **/
4839 static int igb_write_mc_addr_list(struct net_device *netdev)
4840 {
4841 	struct igb_adapter *adapter = netdev_priv(netdev);
4842 	struct e1000_hw *hw = &adapter->hw;
4843 	struct netdev_hw_addr *ha;
4844 	u8  *mta_list;
4845 	int i;
4846 
4847 	if (netdev_mc_empty(netdev)) {
4848 		/* nothing to program, so clear mc list */
4849 		igb_update_mc_addr_list(hw, NULL, 0);
4850 		igb_restore_vf_multicasts(adapter);
4851 		return 0;
4852 	}
4853 
	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
4855 	if (!mta_list)
4856 		return -ENOMEM;
4857 
4858 	/* The shared function expects a packed array of only addresses. */
4859 	i = 0;
4860 	netdev_for_each_mc_addr(ha, netdev)
4861 		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
4862 
4863 	igb_update_mc_addr_list(hw, mta_list, i);
4864 	kfree(mta_list);
4865 
4866 	return netdev_mc_count(netdev);
4867 }
4868 
4869 static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4870 {
4871 	struct e1000_hw *hw = &adapter->hw;
4872 	u32 i, pf_id;
4873 
4874 	switch (hw->mac.type) {
4875 	case e1000_i210:
4876 	case e1000_i211:
4877 	case e1000_i350:
4878 		/* VLAN filtering needed for VLAN prio filter */
4879 		if (adapter->netdev->features & NETIF_F_NTUPLE)
4880 			break;
4881 		/* fall through */
4882 	case e1000_82576:
4883 	case e1000_82580:
4884 	case e1000_i354:
4885 		/* VLAN filtering needed for pool filtering */
4886 		if (adapter->vfs_allocated_count)
4887 			break;
4888 		/* fall through */
4889 	default:
4890 		return 1;
4891 	}
4892 
4893 	/* We are already in VLAN promisc, nothing to do */
4894 	if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4895 		return 0;
4896 
4897 	if (!adapter->vfs_allocated_count)
4898 		goto set_vfta;
4899 
4900 	/* Add PF to all active pools */
4901 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4902 
4903 	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4904 		u32 vlvf = rd32(E1000_VLVF(i));
4905 
4906 		vlvf |= BIT(pf_id);
4907 		wr32(E1000_VLVF(i), vlvf);
4908 	}
4909 
4910 set_vfta:
4911 	/* Set all bits in the VLAN filter table array */
4912 	for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4913 		hw->mac.ops.write_vfta(hw, i, ~0U);
4914 
4915 	/* Set flag so we don't redo unnecessary work */
4916 	adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4917 
4918 	return 0;
4919 }
4920 
4921 #define VFTA_BLOCK_SIZE 8
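/* Each VFTA register covers 32 VLAN IDs, so a block of eight registers
 * spans 256 consecutive IDs.
 */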
4922 static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4923 {
4924 	struct e1000_hw *hw = &adapter->hw;
4925 	u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4926 	u32 vid_start = vfta_offset * 32;
4927 	u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4928 	u32 i, vid, word, bits, pf_id;
4929 
4930 	/* guarantee that we don't scrub out management VLAN */
4931 	vid = adapter->mng_vlan_id;
4932 	if (vid >= vid_start && vid < vid_end)
4933 		vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4934 
4935 	if (!adapter->vfs_allocated_count)
4936 		goto set_vfta;
4937 
4938 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4939 
4940 	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4941 		u32 vlvf = rd32(E1000_VLVF(i));
4942 
4943 		/* pull VLAN ID from VLVF */
4944 		vid = vlvf & VLAN_VID_MASK;
4945 
4946 		/* only concern ourselves with a certain range */
4947 		if (vid < vid_start || vid >= vid_end)
4948 			continue;
4949 
4950 		if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4951 			/* record VLAN ID in VFTA */
4952 			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4953 
4954 			/* if PF is part of this then continue */
4955 			if (test_bit(vid, adapter->active_vlans))
4956 				continue;
4957 		}
4958 
4959 		/* remove PF from the pool */
4960 		bits = ~BIT(pf_id);
4961 		bits &= rd32(E1000_VLVF(i));
4962 		wr32(E1000_VLVF(i), bits);
4963 	}
4964 
4965 set_vfta:
4966 	/* extract values from active_vlans and write back to VFTA */
4967 	for (i = VFTA_BLOCK_SIZE; i--;) {
4968 		vid = (vfta_offset + i) * 32;
4969 		word = vid / BITS_PER_LONG;
4970 		bits = vid % BITS_PER_LONG;
4971 
4972 		vfta[i] |= adapter->active_vlans[word] >> bits;
4973 
4974 		hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4975 	}
4976 }
4977 
4978 static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4979 {
4980 	u32 i;
4981 
4982 	/* We are not in VLAN promisc, nothing to do */
4983 	if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4984 		return;
4985 
4986 	/* Set flag so we don't redo unnecessary work */
4987 	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4988 
4989 	for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4990 		igb_scrub_vfta(adapter, i);
4991 }
4992 
4993 /**
4994  *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4995  *  @netdev: network interface device structure
4996  *
4997  *  The set_rx_mode entry point is called whenever the unicast or multicast
4998  *  address lists or the network interface flags are updated.  This routine is
4999  *  responsible for configuring the hardware for proper unicast, multicast,
5000  *  promiscuous mode, and all-multi behavior.
5001  **/
5002 static void igb_set_rx_mode(struct net_device *netdev)
5003 {
5004 	struct igb_adapter *adapter = netdev_priv(netdev);
5005 	struct e1000_hw *hw = &adapter->hw;
5006 	unsigned int vfn = adapter->vfs_allocated_count;
5007 	u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
5008 	int count;
5009 
5010 	/* Check for Promiscuous and All Multicast modes */
5011 	if (netdev->flags & IFF_PROMISC) {
5012 		rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5013 		vmolr |= E1000_VMOLR_MPME;
5014 
5015 		/* enable use of UTA filter to force packets to default pool */
5016 		if (hw->mac.type == e1000_82576)
5017 			vmolr |= E1000_VMOLR_ROPE;
5018 	} else {
5019 		if (netdev->flags & IFF_ALLMULTI) {
5020 			rctl |= E1000_RCTL_MPE;
5021 			vmolr |= E1000_VMOLR_MPME;
5022 		} else {
5023 			/* Write addresses to the MTA, if the attempt fails
5024 			 * then we should just turn on promiscuous mode so
5025 			 * that we can at least receive multicast traffic
5026 			 */
5027 			count = igb_write_mc_addr_list(netdev);
5028 			if (count < 0) {
5029 				rctl |= E1000_RCTL_MPE;
5030 				vmolr |= E1000_VMOLR_MPME;
5031 			} else if (count) {
5032 				vmolr |= E1000_VMOLR_ROMPE;
5033 			}
5034 		}
5035 	}
5036 
5037 	/* Write addresses to available RAR registers, if there is not
5038 	 * sufficient space to store all the addresses then enable
5039 	 * unicast promiscuous mode
5040 	 */
5041 	if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5042 		rctl |= E1000_RCTL_UPE;
5043 		vmolr |= E1000_VMOLR_ROPE;
5044 	}
5045 
5046 	/* enable VLAN filtering by default */
5047 	rctl |= E1000_RCTL_VFE;
5048 
5049 	/* disable VLAN filtering for modes that require it */
5050 	if ((netdev->flags & IFF_PROMISC) ||
5051 	    (netdev->features & NETIF_F_RXALL)) {
5052 		/* if we fail to set all rules then just clear VFE */
5053 		if (igb_vlan_promisc_enable(adapter))
5054 			rctl &= ~E1000_RCTL_VFE;
5055 	} else {
5056 		igb_vlan_promisc_disable(adapter);
5057 	}
5058 
5059 	/* update state of unicast, multicast, and VLAN filtering modes */
5060 	rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5061 				     E1000_RCTL_VFE);
5062 	wr32(E1000_RCTL, rctl);
5063 
5064 #if (PAGE_SIZE < 8192)
5065 	if (!adapter->vfs_allocated_count) {
5066 		if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5067 			rlpml = IGB_MAX_FRAME_BUILD_SKB;
5068 	}
5069 #endif
5070 	wr32(E1000_RLPML, rlpml);
5071 
5072 	/* In order to support SR-IOV and eventually VMDq it is necessary to set
5073 	 * the VMOLR to enable the appropriate modes.  Without this workaround
5074 	 * we will have issues with VLAN tag stripping not being done for frames
5075 	 * that are only arriving because we are the default pool
5076 	 */
5077 	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5078 		return;
5079 
5080 	/* set UTA to appropriate mode */
5081 	igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5082 
5083 	vmolr |= rd32(E1000_VMOLR(vfn)) &
5084 		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5085 
5086 	/* enable Rx jumbo frames, restrict as needed to support build_skb */
5087 	vmolr &= ~E1000_VMOLR_RLPML_MASK;
5088 #if (PAGE_SIZE < 8192)
5089 	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5090 		vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5091 	else
5092 #endif
5093 		vmolr |= MAX_JUMBO_FRAME_SIZE;
5094 	vmolr |= E1000_VMOLR_LPE;
5095 
5096 	wr32(E1000_VMOLR(vfn), vmolr);
5097 
5098 	igb_restore_vf_multicasts(adapter);
5099 }
5100 
5101 static void igb_check_wvbr(struct igb_adapter *adapter)
5102 {
5103 	struct e1000_hw *hw = &adapter->hw;
5104 	u32 wvbr = 0;
5105 
5106 	switch (hw->mac.type) {
5107 	case e1000_82576:
5108 	case e1000_i350:
5109 		wvbr = rd32(E1000_WVBR);
5110 		if (!wvbr)
5111 			return;
5112 		break;
5113 	default:
5114 		break;
5115 	}
5116 
5117 	adapter->wvbr |= wvbr;
5118 }
5119 
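/* WVBR reports spoof events per queue; a VF's two queues map to bits
 * j and j + 8, hence the staggered offset below.
 */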
5120 #define IGB_STAGGERED_QUEUE_OFFSET 8
5121 
5122 static void igb_spoof_check(struct igb_adapter *adapter)
5123 {
5124 	int j;
5125 
5126 	if (!adapter->wvbr)
5127 		return;
5128 
5129 	for (j = 0; j < adapter->vfs_allocated_count; j++) {
5130 		if (adapter->wvbr & BIT(j) ||
5131 		    adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5132 			dev_warn(&adapter->pdev->dev,
5133 				"Spoof event(s) detected on VF %d\n", j);
5134 			adapter->wvbr &=
5135 				~(BIT(j) |
5136 				  BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5137 		}
5138 	}
5139 }
5140 
5141 /* Need to wait a few seconds after link up to get diagnostic information from
5142  * the phy
5143  */
5144 static void igb_update_phy_info(struct timer_list *t)
5145 {
	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);

	igb_get_phy_info(&adapter->hw);
5148 }
5149 
5150 /**
5151  *  igb_has_link - check shared code for link and determine up/down
5152  *  @adapter: pointer to driver private info
5153  **/
5154 bool igb_has_link(struct igb_adapter *adapter)
5155 {
5156 	struct e1000_hw *hw = &adapter->hw;
5157 	bool link_active = false;
5158 
5159 	/* get_link_status is set on LSC (link status) interrupt or
5160 	 * rx sequence error interrupt.  get_link_status will stay
	 * true until e1000_check_for_link establishes link
5162 	 * for copper adapters ONLY
5163 	 */
5164 	switch (hw->phy.media_type) {
5165 	case e1000_media_type_copper:
5166 		if (!hw->mac.get_link_status)
5167 			return true;
5168 		/* fall through */
5169 	case e1000_media_type_internal_serdes:
5170 		hw->mac.ops.check_for_link(hw);
5171 		link_active = !hw->mac.get_link_status;
5172 		break;
5173 	default:
5174 	case e1000_media_type_unknown:
5175 		break;
5176 	}
5177 
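	/* Workaround for I210/I211 internal PHYs: delay reporting link-up
	 * by holding IGB_FLAG_NEED_LINK_UPDATE for roughly one second
	 * (cleared in igb_watchdog_task once link_check_timeout expires).
	 */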
5178 	if (((hw->mac.type == e1000_i210) ||
5179 	     (hw->mac.type == e1000_i211)) &&
5180 	     (hw->phy.id == I210_I_PHY_ID)) {
5181 		if (!netif_carrier_ok(adapter->netdev)) {
5182 			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5183 		} else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5184 			adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5185 			adapter->link_check_timeout = jiffies;
5186 		}
5187 	}
5188 
5189 	return link_active;
5190 }
5191 
5192 static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5193 {
5194 	bool ret = false;
5195 	u32 ctrl_ext, thstat;
5196 
5197 	/* check for thermal sensor event on i350 copper only */
5198 	if (hw->mac.type == e1000_i350) {
5199 		thstat = rd32(E1000_THSTAT);
5200 		ctrl_ext = rd32(E1000_CTRL_EXT);
5201 
5202 		if ((hw->phy.media_type == e1000_media_type_copper) &&
5203 		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5204 			ret = !!(thstat & event);
5205 	}
5206 
5207 	return ret;
5208 }
5209 
5210 /**
5211  *  igb_check_lvmmc - check for malformed packets received
5212  *  and indicated in LVMMC register
5213  *  @adapter: pointer to adapter
5214  **/
5215 static void igb_check_lvmmc(struct igb_adapter *adapter)
5216 {
5217 	struct e1000_hw *hw = &adapter->hw;
5218 	u32 lvmmc;
5219 
5220 	lvmmc = rd32(E1000_LVMMC);
5221 	if (lvmmc) {
5222 		if (unlikely(net_ratelimit())) {
5223 			netdev_warn(adapter->netdev,
5224 				    "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5225 				    lvmmc);
5226 		}
5227 	}
5228 }
5229 
5230 /**
5231  *  igb_watchdog - Timer Call-back
 *  @t: pointer to timer_list containing our private info pointer
5233  **/
5234 static void igb_watchdog(struct timer_list *t)
5235 {
5236 	struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5237 	/* Do the rest outside of interrupt context */
5238 	schedule_work(&adapter->watchdog_task);
5239 }
5240 
5241 static void igb_watchdog_task(struct work_struct *work)
5242 {
5243 	struct igb_adapter *adapter = container_of(work,
5244 						   struct igb_adapter,
5245 						   watchdog_task);
5246 	struct e1000_hw *hw = &adapter->hw;
5247 	struct e1000_phy_info *phy = &hw->phy;
5248 	struct net_device *netdev = adapter->netdev;
5249 	u32 link;
5250 	int i;
5251 	u32 connsw;
5252 	u16 phy_data, retry_count = 20;
5253 
5254 	link = igb_has_link(adapter);
5255 
5256 	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5257 		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5258 			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5259 		else
5260 			link = false;
5261 	}
5262 
5263 	/* Force link down if we have fiber to swap to */
5264 	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5265 		if (hw->phy.media_type == e1000_media_type_copper) {
5266 			connsw = rd32(E1000_CONNSW);
5267 			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5268 				link = 0;
5269 		}
5270 	}
5271 	if (link) {
5272 		/* Perform a reset if the media type changed. */
5273 		if (hw->dev_spec._82575.media_changed) {
5274 			hw->dev_spec._82575.media_changed = false;
5275 			adapter->flags |= IGB_FLAG_MEDIA_RESET;
5276 			igb_reset(adapter);
5277 		}
5278 		/* Cancel scheduled suspend requests. */
5279 		pm_runtime_resume(netdev->dev.parent);
5280 
5281 		if (!netif_carrier_ok(netdev)) {
5282 			u32 ctrl;
5283 
5284 			hw->mac.ops.get_speed_and_duplex(hw,
5285 							 &adapter->link_speed,
5286 							 &adapter->link_duplex);
5287 
5288 			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
5290 			netdev_info(netdev,
5291 			       "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5292 			       netdev->name,
5293 			       adapter->link_speed,
5294 			       adapter->link_duplex == FULL_DUPLEX ?
5295 			       "Full" : "Half",
5296 			       (ctrl & E1000_CTRL_TFCE) &&
5297 			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5298 			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :
5299 			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
5300 
5301 			/* disable EEE if enabled */
			if ((adapter->flags & IGB_FLAG_EEE) &&
			    (adapter->link_duplex == HALF_DUPLEX)) {
				dev_info(&adapter->pdev->dev,
					 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5306 				adapter->hw.dev_spec._82575.eee_disable = true;
5307 				adapter->flags &= ~IGB_FLAG_EEE;
5308 			}
5309 
5310 			/* check if SmartSpeed worked */
5311 			igb_check_downshift(hw);
5312 			if (phy->speed_downgraded)
5313 				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5314 
5315 			/* check for thermal sensor event */
5316 			if (igb_thermal_sensor_event(hw,
5317 			    E1000_THSTAT_LINK_THROTTLE))
5318 				netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5319 
5320 			/* adjust timeout factor according to speed/duplex */
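			/* the factor scales the Tx hang-detection timeout
			 * used by the transmit cleanup path, giving slower
			 * links proportionally more time per packet
			 */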
5321 			adapter->tx_timeout_factor = 1;
5322 			switch (adapter->link_speed) {
5323 			case SPEED_10:
5324 				adapter->tx_timeout_factor = 14;
5325 				break;
5326 			case SPEED_100:
5327 				/* maybe add some timeout factor ? */
5328 				break;
5329 			}
5330 
5331 			if (adapter->link_speed != SPEED_1000)
5332 				goto no_wait;
5333 
			/* wait up to 2 seconds (20 x 100 msec) for the link
			 * partner to report Remote receiver status OK
			 */
5335 retry_read_status:
5336 			if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5337 					      &phy_data)) {
5338 				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5339 				    retry_count) {
5340 					msleep(100);
5341 					retry_count--;
5342 					goto retry_read_status;
5343 				} else if (!retry_count) {
					dev_err(&adapter->pdev->dev, "timed out waiting for Remote receiver status OK\n");
5345 				}
5346 			} else {
				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status register\n");
5348 			}
5349 no_wait:
5350 			netif_carrier_on(netdev);
5351 
5352 			igb_ping_all_vfs(adapter);
5353 			igb_check_vf_rate_limit(adapter);
5354 
5355 			/* link state has changed, schedule phy info update */
5356 			if (!test_bit(__IGB_DOWN, &adapter->state))
5357 				mod_timer(&adapter->phy_info_timer,
5358 					  round_jiffies(jiffies + 2 * HZ));
5359 		}
5360 	} else {
5361 		if (netif_carrier_ok(netdev)) {
5362 			adapter->link_speed = 0;
5363 			adapter->link_duplex = 0;
5364 
5365 			/* check for thermal sensor event */
5366 			if (igb_thermal_sensor_event(hw,
5367 			    E1000_THSTAT_PWR_DOWN)) {
5368 				netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5369 			}
5370 
			/* Link status message must follow this format */
5372 			netdev_info(netdev, "igb: %s NIC Link is Down\n",
5373 			       netdev->name);
5374 			netif_carrier_off(netdev);
5375 
5376 			igb_ping_all_vfs(adapter);
5377 
5378 			/* link state has changed, schedule phy info update */
5379 			if (!test_bit(__IGB_DOWN, &adapter->state))
5380 				mod_timer(&adapter->phy_info_timer,
5381 					  round_jiffies(jiffies + 2 * HZ));
5382 
5383 			/* link is down, time to check for alternate media */
5384 			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5385 				igb_check_swap_media(adapter);
5386 				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5387 					schedule_work(&adapter->reset_task);
5388 					/* return immediately */
5389 					return;
5390 				}
5391 			}
5392 			pm_schedule_suspend(netdev->dev.parent,
5393 					    MSEC_PER_SEC * 5);
5394 
5395 		/* also check for alternate media here */
5396 		} else if (!netif_carrier_ok(netdev) &&
5397 			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5398 			igb_check_swap_media(adapter);
5399 			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5400 				schedule_work(&adapter->reset_task);
5401 				/* return immediately */
5402 				return;
5403 			}
5404 		}
5405 	}
5406 
5407 	spin_lock(&adapter->stats64_lock);
5408 	igb_update_stats(adapter);
5409 	spin_unlock(&adapter->stats64_lock);
5410 
5411 	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];

		if (!netif_carrier_ok(netdev)) {
5414 			/* We've lost link, so the controller stops DMA,
5415 			 * but we've got queued Tx work that's never going
5416 			 * to get done, so reset controller to flush Tx.
5417 			 * (Do the reset outside of interrupt context).
5418 			 */
5419 			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5420 				adapter->tx_timeout_count++;
5421 				schedule_work(&adapter->reset_task);
5422 				/* return immediately since reset is imminent */
5423 				return;
5424 			}
5425 		}
5426 
5427 		/* Force detection of hung controller every watchdog period */
5428 		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5429 	}
5430 
5431 	/* Cause software interrupt to ensure Rx ring is cleaned */
5432 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5433 		u32 eics = 0;
5434 
5435 		for (i = 0; i < adapter->num_q_vectors; i++)
5436 			eics |= adapter->q_vector[i]->eims_value;
5437 		wr32(E1000_EICS, eics);
5438 	} else {
5439 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
5440 	}
5441 
5442 	igb_spoof_check(adapter);
5443 	igb_ptp_rx_hang(adapter);
5444 	igb_ptp_tx_hang(adapter);
5445 
5446 	/* Check LVMMC register on i350/i354 only */
5447 	if ((adapter->hw.mac.type == e1000_i350) ||
5448 	    (adapter->hw.mac.type == e1000_i354))
5449 		igb_check_lvmmc(adapter);
5450 
5451 	/* Reset the timer */
5452 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
5453 		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5454 			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + HZ));
5456 		else
5457 			mod_timer(&adapter->watchdog_timer,
5458 				  round_jiffies(jiffies + 2 * HZ));
5459 	}
5460 }
5461 
5462 enum latency_range {
5463 	lowest_latency = 0,
5464 	low_latency = 1,
5465 	bulk_latency = 2,
5466 	latency_invalid = 255
5467 };
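
/* igb_set_itr() maps these ranges onto interrupt rates: lowest_latency
 * selects IGB_70K_ITR, low_latency IGB_20K_ITR and bulk_latency
 * IGB_4K_ITR; see the switch statement there.
 */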
5468 
5469 /**
5470  *  igb_update_ring_itr - update the dynamic ITR value based on packet size
5471  *  @q_vector: pointer to q_vector
5472  *
 *  Stores a new ITR value based strictly on packet size.  This
5474  *  algorithm is less sophisticated than that used in igb_update_itr,
5475  *  due to the difficulty of synchronizing statistics across multiple
5476  *  receive rings.  The divisors and thresholds used by this function
5477  *  were determined based on theoretical maximum wire speed and testing
5478  *  data, in order to minimize response time while increasing bulk
5479  *  throughput.
5480  *  This functionality is controlled by ethtool's coalescing settings.
5481  *  NOTE:  This function is called only when operating in a multiqueue
5482  *         receive environment.
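 *
 *  Illustrative example: 1,000 Rx packets totalling 1,176,000 bytes give
 *  avg_wire_size = 1176 + 24 = 1200, which falls outside the mid-size
 *  boost (300 < size < 1200) and therefore yields new_val = 1200 / 2 = 600.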
5483  **/
5484 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5485 {
5486 	int new_val = q_vector->itr_val;
5487 	int avg_wire_size = 0;
5488 	struct igb_adapter *adapter = q_vector->adapter;
5489 	unsigned int packets;
5490 
	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec (IGB_4K_ITR).
	 */
5494 	if (adapter->link_speed != SPEED_1000) {
5495 		new_val = IGB_4K_ITR;
5496 		goto set_itr_val;
5497 	}
5498 
5499 	packets = q_vector->rx.total_packets;
5500 	if (packets)
5501 		avg_wire_size = q_vector->rx.total_bytes / packets;
5502 
5503 	packets = q_vector->tx.total_packets;
5504 	if (packets)
5505 		avg_wire_size = max_t(u32, avg_wire_size,
5506 				      q_vector->tx.total_bytes / packets);
5507 
5508 	/* if avg_wire_size isn't set no work was done */
5509 	if (!avg_wire_size)
5510 		goto clear_counts;
5511 
5512 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5513 	avg_wire_size += 24;
5514 
5515 	/* Don't starve jumbo frames */
5516 	avg_wire_size = min(avg_wire_size, 3000);
5517 
5518 	/* Give a little boost to mid-size frames */
5519 	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5520 		new_val = avg_wire_size / 3;
5521 	else
5522 		new_val = avg_wire_size / 2;
5523 
5524 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
5525 	if (new_val < IGB_20K_ITR &&
5526 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5527 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5528 		new_val = IGB_20K_ITR;
5529 
5530 set_itr_val:
5531 	if (new_val != q_vector->itr_val) {
5532 		q_vector->itr_val = new_val;
5533 		q_vector->set_itr = 1;
5534 	}
5535 clear_counts:
5536 	q_vector->rx.total_bytes = 0;
5537 	q_vector->rx.total_packets = 0;
5538 	q_vector->tx.total_bytes = 0;
5539 	q_vector->tx.total_packets = 0;
5540 }
5541 
5542 /**
5543  *  igb_update_itr - update the dynamic ITR value based on statistics
5544  *  @q_vector: pointer to q_vector
5545  *  @ring_container: ring info to update the itr for
5546  *
5547  *  Stores a new ITR value based on packets and byte
5548  *  counts during the last interrupt.  The advantage of per interrupt
5549  *  computation is faster updates and more accurate ITR for the current
5550  *  traffic pattern.  Constants in this function were computed
5551  *  based on theoretical maximum wire speed and thresholds were set based
5552  *  on testing data as well as attempting to minimize response time
5553  *  while increasing bulk throughput.
5554  *  This functionality is controlled by ethtool's coalescing settings.
5555  *  NOTE:  These calculations are only valid when operating in a single-
5556  *         queue environment.
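 *
 *  Illustrative example: while in low_latency, an interrupt window of
 *  40 packets totalling 12,000 bytes (300 bytes/packet) satisfies
 *  bytes > 10000 and packets > 35, so the ring moves to lowest_latency.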
5557  **/
5558 static void igb_update_itr(struct igb_q_vector *q_vector,
5559 			   struct igb_ring_container *ring_container)
5560 {
5561 	unsigned int packets = ring_container->total_packets;
5562 	unsigned int bytes = ring_container->total_bytes;
5563 	u8 itrval = ring_container->itr;
5564 
5565 	/* no packets, exit with status unchanged */
5566 	if (packets == 0)
5567 		return;
5568 
5569 	switch (itrval) {
5570 	case lowest_latency:
5571 		/* handle TSO and jumbo frames */
5572 		if (bytes/packets > 8000)
5573 			itrval = bulk_latency;
5574 		else if ((packets < 5) && (bytes > 512))
5575 			itrval = low_latency;
5576 		break;
5577 	case low_latency:  /* 50 usec aka 20000 ints/s */
5578 		if (bytes > 10000) {
5579 			/* this if handles the TSO accounting */
5580 			if (bytes/packets > 8000)
5581 				itrval = bulk_latency;
5582 			else if ((packets < 10) || ((bytes/packets) > 1200))
5583 				itrval = bulk_latency;
5584 			else if ((packets > 35))
5585 				itrval = lowest_latency;
5586 		} else if (bytes/packets > 2000) {
5587 			itrval = bulk_latency;
5588 		} else if (packets <= 2 && bytes < 512) {
5589 			itrval = lowest_latency;
5590 		}
5591 		break;
5592 	case bulk_latency: /* 250 usec aka 4000 ints/s */
5593 		if (bytes > 25000) {
5594 			if (packets > 35)
5595 				itrval = low_latency;
5596 		} else if (bytes < 1500) {
5597 			itrval = low_latency;
5598 		}
5599 		break;
5600 	}
5601 
5602 	/* clear work counters since we have the values we need */
5603 	ring_container->total_bytes = 0;
5604 	ring_container->total_packets = 0;
5605 
5606 	/* write updated itr to ring container */
5607 	ring_container->itr = itrval;
5608 }
5609 
5610 static void igb_set_itr(struct igb_q_vector *q_vector)
5611 {
5612 	struct igb_adapter *adapter = q_vector->adapter;
5613 	u32 new_itr = q_vector->itr_val;
5614 	u8 current_itr = 0;
5615 
5616 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
5617 	if (adapter->link_speed != SPEED_1000) {
5618 		current_itr = 0;
5619 		new_itr = IGB_4K_ITR;
5620 		goto set_itr_now;
5621 	}
5622 
5623 	igb_update_itr(q_vector, &q_vector->tx);
5624 	igb_update_itr(q_vector, &q_vector->rx);
5625 
5626 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5627 
5628 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
5629 	if (current_itr == lowest_latency &&
5630 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5631 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5632 		current_itr = low_latency;
5633 
5634 	switch (current_itr) {
	/* the byte/packet thresholds in igb_update_itr() assume these rates */
5636 	case lowest_latency:
5637 		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
5638 		break;
5639 	case low_latency:
5640 		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
5641 		break;
5642 	case bulk_latency:
5643 		new_itr = IGB_4K_ITR;  /* 4,000 ints/sec */
5644 		break;
5645 	default:
5646 		break;
5647 	}
5648 
5649 set_itr_now:
5650 	if (new_itr != q_vector->itr_val) {
5651 		/* this attempts to bias the interrupt rate towards Bulk
5652 		 * by adding intermediate steps when interrupt rate is
5653 		 * increasing
5654 		 */
5655 		new_itr = new_itr > q_vector->itr_val ?
5656 			  max((new_itr * q_vector->itr_val) /
5657 			  (new_itr + (q_vector->itr_val >> 2)),
5658 			  new_itr) : new_itr;
5659 		/* Don't write the value here; it resets the adapter's
5660 		 * internal timer, and causes us to delay far longer than
5661 		 * we should between interrupts.  Instead, we write the ITR
5662 		 * value at the beginning of the next interrupt so the timing
5663 		 * ends up being correct.
5664 		 */
5665 		q_vector->itr_val = new_itr;
5666 		q_vector->set_itr = 1;
5667 	}
5668 }
5669 
5670 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5671 			    struct igb_tx_buffer *first,
5672 			    u32 vlan_macip_lens, u32 type_tucmd,
5673 			    u32 mss_l4len_idx)
5674 {
5675 	struct e1000_adv_tx_context_desc *context_desc;
5676 	u16 i = tx_ring->next_to_use;
5677 	struct timespec64 ts;
5678 
5679 	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5680 
5681 	i++;
5682 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5683 
5684 	/* set bits to identify this as an advanced context descriptor */
5685 	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5686 
5687 	/* For 82575, context index must be unique per ring. */
5688 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5689 		mss_l4len_idx |= tx_ring->reg_idx << 4;
5690 
5691 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
5692 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
5693 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
5694 
5695 	/* We assume there is always a valid tx time available. Invalid times
5696 	 * should have been handled by the upper layers.
5697 	 */
5698 	if (tx_ring->launchtime_enable) {
5699 		ts = ktime_to_timespec64(first->skb->tstamp);
5700 		first->skb->tstamp = ktime_set(0, 0);
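		/* program the nanoseconds part of the launch time in the
		 * 32 ns units the context descriptor expects
		 */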
5701 		context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5702 	} else {
5703 		context_desc->seqnum_seed = 0;
5704 	}
5705 }
5706 
5707 static int igb_tso(struct igb_ring *tx_ring,
5708 		   struct igb_tx_buffer *first,
5709 		   u8 *hdr_len)
5710 {
5711 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5712 	struct sk_buff *skb = first->skb;
5713 	union {
5714 		struct iphdr *v4;
5715 		struct ipv6hdr *v6;
5716 		unsigned char *hdr;
5717 	} ip;
5718 	union {
5719 		struct tcphdr *tcp;
5720 		struct udphdr *udp;
5721 		unsigned char *hdr;
5722 	} l4;
5723 	u32 paylen, l4_offset;
5724 	int err;
5725 
5726 	if (skb->ip_summed != CHECKSUM_PARTIAL)
5727 		return 0;
5728 
5729 	if (!skb_is_gso(skb))
5730 		return 0;
5731 
5732 	err = skb_cow_head(skb, 0);
5733 	if (err < 0)
5734 		return err;
5735 
5736 	ip.hdr = skb_network_header(skb);
5737 	l4.hdr = skb_checksum_start(skb);
5738 
5739 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5740 	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
5741 		      E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
5742 
5743 	/* initialize outer IP header fields */
5744 	if (ip.v4->version == 4) {
5745 		unsigned char *csum_start = skb_checksum_start(skb);
5746 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5747 
5748 		/* IP header will have to cancel out any data that
5749 		 * is not a part of the outer IP header
5750 		 */
5751 		ip.v4->check = csum_fold(csum_partial(trans_start,
5752 						      csum_start - trans_start,
5753 						      0));
5754 		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5755 
5756 		ip.v4->tot_len = 0;
5757 		first->tx_flags |= IGB_TX_FLAGS_TSO |
5758 				   IGB_TX_FLAGS_CSUM |
5759 				   IGB_TX_FLAGS_IPV4;
5760 	} else {
5761 		ip.v6->payload_len = 0;
5762 		first->tx_flags |= IGB_TX_FLAGS_TSO |
5763 				   IGB_TX_FLAGS_CSUM;
5764 	}
5765 
5766 	/* determine offset of inner transport header */
5767 	l4_offset = l4.hdr - skb->data;
5768 
5769 	/* remove payload length from inner checksum */
5770 	paylen = skb->len - l4_offset;
5771 	if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
5772 		/* compute length of segmentation header */
5773 		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
5774 		csum_replace_by_diff(&l4.tcp->check,
5775 			(__force __wsum)htonl(paylen));
5776 	} else {
5777 		/* compute length of segmentation header */
5778 		*hdr_len = sizeof(*l4.udp) + l4_offset;
5779 		csum_replace_by_diff(&l4.udp->check,
5780 				     (__force __wsum)htonl(paylen));
5781 	}
5782 
5783 	/* update gso size and bytecount with header size */
5784 	first->gso_segs = skb_shinfo(skb)->gso_segs;
5785 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
5786 
5787 	/* MSS L4LEN IDX */
5788 	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5789 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5790 
5791 	/* VLAN MACLEN IPLEN */
5792 	vlan_macip_lens = l4.hdr - ip.hdr;
5793 	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5794 	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5795 
5796 	igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5797 			type_tucmd, mss_l4len_idx);
5798 
5799 	return 1;
5800 }
5801 
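/* Walk the IPv6 extension header chain looking for SCTP; this is an
 * SCTP checksum request only when the SCTP header sits exactly where
 * the checksum is expected to start.
 */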
5802 static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
5803 {
5804 	unsigned int offset = 0;
5805 
5806 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5807 
5808 	return offset == skb_checksum_start_offset(skb);
5809 }
5810 
5811 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5812 {
5813 	struct sk_buff *skb = first->skb;
5814 	u32 vlan_macip_lens = 0;
5815 	u32 type_tucmd = 0;
5816 
5817 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
5818 csum_failed:
5819 		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5820 		    !tx_ring->launchtime_enable)
5821 			return;
5822 		goto no_csum;
5823 	}
5824 
5825 	switch (skb->csum_offset) {
5826 	case offsetof(struct tcphdr, check):
5827 		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5828 		/* fall through */
5829 	case offsetof(struct udphdr, check):
5830 		break;
5831 	case offsetof(struct sctphdr, checksum):
5832 		/* validate that this is actually an SCTP request */
5833 		if (((first->protocol == htons(ETH_P_IP)) &&
5834 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
5835 		    ((first->protocol == htons(ETH_P_IPV6)) &&
5836 		     igb_ipv6_csum_is_sctp(skb))) {
5837 			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5838 			break;
5839 		}
5840 		/* fall through */
5841 	default:
5842 		skb_checksum_help(skb);
5843 		goto csum_failed;
5844 	}
5845 
5846 	/* update TX checksum flag */
5847 	first->tx_flags |= IGB_TX_FLAGS_CSUM;
5848 	vlan_macip_lens = skb_checksum_start_offset(skb) -
5849 			  skb_network_offset(skb);
5850 no_csum:
5851 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5852 	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5853 
5854 	igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
5855 }
5856 
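/* Translate a flag bit in _input into the corresponding _result bit
 * without branching: the (_flag <= _result) test is resolved at compile
 * time, so the expression reduces to a mask and a shift.  For example,
 * IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, E1000_ADVTXD_DCMD_VLE)
 * yields E1000_ADVTXD_DCMD_VLE when the VLAN flag is set and 0 otherwise.
 */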
5857 #define IGB_SET_FLAG(_input, _flag, _result) \
5858 	((_flag <= _result) ? \
5859 	 ((u32)(_input & _flag) * (_result / _flag)) : \
5860 	 ((u32)(_input & _flag) / (_flag / _result)))
5861 
5862 static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
5863 {
5864 	/* set type for advanced descriptor with frame checksum insertion */
5865 	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5866 		       E1000_ADVTXD_DCMD_DEXT |
5867 		       E1000_ADVTXD_DCMD_IFCS;
5868 
5869 	/* set HW vlan bit if vlan is present */
5870 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5871 				 (E1000_ADVTXD_DCMD_VLE));
5872 
5873 	/* set segmentation bits for TSO */
5874 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5875 				 (E1000_ADVTXD_DCMD_TSE));
5876 
5877 	/* set timestamp bit if present */
5878 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5879 				 (E1000_ADVTXD_MAC_TSTAMP));
5880 
5881 	/* insert frame checksum */
5882 	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
5883 
5884 	return cmd_type;
5885 }
5886 
5887 static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5888 				 union e1000_adv_tx_desc *tx_desc,
5889 				 u32 tx_flags, unsigned int paylen)
5890 {
5891 	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5892 
5893 	/* 82575 requires a unique index per ring */
5894 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5895 		olinfo_status |= tx_ring->reg_idx << 4;
5896 
5897 	/* insert L4 checksum */
5898 	olinfo_status |= IGB_SET_FLAG(tx_flags,
5899 				      IGB_TX_FLAGS_CSUM,
5900 				      (E1000_TXD_POPTS_TXSM << 8));
5901 
5902 	/* insert IPv4 checksum */
5903 	olinfo_status |= IGB_SET_FLAG(tx_flags,
5904 				      IGB_TX_FLAGS_IPV4,
5905 				      (E1000_TXD_POPTS_IXSM << 8));
5906 
5907 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5908 }
5909 
5910 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5911 {
5912 	struct net_device *netdev = tx_ring->netdev;
5913 
5914 	netif_stop_subqueue(netdev, tx_ring->queue_index);
5915 
5916 	/* Herbert's original patch had:
5917 	 *  smp_mb__after_netif_stop_queue();
5918 	 * but since that doesn't exist yet, just open code it.
5919 	 */
5920 	smp_mb();
5921 
	/* We need to check again in case another CPU has just
	 * made room available.
	 */
5925 	if (igb_desc_unused(tx_ring) < size)
5926 		return -EBUSY;
5927 
5928 	/* A reprieve! */
5929 	netif_wake_subqueue(netdev, tx_ring->queue_index);
5930 
5931 	u64_stats_update_begin(&tx_ring->tx_syncp2);
5932 	tx_ring->tx_stats.restart_queue2++;
5933 	u64_stats_update_end(&tx_ring->tx_syncp2);
5934 
5935 	return 0;
5936 }
5937 
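/* Fast-path wrapper: the queue is stopped (and then re-checked under
 * the memory barrier in __igb_maybe_stop_tx()) only when the ring
 * actually looks full.
 */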
5938 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5939 {
5940 	if (igb_desc_unused(tx_ring) >= size)
5941 		return 0;
5942 	return __igb_maybe_stop_tx(tx_ring, size);
5943 }
5944 
5945 static int igb_tx_map(struct igb_ring *tx_ring,
5946 		      struct igb_tx_buffer *first,
5947 		      const u8 hdr_len)
5948 {
5949 	struct sk_buff *skb = first->skb;
5950 	struct igb_tx_buffer *tx_buffer;
5951 	union e1000_adv_tx_desc *tx_desc;
5952 	skb_frag_t *frag;
5953 	dma_addr_t dma;
5954 	unsigned int data_len, size;
5955 	u32 tx_flags = first->tx_flags;
5956 	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
5957 	u16 i = tx_ring->next_to_use;
5958 
5959 	tx_desc = IGB_TX_DESC(tx_ring, i);
5960 
5961 	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
5962 
5963 	size = skb_headlen(skb);
5964 	data_len = skb->data_len;
5965 
5966 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
5967 
5968 	tx_buffer = first;
5969 
5970 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
5971 		if (dma_mapping_error(tx_ring->dev, dma))
5972 			goto dma_error;
5973 
5974 		/* record length, and DMA address */
5975 		dma_unmap_len_set(tx_buffer, len, size);
5976 		dma_unmap_addr_set(tx_buffer, dma, dma);
5977 
5978 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
5979 
5980 		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
5981 			tx_desc->read.cmd_type_len =
5982 				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
5983 
5984 			i++;
5985 			tx_desc++;
5986 			if (i == tx_ring->count) {
5987 				tx_desc = IGB_TX_DESC(tx_ring, 0);
5988 				i = 0;
5989 			}
5990 			tx_desc->read.olinfo_status = 0;
5991 
5992 			dma += IGB_MAX_DATA_PER_TXD;
5993 			size -= IGB_MAX_DATA_PER_TXD;
5994 
5995 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
5996 		}
5997 
5998 		if (likely(!data_len))
5999 			break;
6000 
6001 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6002 
6003 		i++;
6004 		tx_desc++;
6005 		if (i == tx_ring->count) {
6006 			tx_desc = IGB_TX_DESC(tx_ring, 0);
6007 			i = 0;
6008 		}
6009 		tx_desc->read.olinfo_status = 0;
6010 
6011 		size = skb_frag_size(frag);
6012 		data_len -= size;
6013 
6014 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6015 				       size, DMA_TO_DEVICE);
6016 
6017 		tx_buffer = &tx_ring->tx_buffer_info[i];
6018 	}
6019 
6020 	/* write last descriptor with RS and EOP bits */
6021 	cmd_type |= size | IGB_TXD_DCMD;
6022 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6023 
6024 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6025 
6026 	/* set the timestamp */
6027 	first->time_stamp = jiffies;
6028 
6029 	skb_tx_timestamp(skb);
6030 
6031 	/* Force memory writes to complete before letting h/w know there
6032 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
6033 	 * memory model archs, such as IA-64).
6034 	 *
6035 	 * We also need this memory barrier to make certain all of the
6036 	 * status bits have been updated before next_to_watch is written.
6037 	 */
6038 	dma_wmb();
6039 
6040 	/* set next_to_watch value indicating a packet is present */
6041 	first->next_to_watch = tx_desc;
6042 
6043 	i++;
6044 	if (i == tx_ring->count)
6045 		i = 0;
6046 
6047 	tx_ring->next_to_use = i;
6048 
6049 	/* Make sure there is space in the ring for the next send. */
6050 	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6051 
6052 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6053 		writel(i, tx_ring->tail);
6054 	}
6055 	return 0;
6056 
6057 dma_error:
6058 	dev_err(tx_ring->dev, "TX DMA map failed\n");
6059 	tx_buffer = &tx_ring->tx_buffer_info[i];
6060 
6061 	/* clear dma mappings for failed tx_buffer_info map */
6062 	while (tx_buffer != first) {
6063 		if (dma_unmap_len(tx_buffer, len))
6064 			dma_unmap_page(tx_ring->dev,
6065 				       dma_unmap_addr(tx_buffer, dma),
6066 				       dma_unmap_len(tx_buffer, len),
6067 				       DMA_TO_DEVICE);
6068 		dma_unmap_len_set(tx_buffer, len, 0);
6069 
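		/* step backwards through the ring, wrapping past index 0 */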
6070 		if (i-- == 0)
6071 			i += tx_ring->count;
6072 		tx_buffer = &tx_ring->tx_buffer_info[i];
6073 	}
6074 
6075 	if (dma_unmap_len(tx_buffer, len))
6076 		dma_unmap_single(tx_ring->dev,
6077 				 dma_unmap_addr(tx_buffer, dma),
6078 				 dma_unmap_len(tx_buffer, len),
6079 				 DMA_TO_DEVICE);
6080 	dma_unmap_len_set(tx_buffer, len, 0);
6081 
6082 	dev_kfree_skb_any(tx_buffer->skb);
6083 	tx_buffer->skb = NULL;
6084 
6085 	tx_ring->next_to_use = i;
6086 
6087 	return -1;
6088 }
6089 
6090 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6091 				struct igb_ring *tx_ring)
6092 {
6093 	struct igb_tx_buffer *first;
6094 	int tso;
6095 	u32 tx_flags = 0;
6096 	unsigned short f;
6097 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
6098 	__be16 protocol = vlan_get_protocol(skb);
6099 	u8 hdr_len = 0;
6100 
6101 	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
6102 	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
6103 	 *       + 2 desc gap to keep tail from touching head,
6104 	 *       + 1 desc for context descriptor,
6105 	 * otherwise try next time
6106 	 */
6107 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6108 		count += TXD_USE_COUNT(skb_frag_size(
6109 						&skb_shinfo(skb)->frags[f]));
6110 
6111 	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6112 		/* this is a hard error */
6113 		return NETDEV_TX_BUSY;
6114 	}
6115 
6116 	/* record the location of the first descriptor for this packet */
6117 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6118 	first->skb = skb;
6119 	first->bytecount = skb->len;
6120 	first->gso_segs = 1;
6121 
6122 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6123 		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6124 
6125 		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6126 		    !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6127 					   &adapter->state)) {
6128 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6129 			tx_flags |= IGB_TX_FLAGS_TSTAMP;
6130 
6131 			adapter->ptp_tx_skb = skb_get(skb);
6132 			adapter->ptp_tx_start = jiffies;
6133 			if (adapter->hw.mac.type == e1000_82576)
6134 				schedule_work(&adapter->ptp_tx_work);
6135 		} else {
6136 			adapter->tx_hwtstamp_skipped++;
6137 		}
6138 	}
6139 
6140 	if (skb_vlan_tag_present(skb)) {
6141 		tx_flags |= IGB_TX_FLAGS_VLAN;
6142 		tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6143 	}
6144 
6145 	/* record initial flags and protocol */
6146 	first->tx_flags = tx_flags;
6147 	first->protocol = protocol;
6148 
6149 	tso = igb_tso(tx_ring, first, &hdr_len);
6150 	if (tso < 0)
6151 		goto out_drop;
6152 	else if (!tso)
6153 		igb_tx_csum(tx_ring, first);
6154 
6155 	if (igb_tx_map(tx_ring, first, hdr_len))
6156 		goto cleanup_tx_tstamp;
6157 
6158 	return NETDEV_TX_OK;
6159 
6160 out_drop:
6161 	dev_kfree_skb_any(first->skb);
6162 	first->skb = NULL;
6163 cleanup_tx_tstamp:
6164 	if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6165 		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6166 
6167 		dev_kfree_skb_any(adapter->ptp_tx_skb);
6168 		adapter->ptp_tx_skb = NULL;
6169 		if (adapter->hw.mac.type == e1000_82576)
6170 			cancel_work_sync(&adapter->ptp_tx_work);
6171 		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6172 	}
6173 
6174 	return NETDEV_TX_OK;
6175 }
6176 
6177 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6178 						    struct sk_buff *skb)
6179 {
6180 	unsigned int r_idx = skb->queue_mapping;
6181 
6182 	if (r_idx >= adapter->num_tx_queues)
6183 		r_idx = r_idx % adapter->num_tx_queues;
6184 
6185 	return adapter->tx_ring[r_idx];
6186 }
6187 
6188 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6189 				  struct net_device *netdev)
6190 {
6191 	struct igb_adapter *adapter = netdev_priv(netdev);
6192 
	/* The minimum packet size with TCTL.PSP set is 17 bytes, so pad the skb
6194 	 * in order to meet this minimum size requirement.
6195 	 */
6196 	if (skb_put_padto(skb, 17))
6197 		return NETDEV_TX_OK;
6198 
6199 	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6200 }
6201 
/**
 *  igb_tx_timeout - Respond to a Tx Hang
 *  @netdev: network interface device structure
 *  @txqueue: number of the Tx queue that hung (unused)
 **/
6206 static void igb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6207 {
6208 	struct igb_adapter *adapter = netdev_priv(netdev);
6209 	struct e1000_hw *hw = &adapter->hw;
6210 
6211 	/* Do the reset outside of interrupt context */
6212 	adapter->tx_timeout_count++;
6213 
6214 	if (hw->mac.type >= e1000_82580)
6215 		hw->dev_spec._82575.global_device_reset = true;
6216 
6217 	schedule_work(&adapter->reset_task);
6218 	wr32(E1000_EICS,
6219 	     (adapter->eims_enable_mask & ~adapter->eims_other));
6220 }
6221 
6222 static void igb_reset_task(struct work_struct *work)
6223 {
	struct igb_adapter *adapter;

	adapter = container_of(work, struct igb_adapter, reset_task);
6226 
6227 	igb_dump(adapter);
6228 	netdev_err(adapter->netdev, "Reset adapter\n");
6229 	igb_reinit_locked(adapter);
6230 }
6231 
6232 /**
6233  *  igb_get_stats64 - Get System Network Statistics
6234  *  @netdev: network interface device structure
6235  *  @stats: rtnl_link_stats64 pointer
6236  **/
6237 static void igb_get_stats64(struct net_device *netdev,
6238 			    struct rtnl_link_stats64 *stats)
6239 {
6240 	struct igb_adapter *adapter = netdev_priv(netdev);
6241 
6242 	spin_lock(&adapter->stats64_lock);
6243 	igb_update_stats(adapter);
6244 	memcpy(stats, &adapter->stats64, sizeof(*stats));
6245 	spin_unlock(&adapter->stats64_lock);
6246 }
6247 
6248 /**
6249  *  igb_change_mtu - Change the Maximum Transfer Unit
6250  *  @netdev: network interface device structure
6251  *  @new_mtu: new value for maximum frame size
6252  *
6253  *  Returns 0 on success, negative on failure
6254  **/
6255 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6256 {
6257 	struct igb_adapter *adapter = netdev_priv(netdev);
6258 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6259 
6260 	/* adjust max frame to be at least the size of a standard frame */
6261 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6262 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6263 
6264 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6265 		usleep_range(1000, 2000);
6266 
6267 	/* igb_down has a dependency on max_frame_size */
6268 	adapter->max_frame_size = max_frame;
6269 
6270 	if (netif_running(netdev))
6271 		igb_down(adapter);
6272 
6273 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
6274 		   netdev->mtu, new_mtu);
6275 	netdev->mtu = new_mtu;
6276 
6277 	if (netif_running(netdev))
6278 		igb_up(adapter);
6279 	else
6280 		igb_reset(adapter);
6281 
6282 	clear_bit(__IGB_RESETTING, &adapter->state);
6283 
6284 	return 0;
6285 }
6286 
6287 /**
6288  *  igb_update_stats - Update the board statistics counters
6289  *  @adapter: board private structure
6290  **/
6291 void igb_update_stats(struct igb_adapter *adapter)
6292 {
6293 	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6294 	struct e1000_hw *hw = &adapter->hw;
6295 	struct pci_dev *pdev = adapter->pdev;
6296 	u32 reg, mpc;
6297 	int i;
6298 	u64 bytes, packets;
6299 	unsigned int start;
6300 	u64 _bytes, _packets;
6301 
6302 	/* Prevent stats update while adapter is being reset, or if the pci
6303 	 * connection is down.
6304 	 */
6305 	if (adapter->link_speed == 0)
6306 		return;
6307 	if (pci_channel_offline(pdev))
6308 		return;
6309 
6310 	bytes = 0;
6311 	packets = 0;
6312 
6313 	rcu_read_lock();
6314 	for (i = 0; i < adapter->num_rx_queues; i++) {
6315 		struct igb_ring *ring = adapter->rx_ring[i];
6316 		u32 rqdpc = rd32(E1000_RQDPC(i));
6317 		if (hw->mac.type >= e1000_i210)
6318 			wr32(E1000_RQDPC(i), 0);
6319 
6320 		if (rqdpc) {
6321 			ring->rx_stats.drops += rqdpc;
6322 			net_stats->rx_fifo_errors += rqdpc;
6323 		}
6324 
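		/* u64_stats: retry the snapshot if a writer updated the
		 * counters while we were reading them
		 */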
6325 		do {
6326 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6327 			_bytes = ring->rx_stats.bytes;
6328 			_packets = ring->rx_stats.packets;
6329 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6330 		bytes += _bytes;
6331 		packets += _packets;
6332 	}
6333 
6334 	net_stats->rx_bytes = bytes;
6335 	net_stats->rx_packets = packets;
6336 
6337 	bytes = 0;
6338 	packets = 0;
6339 	for (i = 0; i < adapter->num_tx_queues; i++) {
6340 		struct igb_ring *ring = adapter->tx_ring[i];
6341 		do {
6342 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6343 			_bytes = ring->tx_stats.bytes;
6344 			_packets = ring->tx_stats.packets;
6345 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6346 		bytes += _bytes;
6347 		packets += _packets;
6348 	}
6349 	net_stats->tx_bytes = bytes;
6350 	net_stats->tx_packets = packets;
6351 	rcu_read_unlock();
6352 
6353 	/* read stats registers */
6354 	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6355 	adapter->stats.gprc += rd32(E1000_GPRC);
6356 	adapter->stats.gorc += rd32(E1000_GORCL);
6357 	rd32(E1000_GORCH); /* clear GORCL */
6358 	adapter->stats.bprc += rd32(E1000_BPRC);
6359 	adapter->stats.mprc += rd32(E1000_MPRC);
6360 	adapter->stats.roc += rd32(E1000_ROC);
6361 
6362 	adapter->stats.prc64 += rd32(E1000_PRC64);
6363 	adapter->stats.prc127 += rd32(E1000_PRC127);
6364 	adapter->stats.prc255 += rd32(E1000_PRC255);
6365 	adapter->stats.prc511 += rd32(E1000_PRC511);
6366 	adapter->stats.prc1023 += rd32(E1000_PRC1023);
6367 	adapter->stats.prc1522 += rd32(E1000_PRC1522);
6368 	adapter->stats.symerrs += rd32(E1000_SYMERRS);
6369 	adapter->stats.sec += rd32(E1000_SEC);
6370 
6371 	mpc = rd32(E1000_MPC);
6372 	adapter->stats.mpc += mpc;
6373 	net_stats->rx_fifo_errors += mpc;
6374 	adapter->stats.scc += rd32(E1000_SCC);
6375 	adapter->stats.ecol += rd32(E1000_ECOL);
6376 	adapter->stats.mcc += rd32(E1000_MCC);
6377 	adapter->stats.latecol += rd32(E1000_LATECOL);
6378 	adapter->stats.dc += rd32(E1000_DC);
6379 	adapter->stats.rlec += rd32(E1000_RLEC);
6380 	adapter->stats.xonrxc += rd32(E1000_XONRXC);
6381 	adapter->stats.xontxc += rd32(E1000_XONTXC);
6382 	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6383 	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6384 	adapter->stats.fcruc += rd32(E1000_FCRUC);
6385 	adapter->stats.gptc += rd32(E1000_GPTC);
6386 	adapter->stats.gotc += rd32(E1000_GOTCL);
6387 	rd32(E1000_GOTCH); /* clear GOTCL */
6388 	adapter->stats.rnbc += rd32(E1000_RNBC);
6389 	adapter->stats.ruc += rd32(E1000_RUC);
6390 	adapter->stats.rfc += rd32(E1000_RFC);
6391 	adapter->stats.rjc += rd32(E1000_RJC);
6392 	adapter->stats.tor += rd32(E1000_TORH);
6393 	adapter->stats.tot += rd32(E1000_TOTH);
6394 	adapter->stats.tpr += rd32(E1000_TPR);
6395 
6396 	adapter->stats.ptc64 += rd32(E1000_PTC64);
6397 	adapter->stats.ptc127 += rd32(E1000_PTC127);
6398 	adapter->stats.ptc255 += rd32(E1000_PTC255);
6399 	adapter->stats.ptc511 += rd32(E1000_PTC511);
6400 	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6401 	adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6402 
6403 	adapter->stats.mptc += rd32(E1000_MPTC);
6404 	adapter->stats.bptc += rd32(E1000_BPTC);
6405 
6406 	adapter->stats.tpt += rd32(E1000_TPT);
6407 	adapter->stats.colc += rd32(E1000_COLC);
6408 
6409 	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6410 	/* read internal phy specific stats */
6411 	reg = rd32(E1000_CTRL_EXT);
6412 	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6413 		adapter->stats.rxerrc += rd32(E1000_RXERRC);
6414 
6415 		/* this stat has invalid values on i210/i211 */
6416 		if ((hw->mac.type != e1000_i210) &&
6417 		    (hw->mac.type != e1000_i211))
6418 			adapter->stats.tncrs += rd32(E1000_TNCRS);
6419 	}
6420 
6421 	adapter->stats.tsctc += rd32(E1000_TSCTC);
6422 	adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6423 
6424 	adapter->stats.iac += rd32(E1000_IAC);
6425 	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6426 	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6427 	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6428 	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6429 	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6430 	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6431 	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6432 	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6433 
6434 	/* Fill out the OS statistics structure */
6435 	net_stats->multicast = adapter->stats.mprc;
6436 	net_stats->collisions = adapter->stats.colc;
6437 
6438 	/* Rx Errors */
6439 
6440 	/* RLEC on some newer hardware can be incorrect so build
6441 	 * our own version based on RUC and ROC
6442 	 */
6443 	net_stats->rx_errors = adapter->stats.rxerrc +
6444 		adapter->stats.crcerrs + adapter->stats.algnerrc +
6445 		adapter->stats.ruc + adapter->stats.roc +
6446 		adapter->stats.cexterr;
6447 	net_stats->rx_length_errors = adapter->stats.ruc +
6448 				      adapter->stats.roc;
6449 	net_stats->rx_crc_errors = adapter->stats.crcerrs;
6450 	net_stats->rx_frame_errors = adapter->stats.algnerrc;
6451 	net_stats->rx_missed_errors = adapter->stats.mpc;
6452 
6453 	/* Tx Errors */
6454 	net_stats->tx_errors = adapter->stats.ecol +
6455 			       adapter->stats.latecol;
6456 	net_stats->tx_aborted_errors = adapter->stats.ecol;
6457 	net_stats->tx_window_errors = adapter->stats.latecol;
6458 	net_stats->tx_carrier_errors = adapter->stats.tncrs;
6459 
6460 	/* Tx Dropped needs to be maintained elsewhere */
6461 
6462 	/* Management Stats */
6463 	adapter->stats.mgptc += rd32(E1000_MGTPTC);
6464 	adapter->stats.mgprc += rd32(E1000_MGTPRC);
6465 	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6466 
6467 	/* OS2BMC Stats */
6468 	reg = rd32(E1000_MANC);
6469 	if (reg & E1000_MANC_EN_BMC2OS) {
6470 		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6471 		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6472 		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6473 		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6474 	}
6475 }
6476 
6477 static void igb_tsync_interrupt(struct igb_adapter *adapter)
6478 {
6479 	struct e1000_hw *hw = &adapter->hw;
6480 	struct ptp_clock_event event;
6481 	struct timespec64 ts;
6482 	u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6483 
6484 	if (tsicr & TSINTR_SYS_WRAP) {
6485 		event.type = PTP_CLOCK_PPS;
6486 		if (adapter->ptp_caps.pps)
6487 			ptp_clock_event(adapter->ptp_clock, &event);
6488 		ack |= TSINTR_SYS_WRAP;
6489 	}
6490 
6491 	if (tsicr & E1000_TSICR_TXTS) {
6492 		/* retrieve hardware timestamp */
6493 		schedule_work(&adapter->ptp_tx_work);
6494 		ack |= E1000_TSICR_TXTS;
6495 	}
6496 
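	/* target time 0 reached: advance it by one period and re-arm so
	 * the periodic output keeps firing
	 */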
6497 	if (tsicr & TSINTR_TT0) {
6498 		spin_lock(&adapter->tmreg_lock);
6499 		ts = timespec64_add(adapter->perout[0].start,
6500 				    adapter->perout[0].period);
6501 		/* u32 conversion of tv_sec is safe until y2106 */
6502 		wr32(E1000_TRGTTIML0, ts.tv_nsec);
6503 		wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6504 		tsauxc = rd32(E1000_TSAUXC);
6505 		tsauxc |= TSAUXC_EN_TT0;
6506 		wr32(E1000_TSAUXC, tsauxc);
6507 		adapter->perout[0].start = ts;
6508 		spin_unlock(&adapter->tmreg_lock);
6509 		ack |= TSINTR_TT0;
6510 	}
6511 
6512 	if (tsicr & TSINTR_TT1) {
6513 		spin_lock(&adapter->tmreg_lock);
6514 		ts = timespec64_add(adapter->perout[1].start,
6515 				    adapter->perout[1].period);
6516 		wr32(E1000_TRGTTIML1, ts.tv_nsec);
6517 		wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6518 		tsauxc = rd32(E1000_TSAUXC);
6519 		tsauxc |= TSAUXC_EN_TT1;
6520 		wr32(E1000_TSAUXC, tsauxc);
6521 		adapter->perout[1].start = ts;
6522 		spin_unlock(&adapter->tmreg_lock);
6523 		ack |= TSINTR_TT1;
6524 	}
6525 
6526 	if (tsicr & TSINTR_AUTT0) {
6527 		nsec = rd32(E1000_AUXSTMPL0);
6528 		sec  = rd32(E1000_AUXSTMPH0);
6529 		event.type = PTP_CLOCK_EXTTS;
6530 		event.index = 0;
6531 		event.timestamp = sec * 1000000000ULL + nsec;
6532 		ptp_clock_event(adapter->ptp_clock, &event);
6533 		ack |= TSINTR_AUTT0;
6534 	}
6535 
6536 	if (tsicr & TSINTR_AUTT1) {
6537 		nsec = rd32(E1000_AUXSTMPL1);
6538 		sec  = rd32(E1000_AUXSTMPH1);
6539 		event.type = PTP_CLOCK_EXTTS;
6540 		event.index = 1;
6541 		event.timestamp = sec * 1000000000ULL + nsec;
6542 		ptp_clock_event(adapter->ptp_clock, &event);
6543 		ack |= TSINTR_AUTT1;
6544 	}
6545 
6546 	/* acknowledge the interrupts */
6547 	wr32(E1000_TSICR, ack);
6548 }
6549 
6550 static irqreturn_t igb_msix_other(int irq, void *data)
6551 {
6552 	struct igb_adapter *adapter = data;
6553 	struct e1000_hw *hw = &adapter->hw;
6554 	u32 icr = rd32(E1000_ICR);
6555 	/* reading ICR causes bit 31 of EICR to be cleared */
6556 
6557 	if (icr & E1000_ICR_DRSTA)
6558 		schedule_work(&adapter->reset_task);
6559 
6560 	if (icr & E1000_ICR_DOUTSYNC) {
6561 		/* HW is reporting DMA is out of sync */
6562 		adapter->stats.doosync++;
6563 		/* The DMA Out of Sync is also indication of a spoof event
6564 		 * in IOV mode. Check the Wrong VM Behavior register to
6565 		 * see if it is really a spoof event.
6566 		 */
6567 		igb_check_wvbr(adapter);
6568 	}
6569 
6570 	/* Check for a mailbox event */
6571 	if (icr & E1000_ICR_VMMB)
6572 		igb_msg_task(adapter);
6573 
6574 	if (icr & E1000_ICR_LSC) {
6575 		hw->mac.get_link_status = 1;
6576 		/* guard against interrupt when we're going down */
6577 		if (!test_bit(__IGB_DOWN, &adapter->state))
6578 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
6579 	}
6580 
6581 	if (icr & E1000_ICR_TS)
6582 		igb_tsync_interrupt(adapter);
6583 
6584 	wr32(E1000_EIMS, adapter->eims_other);
6585 
6586 	return IRQ_HANDLED;
6587 }
6588 
6589 static void igb_write_itr(struct igb_q_vector *q_vector)
6590 {
6591 	struct igb_adapter *adapter = q_vector->adapter;
6592 	u32 itr_val = q_vector->itr_val & 0x7FFC;
6593 
6594 	if (!q_vector->set_itr)
6595 		return;
6596 
6597 	if (!itr_val)
6598 		itr_val = 0x4;
6599 
6600 	if (adapter->hw.mac.type == e1000_82575)
6601 		itr_val |= itr_val << 16;
6602 	else
6603 		itr_val |= E1000_EITR_CNT_IGNR;
6604 
6605 	writel(itr_val, q_vector->itr_register);
6606 	q_vector->set_itr = 0;
6607 }
6608 
6609 static irqreturn_t igb_msix_ring(int irq, void *data)
6610 {
6611 	struct igb_q_vector *q_vector = data;
6612 
6613 	/* Write the ITR value calculated from the previous interrupt. */
6614 	igb_write_itr(q_vector);
6615 
6616 	napi_schedule(&q_vector->napi);
6617 
6618 	return IRQ_HANDLED;
6619 }
6620 
6621 #ifdef CONFIG_IGB_DCA
6622 static void igb_update_tx_dca(struct igb_adapter *adapter,
6623 			      struct igb_ring *tx_ring,
6624 			      int cpu)
6625 {
6626 	struct e1000_hw *hw = &adapter->hw;
6627 	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6628 
6629 	if (hw->mac.type != e1000_82575)
6630 		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6631 
6632 	/* We can enable relaxed ordering for reads, but not writes when
6633 	 * DCA is enabled.  This is due to a known issue in some chipsets
6634 	 * which will cause the DCA tag to be cleared.
6635 	 */
6636 	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6637 		  E1000_DCA_TXCTRL_DATA_RRO_EN |
6638 		  E1000_DCA_TXCTRL_DESC_DCA_EN;
6639 
6640 	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6641 }
6642 
6643 static void igb_update_rx_dca(struct igb_adapter *adapter,
6644 			      struct igb_ring *rx_ring,
6645 			      int cpu)
6646 {
6647 	struct e1000_hw *hw = &adapter->hw;
6648 	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6649 
6650 	if (hw->mac.type != e1000_82575)
6651 		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6652 
6653 	/* We can enable relaxed ordering for reads, but not writes when
6654 	 * DCA is enabled.  This is due to a known issue in some chipsets
6655 	 * which will cause the DCA tag to be cleared.
6656 	 */
6657 	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6658 		  E1000_DCA_RXCTRL_DESC_DCA_EN;
6659 
6660 	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6661 }
6662 
6663 static void igb_update_dca(struct igb_q_vector *q_vector)
6664 {
6665 	struct igb_adapter *adapter = q_vector->adapter;
6666 	int cpu = get_cpu();
6667 
6668 	if (q_vector->cpu == cpu)
6669 		goto out_no_update;
6670 
6671 	if (q_vector->tx.ring)
6672 		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6673 
6674 	if (q_vector->rx.ring)
6675 		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6676 
6677 	q_vector->cpu = cpu;
6678 out_no_update:
6679 	put_cpu();
6680 }
6681 
6682 static void igb_setup_dca(struct igb_adapter *adapter)
6683 {
6684 	struct e1000_hw *hw = &adapter->hw;
6685 	int i;
6686 
6687 	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6688 		return;
6689 
6690 	/* Always use CB2 mode, difference is masked in the CB driver. */
6691 	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6692 
6693 	for (i = 0; i < adapter->num_q_vectors; i++) {
6694 		adapter->q_vector[i]->cpu = -1;
6695 		igb_update_dca(adapter->q_vector[i]);
6696 	}
6697 }
6698 
6699 static int __igb_notify_dca(struct device *dev, void *data)
6700 {
6701 	struct net_device *netdev = dev_get_drvdata(dev);
6702 	struct igb_adapter *adapter = netdev_priv(netdev);
6703 	struct pci_dev *pdev = adapter->pdev;
6704 	struct e1000_hw *hw = &adapter->hw;
6705 	unsigned long event = *(unsigned long *)data;
6706 
6707 	switch (event) {
6708 	case DCA_PROVIDER_ADD:
6709 		/* if already enabled, don't do it again */
6710 		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6711 			break;
6712 		if (dca_add_requester(dev) == 0) {
6713 			adapter->flags |= IGB_FLAG_DCA_ENABLED;
6714 			dev_info(&pdev->dev, "DCA enabled\n");
6715 			igb_setup_dca(adapter);
6716 			break;
6717 		}
6718 		/* Fall Through - since DCA is disabled. */
6719 	case DCA_PROVIDER_REMOVE:
6720 		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
6721 			/* without this a class_device is left
6722 			 * hanging around in the sysfs model
6723 			 */
6724 			dca_remove_requester(dev);
6725 			dev_info(&pdev->dev, "DCA disabled\n");
6726 			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
6727 			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
6728 		}
6729 		break;
6730 	}
6731 
6732 	return 0;
6733 }
6734 
6735 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
6736 			  void *p)
6737 {
6738 	int ret_val;
6739 
6740 	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
6741 					 __igb_notify_dca);
6742 
6743 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6744 }
6745 #endif /* CONFIG_IGB_DCA */
6746 
6747 #ifdef CONFIG_PCI_IOV
6748 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6749 {
6750 	unsigned char mac_addr[ETH_ALEN];
6751 
6752 	eth_zero_addr(mac_addr);
6753 	igb_set_vf_mac(adapter, vf, mac_addr);
6754 
6755 	/* By default spoof check is enabled for all VFs */
6756 	adapter->vf_data[vf].spoofchk_enabled = true;
6757 
6758 	/* By default VFs are not trusted */
6759 	adapter->vf_data[vf].trusted = false;
6760 
6761 	return 0;
6762 }
6763 
6764 #endif
6765 static void igb_ping_all_vfs(struct igb_adapter *adapter)
6766 {
6767 	struct e1000_hw *hw = &adapter->hw;
6768 	u32 ping;
6769 	int i;
6770 
	for (i = 0; i < adapter->vfs_allocated_count; i++) {
6772 		ping = E1000_PF_CONTROL_MSG;
6773 		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
6774 			ping |= E1000_VT_MSGTYPE_CTS;
6775 		igb_write_mbx(hw, &ping, 1, i);
6776 	}
6777 }
6778 
6779 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6780 {
6781 	struct e1000_hw *hw = &adapter->hw;
6782 	u32 vmolr = rd32(E1000_VMOLR(vf));
6783 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6784 
6785 	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
6786 			    IGB_VF_FLAG_MULTI_PROMISC);
6787 	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6788 
6789 	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
6790 		vmolr |= E1000_VMOLR_MPME;
6791 		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
6792 		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
6793 	} else {
6794 		/* if we have hashes and we are clearing a multicast promisc
6795 		 * flag we need to write the hashes to the MTA as this step
6796 		 * was previously skipped
6797 		 */
6798 		if (vf_data->num_vf_mc_hashes > 30) {
6799 			vmolr |= E1000_VMOLR_MPME;
6800 		} else if (vf_data->num_vf_mc_hashes) {
6801 			int j;
6802 
6803 			vmolr |= E1000_VMOLR_ROMPE;
6804 			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6805 				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6806 		}
6807 	}
6808 
6809 	wr32(E1000_VMOLR(vf), vmolr);
6810 
6811 	/* there are flags left unprocessed, likely not supported */
6812 	if (*msgbuf & E1000_VT_MSGINFO_MASK)
6813 		return -EINVAL;
6814 
6815 	return 0;
6816 }
6817 
6818 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
6819 				  u32 *msgbuf, u32 vf)
6820 {
6821 	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6822 	u16 *hash_list = (u16 *)&msgbuf[1];
6823 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6824 	int i;
6825 
	/* salt away the number of multicast addresses assigned
	 * to this VF for later use; it is restored when the PF
	 * multicast list changes
	 */
6830 	vf_data->num_vf_mc_hashes = n;
6831 
6832 	/* only up to 30 hash values supported */
6833 	if (n > 30)
6834 		n = 30;
6835 
6836 	/* store the hashes for later use */
6837 	for (i = 0; i < n; i++)
6838 		vf_data->vf_mc_hashes[i] = hash_list[i];
6839 
6840 	/* Flush and reset the mta with the new values */
6841 	igb_set_rx_mode(adapter->netdev);
6842 
6843 	return 0;
6844 }
6845 
6846 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6847 {
6848 	struct e1000_hw *hw = &adapter->hw;
6849 	struct vf_data_storage *vf_data;
6850 	int i, j;
6851 
6852 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
6853 		u32 vmolr = rd32(E1000_VMOLR(i));
6854 
6855 		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6856 
6857 		vf_data = &adapter->vf_data[i];
6858 
6859 		if ((vf_data->num_vf_mc_hashes > 30) ||
6860 		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6861 			vmolr |= E1000_VMOLR_MPME;
6862 		} else if (vf_data->num_vf_mc_hashes) {
6863 			vmolr |= E1000_VMOLR_ROMPE;
6864 			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6865 				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6866 		}
6867 		wr32(E1000_VMOLR(i), vmolr);
6868 	}
6869 }
6870 
6871 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6872 {
6873 	struct e1000_hw *hw = &adapter->hw;
6874 	u32 pool_mask, vlvf_mask, i;
6875 
6876 	/* create mask for VF and other pools */
6877 	pool_mask = E1000_VLVF_POOLSEL_MASK;
6878 	vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
6879 
6880 	/* drop PF from pool bits */
6881 	pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6882 			     adapter->vfs_allocated_count);
6883 
6884 	/* Find the vlan filter for this id */
6885 	for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6886 		u32 vlvf = rd32(E1000_VLVF(i));
6887 		u32 vfta_mask, vid, vfta;
6888 
6889 		/* remove the vf from the pool */
6890 		if (!(vlvf & vlvf_mask))
6891 			continue;
6892 
6893 		/* clear out bit from VLVF */
6894 		vlvf ^= vlvf_mask;
6895 
6896 		/* if other pools are present, just remove ourselves */
6897 		if (vlvf & pool_mask)
6898 			goto update_vlvfb;
6899 
6900 		/* if PF is present, leave VFTA */
6901 		if (vlvf & E1000_VLVF_POOLSEL_MASK)
6902 			goto update_vlvf;
6903 
6904 		vid = vlvf & E1000_VLVF_VLANID_MASK;
6905 		vfta_mask = BIT(vid % 32);
6906 
6907 		/* clear bit from VFTA */
6908 		vfta = adapter->shadow_vfta[vid / 32];
6909 		if (vfta & vfta_mask)
6910 			hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6911 update_vlvf:
6912 		/* clear pool selection enable */
6913 		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6914 			vlvf &= E1000_VLVF_POOLSEL_MASK;
6915 		else
6916 			vlvf = 0;
6917 update_vlvfb:
6918 		/* clear pool bits */
6919 		wr32(E1000_VLVF(i), vlvf);
6920 	}
6921 }
6922 
6923 static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
6924 {
6925 	u32 vlvf;
6926 	int idx;
6927 
6928 	/* short cut the special case */
6929 	if (vlan == 0)
6930 		return 0;
6931 
6932 	/* Search for the VLAN id in the VLVF entries */
6933 	for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6934 		vlvf = rd32(E1000_VLVF(idx));
6935 		if ((vlvf & VLAN_VID_MASK) == vlan)
6936 			break;
6937 	}
6938 
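	/* an idx of 0 means no VLVF entry matched; slot 0 is never scanned */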
6939 	return idx;
6940 }
6941 
6942 static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
6943 {
6944 	struct e1000_hw *hw = &adapter->hw;
6945 	u32 bits, pf_id;
6946 	int idx;
6947 
6948 	idx = igb_find_vlvf_entry(hw, vid);
6949 	if (!idx)
6950 		return;
6951 
6952 	/* See if any other pools are set for this VLAN filter
6953 	 * entry other than the PF.
6954 	 */
6955 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
6956 	bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
6957 	bits &= rd32(E1000_VLVF(idx));
6958 
6959 	/* Disable the filter so this falls into the default pool. */
6960 	if (!bits) {
6961 		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6962 			wr32(E1000_VLVF(idx), BIT(pf_id));
6963 		else
6964 			wr32(E1000_VLVF(idx), 0);
6965 	}
6966 }
6967 
6968 static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6969 			   bool add, u32 vf)
6970 {
6971 	int pf_id = adapter->vfs_allocated_count;
6972 	struct e1000_hw *hw = &adapter->hw;
6973 	int err;
6974 
6975 	/* If VLAN overlaps with one the PF is currently monitoring make
6976 	 * sure that we are able to allocate a VLVF entry.  This may be
6977 	 * redundant but it guarantees PF will maintain visibility to
6978 	 * the VLAN.
6979 	 */
6980 	if (add && test_bit(vid, adapter->active_vlans)) {
6981 		err = igb_vfta_set(hw, vid, pf_id, true, false);
6982 		if (err)
6983 			return err;
6984 	}
6985 
6986 	err = igb_vfta_set(hw, vid, vf, add, false);
6987 
6988 	if (add && !err)
6989 		return err;
6990 
6991 	/* If we failed to add the VF VLAN or we are removing the VF VLAN
6992 	 * we may need to drop the PF pool bit in order to allow us to free
6993 	 * up the VLVF resources.
6994 	 */
6995 	if (test_bit(vid, adapter->active_vlans) ||
6996 	    (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6997 		igb_update_pf_vlvf(adapter, vid);
6998 
6999 	return err;
7000 }
7001 
7002 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
7003 {
7004 	struct e1000_hw *hw = &adapter->hw;
7005 
7006 	if (vid)
7007 		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
7008 	else
7009 		wr32(E1000_VMVIR(vf), 0);
7010 }
7011 
7012 static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
7013 				u16 vlan, u8 qos)
7014 {
7015 	int err;
7016 
7017 	err = igb_set_vf_vlan(adapter, vlan, true, vf);
7018 	if (err)
7019 		return err;
7020 
7021 	igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
7022 	igb_set_vmolr(adapter, vf, !vlan);
7023 
7024 	/* revoke access to previous VLAN */
7025 	if (vlan != adapter->vf_data[vf].pf_vlan)
7026 		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7027 				false, vf);
7028 
7029 	adapter->vf_data[vf].pf_vlan = vlan;
7030 	adapter->vf_data[vf].pf_qos = qos;
7031 	igb_set_vf_vlan_strip(adapter, vf, true);
7032 	dev_info(&adapter->pdev->dev,
7033 		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7034 	if (test_bit(__IGB_DOWN, &adapter->state)) {
7035 		dev_warn(&adapter->pdev->dev,
7036 			 "The VF VLAN has been set, but the PF device is not up.\n");
7037 		dev_warn(&adapter->pdev->dev,
7038 			 "Bring the PF device up before attempting to use the VF device.\n");
7039 	}
7040 
7041 	return err;
7042 }
7043 
7044 static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7045 {
7046 	/* Restore tagless access via VLAN 0 */
7047 	igb_set_vf_vlan(adapter, 0, true, vf);
7048 
7049 	igb_set_vmvir(adapter, 0, vf);
7050 	igb_set_vmolr(adapter, vf, true);
7051 
7052 	/* Remove any PF assigned VLAN */
7053 	if (adapter->vf_data[vf].pf_vlan)
7054 		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7055 				false, vf);
7056 
7057 	adapter->vf_data[vf].pf_vlan = 0;
7058 	adapter->vf_data[vf].pf_qos = 0;
7059 	igb_set_vf_vlan_strip(adapter, vf, false);
7060 
7061 	return 0;
7062 }
7063 
7064 static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7065 			       u16 vlan, u8 qos, __be16 vlan_proto)
7066 {
7067 	struct igb_adapter *adapter = netdev_priv(netdev);
7068 
7069 	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7070 		return -EINVAL;
7071 
7072 	if (vlan_proto != htons(ETH_P_8021Q))
7073 		return -EPROTONOSUPPORT;
7074 
7075 	return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7076 			       igb_disable_port_vlan(adapter, vf);
7077 }
7078 
7079 static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7080 {
7081 	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7082 	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7083 	int ret;
7084 
7085 	if (adapter->vf_data[vf].pf_vlan)
7086 		return -1;
7087 
7088 	/* VLAN 0 is a special case, don't allow it to be removed */
7089 	if (!vid && !add)
7090 		return 0;
7091 
7092 	ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7093 	if (!ret)
7094 		igb_set_vf_vlan_strip(adapter, vf, !!vid);
7095 	return ret;
7096 }
7097 
7098 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7099 {
7100 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7101 
7102 	/* clear flags - except flag that indicates PF has set the MAC */
7103 	vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
7104 	vf_data->last_nack = jiffies;
7105 
7106 	/* reset vlans for device */
7107 	igb_clear_vf_vfta(adapter, vf);
7108 	igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7109 	igb_set_vmvir(adapter, vf_data->pf_vlan |
7110 			       (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7111 	igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7112 	igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7113 
7114 	/* reset multicast table array for vf */
7115 	adapter->vf_data[vf].num_vf_mc_hashes = 0;
7116 
7117 	/* Flush and reset the mta with the new values */
7118 	igb_set_rx_mode(adapter->netdev);
7119 }
7120 
7121 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7122 {
7123 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7124 
7125 	/* clear mac address as we were hotplug removed/added */
7126 	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7127 		eth_zero_addr(vf_mac);
7128 
7129 	/* process remaining reset events */
7130 	igb_vf_reset(adapter, vf);
7131 }
7132 
7133 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7134 {
7135 	struct e1000_hw *hw = &adapter->hw;
7136 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7137 	u32 reg, msgbuf[3];
7138 	u8 *addr = (u8 *)(&msgbuf[1]);
7139 
7140 	/* process all the same items cleared in a function level reset */
7141 	igb_vf_reset(adapter, vf);
7142 
7143 	/* set vf mac address */
7144 	igb_set_vf_mac(adapter, vf, vf_mac);
7145 
7146 	/* enable transmit and receive for vf */
7147 	reg = rd32(E1000_VFTE);
7148 	wr32(E1000_VFTE, reg | BIT(vf));
7149 	reg = rd32(E1000_VFRE);
7150 	wr32(E1000_VFRE, reg | BIT(vf));
7151 
7152 	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7153 
7154 	/* reply to reset with ack and vf mac address */
7155 	if (!is_zero_ether_addr(vf_mac)) {
7156 		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7157 		memcpy(addr, vf_mac, ETH_ALEN);
7158 	} else {
7159 		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7160 	}
7161 	igb_write_mbx(hw, msgbuf, 3, vf);
7162 }
7163 
7164 static void igb_flush_mac_table(struct igb_adapter *adapter)
7165 {
7166 	struct e1000_hw *hw = &adapter->hw;
7167 	int i;
7168 
7169 	for (i = 0; i < hw->mac.rar_entry_count; i++) {
7170 		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7171 		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7172 		adapter->mac_table[i].queue = 0;
7173 		igb_rar_set_index(adapter, i);
7174 	}
7175 }
7176 
7177 static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7178 {
7179 	struct e1000_hw *hw = &adapter->hw;
	/* do not count RAR entries reserved for the VF MAC addresses */
7181 	int rar_entries = hw->mac.rar_entry_count -
7182 			  adapter->vfs_allocated_count;
7183 	int i, count = 0;
7184 
7185 	for (i = 0; i < rar_entries; i++) {
7186 		/* do not count default entries */
7187 		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7188 			continue;
7189 
7190 		/* do not count "in use" entries for different queues */
7191 		if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7192 		    (adapter->mac_table[i].queue != queue))
7193 			continue;
7194 
7195 		count++;
7196 	}
7197 
7198 	return count;
7199 }
7200 
7201 /* Set default MAC address for the PF in the first RAR entry */
7202 static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7203 {
7204 	struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7205 
7206 	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7207 	mac_table->queue = adapter->vfs_allocated_count;
7208 	mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7209 
7210 	igb_rar_set_index(adapter, 0);
7211 }
7212 
/* If the filter to be added and an already existing filter express
 * the same address and address type, it is enough to update the
 * remaining configuration, for example the queue used to steer
 * traffic.
 */
7218 static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7219 				      const u8 *addr, const u8 flags)
7220 {
7221 	if (!(entry->state & IGB_MAC_STATE_IN_USE))
7222 		return true;
7223 
7224 	if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7225 	    (flags & IGB_MAC_STATE_SRC_ADDR))
7226 		return false;
7227 
7228 	if (!ether_addr_equal(addr, entry->addr))
7229 		return false;
7230 
7231 	return true;
7232 }
7233 
/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
 * 'flags' indicates what kind of match is made; the match is on the
 * destination address by default, or on the source address if the
 * IGB_MAC_STATE_SRC_ADDR flag is set.
 */
7239 static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7240 				    const u8 *addr, const u8 queue,
7241 				    const u8 flags)
7242 {
7243 	struct e1000_hw *hw = &adapter->hw;
7244 	int rar_entries = hw->mac.rar_entry_count -
7245 			  adapter->vfs_allocated_count;
7246 	int i;
7247 
7248 	if (is_zero_ether_addr(addr))
7249 		return -EINVAL;
7250 
7251 	/* Search for the first empty entry in the MAC table.
7252 	 * Do not touch entries at the end of the table reserved for the VF MAC
7253 	 * addresses.
7254 	 */
7255 	for (i = 0; i < rar_entries; i++) {
7256 		if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7257 					       addr, flags))
7258 			continue;
7259 
7260 		ether_addr_copy(adapter->mac_table[i].addr, addr);
7261 		adapter->mac_table[i].queue = queue;
7262 		adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7263 
7264 		igb_rar_set_index(adapter, i);
7265 		return i;
7266 	}
7267 
7268 	return -ENOSPC;
7269 }
7270 
7271 static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7272 			      const u8 queue)
7273 {
7274 	return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7275 }
7276 
/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue'.  'flags' indicates what kind of match needs to be removed;
 * the match is on the destination address by default, or on the
 * source address if the IGB_MAC_STATE_SRC_ADDR flag is set.
 */
7283 static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7284 				    const u8 *addr, const u8 queue,
7285 				    const u8 flags)
7286 {
7287 	struct e1000_hw *hw = &adapter->hw;
7288 	int rar_entries = hw->mac.rar_entry_count -
7289 			  adapter->vfs_allocated_count;
7290 	int i;
7291 
7292 	if (is_zero_ether_addr(addr))
7293 		return -EINVAL;
7294 
7295 	/* Search for matching entry in the MAC table based on given address
7296 	 * and queue. Do not touch entries at the end of the table reserved
7297 	 * for the VF MAC addresses.
7298 	 */
7299 	for (i = 0; i < rar_entries; i++) {
7300 		if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7301 			continue;
7302 		if ((adapter->mac_table[i].state & flags) != flags)
7303 			continue;
7304 		if (adapter->mac_table[i].queue != queue)
7305 			continue;
7306 		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7307 			continue;
7308 
7309 		/* When a filter for the default address is "deleted",
7310 		 * we return it to its initial configuration
7311 		 */
7312 		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7313 			adapter->mac_table[i].state =
7314 				IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7315 			adapter->mac_table[i].queue =
7316 				adapter->vfs_allocated_count;
7317 		} else {
7318 			adapter->mac_table[i].state = 0;
7319 			adapter->mac_table[i].queue = 0;
7320 			memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7321 		}
7322 
7323 		igb_rar_set_index(adapter, i);
7324 		return 0;
7325 	}
7326 
7327 	return -ENOENT;
7328 }
7329 
7330 static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7331 			      const u8 queue)
7332 {
7333 	return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7334 }
7335 
7336 int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7337 				const u8 *addr, u8 queue, u8 flags)
7338 {
7339 	struct e1000_hw *hw = &adapter->hw;
7340 
7341 	/* In theory, this should be supported on 82575 as well, but
7342 	 * that part wasn't easily accessible during development.
7343 	 */
7344 	if (hw->mac.type != e1000_i210)
7345 		return -EOPNOTSUPP;
7346 
7347 	return igb_add_mac_filter_flags(adapter, addr, queue,
7348 					IGB_MAC_STATE_QUEUE_STEERING | flags);
7349 }
7350 
7351 int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7352 				const u8 *addr, u8 queue, u8 flags)
7353 {
7354 	return igb_del_mac_filter_flags(adapter, addr, queue,
7355 					IGB_MAC_STATE_QUEUE_STEERING | flags);
7356 }
7357 
7358 static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7359 {
7360 	struct igb_adapter *adapter = netdev_priv(netdev);
7361 	int ret;
7362 
7363 	ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7364 
7365 	return min_t(int, ret, 0);
7366 }
7367 
7368 static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7369 {
7370 	struct igb_adapter *adapter = netdev_priv(netdev);
7371 
7372 	igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7373 
7374 	return 0;
7375 }
7376 
7377 static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7378 				 const u32 info, const u8 *addr)
7379 {
7380 	struct pci_dev *pdev = adapter->pdev;
7381 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7382 	struct list_head *pos;
7383 	struct vf_mac_filter *entry = NULL;
7384 	int ret = 0;
7385 
7386 	switch (info) {
7387 	case E1000_VF_MAC_FILTER_CLR:
7388 		/* remove all unicast MAC filters related to the current VF */
7389 		list_for_each(pos, &adapter->vf_macs.l) {
7390 			entry = list_entry(pos, struct vf_mac_filter, l);
7391 			if (entry->vf == vf) {
7392 				entry->vf = -1;
7393 				entry->free = true;
7394 				igb_del_mac_filter(adapter, entry->vf_mac, vf);
7395 			}
7396 		}
7397 		break;
7398 	case E1000_VF_MAC_FILTER_ADD:
7399 		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7400 		    !vf_data->trusted) {
7401 			dev_warn(&pdev->dev,
7402 				 "VF %d requested MAC filter but is administratively denied\n",
7403 				 vf);
7404 			return -EINVAL;
7405 		}
7406 		if (!is_valid_ether_addr(addr)) {
7407 			dev_warn(&pdev->dev,
7408 				 "VF %d attempted to set invalid MAC filter\n",
7409 				 vf);
7410 			return -EINVAL;
7411 		}
7412 
7413 		/* try to find empty slot in the list */
7414 		list_for_each(pos, &adapter->vf_macs.l) {
7415 			entry = list_entry(pos, struct vf_mac_filter, l);
7416 			if (entry->free)
7417 				break;
7418 		}
7419 
7420 		if (entry && entry->free) {
7421 			entry->free = false;
7422 			entry->vf = vf;
7423 			ether_addr_copy(entry->vf_mac, addr);
7424 
7425 			ret = igb_add_mac_filter(adapter, addr, vf);
7426 			ret = min_t(int, ret, 0);
7427 		} else {
7428 			ret = -ENOSPC;
7429 		}
7430 
7431 		if (ret == -ENOSPC)
7432 			dev_warn(&pdev->dev,
7433 				 "VF %d has requested MAC filter but there is no space for it\n",
7434 				 vf);
7435 		break;
7436 	default:
7437 		ret = -EINVAL;
7438 		break;
7439 	}
7440 
7441 	return ret;
7442 }
7443 
7444 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7445 {
7446 	struct pci_dev *pdev = adapter->pdev;
7447 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7448 	u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7449 
7450 	/* The VF MAC Address is stored in a packed array of bytes
7451 	 * starting at the second 32 bit word of the msg array
7452 	 */
7453 	unsigned char *addr = (unsigned char *)&msg[1];
7454 	int ret = 0;
7455 
7456 	if (!info) {
7457 		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7458 		    !vf_data->trusted) {
7459 			dev_warn(&pdev->dev,
7460 				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7461 				 vf);
7462 			return -EINVAL;
7463 		}
7464 
7465 		if (!is_valid_ether_addr(addr)) {
7466 			dev_warn(&pdev->dev,
7467 				 "VF %d attempted to set invalid MAC\n",
7468 				 vf);
7469 			return -EINVAL;
7470 		}
7471 
7472 		ret = igb_set_vf_mac(adapter, vf, addr);
7473 	} else {
7474 		ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7475 	}
7476 
7477 	return ret;
7478 }
7479 
7480 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7481 {
7482 	struct e1000_hw *hw = &adapter->hw;
7483 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7484 	u32 msg = E1000_VT_MSGTYPE_NACK;
7485 
7486 	/* if device isn't clear to send it shouldn't be reading either */
7487 	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7488 	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7489 		igb_write_mbx(hw, &msg, 1, vf);
7490 		vf_data->last_nack = jiffies;
7491 	}
7492 }
7493 
7494 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7495 {
7496 	struct pci_dev *pdev = adapter->pdev;
7497 	u32 msgbuf[E1000_VFMAILBOX_SIZE];
7498 	struct e1000_hw *hw = &adapter->hw;
7499 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7500 	s32 retval;
7501 
7502 	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7503 
7504 	if (retval) {
7505 		/* if receive failed revoke VF CTS stats and restart init */
7506 		dev_err(&pdev->dev, "Error receiving message from VF\n");
7507 		vf_data->flags &= ~IGB_VF_FLAG_CTS;
7508 		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7509 			goto unlock;
7510 		goto out;
7511 	}
7512 
7513 	/* this is a message we already processed, do nothing */
7514 	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7515 		goto unlock;
7516 
7517 	/* until the vf completes a reset it should not be
7518 	 * allowed to start any configuration.
7519 	 */
7520 	if (msgbuf[0] == E1000_VF_RESET) {
7521 		/* unlocks mailbox */
7522 		igb_vf_reset_msg(adapter, vf);
7523 		return;
7524 	}
7525 
7526 	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7527 		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7528 			goto unlock;
7529 		retval = -1;
7530 		goto out;
7531 	}
7532 
7533 	switch ((msgbuf[0] & 0xFFFF)) {
7534 	case E1000_VF_SET_MAC_ADDR:
7535 		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7536 		break;
7537 	case E1000_VF_SET_PROMISC:
7538 		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7539 		break;
7540 	case E1000_VF_SET_MULTICAST:
7541 		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7542 		break;
7543 	case E1000_VF_SET_LPE:
7544 		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7545 		break;
7546 	case E1000_VF_SET_VLAN:
7547 		retval = -1;
7548 		if (vf_data->pf_vlan)
7549 			dev_warn(&pdev->dev,
7550 				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7551 				 vf);
7552 		else
7553 			retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7554 		break;
7555 	default:
7556 		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7557 		retval = -1;
7558 		break;
7559 	}
7560 
7561 	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7562 out:
7563 	/* notify the VF of the results of what it sent us */
7564 	if (retval)
7565 		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7566 	else
7567 		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7568 
7569 	/* unlocks mailbox */
7570 	igb_write_mbx(hw, msgbuf, 1, vf);
7571 	return;
7572 
7573 unlock:
7574 	igb_unlock_mbx(hw, vf);
7575 }
7576 
7577 static void igb_msg_task(struct igb_adapter *adapter)
7578 {
7579 	struct e1000_hw *hw = &adapter->hw;
7580 	u32 vf;
7581 
7582 	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7583 		/* process any reset requests */
7584 		if (!igb_check_for_rst(hw, vf))
7585 			igb_vf_reset_event(adapter, vf);
7586 
7587 		/* process any messages pending */
7588 		if (!igb_check_for_msg(hw, vf))
7589 			igb_rcv_msg_from_vf(adapter, vf);
7590 
7591 		/* process any acks */
7592 		if (!igb_check_for_ack(hw, vf))
7593 			igb_rcv_ack_from_vf(adapter, vf);
7594 	}
7595 }
7596 
7597 /**
7598  *  igb_set_uta - Set unicast filter table address
7599  *  @adapter: board private structure
7600  *  @set: boolean indicating if we are setting or clearing bits
7601  *
7602  *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used;
 *  however, due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow VLAN tag stripping when promiscuous mode is enabled.
7607  **/
7608 static void igb_set_uta(struct igb_adapter *adapter, bool set)
7609 {
7610 	struct e1000_hw *hw = &adapter->hw;
7611 	u32 uta = set ? ~0 : 0;
7612 	int i;
7613 
7614 	/* we only need to do this if VMDq is enabled */
7615 	if (!adapter->vfs_allocated_count)
7616 		return;
7617 
7618 	for (i = hw->mac.uta_reg_count; i--;)
7619 		array_wr32(E1000_UTA, i, uta);
7620 }
7621 
7622 /**
7623  *  igb_intr_msi - Interrupt Handler
7624  *  @irq: interrupt number
7625  *  @data: pointer to a network interface device structure
7626  **/
7627 static irqreturn_t igb_intr_msi(int irq, void *data)
7628 {
7629 	struct igb_adapter *adapter = data;
7630 	struct igb_q_vector *q_vector = adapter->q_vector[0];
7631 	struct e1000_hw *hw = &adapter->hw;
7632 	/* read ICR disables interrupts using IAM */
7633 	u32 icr = rd32(E1000_ICR);
7634 
7635 	igb_write_itr(q_vector);
7636 
7637 	if (icr & E1000_ICR_DRSTA)
7638 		schedule_work(&adapter->reset_task);
7639 
7640 	if (icr & E1000_ICR_DOUTSYNC) {
7641 		/* HW is reporting DMA is out of sync */
7642 		adapter->stats.doosync++;
7643 	}
7644 
7645 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7646 		hw->mac.get_link_status = 1;
7647 		if (!test_bit(__IGB_DOWN, &adapter->state))
7648 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
7649 	}
7650 
7651 	if (icr & E1000_ICR_TS)
7652 		igb_tsync_interrupt(adapter);
7653 
7654 	napi_schedule(&q_vector->napi);
7655 
7656 	return IRQ_HANDLED;
7657 }
7658 
7659 /**
7660  *  igb_intr - Legacy Interrupt Handler
7661  *  @irq: interrupt number
7662  *  @data: pointer to a network interface device structure
7663  **/
7664 static irqreturn_t igb_intr(int irq, void *data)
7665 {
7666 	struct igb_adapter *adapter = data;
7667 	struct igb_q_vector *q_vector = adapter->q_vector[0];
7668 	struct e1000_hw *hw = &adapter->hw;
7669 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
7670 	 * need for the IMC write
7671 	 */
7672 	u32 icr = rd32(E1000_ICR);
7673 
7674 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
7675 	 * not set, then the adapter didn't send an interrupt
7676 	 */
7677 	if (!(icr & E1000_ICR_INT_ASSERTED))
7678 		return IRQ_NONE;
7679 
7680 	igb_write_itr(q_vector);
7681 
7682 	if (icr & E1000_ICR_DRSTA)
7683 		schedule_work(&adapter->reset_task);
7684 
7685 	if (icr & E1000_ICR_DOUTSYNC) {
7686 		/* HW is reporting DMA is out of sync */
7687 		adapter->stats.doosync++;
7688 	}
7689 
7690 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7691 		hw->mac.get_link_status = 1;
7692 		/* guard against interrupt when we're going down */
7693 		if (!test_bit(__IGB_DOWN, &adapter->state))
7694 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
7695 	}
7696 
7697 	if (icr & E1000_ICR_TS)
7698 		igb_tsync_interrupt(adapter);
7699 
7700 	napi_schedule(&q_vector->napi);
7701 
7702 	return IRQ_HANDLED;
7703 }
7704 
7705 static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
7706 {
7707 	struct igb_adapter *adapter = q_vector->adapter;
7708 	struct e1000_hw *hw = &adapter->hw;
7709 
7710 	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7711 	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7712 		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7713 			igb_set_itr(q_vector);
7714 		else
7715 			igb_update_ring_itr(q_vector);
7716 	}
7717 
7718 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
7719 		if (adapter->flags & IGB_FLAG_HAS_MSIX)
7720 			wr32(E1000_EIMS, q_vector->eims_value);
7721 		else
7722 			igb_irq_enable(adapter);
7723 	}
7724 }
7725 
7726 /**
7727  *  igb_poll - NAPI Rx polling callback
7728  *  @napi: napi polling structure
7729  *  @budget: count of how many packets we should handle
7730  **/
7731 static int igb_poll(struct napi_struct *napi, int budget)
7732 {
7733 	struct igb_q_vector *q_vector = container_of(napi,
7734 						     struct igb_q_vector,
7735 						     napi);
7736 	bool clean_complete = true;
7737 	int work_done = 0;
7738 
7739 #ifdef CONFIG_IGB_DCA
7740 	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
7741 		igb_update_dca(q_vector);
7742 #endif
7743 	if (q_vector->tx.ring)
7744 		clean_complete = igb_clean_tx_irq(q_vector, budget);
7745 
7746 	if (q_vector->rx.ring) {
7747 		int cleaned = igb_clean_rx_irq(q_vector, budget);
7748 
7749 		work_done += cleaned;
7750 		if (cleaned >= budget)
7751 			clean_complete = false;
7752 	}
7753 
7754 	/* If all work not completed, return budget and keep polling */
7755 	if (!clean_complete)
7756 		return budget;
7757 
7758 	/* Exit the polling mode, but don't re-enable interrupts if stack might
7759 	 * poll us due to busy-polling
7760 	 */
7761 	if (likely(napi_complete_done(napi, work_done)))
7762 		igb_ring_irq_enable(q_vector);
7763 
7764 	return min(work_done, budget - 1);
7765 }
7766 
7767 /**
7768  *  igb_clean_tx_irq - Reclaim resources after transmit completes
7769  *  @q_vector: pointer to q_vector containing needed info
7770  *  @napi_budget: Used to determine if we are in netpoll
7771  *
7772  *  returns true if ring is completely cleaned
7773  **/
7774 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
7775 {
7776 	struct igb_adapter *adapter = q_vector->adapter;
7777 	struct igb_ring *tx_ring = q_vector->tx.ring;
7778 	struct igb_tx_buffer *tx_buffer;
7779 	union e1000_adv_tx_desc *tx_desc;
7780 	unsigned int total_bytes = 0, total_packets = 0;
7781 	unsigned int budget = q_vector->tx.work_limit;
7782 	unsigned int i = tx_ring->next_to_clean;
7783 
7784 	if (test_bit(__IGB_DOWN, &adapter->state))
7785 		return true;
7786 
7787 	tx_buffer = &tx_ring->tx_buffer_info[i];
7788 	tx_desc = IGB_TX_DESC(tx_ring, i);
7789 	i -= tx_ring->count;
7790 
7791 	do {
7792 		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
7793 
7794 		/* if next_to_watch is not set then there is no work pending */
7795 		if (!eop_desc)
7796 			break;
7797 
7798 		/* prevent any other reads prior to eop_desc */
7799 		smp_rmb();
7800 
7801 		/* if DD is not set pending work has not been completed */
7802 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
7803 			break;
7804 
7805 		/* clear next_to_watch to prevent false hangs */
7806 		tx_buffer->next_to_watch = NULL;
7807 
7808 		/* update the statistics for this packet */
7809 		total_bytes += tx_buffer->bytecount;
7810 		total_packets += tx_buffer->gso_segs;
7811 
7812 		/* free the skb */
7813 		napi_consume_skb(tx_buffer->skb, napi_budget);
7814 
7815 		/* unmap skb header data */
7816 		dma_unmap_single(tx_ring->dev,
7817 				 dma_unmap_addr(tx_buffer, dma),
7818 				 dma_unmap_len(tx_buffer, len),
7819 				 DMA_TO_DEVICE);
7820 
7821 		/* clear tx_buffer data */
7822 		dma_unmap_len_set(tx_buffer, len, 0);
7823 
7824 		/* clear last DMA location and unmap remaining buffers */
7825 		while (tx_desc != eop_desc) {
7826 			tx_buffer++;
7827 			tx_desc++;
7828 			i++;
7829 			if (unlikely(!i)) {
7830 				i -= tx_ring->count;
7831 				tx_buffer = tx_ring->tx_buffer_info;
7832 				tx_desc = IGB_TX_DESC(tx_ring, 0);
7833 			}
7834 
7835 			/* unmap any remaining paged data */
7836 			if (dma_unmap_len(tx_buffer, len)) {
7837 				dma_unmap_page(tx_ring->dev,
7838 					       dma_unmap_addr(tx_buffer, dma),
7839 					       dma_unmap_len(tx_buffer, len),
7840 					       DMA_TO_DEVICE);
7841 				dma_unmap_len_set(tx_buffer, len, 0);
7842 			}
7843 		}
7844 
7845 		/* move us one more past the eop_desc for start of next pkt */
7846 		tx_buffer++;
7847 		tx_desc++;
7848 		i++;
7849 		if (unlikely(!i)) {
7850 			i -= tx_ring->count;
7851 			tx_buffer = tx_ring->tx_buffer_info;
7852 			tx_desc = IGB_TX_DESC(tx_ring, 0);
7853 		}
7854 
7855 		/* issue prefetch for next Tx descriptor */
7856 		prefetch(tx_desc);
7857 
7858 		/* update budget accounting */
7859 		budget--;
7860 	} while (likely(budget));
7861 
7862 	netdev_tx_completed_queue(txring_txq(tx_ring),
7863 				  total_packets, total_bytes);
7864 	i += tx_ring->count;
7865 	tx_ring->next_to_clean = i;
7866 	u64_stats_update_begin(&tx_ring->tx_syncp);
7867 	tx_ring->tx_stats.bytes += total_bytes;
7868 	tx_ring->tx_stats.packets += total_packets;
7869 	u64_stats_update_end(&tx_ring->tx_syncp);
7870 	q_vector->tx.total_bytes += total_bytes;
7871 	q_vector->tx.total_packets += total_packets;
7872 
7873 	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
7874 		struct e1000_hw *hw = &adapter->hw;
7875 
		/* Detect a transmit hang in hardware; this serializes the
7877 		 * check with the clearing of time_stamp and movement of i
7878 		 */
7879 		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
7880 		if (tx_buffer->next_to_watch &&
7881 		    time_after(jiffies, tx_buffer->time_stamp +
7882 			       (adapter->tx_timeout_factor * HZ)) &&
7883 		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
7884 
7885 			/* detected Tx unit hang */
7886 			dev_err(tx_ring->dev,
7887 				"Detected Tx Unit Hang\n"
7888 				"  Tx Queue             <%d>\n"
7889 				"  TDH                  <%x>\n"
7890 				"  TDT                  <%x>\n"
7891 				"  next_to_use          <%x>\n"
7892 				"  next_to_clean        <%x>\n"
7893 				"buffer_info[next_to_clean]\n"
7894 				"  time_stamp           <%lx>\n"
7895 				"  next_to_watch        <%p>\n"
7896 				"  jiffies              <%lx>\n"
7897 				"  desc.status          <%x>\n",
7898 				tx_ring->queue_index,
7899 				rd32(E1000_TDH(tx_ring->reg_idx)),
7900 				readl(tx_ring->tail),
7901 				tx_ring->next_to_use,
7902 				tx_ring->next_to_clean,
7903 				tx_buffer->time_stamp,
7904 				tx_buffer->next_to_watch,
7905 				jiffies,
7906 				tx_buffer->next_to_watch->wb.status);
7907 			netif_stop_subqueue(tx_ring->netdev,
7908 					    tx_ring->queue_index);
7909 
7910 			/* we are about to reset, no point in enabling stuff */
7911 			return true;
7912 		}
7913 	}
7914 
7915 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
7916 	if (unlikely(total_packets &&
7917 	    netif_carrier_ok(tx_ring->netdev) &&
7918 	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
7919 		/* Make sure that anybody stopping the queue after this
7920 		 * sees the new next_to_clean.
7921 		 */
7922 		smp_mb();
7923 		if (__netif_subqueue_stopped(tx_ring->netdev,
7924 					     tx_ring->queue_index) &&
7925 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
7926 			netif_wake_subqueue(tx_ring->netdev,
7927 					    tx_ring->queue_index);
7928 
7929 			u64_stats_update_begin(&tx_ring->tx_syncp);
7930 			tx_ring->tx_stats.restart_queue++;
7931 			u64_stats_update_end(&tx_ring->tx_syncp);
7932 		}
7933 	}
7934 
7935 	return !!budget;
7936 }
7937 
7938 /**
7939  *  igb_reuse_rx_page - page flip buffer and store it back on the ring
7940  *  @rx_ring: rx descriptor ring to store buffers on
7941  *  @old_buff: donor buffer to have page reused
7942  *
7943  *  Synchronizes page for reuse by the adapter
7944  **/
7945 static void igb_reuse_rx_page(struct igb_ring *rx_ring,
7946 			      struct igb_rx_buffer *old_buff)
7947 {
7948 	struct igb_rx_buffer *new_buff;
7949 	u16 nta = rx_ring->next_to_alloc;
7950 
7951 	new_buff = &rx_ring->rx_buffer_info[nta];
7952 
7953 	/* update, and store next to alloc */
7954 	nta++;
7955 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
7956 
7957 	/* Transfer page from old buffer to new buffer.
7958 	 * Move each member individually to avoid possible store
7959 	 * forwarding stalls.
7960 	 */
7961 	new_buff->dma		= old_buff->dma;
7962 	new_buff->page		= old_buff->page;
7963 	new_buff->page_offset	= old_buff->page_offset;
7964 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
7965 }
7966 
7967 static inline bool igb_page_is_reserved(struct page *page)
7968 {
7969 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
7970 }
7971 
7972 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
7973 {
7974 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
7975 	struct page *page = rx_buffer->page;
7976 
7977 	/* avoid re-using remote pages */
7978 	if (unlikely(igb_page_is_reserved(page)))
7979 		return false;
7980 
7981 #if (PAGE_SIZE < 8192)
7982 	/* if we are only owner of page we can reuse it */
7983 	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
7984 		return false;
7985 #else
7986 #define IGB_LAST_OFFSET \
7987 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
7988 
7989 	if (rx_buffer->page_offset > IGB_LAST_OFFSET)
7990 		return false;
7991 #endif
7992 
7993 	/* If we have drained the page fragment pool we need to update
7994 	 * the pagecnt_bias and page count so that we fully restock the
7995 	 * number of references the driver holds.
7996 	 */
7997 	if (unlikely(!pagecnt_bias)) {
7998 		page_ref_add(page, USHRT_MAX);
7999 		rx_buffer->pagecnt_bias = USHRT_MAX;
8000 	}
8001 
8002 	return true;
8003 }
8004 
8005 /**
8006  *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
8007  *  @rx_ring: rx descriptor ring to transact packets on
8008  *  @rx_buffer: buffer containing page to add
8009  *  @skb: sk_buff to place the data into
8010  *  @size: size of buffer to be added
8011  *
8012  *  This function will add the data contained in rx_buffer->page to the skb.
8013  **/
8014 static void igb_add_rx_frag(struct igb_ring *rx_ring,
8015 			    struct igb_rx_buffer *rx_buffer,
8016 			    struct sk_buff *skb,
8017 			    unsigned int size)
8018 {
8019 #if (PAGE_SIZE < 8192)
8020 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8021 #else
8022 	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
8023 				SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
8024 				SKB_DATA_ALIGN(size);
8025 #endif
8026 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
8027 			rx_buffer->page_offset, size, truesize);
8028 #if (PAGE_SIZE < 8192)
8029 	rx_buffer->page_offset ^= truesize;
8030 #else
8031 	rx_buffer->page_offset += truesize;
8032 #endif
8033 }
8034 
8035 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
8036 					 struct igb_rx_buffer *rx_buffer,
8037 					 union e1000_adv_rx_desc *rx_desc,
8038 					 unsigned int size)
8039 {
8040 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8041 #if (PAGE_SIZE < 8192)
8042 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8043 #else
8044 	unsigned int truesize = SKB_DATA_ALIGN(size);
8045 #endif
8046 	unsigned int headlen;
8047 	struct sk_buff *skb;
8048 
8049 	/* prefetch first cache line of first page */
8050 	prefetch(va);
8051 #if L1_CACHE_BYTES < 128
8052 	prefetch(va + L1_CACHE_BYTES);
8053 #endif
8054 
8055 	/* allocate a skb to store the frags */
8056 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
8057 	if (unlikely(!skb))
8058 		return NULL;
8059 
8060 	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
8061 		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
8062 		va += IGB_TS_HDR_LEN;
8063 		size -= IGB_TS_HDR_LEN;
8064 	}
8065 
8066 	/* Determine available headroom for copy */
8067 	headlen = size;
8068 	if (headlen > IGB_RX_HDR_LEN)
8069 		headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
8070 
8071 	/* align pull length to size of long to optimize memcpy performance */
8072 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
8073 
8074 	/* update all of the pointers */
8075 	size -= headlen;
8076 	if (size) {
8077 		skb_add_rx_frag(skb, 0, rx_buffer->page,
8078 				(va + headlen) - page_address(rx_buffer->page),
8079 				size, truesize);
8080 #if (PAGE_SIZE < 8192)
8081 		rx_buffer->page_offset ^= truesize;
8082 #else
8083 		rx_buffer->page_offset += truesize;
8084 #endif
8085 	} else {
8086 		rx_buffer->pagecnt_bias++;
8087 	}
8088 
8089 	return skb;
8090 }
8091 
8092 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
8093 				     struct igb_rx_buffer *rx_buffer,
8094 				     union e1000_adv_rx_desc *rx_desc,
8095 				     unsigned int size)
8096 {
8097 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8098 #if (PAGE_SIZE < 8192)
8099 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8100 #else
8101 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8102 				SKB_DATA_ALIGN(IGB_SKB_PAD + size);
8103 #endif
8104 	struct sk_buff *skb;
8105 
8106 	/* prefetch first cache line of first page */
8107 	prefetch(va);
8108 #if L1_CACHE_BYTES < 128
8109 	prefetch(va + L1_CACHE_BYTES);
8110 #endif
8111 
8112 	/* build an skb around the page buffer */
8113 	skb = build_skb(va - IGB_SKB_PAD, truesize);
8114 	if (unlikely(!skb))
8115 		return NULL;
8116 
8117 	/* update pointers within the skb to store the data */
8118 	skb_reserve(skb, IGB_SKB_PAD);
8119 	__skb_put(skb, size);
8120 
8121 	/* pull timestamp out of packet data */
8122 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8123 		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
8124 		__skb_pull(skb, IGB_TS_HDR_LEN);
8125 	}
8126 
8127 	/* update buffer offset */
8128 #if (PAGE_SIZE < 8192)
8129 	rx_buffer->page_offset ^= truesize;
8130 #else
8131 	rx_buffer->page_offset += truesize;
8132 #endif
8133 
8134 	return skb;
8135 }
8136 
8137 static inline void igb_rx_checksum(struct igb_ring *ring,
8138 				   union e1000_adv_rx_desc *rx_desc,
8139 				   struct sk_buff *skb)
8140 {
8141 	skb_checksum_none_assert(skb);
8142 
8143 	/* Ignore Checksum bit is set */
8144 	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8145 		return;
8146 
8147 	/* Rx checksum disabled via ethtool */
8148 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
8149 		return;
8150 
8151 	/* TCP/UDP checksum error bit is set */
8152 	if (igb_test_staterr(rx_desc,
8153 			     E1000_RXDEXT_STATERR_TCPE |
8154 			     E1000_RXDEXT_STATERR_IPE)) {
		/* work around an errata with SCTP packets where the TCPE
		 * (aka L4E) bit is set incorrectly on 64 byte (60 byte w/o
		 * CRC) packets; let the stack verify the crc32c instead
8158 		 */
8159 		if (!((skb->len == 60) &&
8160 		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8161 			u64_stats_update_begin(&ring->rx_syncp);
8162 			ring->rx_stats.csum_err++;
8163 			u64_stats_update_end(&ring->rx_syncp);
8164 		}
8165 		/* let the stack verify checksum errors */
8166 		return;
8167 	}
8168 	/* It must be a TCP or UDP packet with a valid checksum */
8169 	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8170 				      E1000_RXD_STAT_UDPCS))
8171 		skb->ip_summed = CHECKSUM_UNNECESSARY;
8172 
8173 	dev_dbg(ring->dev, "cksum success: bits %08X\n",
8174 		le32_to_cpu(rx_desc->wb.upper.status_error));
8175 }
8176 
8177 static inline void igb_rx_hash(struct igb_ring *ring,
8178 			       union e1000_adv_rx_desc *rx_desc,
8179 			       struct sk_buff *skb)
8180 {
8181 	if (ring->netdev->features & NETIF_F_RXHASH)
8182 		skb_set_hash(skb,
8183 			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8184 			     PKT_HASH_TYPE_L3);
8185 }
8186 
8187 /**
8188  *  igb_is_non_eop - process handling of non-EOP buffers
8189  *  @rx_ring: Rx ring being processed
8190  *  @rx_desc: Rx descriptor for current buffer
 *
 *  This function updates next to clean.  If the buffer is an EOP buffer
 *  this function exits returning false, otherwise it returns true
 *  indicating that the frame continues in the next buffer, which the
 *  caller keeps chaining into the in-progress sk_buff.
8197  **/
8198 static bool igb_is_non_eop(struct igb_ring *rx_ring,
8199 			   union e1000_adv_rx_desc *rx_desc)
8200 {
8201 	u32 ntc = rx_ring->next_to_clean + 1;
8202 
8203 	/* fetch, update, and store next to clean */
8204 	ntc = (ntc < rx_ring->count) ? ntc : 0;
8205 	rx_ring->next_to_clean = ntc;
8206 
8207 	prefetch(IGB_RX_DESC(rx_ring, ntc));
8208 
8209 	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8210 		return false;
8211 
8212 	return true;
8213 }
8214 
8215 /**
8216  *  igb_cleanup_headers - Correct corrupted or empty headers
8217  *  @rx_ring: rx descriptor ring packet is being transacted on
8218  *  @rx_desc: pointer to the EOP Rx descriptor
8219  *  @skb: pointer to current skb being fixed
8220  *
8221  *  Address the case where we are pulling data in on pages only
8222  *  and as such no data is present in the skb header.
8223  *
8224  *  In addition if skb is not at least 60 bytes we need to pad it so that
8225  *  it is large enough to qualify as a valid Ethernet frame.
8226  *
8227  *  Returns true if an error was encountered and skb was freed.
8228  **/
8229 static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8230 				union e1000_adv_rx_desc *rx_desc,
8231 				struct sk_buff *skb)
8232 {
8233 	if (unlikely((igb_test_staterr(rx_desc,
8234 				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8235 		struct net_device *netdev = rx_ring->netdev;
8236 		if (!(netdev->features & NETIF_F_RXALL)) {
8237 			dev_kfree_skb_any(skb);
8238 			return true;
8239 		}
8240 	}
8241 
8242 	/* if eth_skb_pad returns an error the skb was freed */
8243 	if (eth_skb_pad(skb))
8244 		return true;
8245 
8246 	return false;
8247 }
8248 
8249 /**
8250  *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
8251  *  @rx_ring: rx descriptor ring packet is being transacted on
8252  *  @rx_desc: pointer to the EOP Rx descriptor
8253  *  @skb: pointer to current skb being populated
8254  *
8255  *  This function checks the ring, descriptor, and packet information in
8256  *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
8257  *  other fields within the skb.
8258  **/
8259 static void igb_process_skb_fields(struct igb_ring *rx_ring,
8260 				   union e1000_adv_rx_desc *rx_desc,
8261 				   struct sk_buff *skb)
8262 {
8263 	struct net_device *dev = rx_ring->netdev;
8264 
8265 	igb_rx_hash(rx_ring, rx_desc, skb);
8266 
8267 	igb_rx_checksum(rx_ring, rx_desc, skb);
8268 
8269 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8270 	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8271 		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8272 
8273 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8274 	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8275 		u16 vid;
8276 
8277 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8278 		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8279 			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
8280 		else
8281 			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8282 
8283 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8284 	}
8285 
8286 	skb_record_rx_queue(skb, rx_ring->queue_index);
8287 
8288 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8289 }
8290 
8291 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8292 					       const unsigned int size)
8293 {
8294 	struct igb_rx_buffer *rx_buffer;
8295 
8296 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8297 	prefetchw(rx_buffer->page);
8298 
8299 	/* we are reusing so sync this buffer for CPU use */
8300 	dma_sync_single_range_for_cpu(rx_ring->dev,
8301 				      rx_buffer->dma,
8302 				      rx_buffer->page_offset,
8303 				      size,
8304 				      DMA_FROM_DEVICE);
8305 
8306 	rx_buffer->pagecnt_bias--;
8307 
8308 	return rx_buffer;
8309 }
8310 
8311 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8312 			      struct igb_rx_buffer *rx_buffer)
8313 {
8314 	if (igb_can_reuse_rx_page(rx_buffer)) {
8315 		/* hand second half of page back to the ring */
8316 		igb_reuse_rx_page(rx_ring, rx_buffer);
8317 	} else {
8318 		/* We are not reusing the buffer so unmap it and free
8319 		 * any references we are holding to it
8320 		 */
8321 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8322 				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8323 				     IGB_RX_DMA_ATTR);
8324 		__page_frag_cache_drain(rx_buffer->page,
8325 					rx_buffer->pagecnt_bias);
8326 	}
8327 
8328 	/* clear contents of rx_buffer */
8329 	rx_buffer->page = NULL;
8330 }
8331 
8332 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8333 {
8334 	struct igb_ring *rx_ring = q_vector->rx.ring;
8335 	struct sk_buff *skb = rx_ring->skb;
8336 	unsigned int total_bytes = 0, total_packets = 0;
8337 	u16 cleaned_count = igb_desc_unused(rx_ring);
8338 
8339 	while (likely(total_packets < budget)) {
8340 		union e1000_adv_rx_desc *rx_desc;
8341 		struct igb_rx_buffer *rx_buffer;
8342 		unsigned int size;
8343 
8344 		/* return some buffers to hardware, one at a time is too slow */
8345 		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8346 			igb_alloc_rx_buffers(rx_ring, cleaned_count);
8347 			cleaned_count = 0;
8348 		}
8349 
8350 		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8351 		size = le16_to_cpu(rx_desc->wb.upper.length);
8352 		if (!size)
8353 			break;
8354 
8355 		/* This memory barrier is needed to keep us from reading
8356 		 * any other fields out of the rx_desc until we know the
8357 		 * descriptor has been written back
8358 		 */
8359 		dma_rmb();
8360 
8361 		rx_buffer = igb_get_rx_buffer(rx_ring, size);
8362 
8363 		/* retrieve a buffer from the ring */
8364 		if (skb)
8365 			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8366 		else if (ring_uses_build_skb(rx_ring))
8367 			skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
8368 		else
8369 			skb = igb_construct_skb(rx_ring, rx_buffer,
8370 						rx_desc, size);
8371 
8372 		/* exit if we failed to retrieve a buffer */
8373 		if (!skb) {
8374 			rx_ring->rx_stats.alloc_failed++;
8375 			rx_buffer->pagecnt_bias++;
8376 			break;
8377 		}
8378 
8379 		igb_put_rx_buffer(rx_ring, rx_buffer);
8380 		cleaned_count++;
8381 
8382 		/* fetch next buffer in frame if non-eop */
8383 		if (igb_is_non_eop(rx_ring, rx_desc))
8384 			continue;
8385 
8386 		/* verify the packet layout is correct */
8387 		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8388 			skb = NULL;
8389 			continue;
8390 		}
8391 
8392 		/* probably a little skewed due to removing CRC */
8393 		total_bytes += skb->len;
8394 
8395 		/* populate checksum, timestamp, VLAN, and protocol */
8396 		igb_process_skb_fields(rx_ring, rx_desc, skb);
8397 
8398 		napi_gro_receive(&q_vector->napi, skb);
8399 
8400 		/* reset skb pointer */
8401 		skb = NULL;
8402 
8403 		/* update budget accounting */
8404 		total_packets++;
8405 	}
8406 
8407 	/* place incomplete frames back on ring for completion */
8408 	rx_ring->skb = skb;
8409 
8410 	u64_stats_update_begin(&rx_ring->rx_syncp);
8411 	rx_ring->rx_stats.packets += total_packets;
8412 	rx_ring->rx_stats.bytes += total_bytes;
8413 	u64_stats_update_end(&rx_ring->rx_syncp);
8414 	q_vector->rx.total_packets += total_packets;
8415 	q_vector->rx.total_bytes += total_bytes;
8416 
8417 	if (cleaned_count)
8418 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
8419 
8420 	return total_packets;
8421 }
8422 
8423 static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8424 {
8425 	return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8426 }
8427 
8428 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8429 				  struct igb_rx_buffer *bi)
8430 {
8431 	struct page *page = bi->page;
8432 	dma_addr_t dma;
8433 
8434 	/* since we are recycling buffers we should seldom need to alloc */
8435 	if (likely(page))
8436 		return true;
8437 
8438 	/* alloc new page for storage */
8439 	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8440 	if (unlikely(!page)) {
8441 		rx_ring->rx_stats.alloc_failed++;
8442 		return false;
8443 	}
8444 
8445 	/* map page for use */
8446 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8447 				 igb_rx_pg_size(rx_ring),
8448 				 DMA_FROM_DEVICE,
8449 				 IGB_RX_DMA_ATTR);
8450 
8451 	/* if mapping failed free memory back to system since
8452 	 * there isn't much point in holding memory we can't use
8453 	 */
8454 	if (dma_mapping_error(rx_ring->dev, dma)) {
8455 		__free_pages(page, igb_rx_pg_order(rx_ring));
8456 
8457 		rx_ring->rx_stats.alloc_failed++;
8458 		return false;
8459 	}
8460 
8461 	bi->dma = dma;
8462 	bi->page = page;
8463 	bi->page_offset = igb_rx_offset(rx_ring);
8464 	bi->pagecnt_bias = 1;
8465 
8466 	return true;
8467 }
8468 
8469 /**
 *  igb_alloc_rx_buffers - Replace used receive buffers
 *  @rx_ring: rx descriptor ring to allocate new receive buffers on
 *  @cleaned_count: count of buffers to allocate
8472  **/
8473 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8474 {
8475 	union e1000_adv_rx_desc *rx_desc;
8476 	struct igb_rx_buffer *bi;
8477 	u16 i = rx_ring->next_to_use;
8478 	u16 bufsz;
8479 
8480 	/* nothing to do */
8481 	if (!cleaned_count)
8482 		return;
8483 
8484 	rx_desc = IGB_RX_DESC(rx_ring, i);
8485 	bi = &rx_ring->rx_buffer_info[i];
8486 	i -= rx_ring->count;
8487 
8488 	bufsz = igb_rx_bufsz(rx_ring);
8489 
8490 	do {
8491 		if (!igb_alloc_mapped_page(rx_ring, bi))
8492 			break;
8493 
8494 		/* sync the buffer for use by the device */
8495 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8496 						 bi->page_offset, bufsz,
8497 						 DMA_FROM_DEVICE);
8498 
8499 		/* Refresh the desc even if buffer_addrs didn't change
8500 		 * because each write-back erases this info.
8501 		 */
8502 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8503 
8504 		rx_desc++;
8505 		bi++;
8506 		i++;
8507 		if (unlikely(!i)) {
8508 			rx_desc = IGB_RX_DESC(rx_ring, 0);
8509 			bi = rx_ring->rx_buffer_info;
8510 			i -= rx_ring->count;
8511 		}
8512 
8513 		/* clear the length for the next_to_use descriptor */
8514 		rx_desc->wb.upper.length = 0;
8515 
8516 		cleaned_count--;
8517 	} while (cleaned_count);
8518 
8519 	i += rx_ring->count;
8520 
8521 	if (rx_ring->next_to_use != i) {
8522 		/* record the next descriptor to use */
8523 		rx_ring->next_to_use = i;
8524 
8525 		/* update next to alloc since we have filled the ring */
8526 		rx_ring->next_to_alloc = i;
8527 
8528 		/* Force memory writes to complete before letting h/w
8529 		 * know there are new descriptors to fetch.  (Only
8530 		 * applicable for weak-ordered memory model archs,
8531 		 * such as IA-64).
8532 		 */
8533 		dma_wmb();
8534 		writel(i, rx_ring->tail);
8535 	}
8536 }
8537 
8538 /**
 * igb_mii_ioctl - handle MII related ioctls
 * @netdev: pointer to netdev struct
 * @ifr: interface request data
 * @cmd: ioctl command to execute
8543  **/
8544 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8545 {
8546 	struct igb_adapter *adapter = netdev_priv(netdev);
8547 	struct mii_ioctl_data *data = if_mii(ifr);
8548 
8549 	if (adapter->hw.phy.media_type != e1000_media_type_copper)
8550 		return -EOPNOTSUPP;
8551 
8552 	switch (cmd) {
8553 	case SIOCGMIIPHY:
8554 		data->phy_id = adapter->hw.phy.addr;
8555 		break;
8556 	case SIOCGMIIREG:
8557 		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8558 				     &data->val_out))
8559 			return -EIO;
8560 		break;
8561 	case SIOCSMIIREG:
8562 	default:
8563 		return -EOPNOTSUPP;
8564 	}
8565 	return 0;
8566 }
8567 
8568 /**
 * igb_ioctl - handle device specific ioctls
 * @netdev: pointer to netdev struct
 * @ifr: interface request data
 * @cmd: ioctl command to execute
8573  **/
8574 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8575 {
8576 	switch (cmd) {
8577 	case SIOCGMIIPHY:
8578 	case SIOCGMIIREG:
8579 	case SIOCSMIIREG:
8580 		return igb_mii_ioctl(netdev, ifr, cmd);
8581 	case SIOCGHWTSTAMP:
8582 		return igb_ptp_get_ts_config(netdev, ifr);
8583 	case SIOCSHWTSTAMP:
8584 		return igb_ptp_set_ts_config(netdev, ifr);
8585 	default:
8586 		return -EOPNOTSUPP;
8587 	}
8588 }
8589 
8590 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8591 {
8592 	struct igb_adapter *adapter = hw->back;
8593 
8594 	pci_read_config_word(adapter->pdev, reg, value);
8595 }
8596 
8597 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8598 {
8599 	struct igb_adapter *adapter = hw->back;
8600 
8601 	pci_write_config_word(adapter->pdev, reg, *value);
8602 }
8603 
8604 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8605 {
8606 	struct igb_adapter *adapter = hw->back;
8607 
8608 	if (pcie_capability_read_word(adapter->pdev, reg, value))
8609 		return -E1000_ERR_CONFIG;
8610 
8611 	return 0;
8612 }
8613 
8614 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8615 {
8616 	struct igb_adapter *adapter = hw->back;
8617 
8618 	if (pcie_capability_write_word(adapter->pdev, reg, *value))
8619 		return -E1000_ERR_CONFIG;
8620 
8621 	return 0;
8622 }
8623 
8624 static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
8625 {
8626 	struct igb_adapter *adapter = netdev_priv(netdev);
8627 	struct e1000_hw *hw = &adapter->hw;
8628 	u32 ctrl, rctl;
8629 	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
8630 
8631 	if (enable) {
8632 		/* enable VLAN tag insert/strip */
8633 		ctrl = rd32(E1000_CTRL);
8634 		ctrl |= E1000_CTRL_VME;
8635 		wr32(E1000_CTRL, ctrl);
8636 
8637 		/* Disable CFI check */
8638 		rctl = rd32(E1000_RCTL);
8639 		rctl &= ~E1000_RCTL_CFIEN;
8640 		wr32(E1000_RCTL, rctl);
8641 	} else {
8642 		/* disable VLAN tag insert/strip */
8643 		ctrl = rd32(E1000_CTRL);
8644 		ctrl &= ~E1000_CTRL_VME;
8645 		wr32(E1000_CTRL, ctrl);
8646 	}
8647 
8648 	igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
8649 }
8650 
8651 static int igb_vlan_rx_add_vid(struct net_device *netdev,
8652 			       __be16 proto, u16 vid)
8653 {
8654 	struct igb_adapter *adapter = netdev_priv(netdev);
8655 	struct e1000_hw *hw = &adapter->hw;
8656 	int pf_id = adapter->vfs_allocated_count;
8657 
8658 	/* add the filter since PF can receive vlans w/o entry in vlvf */
8659 	if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8660 		igb_vfta_set(hw, vid, pf_id, true, !!vid);
8661 
8662 	set_bit(vid, adapter->active_vlans);
8663 
8664 	return 0;
8665 }
8666 
8667 static int igb_vlan_rx_kill_vid(struct net_device *netdev,
8668 				__be16 proto, u16 vid)
8669 {
8670 	struct igb_adapter *adapter = netdev_priv(netdev);
8671 	int pf_id = adapter->vfs_allocated_count;
8672 	struct e1000_hw *hw = &adapter->hw;
8673 
8674 	/* remove VID from filter table */
8675 	if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8676 		igb_vfta_set(hw, vid, pf_id, false, true);
8677 
8678 	clear_bit(vid, adapter->active_vlans);
8679 
8680 	return 0;
8681 }
8682 
8683 static void igb_restore_vlan(struct igb_adapter *adapter)
8684 {
8685 	u16 vid = 1;
8686 
8687 	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
8688 	igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
8689 
8690 	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
8691 		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
8692 }
8693 
8694 int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
8695 {
8696 	struct pci_dev *pdev = adapter->pdev;
8697 	struct e1000_mac_info *mac = &adapter->hw.mac;
8698 
8699 	mac->autoneg = 0;
8700 
8701 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
8702 	 * for the switch() below to work
8703 	 */
8704 	if ((spd & 1) || (dplx & ~1))
8705 		goto err_inval;
8706 
	/* Fiber NICs only allow 1000 Mbps full duplex
	 * and 100 Mbps full duplex for 100BaseFX SFPs
8709 	 */
8710 	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
8711 		switch (spd + dplx) {
8712 		case SPEED_10 + DUPLEX_HALF:
8713 		case SPEED_10 + DUPLEX_FULL:
8714 		case SPEED_100 + DUPLEX_HALF:
8715 			goto err_inval;
8716 		default:
8717 			break;
8718 		}
8719 	}
8720 
8721 	switch (spd + dplx) {
8722 	case SPEED_10 + DUPLEX_HALF:
8723 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
8724 		break;
8725 	case SPEED_10 + DUPLEX_FULL:
8726 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
8727 		break;
8728 	case SPEED_100 + DUPLEX_HALF:
8729 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
8730 		break;
8731 	case SPEED_100 + DUPLEX_FULL:
8732 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
8733 		break;
8734 	case SPEED_1000 + DUPLEX_FULL:
8735 		mac->autoneg = 1;
8736 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
8737 		break;
8738 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
8739 	default:
8740 		goto err_inval;
8741 	}
8742 
8743 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
8744 	adapter->hw.phy.mdix = AUTO_ALL_MODES;
8745 
8746 	return 0;
8747 
8748 err_inval:
8749 	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
8750 	return -EINVAL;
8751 }
8752 
8753 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8754 			  bool runtime)
8755 {
8756 	struct net_device *netdev = pci_get_drvdata(pdev);
8757 	struct igb_adapter *adapter = netdev_priv(netdev);
8758 	struct e1000_hw *hw = &adapter->hw;
8759 	u32 ctrl, rctl, status;
8760 	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8761 	bool wake;
8762 
8763 	rtnl_lock();
8764 	netif_device_detach(netdev);
8765 
8766 	if (netif_running(netdev))
8767 		__igb_close(netdev, true);
8768 
8769 	igb_ptp_suspend(adapter);
8770 
8771 	igb_clear_interrupt_scheme(adapter);
8772 	rtnl_unlock();
8773 
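	/* Waking on link-status change only makes sense if the link is not
	 * already up; otherwise the link dropping during suspend itself would
	 * wake us right back up, so clear the link-change filter in that case.
	 */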
8774 	status = rd32(E1000_STATUS);
8775 	if (status & E1000_STATUS_LU)
8776 		wufc &= ~E1000_WUFC_LNKC;
8777 
8778 	if (wufc) {
8779 		igb_setup_rctl(adapter);
8780 		igb_set_rx_mode(netdev);
8781 
8782 		/* turn on all-multi mode if wake on multicast is enabled */
8783 		if (wufc & E1000_WUFC_MC) {
8784 			rctl = rd32(E1000_RCTL);
8785 			rctl |= E1000_RCTL_MPE;
8786 			wr32(E1000_RCTL, rctl);
8787 		}
8788 
8789 		ctrl = rd32(E1000_CTRL);
8790 		ctrl |= E1000_CTRL_ADVD3WUC;
8791 		wr32(E1000_CTRL, ctrl);
8792 
8793 		/* Allow time for pending master requests to run */
8794 		igb_disable_pcie_master(hw);
8795 
8796 		wr32(E1000_WUC, E1000_WUC_PME_EN);
8797 		wr32(E1000_WUFC, wufc);
8798 	} else {
8799 		wr32(E1000_WUC, 0);
8800 		wr32(E1000_WUFC, 0);
8801 	}
8802 
8803 	wake = wufc || adapter->en_mng_pt;
8804 	if (!wake)
8805 		igb_power_down_link(adapter);
8806 	else
8807 		igb_power_up_link(adapter);
8808 
8809 	if (enable_wake)
8810 		*enable_wake = wake;
8811 
8812 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
8813 	 * would have already happened in close and is redundant.
8814 	 */
8815 	igb_release_hw_control(adapter);
8816 
8817 	pci_disable_device(pdev);
8818 
8819 	return 0;
8820 }
8821 
8822 static void igb_deliver_wake_packet(struct net_device *netdev)
8823 {
8824 	struct igb_adapter *adapter = netdev_priv(netdev);
8825 	struct e1000_hw *hw = &adapter->hw;
8826 	struct sk_buff *skb;
8827 	u32 wupl;
8828 
8829 	wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
8830 
8831 	/* WUPM stores only the first 128 bytes of the wake packet.
8832 	 * Read the packet only if we have the whole thing.
8833 	 */
8834 	if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
8835 		return;
8836 
8837 	skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
8838 	if (!skb)
8839 		return;
8840 
8841 	skb_put(skb, wupl);
8842 
8843 	/* Ensure reads are 32-bit aligned */
8844 	wupl = roundup(wupl, 4);
8845 
8846 	memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
8847 
8848 	skb->protocol = eth_type_trans(skb, netdev);
8849 	netif_rx(skb);
8850 }
8851 
8852 static int __maybe_unused igb_suspend(struct device *dev)
8853 {
	return __igb_shutdown(to_pci_dev(dev), NULL, false);
8855 }
8856 
8857 static int __maybe_unused igb_resume(struct device *dev)
8858 {
8859 	struct pci_dev *pdev = to_pci_dev(dev);
8860 	struct net_device *netdev = pci_get_drvdata(pdev);
8861 	struct igb_adapter *adapter = netdev_priv(netdev);
8862 	struct e1000_hw *hw = &adapter->hw;
	int err;
	u32 val;
8864 
8865 	pci_set_power_state(pdev, PCI_D0);
8866 	pci_restore_state(pdev);
8867 	pci_save_state(pdev);
8868 
8869 	if (!pci_device_is_present(pdev))
8870 		return -ENODEV;
8871 	err = pci_enable_device_mem(pdev);
8872 	if (err) {
8873 		dev_err(&pdev->dev,
8874 			"igb: Cannot enable PCI device from suspend\n");
8875 		return err;
8876 	}
8877 	pci_set_master(pdev);
8878 
8879 	pci_enable_wake(pdev, PCI_D3hot, 0);
8880 	pci_enable_wake(pdev, PCI_D3cold, 0);
8881 
8882 	if (igb_init_interrupt_scheme(adapter, true)) {
8883 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8884 		return -ENOMEM;
8885 	}
8886 
8887 	igb_reset(adapter);
8888 
8889 	/* let the f/w know that the h/w is now under the control of the
8890 	 * driver.
8891 	 */
8892 	igb_get_hw_control(adapter);
8893 
8894 	val = rd32(E1000_WUS);
8895 	if (val & WAKE_PKT_WUS)
8896 		igb_deliver_wake_packet(netdev);
8897 
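	/* WUS bits are write-1-to-clear; clear any pending wake status */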
8898 	wr32(E1000_WUS, ~0);
8899 
8900 	rtnl_lock();
8901 	if (!err && netif_running(netdev))
8902 		err = __igb_open(netdev, true);
8903 
8904 	if (!err)
8905 		netif_device_attach(netdev);
8906 	rtnl_unlock();
8907 
8908 	return err;
8909 }
8910 
8911 static int __maybe_unused igb_runtime_idle(struct device *dev)
8912 {
8913 	struct net_device *netdev = dev_get_drvdata(dev);
8914 	struct igb_adapter *adapter = netdev_priv(netdev);
8915 
8916 	if (!igb_has_link(adapter))
8917 		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
8918 
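	/* Report busy so the PM core never suspends directly from the idle
	 * callback; the delayed suspend scheduled above (when the link is
	 * down) lands in igb_runtime_suspend() instead.
	 */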
8919 	return -EBUSY;
8920 }
8921 
8922 static int __maybe_unused igb_runtime_suspend(struct device *dev)
8923 {
	return __igb_shutdown(to_pci_dev(dev), NULL, true);
8925 }
8926 
8927 static int __maybe_unused igb_runtime_resume(struct device *dev)
8928 {
8929 	return igb_resume(dev);
8930 }
8931 
8932 static void igb_shutdown(struct pci_dev *pdev)
8933 {
8934 	bool wake;
8935 
	__igb_shutdown(pdev, &wake, false);
8937 
8938 	if (system_state == SYSTEM_POWER_OFF) {
8939 		pci_wake_from_d3(pdev, wake);
8940 		pci_set_power_state(pdev, PCI_D3hot);
8941 	}
8942 }
8943 
8944 #ifdef CONFIG_PCI_IOV
8945 static int igb_sriov_reinit(struct pci_dev *dev)
8946 {
8947 	struct net_device *netdev = pci_get_drvdata(dev);
8948 	struct igb_adapter *adapter = netdev_priv(netdev);
8949 	struct pci_dev *pdev = adapter->pdev;
8950 
8951 	rtnl_lock();
8952 
8953 	if (netif_running(netdev))
8954 		igb_close(netdev);
8955 	else
8956 		igb_reset(adapter);
8957 
8958 	igb_clear_interrupt_scheme(adapter);
8959 
8960 	igb_init_queue_configuration(adapter);
8961 
8962 	if (igb_init_interrupt_scheme(adapter, true)) {
8963 		rtnl_unlock();
8964 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8965 		return -ENOMEM;
8966 	}
8967 
8968 	if (netif_running(netdev))
8969 		igb_open(netdev);
8970 
8971 	rtnl_unlock();
8972 
8973 	return 0;
8974 }
8975 
8976 static int igb_pci_disable_sriov(struct pci_dev *dev)
8977 {
8978 	int err = igb_disable_sriov(dev);
8979 
8980 	if (!err)
8981 		err = igb_sriov_reinit(dev);
8982 
8983 	return err;
8984 }
8985 
8986 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
8987 {
8988 	int err = igb_enable_sriov(dev, num_vfs);
8989 
8990 	if (err)
8991 		goto out;
8992 
8993 	err = igb_sriov_reinit(dev);
8994 	if (!err)
8995 		return num_vfs;
8996 
8997 out:
8998 	return err;
8999 }
9000 
9001 #endif
9002 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
9003 {
9004 #ifdef CONFIG_PCI_IOV
9005 	if (num_vfs == 0)
9006 		return igb_pci_disable_sriov(dev);
9007 	else
9008 		return igb_pci_enable_sriov(dev, num_vfs);
9009 #endif
9010 	return 0;
9011 }
9012 
9013 /**
9014  *  igb_io_error_detected - called when PCI error is detected
9015  *  @pdev: Pointer to PCI device
9016  *  @state: The current pci connection state
9017  *
9018  *  This function is called after a PCI bus error affecting
9019  *  this device has been detected.
9020  **/
9021 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
9022 					      pci_channel_state_t state)
9023 {
9024 	struct net_device *netdev = pci_get_drvdata(pdev);
9025 	struct igb_adapter *adapter = netdev_priv(netdev);
9026 
9027 	netif_device_detach(netdev);
9028 
9029 	if (state == pci_channel_io_perm_failure)
9030 		return PCI_ERS_RESULT_DISCONNECT;
9031 
9032 	if (netif_running(netdev))
9033 		igb_down(adapter);
9034 	pci_disable_device(pdev);
9035 
	/* Request a slot reset. */
9037 	return PCI_ERS_RESULT_NEED_RESET;
9038 }
9039 
9040 /**
9041  *  igb_io_slot_reset - called after the pci bus has been reset.
9042  *  @pdev: Pointer to PCI device
9043  *
 *  Restart the card from scratch, as if from a cold boot. Implementation
 *  resembles the first half of the igb_resume routine.
9046  **/
9047 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
9048 {
9049 	struct net_device *netdev = pci_get_drvdata(pdev);
9050 	struct igb_adapter *adapter = netdev_priv(netdev);
9051 	struct e1000_hw *hw = &adapter->hw;
9052 	pci_ers_result_t result;
9053 
9054 	if (pci_enable_device_mem(pdev)) {
9055 		dev_err(&pdev->dev,
9056 			"Cannot re-enable PCI device after reset.\n");
9057 		result = PCI_ERS_RESULT_DISCONNECT;
9058 	} else {
9059 		pci_set_master(pdev);
9060 		pci_restore_state(pdev);
9061 		pci_save_state(pdev);
9062 
9063 		pci_enable_wake(pdev, PCI_D3hot, 0);
9064 		pci_enable_wake(pdev, PCI_D3cold, 0);
9065 
		/* In case of PCI error, the adapter loses its HW address
		 * so we should re-assign it here.
		 */
9069 		hw->hw_addr = adapter->io_addr;
9070 
9071 		igb_reset(adapter);
9072 		wr32(E1000_WUS, ~0);
9073 		result = PCI_ERS_RESULT_RECOVERED;
9074 	}
9075 
9076 	return result;
9077 }
9078 
9079 /**
9080  *  igb_io_resume - called when traffic can start flowing again.
9081  *  @pdev: Pointer to PCI device
9082  *
 *  This callback is called when the error recovery driver tells us that
 *  it's OK to resume normal operation. Implementation resembles the
 *  second half of the igb_resume routine.
9086  */
9087 static void igb_io_resume(struct pci_dev *pdev)
9088 {
9089 	struct net_device *netdev = pci_get_drvdata(pdev);
9090 	struct igb_adapter *adapter = netdev_priv(netdev);
9091 
9092 	if (netif_running(netdev)) {
9093 		if (igb_up(adapter)) {
9094 			dev_err(&pdev->dev, "igb_up failed after reset\n");
9095 			return;
9096 		}
9097 	}
9098 
9099 	netif_device_attach(netdev);
9100 
9101 	/* let the f/w know that the h/w is now under the control of the
9102 	 * driver.
9103 	 */
9104 	igb_get_hw_control(adapter);
9105 }
9106 
9107 /**
9108  *  igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
9109  *  @adapter: Pointer to adapter structure
 *  @index: Index of the RAR entry which needs to be synced with MAC table
9111  **/
9112 static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9113 {
9114 	struct e1000_hw *hw = &adapter->hw;
9115 	u32 rar_low, rar_high;
9116 	u8 *addr = adapter->mac_table[index].addr;
9117 
	/* HW expects these to be in network order when they are plugged
	 * into the registers, which are little endian.  To guarantee that
	 * ordering we do a leXX_to_cpup here so the value is ready for the
	 * byte swap that occurs with writel.
	 */
9123 	rar_low = le32_to_cpup((__le32 *)(addr));
9124 	rar_high = le16_to_cpup((__le16 *)(addr + 4));
9125 
9126 	/* Indicate to hardware the Address is Valid. */
9127 	if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9128 		if (is_valid_ether_addr(addr))
9129 			rar_high |= E1000_RAH_AV;
9130 
9131 		if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9132 			rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9133 
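		/* 82575 and i210 encode the target queue/pool index directly
		 * in the RAH pool field; other MAC types use a one-hot pool
		 * bitmask instead.
		 */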
9134 		switch (hw->mac.type) {
9135 		case e1000_82575:
9136 		case e1000_i210:
9137 			if (adapter->mac_table[index].state &
9138 			    IGB_MAC_STATE_QUEUE_STEERING)
9139 				rar_high |= E1000_RAH_QSEL_ENABLE;
9140 
9141 			rar_high |= E1000_RAH_POOL_1 *
9142 				    adapter->mac_table[index].queue;
9143 			break;
9144 		default:
9145 			rar_high |= E1000_RAH_POOL_1 <<
9146 				    adapter->mac_table[index].queue;
9147 			break;
9148 		}
9149 	}
9150 
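	/* Write RAL before RAH and flush after each write so the address
	 * and its valid bit reach the hardware in order.
	 */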
9151 	wr32(E1000_RAL(index), rar_low);
9152 	wrfl();
9153 	wr32(E1000_RAH(index), rar_high);
9154 	wrfl();
9155 }
9156 
9157 static int igb_set_vf_mac(struct igb_adapter *adapter,
9158 			  int vf, unsigned char *mac_addr)
9159 {
9160 	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible
	 */
9164 	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9165 	unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9166 
9167 	ether_addr_copy(vf_mac_addr, mac_addr);
9168 	ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9169 	adapter->mac_table[rar_entry].queue = vf;
9170 	adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9171 	igb_rar_set_index(adapter, rar_entry);
9172 
9173 	return 0;
9174 }
9175 
9176 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9177 {
9178 	struct igb_adapter *adapter = netdev_priv(netdev);
9179 
9180 	if (vf >= adapter->vfs_allocated_count)
9181 		return -EINVAL;
9182 
9183 	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
9184 	 * flag and allows to overwrite the MAC via VF netdev.  This
9185 	 * is necessary to allow libvirt a way to restore the original
9186 	 * MAC after unbinding vfio-pci and reloading igbvf after shutting
9187 	 * down a VM.
9188 	 */
9189 	if (is_zero_ether_addr(mac)) {
9190 		adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9191 		dev_info(&adapter->pdev->dev,
9192 			 "remove administratively set MAC on VF %d\n",
9193 			 vf);
9194 	} else if (is_valid_ether_addr(mac)) {
9195 		adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9196 		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9197 			 mac, vf);
9198 		dev_info(&adapter->pdev->dev,
			 "Reload the VF driver to make this change effective.\n");
9200 		/* Generate additional warning if PF is down */
9201 		if (test_bit(__IGB_DOWN, &adapter->state)) {
9202 			dev_warn(&adapter->pdev->dev,
9203 				 "The VF MAC address has been set, but the PF device is not up.\n");
9204 			dev_warn(&adapter->pdev->dev,
9205 				 "Bring the PF device up before attempting to use the VF device.\n");
9206 		}
9207 	} else {
9208 		return -EINVAL;
9209 	}
9210 	return igb_set_vf_mac(adapter, vf, mac);
9211 }
9212 
9213 static int igb_link_mbps(int internal_link_speed)
9214 {
9215 	switch (internal_link_speed) {
9216 	case SPEED_100:
9217 		return 100;
9218 	case SPEED_1000:
9219 		return 1000;
9220 	default:
9221 		return 0;
9222 	}
9223 }
9224 
9225 static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9226 				  int link_speed)
9227 {
9228 	int rf_dec, rf_int;
9229 	u32 bcnrc_val;
9230 
9231 	if (tx_rate != 0) {
9232 		/* Calculate the rate factor values to set */
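		/* rf_int is the integer part of link_speed / tx_rate and
		 * rf_dec is the remainder scaled into the RF_DEC field, e.g.
		 * a 1000 Mbps link limited to 300 Mbps gives rf_int = 3 and
		 * rf_dec = (100 * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / 300.
		 */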
9233 		rf_int = link_speed / tx_rate;
9234 		rf_dec = (link_speed - (rf_int * tx_rate));
9235 		rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9236 			 tx_rate;
9237 
9238 		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9239 		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9240 			      E1000_RTTBCNRC_RF_INT_MASK);
9241 		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9242 	} else {
9243 		bcnrc_val = 0;
9244 	}
9245 
9246 	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
9247 	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
9248 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
9249 	 */
9250 	wr32(E1000_RTTBCNRM, 0x14);
9251 	wr32(E1000_RTTBCNRC, bcnrc_val);
9252 }
9253 
9254 static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9255 {
9256 	int actual_link_speed, i;
9257 	bool reset_rate = false;
9258 
9259 	/* VF TX rate limit was not set or not supported */
9260 	if ((adapter->vf_rate_link_speed == 0) ||
9261 	    (adapter->hw.mac.type != e1000_82576))
9262 		return;
9263 
9264 	actual_link_speed = igb_link_mbps(adapter->link_speed);
9265 	if (actual_link_speed != adapter->vf_rate_link_speed) {
9266 		reset_rate = true;
9267 		adapter->vf_rate_link_speed = 0;
9268 		dev_info(&adapter->pdev->dev,
9269 			 "Link speed has been changed. VF Transmit rate is disabled\n");
9270 	}
9271 
9272 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
9273 		if (reset_rate)
9274 			adapter->vf_data[i].tx_rate = 0;
9275 
9276 		igb_set_vf_rate_limit(&adapter->hw, i,
9277 				      adapter->vf_data[i].tx_rate,
9278 				      actual_link_speed);
9279 	}
9280 }
9281 
9282 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9283 			     int min_tx_rate, int max_tx_rate)
9284 {
9285 	struct igb_adapter *adapter = netdev_priv(netdev);
9286 	struct e1000_hw *hw = &adapter->hw;
9287 	int actual_link_speed;
9288 
9289 	if (hw->mac.type != e1000_82576)
9290 		return -EOPNOTSUPP;
9291 
9292 	if (min_tx_rate)
9293 		return -EINVAL;
9294 
9295 	actual_link_speed = igb_link_mbps(adapter->link_speed);
9296 	if ((vf >= adapter->vfs_allocated_count) ||
9297 	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9298 	    (max_tx_rate < 0) ||
9299 	    (max_tx_rate > actual_link_speed))
9300 		return -EINVAL;
9301 
9302 	adapter->vf_rate_link_speed = actual_link_speed;
9303 	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9304 	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9305 
9306 	return 0;
9307 }
9308 
9309 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9310 				   bool setting)
9311 {
9312 	struct igb_adapter *adapter = netdev_priv(netdev);
9313 	struct e1000_hw *hw = &adapter->hw;
9314 	u32 reg_val, reg_offset;
9315 
9316 	if (!adapter->vfs_allocated_count)
9317 		return -EOPNOTSUPP;
9318 
9319 	if (vf >= adapter->vfs_allocated_count)
9320 		return -EINVAL;
9321 
9322 	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9323 	reg_val = rd32(reg_offset);
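	/* BIT(vf) controls MAC anti-spoofing for this VF; the bit shifted
	 * by E1000_DTXSWC_VLAN_SPOOF_SHIFT controls VLAN anti-spoofing.
	 */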
9324 	if (setting)
9325 		reg_val |= (BIT(vf) |
9326 			    BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9327 	else
9328 		reg_val &= ~(BIT(vf) |
9329 			     BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9330 	wr32(reg_offset, reg_val);
9331 
9332 	adapter->vf_data[vf].spoofchk_enabled = setting;
9333 	return 0;
9334 }
9335 
9336 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9337 {
9338 	struct igb_adapter *adapter = netdev_priv(netdev);
9339 
9340 	if (vf >= adapter->vfs_allocated_count)
9341 		return -EINVAL;
9342 	if (adapter->vf_data[vf].trusted == setting)
9343 		return 0;
9344 
9345 	adapter->vf_data[vf].trusted = setting;
9346 
9347 	dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9348 		 vf, setting ? "" : "not ");
9349 	return 0;
9350 }
9351 
9352 static int igb_ndo_get_vf_config(struct net_device *netdev,
9353 				 int vf, struct ifla_vf_info *ivi)
9354 {
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
9357 		return -EINVAL;
9358 	ivi->vf = vf;
9359 	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9360 	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9361 	ivi->min_tx_rate = 0;
9362 	ivi->vlan = adapter->vf_data[vf].pf_vlan;
9363 	ivi->qos = adapter->vf_data[vf].pf_qos;
9364 	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9365 	ivi->trusted = adapter->vf_data[vf].trusted;
9366 	return 0;
9367 }
9368 
9369 static void igb_vmm_control(struct igb_adapter *adapter)
9370 {
9371 	struct e1000_hw *hw = &adapter->hw;
9372 	u32 reg;
9373 
9374 	switch (hw->mac.type) {
9375 	case e1000_82575:
9376 	case e1000_i210:
9377 	case e1000_i211:
9378 	case e1000_i354:
9379 	default:
		/* replication is not supported on these MAC types */
9381 		return;
9382 	case e1000_82576:
9383 		/* notify HW that the MAC is adding vlan tags */
9384 		reg = rd32(E1000_DTXCTL);
9385 		reg |= E1000_DTXCTL_VLAN_ADDED;
9386 		wr32(E1000_DTXCTL, reg);
9387 		/* Fall through */
9388 	case e1000_82580:
9389 		/* enable replication vlan tag stripping */
9390 		reg = rd32(E1000_RPLOLR);
9391 		reg |= E1000_RPLOLR_STRVLAN;
9392 		wr32(E1000_RPLOLR, reg);
9393 		/* Fall through */
9394 	case e1000_i350:
9395 		/* none of the above registers are supported by i350 */
9396 		break;
9397 	}
9398 
9399 	if (adapter->vfs_allocated_count) {
9400 		igb_vmdq_set_loopback_pf(hw, true);
9401 		igb_vmdq_set_replication_pf(hw, true);
9402 		igb_vmdq_set_anti_spoofing_pf(hw, true,
9403 					      adapter->vfs_allocated_count);
9404 	} else {
9405 		igb_vmdq_set_loopback_pf(hw, false);
9406 		igb_vmdq_set_replication_pf(hw, false);
9407 	}
9408 }
9409 
9410 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9411 {
9412 	struct e1000_hw *hw = &adapter->hw;
9413 	u32 dmac_thr;
9414 	u16 hwm;
9415 
9416 	if (hw->mac.type > e1000_82580) {
9417 		if (adapter->flags & IGB_FLAG_DMAC) {
9418 			u32 reg;
9419 
9420 			/* force threshold to 0. */
9421 			wr32(E1000_DMCTXTH, 0);
9422 
9423 			/* DMA Coalescing high water mark needs to be greater
9424 			 * than the Rx threshold. Set hwm to PBA - max frame
9425 			 * size in 16B units, capping it at PBA - 6KB.
9426 			 */
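			/* pba is in KB; (pba - 6) KB expressed in 16-byte
			 * units is (pba - 6) * 1024 / 16 = 64 * (pba - 6)
			 */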
9427 			hwm = 64 * (pba - 6);
9428 			reg = rd32(E1000_FCRTC);
9429 			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9430 			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9431 				& E1000_FCRTC_RTH_COAL_MASK);
9432 			wr32(E1000_FCRTC, reg);
9433 
9434 			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
9435 			 * frame size, capping it at PBA - 10KB.
9436 			 */
9437 			dmac_thr = pba - 10;
9438 			reg = rd32(E1000_DMACR);
9439 			reg &= ~E1000_DMACR_DMACTHR_MASK;
9440 			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9441 				& E1000_DMACR_DMACTHR_MASK);
9442 
			/* transition to L0s or L1 if available */
9444 			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9445 
			/* watchdog timer of ~1000 usec, in 32 usec intervals */
9447 			reg |= (1000 >> 5);
9448 
9449 			/* Disable BMC-to-OS Watchdog Enable */
9450 			if (hw->mac.type != e1000_i354)
9451 				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9452 
9453 			wr32(E1000_DMACR, reg);
9454 
			/* no lower threshold to disable
			 * coalescing (smart FIFO) - UTRESH=0
			 */
9458 			wr32(E1000_DMCRTRH, 0);
9459 
9460 			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9461 
9462 			wr32(E1000_DMCTLX, reg);
9463 
9464 			/* free space in tx packet buffer to wake from
9465 			 * DMA coal
9466 			 */
9467 			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9468 			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9469 
9470 			/* make low power state decision controlled
9471 			 * by DMA coal
9472 			 */
9473 			reg = rd32(E1000_PCIEMISC);
9474 			reg &= ~E1000_PCIEMISC_LX_DECISION;
9475 			wr32(E1000_PCIEMISC, reg);
		} /* endif IGB_FLAG_DMAC set */
9477 	} else if (hw->mac.type == e1000_82580) {
9478 		u32 reg = rd32(E1000_PCIEMISC);
9479 
9480 		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9481 		wr32(E1000_DMACR, 0);
9482 	}
9483 }
9484 
9485 /**
 *  igb_read_i2c_byte - Reads a byte over I2C
9487  *  @hw: pointer to hardware structure
9488  *  @byte_offset: byte offset to read
9489  *  @dev_addr: device address
9490  *  @data: value read
9491  *
9492  *  Performs byte read operation over I2C interface at
9493  *  a specified device address.
9494  **/
9495 s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9496 		      u8 dev_addr, u8 *data)
9497 {
9498 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9499 	struct i2c_client *this_client = adapter->i2c_client;
9500 	s32 status;
	u16 swfw_mask = E1000_SWFW_PHY0_SM;

	if (!this_client)
		return E1000_ERR_I2C;
9507 
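	/* The I2C bus is shared with firmware; take the SW/FW semaphore
	 * around the SMBus transaction.
	 */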
9508 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9509 		return E1000_ERR_SWFW_SYNC;
9510 
9511 	status = i2c_smbus_read_byte_data(this_client, byte_offset);
9512 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9513 
	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return 0;
9520 }
9521 
9522 /**
 *  igb_write_i2c_byte - Writes a byte over I2C
9524  *  @hw: pointer to hardware structure
9525  *  @byte_offset: byte offset to write
9526  *  @dev_addr: device address
9527  *  @data: value to write
9528  *
9529  *  Performs byte write operation over I2C interface at
9530  *  a specified device address.
9531  **/
9532 s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9533 		       u8 dev_addr, u8 data)
9534 {
9535 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9536 	struct i2c_client *this_client = adapter->i2c_client;
9537 	s32 status;
9538 	u16 swfw_mask = E1000_SWFW_PHY0_SM;
9539 
9540 	if (!this_client)
9541 		return E1000_ERR_I2C;
9542 
9543 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9544 		return E1000_ERR_SWFW_SYNC;
9545 	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9546 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9547 
	if (status)
		return E1000_ERR_I2C;

	return 0;
}
9554 
9555 int igb_reinit_queues(struct igb_adapter *adapter)
9556 {
9557 	struct net_device *netdev = adapter->netdev;
9558 	struct pci_dev *pdev = adapter->pdev;
9559 	int err = 0;
9560 
9561 	if (netif_running(netdev))
9562 		igb_close(netdev);
9563 
9564 	igb_reset_interrupt_capability(adapter);
9565 
9566 	if (igb_init_interrupt_scheme(adapter, true)) {
9567 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9568 		return -ENOMEM;
9569 	}
9570 
9571 	if (netif_running(netdev))
9572 		err = igb_open(netdev);
9573 
9574 	return err;
9575 }
9576 
9577 static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9578 {
9579 	struct igb_nfc_filter *rule;
9580 
9581 	spin_lock(&adapter->nfc_lock);
9582 
9583 	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9584 		igb_erase_filter(adapter, rule);
9585 
9586 	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
9587 		igb_erase_filter(adapter, rule);
9588 
9589 	spin_unlock(&adapter->nfc_lock);
9590 }
9591 
9592 static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9593 {
9594 	struct igb_nfc_filter *rule;
9595 
9596 	spin_lock(&adapter->nfc_lock);
9597 
9598 	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9599 		igb_add_filter(adapter, rule);
9600 
9601 	spin_unlock(&adapter->nfc_lock);
9602 }
9603 /* igb_main.c */
9604