// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 5
#define MIN 4
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"

enum queue_mode {
	QUEUE_MODE_STRICT_PRIORITY,
	QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
	TX_QUEUE_PRIO_HIGH,
	TX_QUEUE_PRIO_LOW,
};

char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
				"Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
			  netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_flush_mac_table(struct igb_adapter *);
static int igb_available_rars(struct igb_adapter *, u8);
static void igb_set_default_mac_filter(struct igb_adapter *);
static int igb_uc_sync(struct net_device *, const unsigned char *);
static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
				bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
static void igb_nfc_filter_exit(struct igb_adapter *adapter);
static void igb_nfc_filter_restore(struct igb_adapter *adapter);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
static int igb_disable_sriov(struct pci_dev *dev);
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif

static int igb_suspend(struct device *);
static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
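/* A negative value (the -1 default) makes netif_msg_init() in igb_probe()
 * fall back to DEFAULT_MSG_ENABLE above.
 */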

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start\n");
		pr_info("%-15s %016lX %016lX\n", netdev->name,
			netdev->state, dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					dma_unmap_len(buffer_info, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
					  DUMP_PREFIX_ADDRESS,
					  16, 1,
					  page_address(buffer_info->page) +
						      buffer_info->page_offset,
					  igb_rx_bufsz(rx_ring), true);
				}
			}
		}
	}

exit:
	return;
}

/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: pointer to hardware structure
 *
 *  Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}

/**
 *  igb_set_i2c_data - Sets the I2C data bit
 *  @data: pointer to hardware structure
 *  @state: I2C data value (0 or 1) to set
 *
 *  Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 *  igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: pointer to hardware structure
 *  @state: clock state (0 or 1) to set
 *
 *  Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 *  igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: pointer to hardware structure
 *
 *  Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
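/* Bit-banged I2C (i2c-algo-bit) over the I2CPARAMS bits above: .udelay is
 * the SCL half-period in microseconds, so 5 us gives a clock of roughly
 * 100 kHz, and .timeout is expressed in jiffies.
 */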

/**
 *  igb_get_hw_dev - return device
 *  @hw: pointer to hardware structure
 *
 *  used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is
 *  loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n",
	       igb_driver_string, igb_driver_version);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed
 *  from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
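/* Q_IDX_82576() interleaves queue indices so each VF owns a low/high queue
 * pair: i = 0, 1, 2, 3, ... maps to 0, 8, 1, 9, ...
 */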
/**
 *  igb_cache_ring_register - Descriptor ring to register mapping
 *  @adapter: board private structure to initialize
 *
 *  Once we know the feature-set enabled for the device, we'll cache
 *  the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		/* Fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Fall through */
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's; 0xFFFFFFFF (cross-checked against
	 * register 0 to rule out a legitimately all-ones register) means the
	 * adapter has been surprise-removed from the bus
	 */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;
		hw->hw_addr = NULL;
		netdev_err(netdev, "PCIe link lost\n");
	}

	return value;
}

/**
 *  igb_write_ivar - configure ivar for given MSI-X vector
 *  @hw: pointer to the HW structure
 *  @msix_vector: vector number we are allocating to a given ring
 *  @index: row index of IVAR register to write within IVAR table
 *  @offset: column offset within IVAR, should be a multiple of 8
 *
 *  This function is intended to handle the writing of the IVAR register
 *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 *  each containing a cause allocation for an Rx and Tx ring, and a
 *  variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
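		/* e.g. rx_queue = 10: row = 10 & 0x7 = 2 and offset =
		 * (10 & 0x8) << 1 = 16, i.e. byte 2 of IVAR[2]; the Tx
		 * entry for the same queue lands 8 bits higher, at 24.
		 */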
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
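		/* e.g. rx_queue = 5: index = 5 >> 1 = 2 and offset =
		 * (5 & 0x1) << 4 = 16; the matching Tx entry for
		 * tx_queue = 5 goes in at offset 16 + 8 = 24.
		 */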
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 *  igb_configure_msix - Configure MSI-X hardware
 *  @adapter: board private structure to initialize
 *
 *  igb_configure_msix sets up the hardware to properly
 *  generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 *  igb_request_msix - Initialize MSI-X interrupts
 *  @adapter: board private structure to initialize
 *
 *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
 *  kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

/**
 *  igb_free_q_vector - Free memory allocated for specific interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be freed
 *
 *  This function frees the memory allocated to the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 *  igb_reset_q_vector - Reset config for interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be reset
 *
 *  If NAPI is enabled it will delete any references to the
 *  NAPI struct. This is preparation for igb_free_q_vector.
 **/
static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated, so q_vector is NULL and we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}

/**
 *  igb_free_q_vectors - Free memory allocated for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  This function frees the memory allocated to the q_vectors.  In addition if
 *  NAPI is enabled it will delete any references to the NAPI struct prior
 *  to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}

/**
 *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *  @adapter: board private structure to initialize
 *
 *  This function resets the device so that it has 0 Rx queues, Tx queues, and
 *  MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 *  igb_set_interrupt_capability - set MSI or MSI-X if supported
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  Attempt to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
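	/* e.g. rss_queues = 4 with IGB_FLAG_QUEUE_PAIRS set requests
	 * 4 queue vectors + 1 link vector = 5 MSI-X entries; with
	 * unpaired Tx handlers it would be 4 + 4 + 1 = 9.
	 */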
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}

static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_count: q_vectors allocated on adapter, used for ring interleaving
 *  @v_idx: index of vector in adapter struct
 *  @txr_count: total number of Tx rings to allocate
 *  @txr_idx: index of first Tx ring to allocate
 *  @rxr_count: total number of Rx rings to allocate
 *  @rxr_idx: index of first Rx ring to allocate
 *
 *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count, size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct igb_q_vector) +
	       (sizeof(struct igb_ring) * ring_count);
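	/* q_vector and its rings share one allocation: the igb_ring array
	 * sits directly behind struct igb_q_vector (accessed below through
	 * q_vector->ring), so a single kzalloc covers both.
	 */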

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector) {
		q_vector = kzalloc(size, GFP_KERNEL);
	} else if (size > ksize(q_vector)) {
		kfree_rcu(q_vector, rcu);
		q_vector = kzalloc(size, GFP_KERNEL);
	} else {
		memset(q_vector, 0, size);
	}
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igb_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		ring->cbs_enable = false;
		ring->idleslope = 0;
		ring->sendslope = 0;
		ring->hicredit = 0;
		ring->locredit = 0;

		u64_stats_init(&ring->tx_syncp);
		u64_stats_init(&ring->tx_syncp2);

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, i354, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		u64_stats_init(&ring->rx_syncp);

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

/**
 *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  We allocate one q_vector per queue interrupt.  If allocation fails we
 *  return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

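	/* Spread the remaining rings over the remaining vectors; e.g.
	 * 4 Rx + 4 Tx rings on 4 vectors gives every vector one Rx/Tx
	 * pair (rqpv = tqpv = 1 on each iteration).
	 */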
	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter, msix);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igb_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 *  igb_request_irq - initialize interrupts
 *  @adapter: board private structure to initialize
 *
 *  Attempts to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);

		igb_clear_interrupt_scheme(adapter);
		err = igb_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;

		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
		igb_configure(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 *  igb_irq_disable - Mask off interrupt generation on the NIC
 *  @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 regval = rd32(E1000_EIAM);

		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int i;

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 *  igb_irq_enable - Enable default interrupt generation settings
 *  @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);

		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, pf_id, true, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, pf_id, false, true);
	}
}

/**
 *  igb_release_hw_control - release control of the h/w to f/w
 *  @adapter: address of board private structure
 *
 *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that the
 *  driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 *  igb_get_hw_control - get control of the h/w from f/w
 *  @adapter: address of board private structure
 *
 *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that
 *  the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

static void enable_fqtss(struct igb_adapter *adapter, bool enable)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(hw->mac.type != e1000_i210);

	if (enable)
		adapter->flags |= IGB_FLAG_FQTSS;
	else
		adapter->flags &= ~IGB_FLAG_FQTSS;

	if (netif_running(netdev))
		schedule_work(&adapter->reset_task);
}

static bool is_fqtss_enabled(struct igb_adapter *adapter)
{
	return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
}

static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
				   enum tx_queue_prio prio)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 3);	/* i210 exposes Tx queues 0-3 */

	val = rd32(E1000_I210_TXDCTL(queue));

	if (prio == TX_QUEUE_PRIO_HIGH)
		val |= E1000_TXDCTL_PRIORITY;
	else
		val &= ~E1000_TXDCTL_PRIORITY;

	wr32(E1000_I210_TXDCTL(queue), val);
}

static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	val = rd32(E1000_I210_TQAVCC(queue));

	if (mode == QUEUE_MODE_STREAM_RESERVATION)
		val |= E1000_TQAVCC_QUEUEMODE;
	else
		val &= ~E1000_TQAVCC_QUEUEMODE;

	wr32(E1000_I210_TQAVCC(queue), val);
}

/**
 *  igb_configure_cbs - Configure Credit-Based Shaper (CBS)
 *  @adapter: pointer to adapter struct
 *  @queue: queue number
 *  @enable: true = enable CBS, false = disable CBS
 *  @idleslope: idleSlope in kbps
 *  @sendslope: sendSlope in kbps
 *  @hicredit: hiCredit in bytes
 *  @locredit: loCredit in bytes
 *
 *  Configure CBS for a given hardware queue.  When disabling, the
 *  idleslope, sendslope, hicredit and locredit arguments are ignored.
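 *
 *  A hypothetical call (values for illustration only; in practice they
 *  come from the CBS qdisc offload request) reserving ~20 Mbps for
 *  queue 0 on a 1 Gbps link would be:
 *
 *      igb_configure_cbs(adapter, 0, true, 20000, -980000, 30, -1350);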
 **/
static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
			      bool enable, int idleslope, int sendslope,
			      int hicredit, int locredit)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tqavcc;
	u16 value;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	if (enable || queue == 0) {
		/* i210 does not allow the queue 0 to be in the Strict
		 * Priority mode while the Qav mode is enabled, so,
		 * instead of disabling strict priority mode, we give
		 * queue 0 the maximum of credits possible.
		 *
		 * See section 8.12.19 of the i210 datasheet, "Note:
		 * Queue0 QueueMode must be set to 1b when
		 * TransmitMode is set to Qav."
		 */
		if (queue == 0 && !enable) {
			/* max "linkspeed" idleslope in kbps */
			idleslope = 1000000;
			hicredit = ETH_FRAME_LEN;
		}

		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);

		/* According to i210 datasheet section 7.2.7.7, we should set
		 * the 'idleSlope' field from TQAVCC register following the
		 * equation:
		 *
		 * For 100 Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 0.2                          (E1)
		 *
		 * For 1000Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 2                            (E2)
		 *
		 * E1 and E2 can be merged into one equation as shown below.
		 * Note that 'link-speed' is in Mbps.
		 *
		 *     value = BW * 0x7735 * 2 * link-speed
		 *                           --------------               (E3)
		 *                                1000
		 *
		 * 'BW' is the percentage bandwidth out of full link speed
		 * which can be found with the following equation. Note that
		 * idleSlope here is the parameter from this function which
		 * is in kbps.
		 *
		 *     BW =     idleSlope
		 *          -----------------                             (E4)
		 *          link-speed * 1000
		 *
		 * That said, we can come up with a generic equation to
1731 		 * calculate the value we should write to the TQAVCC register by
1732 		 * replacing 'BW' in E3 by E4. The resulting equation is:
1733 		 *
1734 		 * value =     idleSlope     * 0x7735 * 2 * link-speed
1735 		 *         -----------------            --------------    (E5)
1736 		 *         link-speed * 1000                 1000
1737 		 *
1738 		 * 'link-speed' is present in both sides of the fraction so
1739 		 * it is canceled out. The final equation is the following:
1740 		 *
1741 		 *     value = idleSlope * 61034
1742 		 *             -----------------                          (E6)
1743 		 *                  1000000
1744 		 *
1745 		 * NOTE: For i210, given the above, we can see that idleslope
1746 		 *       is represented in 16.38431 kbps units by the value at
1747 		 *       the TQAVCC register (1Gbps / 61034), which reduces
1748 		 *       the granularity for idleslope increments.
1749 		 *       For instance, if you want to configure a 2576kbps
1750 		 *       idleslope, the value to be written on the register
1751 		 *       would have to be 157.23. If rounded down, you end
1752 		 *       up with less bandwidth available than originally
1753 		 *       required (~2572 kbps). If rounded up, you end up
1754 		 *       with a higher bandwidth (~2589 kbps). The approach
1755 		 *       we take below is to always round up the
1756 		 *       calculated value, so the resulting bandwidth might
1757 		 *       be slightly higher for some configurations.
1758 		 */
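		/* Following the example in the comment above: a 2576 kbps
		 * idleslope gives DIV_ROUND_UP(2576 * 61034, 1000000) = 158,
		 * i.e. ~2589 kbps of reserved bandwidth.
		 */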
1759 		value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
1760 
1761 		tqavcc = rd32(E1000_I210_TQAVCC(queue));
1762 		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1763 		tqavcc |= value;
1764 		wr32(E1000_I210_TQAVCC(queue), tqavcc);
1765 
1766 		wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
1767 	} else {
1768 		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1769 		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1770 
1771 		/* Set idleSlope to zero. */
1772 		tqavcc = rd32(E1000_I210_TQAVCC(queue));
1773 		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1774 		wr32(E1000_I210_TQAVCC(queue), tqavcc);
1775 
1776 		/* Set hiCredit to zero. */
1777 		wr32(E1000_I210_TQAVHC(queue), 0);
1778 	}
1779 
1780 	/* XXX: In the i210 controller the sendSlope and loCredit parameters from
1781 	 * CBS are not configurable by software, so we don't do any 'controller
1782 	 * configuration' with respect to these parameters.
1783 	 */
1784 
1785 	netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hicredit %d locredit %d\n",
1786 		   (enable) ? "enabled" : "disabled", queue,
1787 		   idleslope, sendslope, hicredit, locredit);
1788 }
1789 
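/**
 *  igb_save_cbs_params - save CBS parameters in the Tx ring structure
 *  @adapter: pointer to adapter struct
 *  @queue: queue number
 *  @enable: true = enable CBS, false = disable CBS
 *  @idleslope: idleSlope in kbps
 *  @sendslope: sendSlope in kbps
 *  @hicredit: hiCredit in bytes
 *  @locredit: loCredit in bytes
 *
 *  The parameters are saved so igb_setup_tx_mode() can re-apply them after
 *  a reset. Returns 0 on success, negative otherwise.
 **/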
1790 static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1791 			       bool enable, int idleslope, int sendslope,
1792 			       int hicredit, int locredit)
1793 {
1794 	struct igb_ring *ring;
1795 
1796 	if (queue < 0 || queue >= adapter->num_tx_queues)
1797 		return -EINVAL;
1798 
1799 	ring = adapter->tx_ring[queue];
1800 
1801 	ring->cbs_enable = enable;
1802 	ring->idleslope = idleslope;
1803 	ring->sendslope = sendslope;
1804 	ring->hicredit = hicredit;
1805 	ring->locredit = locredit;
1806 
1807 	return 0;
1808 }
1809 
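/* Return true if CBS has been requested on any of the adapter's Tx rings. */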
1810 static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1811 {
1812 	struct igb_ring *ring;
1813 	int i;
1814 
1815 	for (i = 0; i < adapter->num_tx_queues; i++) {
1816 		ring = adapter->tx_ring[i];
1817 
1818 		if (ring->cbs_enable)
1819 			return true;
1820 	}
1821 
1822 	return false;
1823 }
1824 
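/**
 *  igb_setup_tx_mode - switch the i210 between legacy and Qav transmit modes
 *  @adapter: pointer to adapter struct
 *
 *  When FQTSS is enabled, program TQAVCTRL, the packet buffer sizes and
 *  DTXMXPKTSZ as described in the i210 datasheet and re-apply any saved CBS
 *  configuration; otherwise restore the legacy defaults.
 **/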
1825 static void igb_setup_tx_mode(struct igb_adapter *adapter)
1826 {
1827 	struct net_device *netdev = adapter->netdev;
1828 	struct e1000_hw *hw = &adapter->hw;
1829 	u32 val;
1830 
1831 	/* Only i210 controller supports changing the transmission mode. */
1832 	if (hw->mac.type != e1000_i210)
1833 		return;
1834 
1835 	if (is_fqtss_enabled(adapter)) {
1836 		int i, max_queue;
1837 
1838 		/* Configure TQAVCTRL register: set transmit mode to 'Qav',
1839 		 * set data fetch arbitration to 'round robin' and set data
1840 		 * transfer arbitration to 'credit shaper algorithm'.
1841 		 */
1842 		val = rd32(E1000_I210_TQAVCTRL);
1843 		val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
1844 		val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1845 		wr32(E1000_I210_TQAVCTRL, val);
1846 
1847 		/* Configure Tx and Rx packet buffers sizes as described in
1848 		 * i210 datasheet section 7.2.7.7.
1849 		 */
1850 		val = rd32(E1000_TXPBS);
1851 		val &= ~I210_TXPBSIZE_MASK;
1852 		val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1853 			I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1854 		wr32(E1000_TXPBS, val);
1855 
1856 		val = rd32(E1000_RXPBS);
1857 		val &= ~I210_RXPBSIZE_MASK;
1858 		val |= I210_RXPBSIZE_PB_32KB;
1859 		wr32(E1000_RXPBS, val);
1860 
1861 		/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
1862 		 * register should not exceed the buffer size programmed in
1863 		 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
1864 		 * so according to the datasheet we should set MAX_TPKT_SIZE to
1865 		 * 4kB / 64.
1866 		 *
1867 		 * However, when we do so, no frames from queues 2 and 3 are
1868 		 * transmitted.  It seems that MAX_TPKT_SIZE must not be greater
1869 		 * than or _equal_ to the buffer size programmed in TXPBS. For
1870 		 * this reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64 (i.e. 63).
1871 		 */
1872 		val = (4096 - 1) / 64;
1873 		wr32(E1000_I210_DTXMXPKTSZ, val);
1874 
1875 		/* Since FQTSS mode is enabled, apply any CBS configuration
1876 		 * previously set. If no previous CBS configuration has been
1877 		 * done, then the initial configuration is applied, which means
1878 		 * CBS is disabled.
1879 		 */
1880 		max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1881 			    adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1882 
1883 		for (i = 0; i < max_queue; i++) {
1884 			struct igb_ring *ring = adapter->tx_ring[i];
1885 
1886 			igb_configure_cbs(adapter, i, ring->cbs_enable,
1887 					  ring->idleslope, ring->sendslope,
1888 					  ring->hicredit, ring->locredit);
1889 		}
1890 	} else {
1891 		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1892 		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1893 		wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1894 
1895 		val = rd32(E1000_I210_TQAVCTRL);
1896 		/* According to Section 8.12.21, the other flags we've set when
1897 		 * enabling FQTSS are not relevant when disabling FQTSS so we
1898 		 * don't touch them here.
1899 		 */
1900 		val &= ~E1000_TQAVCTRL_XMIT_MODE;
1901 		wr32(E1000_I210_TQAVCTRL, val);
1902 	}
1903 
1904 	netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1905 		   "enabled" : "disabled");
1906 }
1907 
1908 /**
1909  *  igb_configure - configure the hardware for RX and TX
1910  *  @adapter: private board structure
1911  **/
1912 static void igb_configure(struct igb_adapter *adapter)
1913 {
1914 	struct net_device *netdev = adapter->netdev;
1915 	int i;
1916 
1917 	igb_get_hw_control(adapter);
1918 	igb_set_rx_mode(netdev);
1919 	igb_setup_tx_mode(adapter);
1920 
1921 	igb_restore_vlan(adapter);
1922 
1923 	igb_setup_tctl(adapter);
1924 	igb_setup_mrqc(adapter);
1925 	igb_setup_rctl(adapter);
1926 
1927 	igb_nfc_filter_restore(adapter);
1928 	igb_configure_tx(adapter);
1929 	igb_configure_rx(adapter);
1930 
1931 	igb_rx_fifo_flush_82575(&adapter->hw);
1932 
1933 	/* call igb_desc_unused which always leaves
1934 	 * at least 1 descriptor unused to make sure
1935 	 * next_to_use != next_to_clean
1936 	 */
1937 	for (i = 0; i < adapter->num_rx_queues; i++) {
1938 		struct igb_ring *ring = adapter->rx_ring[i];
1939 		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
1940 	}
1941 }
1942 
1943 /**
1944  *  igb_power_up_link - Power up the phy/serdes link
1945  *  @adapter: address of board private structure
1946  **/
1947 void igb_power_up_link(struct igb_adapter *adapter)
1948 {
1949 	igb_reset_phy(&adapter->hw);
1950 
1951 	if (adapter->hw.phy.media_type == e1000_media_type_copper)
1952 		igb_power_up_phy_copper(&adapter->hw);
1953 	else
1954 		igb_power_up_serdes_link_82575(&adapter->hw);
1955 
1956 	igb_setup_link(&adapter->hw);
1957 }
1958 
1959 /**
1960  *  igb_power_down_link - Power down the phy/serdes link
1961  *  @adapter: address of board private structure
1962  **/
1963 static void igb_power_down_link(struct igb_adapter *adapter)
1964 {
1965 	if (adapter->hw.phy.media_type == e1000_media_type_copper)
1966 		igb_power_down_phy_copper_82575(&adapter->hw);
1967 	else
1968 		igb_shutdown_serdes_link_82575(&adapter->hw);
1969 }
1970 
1971 /**
1972  * igb_check_swap_media - Detect and switch function for Media Auto Sense
1973  * @adapter: address of the board private structure
1974  **/
1975 static void igb_check_swap_media(struct igb_adapter *adapter)
1976 {
1977 	struct e1000_hw *hw = &adapter->hw;
1978 	u32 ctrl_ext, connsw;
1979 	bool swap_now = false;
1980 
1981 	ctrl_ext = rd32(E1000_CTRL_EXT);
1982 	connsw = rd32(E1000_CONNSW);
1983 
1984 	/* need to live swap if current media is copper and we have fiber/serdes
1985 	 * to go to.
1986 	 */
1987 
1988 	if ((hw->phy.media_type == e1000_media_type_copper) &&
1989 	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
1990 		swap_now = true;
1991 	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
1992 		/* copper signal takes time to appear */
1993 		if (adapter->copper_tries < 4) {
1994 			adapter->copper_tries++;
1995 			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
1996 			wr32(E1000_CONNSW, connsw);
1997 			return;
1998 		} else {
1999 			adapter->copper_tries = 0;
2000 			if ((connsw & E1000_CONNSW_PHYSD) &&
2001 			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
2002 				swap_now = true;
2003 				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2004 				wr32(E1000_CONNSW, connsw);
2005 			}
2006 		}
2007 	}
2008 
2009 	if (!swap_now)
2010 		return;
2011 
2012 	switch (hw->phy.media_type) {
2013 	case e1000_media_type_copper:
2014 		netdev_info(adapter->netdev,
2015 			"MAS: changing media to fiber/serdes\n");
2016 		ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2018 		adapter->flags |= IGB_FLAG_MEDIA_RESET;
2019 		adapter->copper_tries = 0;
2020 		break;
2021 	case e1000_media_type_internal_serdes:
2022 	case e1000_media_type_fiber:
2023 		netdev_info(adapter->netdev,
2024 			"MAS: changing media to copper\n");
2025 		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2027 		adapter->flags |= IGB_FLAG_MEDIA_RESET;
2028 		break;
2029 	default:
2030 		/* shouldn't get here during regular operation */
2031 		netdev_err(adapter->netdev,
2032 			"AMS: Invalid media type found, returning\n");
2033 		break;
2034 	}
2035 	wr32(E1000_CTRL_EXT, ctrl_ext);
2036 }
2037 
2038 /**
2039  *  igb_up - Open the interface and prepare it to handle traffic
2040  *  @adapter: board private structure
2041  **/
2042 int igb_up(struct igb_adapter *adapter)
2043 {
2044 	struct e1000_hw *hw = &adapter->hw;
2045 	int i;
2046 
2047 	/* hardware has been reset, we need to reload some things */
2048 	igb_configure(adapter);
2049 
2050 	clear_bit(__IGB_DOWN, &adapter->state);
2051 
2052 	for (i = 0; i < adapter->num_q_vectors; i++)
2053 		napi_enable(&(adapter->q_vector[i]->napi));
2054 
2055 	if (adapter->flags & IGB_FLAG_HAS_MSIX)
2056 		igb_configure_msix(adapter);
2057 	else
2058 		igb_assign_vector(adapter->q_vector[0], 0);
2059 
2060 	/* Clear any pending interrupts. */
2061 	rd32(E1000_TSICR);
2062 	rd32(E1000_ICR);
2063 	igb_irq_enable(adapter);
2064 
2065 	/* notify VFs that reset has been completed */
2066 	if (adapter->vfs_allocated_count) {
2067 		u32 reg_data = rd32(E1000_CTRL_EXT);
2068 
2069 		reg_data |= E1000_CTRL_EXT_PFRSTD;
2070 		wr32(E1000_CTRL_EXT, reg_data);
2071 	}
2072 
2073 	netif_tx_start_all_queues(adapter->netdev);
2074 
2075 	/* start the watchdog. */
2076 	hw->mac.get_link_status = 1;
2077 	schedule_work(&adapter->watchdog_task);
2078 
2079 	if ((adapter->flags & IGB_FLAG_EEE) &&
2080 	    (!hw->dev_spec._82575.eee_disable))
2081 		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2082 
2083 	return 0;
2084 }
2085 
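/**
 *  igb_down - Close the interface and stop handling traffic
 *  @adapter: board private structure
 **/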
2086 void igb_down(struct igb_adapter *adapter)
2087 {
2088 	struct net_device *netdev = adapter->netdev;
2089 	struct e1000_hw *hw = &adapter->hw;
2090 	u32 tctl, rctl;
2091 	int i;
2092 
2093 	/* signal that we're down so the interrupt handler does not
2094 	 * reschedule our watchdog timer
2095 	 */
2096 	set_bit(__IGB_DOWN, &adapter->state);
2097 
2098 	/* disable receives in the hardware */
2099 	rctl = rd32(E1000_RCTL);
2100 	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2101 	/* flush and sleep below */
2102 
2103 	igb_nfc_filter_exit(adapter);
2104 
2105 	netif_carrier_off(netdev);
2106 	netif_tx_stop_all_queues(netdev);
2107 
2108 	/* disable transmits in the hardware */
2109 	tctl = rd32(E1000_TCTL);
2110 	tctl &= ~E1000_TCTL_EN;
2111 	wr32(E1000_TCTL, tctl);
2112 	/* flush both disables and wait for them to finish */
2113 	wrfl();
2114 	usleep_range(10000, 11000);
2115 
2116 	igb_irq_disable(adapter);
2117 
2118 	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2119 
2120 	for (i = 0; i < adapter->num_q_vectors; i++) {
2121 		if (adapter->q_vector[i]) {
2122 			napi_synchronize(&adapter->q_vector[i]->napi);
2123 			napi_disable(&adapter->q_vector[i]->napi);
2124 		}
2125 	}
2126 
2127 	del_timer_sync(&adapter->watchdog_timer);
2128 	del_timer_sync(&adapter->phy_info_timer);
2129 
2130 	/* record the stats before reset */
2131 	spin_lock(&adapter->stats64_lock);
2132 	igb_update_stats(adapter);
2133 	spin_unlock(&adapter->stats64_lock);
2134 
2135 	adapter->link_speed = 0;
2136 	adapter->link_duplex = 0;
2137 
2138 	if (!pci_channel_offline(adapter->pdev))
2139 		igb_reset(adapter);
2140 
2141 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
2142 	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2143 
2144 	igb_clean_all_tx_rings(adapter);
2145 	igb_clean_all_rx_rings(adapter);
2146 #ifdef CONFIG_IGB_DCA
2147 
2148 	/* since we reset the hardware, DCA settings were cleared */
2149 	igb_setup_dca(adapter);
2150 #endif
2151 }
2152 
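/**
 *  igb_reinit_locked - bounce the interface, serialized against other resets
 *  @adapter: board private structure
 *
 *  Waits for any reset already in progress, then runs igb_down()/igb_up()
 *  while holding the __IGB_RESETTING bit.
 **/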
2153 void igb_reinit_locked(struct igb_adapter *adapter)
2154 {
2155 	WARN_ON(in_interrupt());
2156 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2157 		usleep_range(1000, 2000);
2158 	igb_down(adapter);
2159 	igb_up(adapter);
2160 	clear_bit(__IGB_RESETTING, &adapter->state);
2161 }
2162 
2163 /**
2164  *  igb_enable_mas - Media Autosense re-enable after swap
2165  *  @adapter: adapter struct
2166  **/
2167 static void igb_enable_mas(struct igb_adapter *adapter)
2168 {
2169 	struct e1000_hw *hw = &adapter->hw;
2170 	u32 connsw = rd32(E1000_CONNSW);
2171 
2172 	/* configure for SerDes media detect */
2173 	if ((hw->phy.media_type == e1000_media_type_copper) &&
2174 	    (!(connsw & E1000_CONNSW_SERDESD))) {
2175 		connsw |= E1000_CONNSW_ENRGSRC;
2176 		connsw |= E1000_CONNSW_AUTOSENSE_EN;
2177 		wr32(E1000_CONNSW, connsw);
2178 		wrfl();
2179 	}
2180 }
2181 
2182 void igb_reset(struct igb_adapter *adapter)
2183 {
2184 	struct pci_dev *pdev = adapter->pdev;
2185 	struct e1000_hw *hw = &adapter->hw;
2186 	struct e1000_mac_info *mac = &hw->mac;
2187 	struct e1000_fc_info *fc = &hw->fc;
2188 	u32 pba, hwm;
2189 
2190 	/* Repartition PBA for MTUs greater than 9k.
2191 	 * CTRL.RST is required for this to take effect.
2192 	 */
2193 	switch (mac->type) {
2194 	case e1000_i350:
2195 	case e1000_i354:
2196 	case e1000_82580:
2197 		pba = rd32(E1000_RXPBS);
2198 		pba = igb_rxpbs_adjust_82580(pba);
2199 		break;
2200 	case e1000_82576:
2201 		pba = rd32(E1000_RXPBS);
2202 		pba &= E1000_RXPBS_SIZE_MASK_82576;
2203 		break;
2204 	case e1000_82575:
2205 	case e1000_i210:
2206 	case e1000_i211:
2207 	default:
2208 		pba = E1000_PBA_34K;
2209 		break;
2210 	}
2211 
2212 	if (mac->type == e1000_82575) {
2213 		u32 min_rx_space, min_tx_space, needed_tx_space;
2214 
2215 		/* write Rx PBA so that hardware can report correct Tx PBA */
2216 		wr32(E1000_PBA, pba);
2217 
2218 		/* To maintain wire speed transmits, the Tx FIFO should be
2219 		 * large enough to accommodate two full transmit packets,
2220 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
2221 		 * the Rx FIFO should be large enough to accommodate at least
2222 		 * one full receive packet and is similarly rounded up and
2223 		 * expressed in KB.
2224 		 */
2225 		min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2226 
2227 		/* The Tx FIFO also stores 16 bytes of information about the Tx
2228 		 * packet, but we don't include the Ethernet FCS, as hardware appends it.
2229 		 * We only need to round down to the nearest 512 byte block
2230 		 * count since the value we care about is 2 frames, not 1.
2231 		 */
2232 		min_tx_space = adapter->max_frame_size;
2233 		min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2234 		min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2235 
2236 		/* upper 16 bits have the Tx packet buffer allocation size in KB */
2237 		needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2238 
2239 		/* If current Tx allocation is less than the min Tx FIFO size,
2240 		 * and the min Tx FIFO size is less than the current Rx FIFO
2241 		 * allocation, take space away from current Rx allocation.
2242 		 */
2243 		if (needed_tx_space < pba) {
2244 			pba -= needed_tx_space;
2245 
2246 			/* if short on Rx space, Rx wins and must trump Tx
2247 			 * adjustment
2248 			 */
2249 			if (pba < min_rx_space)
2250 				pba = min_rx_space;
2251 		}
2252 
2253 		/* adjust PBA for jumbo frames */
2254 		wr32(E1000_PBA, pba);
2255 	}
2256 
2257 	/* flow control settings
2258 	 * The high water mark must be low enough to fit one full frame
2259 	 * after transmitting the pause frame.  As such we must have enough
2260 	 * space to allow for us to complete our current transmit and then
2261 	 * receive the frame that is in progress from the link partner.
2262 	 * Set it to:
2263 	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
2264 	 */
2265 	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2266 
2267 	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
2268 	fc->low_water = fc->high_water - 16;
2269 	fc->pause_time = 0xFFFF;
2270 	fc->send_xon = 1;
2271 	fc->current_mode = fc->requested_mode;
2272 
2273 	/* disable receives and transmits for all VFs */
2274 	if (adapter->vfs_allocated_count) {
2275 		int i;
2276 
2277 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2278 			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2279 
2280 		/* ping all the active vfs to let them know we are going down */
2281 		igb_ping_all_vfs(adapter);
2282 
2283 		/* disable transmits and receives */
2284 		wr32(E1000_VFRE, 0);
2285 		wr32(E1000_VFTE, 0);
2286 	}
2287 
2288 	/* Allow time for pending master requests to run */
2289 	hw->mac.ops.reset_hw(hw);
2290 	wr32(E1000_WUC, 0);
2291 
2292 	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2293 		/* need to resetup here after media swap */
2294 		adapter->ei.get_invariants(hw);
2295 		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2296 	}
2297 	if ((mac->type == e1000_82575) &&
2298 	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2299 		igb_enable_mas(adapter);
2300 	}
2301 	if (hw->mac.ops.init_hw(hw))
2302 		dev_err(&pdev->dev, "Hardware Error\n");
2303 
2304 	/* RAR registers were cleared during init_hw, clear mac table */
2305 	igb_flush_mac_table(adapter);
2306 	__dev_uc_unsync(adapter->netdev, NULL);
2307 
2308 	/* Recover default RAR entry */
2309 	igb_set_default_mac_filter(adapter);
2310 
2311 	/* Flow control settings reset on hardware reset, so guarantee flow
2312 	 * control is off when forcing speed.
2313 	 */
2314 	if (!hw->mac.autoneg)
2315 		igb_force_mac_fc(hw);
2316 
2317 	igb_init_dmac(adapter, pba);
2318 #ifdef CONFIG_IGB_HWMON
2319 	/* Re-initialize the thermal sensor on i350 devices. */
2320 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
2321 		if (mac->type == e1000_i350 && hw->bus.func == 0) {
2322 			/* If present, re-initialize the external thermal sensor
2323 			 * interface.
2324 			 */
2325 			if (adapter->ets)
2326 				mac->ops.init_thermal_sensor_thresh(hw);
2327 		}
2328 	}
2329 #endif
2330 	/* Re-establish EEE setting */
2331 	if (hw->phy.media_type == e1000_media_type_copper) {
2332 		switch (mac->type) {
2333 		case e1000_i350:
2334 		case e1000_i210:
2335 		case e1000_i211:
2336 			igb_set_eee_i350(hw, true, true);
2337 			break;
2338 		case e1000_i354:
2339 			igb_set_eee_i354(hw, true, true);
2340 			break;
2341 		default:
2342 			break;
2343 		}
2344 	}
2345 	if (!netif_running(adapter->netdev))
2346 		igb_power_down_link(adapter);
2347 
2348 	igb_update_mng_vlan(adapter);
2349 
2350 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2351 	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2352 
2353 	/* Re-enable PTP, where applicable. */
2354 	if (adapter->ptp_flags & IGB_PTP_ENABLED)
2355 		igb_ptp_reset(adapter);
2356 
2357 	igb_get_phy_info(hw);
2358 }
2359 
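/**
 *  igb_fix_features - sanitize a feature set requested by user space
 *  @netdev: network interface device structure
 *  @features: requested feature set
 *
 *  The hardware cannot toggle Rx and Tx VLAN acceleration independently,
 *  so force the Tx flag into the same state as the Rx flag.
 **/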
2360 static netdev_features_t igb_fix_features(struct net_device *netdev,
2361 	netdev_features_t features)
2362 {
2363 	/* Since there is no support for separate Rx/Tx vlan accel
2364 	 * enable/disable, make sure the Tx flag is always in same state as Rx.
2365 	 */
2366 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2367 		features |= NETIF_F_HW_VLAN_CTAG_TX;
2368 	else
2369 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2370 
2371 	return features;
2372 }
2373 
2374 static int igb_set_features(struct net_device *netdev,
2375 	netdev_features_t features)
2376 {
2377 	netdev_features_t changed = netdev->features ^ features;
2378 	struct igb_adapter *adapter = netdev_priv(netdev);
2379 
2380 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2381 		igb_vlan_mode(netdev, features);
2382 
2383 	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2384 		return 0;
2385 
2386 	if (!(features & NETIF_F_NTUPLE)) {
2387 		struct hlist_node *node2;
2388 		struct igb_nfc_filter *rule;
2389 
2390 		spin_lock(&adapter->nfc_lock);
2391 		hlist_for_each_entry_safe(rule, node2,
2392 					  &adapter->nfc_filter_list, nfc_node) {
2393 			igb_erase_filter(adapter, rule);
2394 			hlist_del(&rule->nfc_node);
2395 			kfree(rule);
2396 		}
2397 		spin_unlock(&adapter->nfc_lock);
2398 		adapter->nfc_filter_count = 0;
2399 	}
2400 
2401 	netdev->features = features;
2402 
2403 	if (netif_running(netdev))
2404 		igb_reinit_locked(adapter);
2405 	else
2406 		igb_reset(adapter);
2407 
2408 	return 0;
2409 }
2410 
2411 static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2412 			   struct net_device *dev,
2413 			   const unsigned char *addr, u16 vid,
2414 			   u16 flags)
2415 {
2416 	/* guarantee we can provide a unique filter for the unicast address */
2417 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2418 		struct igb_adapter *adapter = netdev_priv(dev);
2419 		int vfn = adapter->vfs_allocated_count;
2420 
2421 		if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2422 			return -ENOMEM;
2423 	}
2424 
2425 	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2426 }
2427 
2428 #define IGB_MAX_MAC_HDR_LEN	127
2429 #define IGB_MAX_NETWORK_HDR_LEN	511
2430 
2431 static netdev_features_t
2432 igb_features_check(struct sk_buff *skb, struct net_device *dev,
2433 		   netdev_features_t features)
2434 {
2435 	unsigned int network_hdr_len, mac_hdr_len;
2436 
2437 	/* Make certain the headers can be described by a context descriptor */
2438 	mac_hdr_len = skb_network_header(skb) - skb->data;
2439 	if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2440 		return features & ~(NETIF_F_HW_CSUM |
2441 				    NETIF_F_SCTP_CRC |
2442 				    NETIF_F_HW_VLAN_CTAG_TX |
2443 				    NETIF_F_TSO |
2444 				    NETIF_F_TSO6);
2445 
2446 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2447 	if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2448 		return features & ~(NETIF_F_HW_CSUM |
2449 				    NETIF_F_SCTP_CRC |
2450 				    NETIF_F_TSO |
2451 				    NETIF_F_TSO6);
2452 
2453 	/* We can only support IPV4 TSO in tunnels if we can mangle the
2454 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2455 	 */
2456 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2457 		features &= ~NETIF_F_TSO;
2458 
2459 	return features;
2460 }
2461 
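/* CBS parameters arrive here from the "cbs" qdisc when hardware offload is
 * requested. A hypothetical invocation for queue 1 (the handle and numeric
 * values below are purely illustrative):
 *
 *   tc qdisc replace dev eth0 parent 6666:2 cbs \
 *      idleslope 7808 sendslope -992192 hicredit 12 locredit -97 offload 1
 */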
2462 static int igb_offload_cbs(struct igb_adapter *adapter,
2463 			   struct tc_cbs_qopt_offload *qopt)
2464 {
2465 	struct e1000_hw *hw = &adapter->hw;
2466 	int err;
2467 
2468 	/* CBS offloading is only supported by i210 controller. */
2469 	if (hw->mac.type != e1000_i210)
2470 		return -EOPNOTSUPP;
2471 
2472 	/* CBS offloading is only supported by queue 0 and queue 1. */
2473 	if (qopt->queue < 0 || qopt->queue > 1)
2474 		return -EINVAL;
2475 
2476 	err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2477 				  qopt->idleslope, qopt->sendslope,
2478 				  qopt->hicredit, qopt->locredit);
2479 	if (err)
2480 		return err;
2481 
2482 	if (is_fqtss_enabled(adapter)) {
2483 		igb_configure_cbs(adapter, qopt->queue, qopt->enable,
2484 				  qopt->idleslope, qopt->sendslope,
2485 				  qopt->hicredit, qopt->locredit);
2486 
2487 		if (!is_any_cbs_enabled(adapter))
2488 			enable_fqtss(adapter, false);
2489 
2490 	} else {
2491 		enable_fqtss(adapter, true);
2492 	}
2493 
2494 	return 0;
2495 }
2496 
2497 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2498 #define VLAN_PRIO_FULL_MASK (0x07)
2499 
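/* Flower filters arrive here from the ingress tc block. A hypothetical
 * invocation that steers one EtherType to traffic class 1, assuming traffic
 * classes have already been configured on the device (the protocol value is
 * illustrative only):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol 0x22f0 flower skip_sw hw_tc 1
 */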
2500 static int igb_parse_cls_flower(struct igb_adapter *adapter,
2501 				struct tc_cls_flower_offload *f,
2502 				int traffic_class,
2503 				struct igb_nfc_filter *input)
2504 {
2505 	struct netlink_ext_ack *extack = f->common.extack;
2506 
2507 	if (f->dissector->used_keys &
2508 	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2509 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2510 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2511 	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2512 		NL_SET_ERR_MSG_MOD(extack,
2513 				   "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2514 		return -EOPNOTSUPP;
2515 	}
2516 
2517 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2518 		struct flow_dissector_key_eth_addrs *key, *mask;
2519 
2520 		key = skb_flow_dissector_target(f->dissector,
2521 						FLOW_DISSECTOR_KEY_ETH_ADDRS,
2522 						f->key);
2523 		mask = skb_flow_dissector_target(f->dissector,
2524 						 FLOW_DISSECTOR_KEY_ETH_ADDRS,
2525 						 f->mask);
2526 
2527 		if (!is_zero_ether_addr(mask->dst)) {
2528 			if (!is_broadcast_ether_addr(mask->dst)) {
2529 				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2530 				return -EINVAL;
2531 			}
2532 
2533 			input->filter.match_flags |=
2534 				IGB_FILTER_FLAG_DST_MAC_ADDR;
2535 			ether_addr_copy(input->filter.dst_addr, key->dst);
2536 		}
2537 
2538 		if (!is_zero_ether_addr(mask->src)) {
2539 			if (!is_broadcast_ether_addr(mask->src)) {
2540 				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2541 				return -EINVAL;
2542 			}
2543 
2544 			input->filter.match_flags |=
2545 				IGB_FILTER_FLAG_SRC_MAC_ADDR;
2546 			ether_addr_copy(input->filter.src_addr, key->src);
2547 		}
2548 	}
2549 
2550 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
2551 		struct flow_dissector_key_basic *key, *mask;
2552 
2553 		key = skb_flow_dissector_target(f->dissector,
2554 						FLOW_DISSECTOR_KEY_BASIC,
2555 						f->key);
2556 		mask = skb_flow_dissector_target(f->dissector,
2557 						 FLOW_DISSECTOR_KEY_BASIC,
2558 						 f->mask);
2559 
2560 		if (mask->n_proto) {
2561 			if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
2562 				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2563 				return -EINVAL;
2564 			}
2565 
2566 			input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2567 			input->filter.etype = key->n_proto;
2568 		}
2569 	}
2570 
2571 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
2572 		struct flow_dissector_key_vlan *key, *mask;
2573 
2574 		key = skb_flow_dissector_target(f->dissector,
2575 						FLOW_DISSECTOR_KEY_VLAN,
2576 						f->key);
2577 		mask = skb_flow_dissector_target(f->dissector,
2578 						 FLOW_DISSECTOR_KEY_VLAN,
2579 						 f->mask);
2580 
2581 		if (mask->vlan_priority) {
2582 			if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2583 				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2584 				return -EINVAL;
2585 			}
2586 
2587 			input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2588 			input->filter.vlan_tci = key->vlan_priority;
2589 		}
2590 	}
2591 
2592 	input->action = traffic_class;
2593 	input->cookie = f->cookie;
2594 
2595 	return 0;
2596 }
2597 
2598 static int igb_configure_clsflower(struct igb_adapter *adapter,
2599 				   struct tc_cls_flower_offload *cls_flower)
2600 {
2601 	struct netlink_ext_ack *extack = cls_flower->common.extack;
2602 	struct igb_nfc_filter *filter, *f;
2603 	int err, tc;
2604 
2605 	tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2606 	if (tc < 0) {
2607 		NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2608 		return -EINVAL;
2609 	}
2610 
2611 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2612 	if (!filter)
2613 		return -ENOMEM;
2614 
2615 	err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2616 	if (err < 0)
2617 		goto err_parse;
2618 
2619 	spin_lock(&adapter->nfc_lock);
2620 
2621 	hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2622 		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2623 			err = -EEXIST;
2624 			NL_SET_ERR_MSG_MOD(extack,
2625 					   "This filter is already set in ethtool");
2626 			goto err_locked;
2627 		}
2628 	}
2629 
2630 	hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2631 		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2632 			err = -EEXIST;
2633 			NL_SET_ERR_MSG_MOD(extack,
2634 					   "This filter is already set in cls_flower");
2635 			goto err_locked;
2636 		}
2637 	}
2638 
2639 	err = igb_add_filter(adapter, filter);
2640 	if (err < 0) {
2641 		NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2642 		goto err_locked;
2643 	}
2644 
2645 	hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2646 
2647 	spin_unlock(&adapter->nfc_lock);
2648 
2649 	return 0;
2650 
2651 err_locked:
2652 	spin_unlock(&adapter->nfc_lock);
2653 
2654 err_parse:
2655 	kfree(filter);
2656 
2657 	return err;
2658 }
2659 
2660 static int igb_delete_clsflower(struct igb_adapter *adapter,
2661 				struct tc_cls_flower_offload *cls_flower)
2662 {
2663 	struct igb_nfc_filter *filter;
2664 	int err;
2665 
2666 	spin_lock(&adapter->nfc_lock);
2667 
2668 	hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2669 		if (filter->cookie == cls_flower->cookie)
2670 			break;
2671 
2672 	if (!filter) {
2673 		err = -ENOENT;
2674 		goto out;
2675 	}
2676 
2677 	err = igb_erase_filter(adapter, filter);
2678 	if (err < 0)
2679 		goto out;
2680 
2681 	hlist_del(&filter->nfc_node);
2682 	kfree(filter);
2683 
2684 out:
2685 	spin_unlock(&adapter->nfc_lock);
2686 
2687 	return err;
2688 }
2689 
2690 static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2691 				   struct tc_cls_flower_offload *cls_flower)
2692 {
2693 	switch (cls_flower->command) {
2694 	case TC_CLSFLOWER_REPLACE:
2695 		return igb_configure_clsflower(adapter, cls_flower);
2696 	case TC_CLSFLOWER_DESTROY:
2697 		return igb_delete_clsflower(adapter, cls_flower);
2698 	case TC_CLSFLOWER_STATS:
2699 		return -EOPNOTSUPP;
2700 	default:
2701 		return -EINVAL;
2702 	}
2703 }
2704 
2705 static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2706 				 void *cb_priv)
2707 {
2708 	struct igb_adapter *adapter = cb_priv;
2709 
2710 	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2711 		return -EOPNOTSUPP;
2712 
2713 	switch (type) {
2714 	case TC_SETUP_CLSFLOWER:
2715 		return igb_setup_tc_cls_flower(adapter, type_data);
2716 
2717 	default:
2718 		return -EOPNOTSUPP;
2719 	}
2720 }
2721 
2722 static int igb_setup_tc_block(struct igb_adapter *adapter,
2723 			      struct tc_block_offload *f)
2724 {
2725 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2726 		return -EOPNOTSUPP;
2727 
2728 	switch (f->command) {
2729 	case TC_BLOCK_BIND:
2730 		return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
2731 					     adapter, adapter);
2732 	case TC_BLOCK_UNBIND:
2733 		tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
2734 					adapter);
2735 		return 0;
2736 	default:
2737 		return -EOPNOTSUPP;
2738 	}
2739 }
2740 
2741 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2742 			void *type_data)
2743 {
2744 	struct igb_adapter *adapter = netdev_priv(dev);
2745 
2746 	switch (type) {
2747 	case TC_SETUP_QDISC_CBS:
2748 		return igb_offload_cbs(adapter, type_data);
2749 	case TC_SETUP_BLOCK:
2750 		return igb_setup_tc_block(adapter, type_data);
2751 
2752 	default:
2753 		return -EOPNOTSUPP;
2754 	}
2755 }
2756 
2757 static const struct net_device_ops igb_netdev_ops = {
2758 	.ndo_open		= igb_open,
2759 	.ndo_stop		= igb_close,
2760 	.ndo_start_xmit		= igb_xmit_frame,
2761 	.ndo_get_stats64	= igb_get_stats64,
2762 	.ndo_set_rx_mode	= igb_set_rx_mode,
2763 	.ndo_set_mac_address	= igb_set_mac,
2764 	.ndo_change_mtu		= igb_change_mtu,
2765 	.ndo_do_ioctl		= igb_ioctl,
2766 	.ndo_tx_timeout		= igb_tx_timeout,
2767 	.ndo_validate_addr	= eth_validate_addr,
2768 	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
2769 	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
2770 	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
2771 	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
2772 	.ndo_set_vf_rate	= igb_ndo_set_vf_bw,
2773 	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
2774 	.ndo_set_vf_trust	= igb_ndo_set_vf_trust,
2775 	.ndo_get_vf_config	= igb_ndo_get_vf_config,
2776 #ifdef CONFIG_NET_POLL_CONTROLLER
2777 	.ndo_poll_controller	= igb_netpoll,
2778 #endif
2779 	.ndo_fix_features	= igb_fix_features,
2780 	.ndo_set_features	= igb_set_features,
2781 	.ndo_fdb_add		= igb_ndo_fdb_add,
2782 	.ndo_features_check	= igb_features_check,
2783 	.ndo_setup_tc		= igb_setup_tc,
2784 };
2785 
2786 /**
2787  * igb_set_fw_version - Configure version string for ethtool
2788  * @adapter: adapter struct
2789  **/
2790 void igb_set_fw_version(struct igb_adapter *adapter)
2791 {
2792 	struct e1000_hw *hw = &adapter->hw;
2793 	struct e1000_fw_version fw;
2794 
2795 	igb_get_fw_version(hw, &fw);
2796 
2797 	switch (hw->mac.type) {
2798 	case e1000_i210:
2799 	case e1000_i211:
2800 		if (!(igb_get_flash_presence_i210(hw))) {
2801 			snprintf(adapter->fw_version,
2802 				 sizeof(adapter->fw_version),
2803 				 "%2d.%2d-%d",
2804 				 fw.invm_major, fw.invm_minor,
2805 				 fw.invm_img_type);
2806 			break;
2807 		}
2808 		/* fall through */
2809 	default:
2810 		/* if option is rom valid, display its version too */
2811 		if (fw.or_valid) {
2812 			snprintf(adapter->fw_version,
2813 				 sizeof(adapter->fw_version),
2814 				 "%d.%d, 0x%08x, %d.%d.%d",
2815 				 fw.eep_major, fw.eep_minor, fw.etrack_id,
2816 				 fw.or_major, fw.or_build, fw.or_patch);
2817 		/* no option rom */
2818 		} else if (fw.etrack_id != 0x0000) {
2819 			snprintf(adapter->fw_version,
2820 				 sizeof(adapter->fw_version),
2821 				 "%d.%d, 0x%08x",
2822 				 fw.eep_major, fw.eep_minor, fw.etrack_id);
2823 		} else {
2824 			snprintf(adapter->fw_version,
2825 				 sizeof(adapter->fw_version),
2826 				 "%d.%d.%d",
2827 				 fw.eep_major, fw.eep_minor, fw.eep_build);
2828 		}
2829 		break;
2830 	}
2831 }
2832 
2833 /**
2834  * igb_init_mas - init Media Autosense feature if enabled in the NVM
2835  *
2836  * @adapter: adapter struct
2837  **/
2838 static void igb_init_mas(struct igb_adapter *adapter)
2839 {
2840 	struct e1000_hw *hw = &adapter->hw;
2841 	u16 eeprom_data;
2842 
2843 	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2844 	switch (hw->bus.func) {
2845 	case E1000_FUNC_0:
2846 		if (eeprom_data & IGB_MAS_ENABLE_0) {
2847 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2848 			netdev_info(adapter->netdev,
2849 				"MAS: Enabling Media Autosense for port %d\n",
2850 				hw->bus.func);
2851 		}
2852 		break;
2853 	case E1000_FUNC_1:
2854 		if (eeprom_data & IGB_MAS_ENABLE_1) {
2855 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2856 			netdev_info(adapter->netdev,
2857 				"MAS: Enabling Media Autosense for port %d\n",
2858 				hw->bus.func);
2859 		}
2860 		break;
2861 	case E1000_FUNC_2:
2862 		if (eeprom_data & IGB_MAS_ENABLE_2) {
2863 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2864 			netdev_info(adapter->netdev,
2865 				"MAS: Enabling Media Autosense for port %d\n",
2866 				hw->bus.func);
2867 		}
2868 		break;
2869 	case E1000_FUNC_3:
2870 		if (eeprom_data & IGB_MAS_ENABLE_3) {
2871 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
2872 			netdev_info(adapter->netdev,
2873 				"MAS: Enabling Media Autosense for port %d\n",
2874 				hw->bus.func);
2875 		}
2876 		break;
2877 	default:
2878 		/* Shouldn't get here */
2879 		netdev_err(adapter->netdev,
2880 			"MAS: Invalid port configuration, returning\n");
2881 		break;
2882 	}
2883 }
2884 
2885 /**
2886  *  igb_init_i2c - Init I2C interface
2887  *  @adapter: pointer to adapter structure
2888  **/
2889 static s32 igb_init_i2c(struct igb_adapter *adapter)
2890 {
2891 	s32 status = 0;
2892 
2893 	/* I2C interface supported on i350 devices */
2894 	if (adapter->hw.mac.type != e1000_i350)
2895 		return 0;
2896 
2897 	/* Initialize the i2c bus which is controlled by the registers.
2898 	 * This bus will use the i2c_algo_bit structure that implements
2899 	 * the protocol through toggling of the 4 bits in the register.
2900 	 */
2901 	adapter->i2c_adap.owner = THIS_MODULE;
2902 	adapter->i2c_algo = igb_i2c_algo;
2903 	adapter->i2c_algo.data = adapter;
2904 	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2905 	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2906 	strlcpy(adapter->i2c_adap.name, "igb BB",
2907 		sizeof(adapter->i2c_adap.name));
2908 	status = i2c_bit_add_bus(&adapter->i2c_adap);
2909 	return status;
2910 }
2911 
2912 /**
2913  *  igb_probe - Device Initialization Routine
2914  *  @pdev: PCI device information struct
2915  *  @ent: entry in igb_pci_tbl
2916  *
2917  *  Returns 0 on success, negative on failure
2918  *
2919  *  igb_probe initializes an adapter identified by a pci_dev structure.
2920  *  The OS initialization, configuring of the adapter private structure,
2921  *  and a hardware reset occur.
2922  **/
2923 static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2924 {
2925 	struct net_device *netdev;
2926 	struct igb_adapter *adapter;
2927 	struct e1000_hw *hw;
2928 	u16 eeprom_data = 0;
2929 	s32 ret_val;
2930 	static int global_quad_port_a; /* global quad port a indication */
2931 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
2932 	int err, pci_using_dac;
2933 	u8 part_str[E1000_PBANUM_LENGTH];
2934 
2935 	/* Catch broken hardware that put the wrong VF device ID in
2936 	 * the PCIe SR-IOV capability.
2937 	 */
2938 	if (pdev->is_virtfn) {
2939 		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
2940 			pci_name(pdev), pdev->vendor, pdev->device);
2941 		return -EINVAL;
2942 	}
2943 
2944 	err = pci_enable_device_mem(pdev);
2945 	if (err)
2946 		return err;
2947 
2948 	pci_using_dac = 0;
2949 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2950 	if (!err) {
2951 		pci_using_dac = 1;
2952 	} else {
2953 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2954 		if (err) {
2955 			dev_err(&pdev->dev,
2956 				"No usable DMA configuration, aborting\n");
2957 			goto err_dma;
2958 		}
2959 	}
2960 
2961 	err = pci_request_mem_regions(pdev, igb_driver_name);
2962 	if (err)
2963 		goto err_pci_reg;
2964 
2965 	pci_enable_pcie_error_reporting(pdev);
2966 
2967 	pci_set_master(pdev);
2968 	pci_save_state(pdev);
2969 
2970 	err = -ENOMEM;
2971 	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
2972 				   IGB_MAX_TX_QUEUES);
2973 	if (!netdev)
2974 		goto err_alloc_etherdev;
2975 
2976 	SET_NETDEV_DEV(netdev, &pdev->dev);
2977 
2978 	pci_set_drvdata(pdev, netdev);
2979 	adapter = netdev_priv(netdev);
2980 	adapter->netdev = netdev;
2981 	adapter->pdev = pdev;
2982 	hw = &adapter->hw;
2983 	hw->back = adapter;
2984 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2985 
2986 	err = -EIO;
2987 	adapter->io_addr = pci_iomap(pdev, 0, 0);
2988 	if (!adapter->io_addr)
2989 		goto err_ioremap;
2990 	/* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
2991 	hw->hw_addr = adapter->io_addr;
2992 
2993 	netdev->netdev_ops = &igb_netdev_ops;
2994 	igb_set_ethtool_ops(netdev);
2995 	netdev->watchdog_timeo = 5 * HZ;
2996 
2997 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2998 
2999 	netdev->mem_start = pci_resource_start(pdev, 0);
3000 	netdev->mem_end = pci_resource_end(pdev, 0);
3001 
3002 	/* PCI config space info */
3003 	hw->vendor_id = pdev->vendor;
3004 	hw->device_id = pdev->device;
3005 	hw->revision_id = pdev->revision;
3006 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
3007 	hw->subsystem_device_id = pdev->subsystem_device;
3008 
3009 	/* Copy the default MAC, PHY and NVM function pointers */
3010 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3011 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3012 	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3013 	/* Initialize skew-specific constants */
3014 	err = ei->get_invariants(hw);
3015 	if (err)
3016 		goto err_sw_init;
3017 
3018 	/* setup the private structure */
3019 	err = igb_sw_init(adapter);
3020 	if (err)
3021 		goto err_sw_init;
3022 
3023 	igb_get_bus_info_pcie(hw);
3024 
3025 	hw->phy.autoneg_wait_to_complete = false;
3026 
3027 	/* Copper options */
3028 	if (hw->phy.media_type == e1000_media_type_copper) {
3029 		hw->phy.mdix = AUTO_ALL_MODES;
3030 		hw->phy.disable_polarity_correction = false;
3031 		hw->phy.ms_type = e1000_ms_hw_default;
3032 	}
3033 
3034 	if (igb_check_reset_block(hw))
3035 		dev_info(&pdev->dev,
3036 			"PHY reset is blocked due to SOL/IDER session.\n");
3037 
3038 	/* features is initialized to 0 at allocation; it might have bits
3039 	 * set by igb_sw_init, so we should use an OR instead of an
3040 	 * assignment.
3041 	 */
3042 	netdev->features |= NETIF_F_SG |
3043 			    NETIF_F_TSO |
3044 			    NETIF_F_TSO6 |
3045 			    NETIF_F_RXHASH |
3046 			    NETIF_F_RXCSUM |
3047 			    NETIF_F_HW_CSUM;
3048 
3049 	if (hw->mac.type >= e1000_82576)
3050 		netdev->features |= NETIF_F_SCTP_CRC;
3051 
3052 	if (hw->mac.type >= e1000_i350)
3053 		netdev->features |= NETIF_F_HW_TC;
3054 
3055 #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3056 				  NETIF_F_GSO_GRE_CSUM | \
3057 				  NETIF_F_GSO_IPXIP4 | \
3058 				  NETIF_F_GSO_IPXIP6 | \
3059 				  NETIF_F_GSO_UDP_TUNNEL | \
3060 				  NETIF_F_GSO_UDP_TUNNEL_CSUM)
3061 
3062 	netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3063 	netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3064 
3065 	/* copy netdev features into list of user selectable features */
3066 	netdev->hw_features |= netdev->features |
3067 			       NETIF_F_HW_VLAN_CTAG_RX |
3068 			       NETIF_F_HW_VLAN_CTAG_TX |
3069 			       NETIF_F_RXALL;
3070 
3071 	if (hw->mac.type >= e1000_i350)
3072 		netdev->hw_features |= NETIF_F_NTUPLE;
3073 
3074 	if (pci_using_dac)
3075 		netdev->features |= NETIF_F_HIGHDMA;
3076 
3077 	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3078 	netdev->mpls_features |= NETIF_F_HW_CSUM;
3079 	netdev->hw_enc_features |= netdev->vlan_features;
3080 
3081 	/* set this bit last since it cannot be part of vlan_features */
3082 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3083 			    NETIF_F_HW_VLAN_CTAG_RX |
3084 			    NETIF_F_HW_VLAN_CTAG_TX;
3085 
3086 	netdev->priv_flags |= IFF_SUPP_NOFCS;
3087 
3088 	netdev->priv_flags |= IFF_UNICAST_FLT;
3089 
3090 	/* MTU range: 68 - 9216 */
3091 	netdev->min_mtu = ETH_MIN_MTU;
3092 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3093 
3094 	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3095 
3096 	/* before reading the NVM, reset the controller to put the device in a
3097 	 * known good starting state
3098 	 */
3099 	hw->mac.ops.reset_hw(hw);
3100 
3101 	/* make sure the NVM is good; i211/i210 parts can have special NVM
3102 	 * that doesn't contain a checksum
3103 	 */
3104 	switch (hw->mac.type) {
3105 	case e1000_i210:
3106 	case e1000_i211:
3107 		if (igb_get_flash_presence_i210(hw)) {
3108 			if (hw->nvm.ops.validate(hw) < 0) {
3109 				dev_err(&pdev->dev,
3110 					"The NVM Checksum Is Not Valid\n");
3111 				err = -EIO;
3112 				goto err_eeprom;
3113 			}
3114 		}
3115 		break;
3116 	default:
3117 		if (hw->nvm.ops.validate(hw) < 0) {
3118 			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3119 			err = -EIO;
3120 			goto err_eeprom;
3121 		}
3122 		break;
3123 	}
3124 
3125 	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3126 		/* copy the MAC address out of the NVM */
3127 		if (hw->mac.ops.read_mac_addr(hw))
3128 			dev_err(&pdev->dev, "NVM Read Error\n");
3129 	}
3130 
3131 	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
3132 
3133 	if (!is_valid_ether_addr(netdev->dev_addr)) {
3134 		dev_err(&pdev->dev, "Invalid MAC Address\n");
3135 		err = -EIO;
3136 		goto err_eeprom;
3137 	}
3138 
3139 	igb_set_default_mac_filter(adapter);
3140 
3141 	/* get firmware version for ethtool -i */
3142 	igb_set_fw_version(adapter);
3143 
3144 	/* configure RXPBSIZE and TXPBSIZE */
3145 	if (hw->mac.type == e1000_i210) {
3146 		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3147 		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3148 	}
3149 
3150 	timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3151 	timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3152 
3153 	INIT_WORK(&adapter->reset_task, igb_reset_task);
3154 	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3155 
3156 	/* Initialize link properties that are user-changeable */
3157 	adapter->fc_autoneg = true;
3158 	hw->mac.autoneg = true;
3159 	hw->phy.autoneg_advertised = 0x2f;
3160 
3161 	hw->fc.requested_mode = e1000_fc_default;
3162 	hw->fc.current_mode = e1000_fc_default;
3163 
3164 	igb_validate_mdi_setting(hw);
3165 
3166 	/* By default, support wake on port A */
3167 	if (hw->bus.func == 0)
3168 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3169 
3170 	/* Check the NVM for wake support on non-port A ports */
3171 	if (hw->mac.type >= e1000_82580)
3172 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3173 				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3174 				 &eeprom_data);
3175 	else if (hw->bus.func == 1)
3176 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3177 
3178 	if (eeprom_data & IGB_EEPROM_APME)
3179 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3180 
3181 	/* now that we have the eeprom settings, apply the special cases where
3182 	 * the eeprom may be wrong or the board simply won't support wake on
3183 	 * lan on a particular port
3184 	 */
3185 	switch (pdev->device) {
3186 	case E1000_DEV_ID_82575GB_QUAD_COPPER:
3187 		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3188 		break;
3189 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
3190 	case E1000_DEV_ID_82576_FIBER:
3191 	case E1000_DEV_ID_82576_SERDES:
3192 		/* Wake events only supported on port A for dual fiber
3193 		 * regardless of eeprom setting
3194 		 */
3195 		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3196 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3197 		break;
3198 	case E1000_DEV_ID_82576_QUAD_COPPER:
3199 	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3200 		/* if quad port adapter, disable WoL on all but port A */
3201 		if (global_quad_port_a != 0)
3202 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3203 		else
3204 			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3205 		/* Reset for multiple quad port adapters */
3206 		if (++global_quad_port_a == 4)
3207 			global_quad_port_a = 0;
3208 		break;
3209 	default:
3210 		/* If the device can't wake, don't set software support */
3211 		if (!device_can_wakeup(&adapter->pdev->dev))
3212 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3213 	}
3214 
3215 	/* initialize the wol settings based on the eeprom settings */
3216 	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3217 		adapter->wol |= E1000_WUFC_MAG;
3218 
3219 	/* Some vendors want WoL disabled by default, but still supported */
3220 	if ((hw->mac.type == e1000_i350) &&
3221 	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3222 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3223 		adapter->wol = 0;
3224 	}
3225 
3226 	/* Some vendors want the ability to use the EEPROM setting as
3227 	 * enable/disable only, and not for capability
3228 	 */
3229 	if (((hw->mac.type == e1000_i350) ||
3230 	     (hw->mac.type == e1000_i354)) &&
3231 	    (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3232 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3233 		adapter->wol = 0;
3234 	}
3235 	if (hw->mac.type == e1000_i350) {
3236 		if (((pdev->subsystem_device == 0x5001) ||
3237 		     (pdev->subsystem_device == 0x5002)) &&
3238 				(hw->bus.func == 0)) {
3239 			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3240 			adapter->wol = 0;
3241 		}
3242 		if (pdev->subsystem_device == 0x1F52)
3243 			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3244 	}
3245 
3246 	device_set_wakeup_enable(&adapter->pdev->dev,
3247 				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3248 
3249 	/* reset the hardware with the new settings */
3250 	igb_reset(adapter);
3251 
3252 	/* Init the I2C interface */
3253 	err = igb_init_i2c(adapter);
3254 	if (err) {
3255 		dev_err(&pdev->dev, "failed to init i2c interface\n");
3256 		goto err_eeprom;
3257 	}
3258 
3259 	/* let the f/w know that the h/w is now under the control of the
3260 	 * driver.
3261 	 */
3262 	igb_get_hw_control(adapter);
3263 
3264 	strcpy(netdev->name, "eth%d");
3265 	err = register_netdev(netdev);
3266 	if (err)
3267 		goto err_register;
3268 
3269 	/* carrier off reporting is important to ethtool even BEFORE open */
3270 	netif_carrier_off(netdev);
3271 
3272 #ifdef CONFIG_IGB_DCA
3273 	if (dca_add_requester(&pdev->dev) == 0) {
3274 		adapter->flags |= IGB_FLAG_DCA_ENABLED;
3275 		dev_info(&pdev->dev, "DCA enabled\n");
3276 		igb_setup_dca(adapter);
3277 	}
3278 
3279 #endif
3280 #ifdef CONFIG_IGB_HWMON
3281 	/* Initialize the thermal sensor on i350 devices. */
3282 	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3283 		u16 ets_word;
3284 
3285 		/* Read the NVM to determine if this i350 device supports an
3286 		 * external thermal sensor.
3287 		 */
3288 		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3289 		if (ets_word != 0x0000 && ets_word != 0xFFFF)
3290 			adapter->ets = true;
3291 		else
3292 			adapter->ets = false;
3293 		if (igb_sysfs_init(adapter))
3294 			dev_err(&pdev->dev,
3295 				"failed to allocate sysfs resources\n");
3296 	} else {
3297 		adapter->ets = false;
3298 	}
3299 #endif
3300 	/* Check if Media Autosense is enabled */
3301 	adapter->ei = *ei;
3302 	if (hw->dev_spec._82575.mas_capable)
3303 		igb_init_mas(adapter);
3304 
3305 	/* do hw tstamp init after resetting */
3306 	igb_ptp_init(adapter);
3307 
3308 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3309 	/* print bus type/speed/width info, not applicable to i354 */
3310 	if (hw->mac.type != e1000_i354) {
3311 		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3312 			 netdev->name,
3313 			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3314 			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3315 			   "unknown"),
3316 			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3317 			  "Width x4" :
3318 			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
3319 			  "Width x2" :
3320 			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
3321 			  "Width x1" : "unknown"), netdev->dev_addr);
3322 	}
3323 
3324 	if ((hw->mac.type >= e1000_i210 ||
3325 	     igb_get_flash_presence_i210(hw))) {
3326 		ret_val = igb_read_part_string(hw, part_str,
3327 					       E1000_PBANUM_LENGTH);
3328 	} else {
3329 		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3330 	}
3331 
3332 	if (ret_val)
3333 		strcpy(part_str, "Unknown");
3334 	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3335 	dev_info(&pdev->dev,
3336 		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3337 		(adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3338 		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3339 		adapter->num_rx_queues, adapter->num_tx_queues);
3340 	if (hw->phy.media_type == e1000_media_type_copper) {
3341 		switch (hw->mac.type) {
3342 		case e1000_i350:
3343 		case e1000_i210:
3344 		case e1000_i211:
3345 			/* Enable EEE for internal copper PHY devices */
3346 			err = igb_set_eee_i350(hw, true, true);
3347 			if ((!err) &&
3348 			    (!hw->dev_spec._82575.eee_disable)) {
3349 				adapter->eee_advert =
3350 					MDIO_EEE_100TX | MDIO_EEE_1000T;
3351 				adapter->flags |= IGB_FLAG_EEE;
3352 			}
3353 			break;
3354 		case e1000_i354:
3355 			if ((rd32(E1000_CTRL_EXT) &
3356 			    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3357 				err = igb_set_eee_i354(hw, true, true);
3358 				if ((!err) &&
3359 					(!hw->dev_spec._82575.eee_disable)) {
3360 					adapter->eee_advert =
3361 					   MDIO_EEE_100TX | MDIO_EEE_1000T;
3362 					adapter->flags |= IGB_FLAG_EEE;
3363 				}
3364 			}
3365 			break;
3366 		default:
3367 			break;
3368 		}
3369 	}
3370 	pm_runtime_put_noidle(&pdev->dev);
3371 	return 0;
3372 
3373 err_register:
3374 	igb_release_hw_control(adapter);
3375 	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3376 err_eeprom:
3377 	if (!igb_check_reset_block(hw))
3378 		igb_reset_phy(hw);
3379 
3380 	if (hw->flash_address)
3381 		iounmap(hw->flash_address);
3382 err_sw_init:
3383 	kfree(adapter->mac_table);
3384 	kfree(adapter->shadow_vfta);
3385 	igb_clear_interrupt_scheme(adapter);
3386 #ifdef CONFIG_PCI_IOV
3387 	igb_disable_sriov(pdev);
3388 #endif
3389 	pci_iounmap(pdev, adapter->io_addr);
3390 err_ioremap:
3391 	free_netdev(netdev);
3392 err_alloc_etherdev:
3393 	pci_release_mem_regions(pdev);
3394 err_pci_reg:
3395 err_dma:
3396 	pci_disable_device(pdev);
3397 	return err;
3398 }
3399 
3400 #ifdef CONFIG_PCI_IOV
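/**
 *  igb_disable_sriov - free VF resources and turn off SR-IOV
 *  @pdev: PCI device information struct
 *
 *  Returns -EPERM without deallocating anything if a VF is still assigned
 *  to a guest, since its queues cannot be reclaimed safely in that case.
 **/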
3401 static int igb_disable_sriov(struct pci_dev *pdev)
3402 {
3403 	struct net_device *netdev = pci_get_drvdata(pdev);
3404 	struct igb_adapter *adapter = netdev_priv(netdev);
3405 	struct e1000_hw *hw = &adapter->hw;
3406 
3407 	/* reclaim resources allocated to VFs */
3408 	if (adapter->vf_data) {
3409 		/* disable iov and allow time for transactions to clear */
3410 		if (pci_vfs_assigned(pdev)) {
3411 			dev_warn(&pdev->dev,
3412 				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3413 			return -EPERM;
3414 		} else {
3415 			pci_disable_sriov(pdev);
3416 			msleep(500);
3417 		}
3418 
3419 		kfree(adapter->vf_mac_list);
3420 		adapter->vf_mac_list = NULL;
3421 		kfree(adapter->vf_data);
3422 		adapter->vf_data = NULL;
3423 		adapter->vfs_allocated_count = 0;
3424 		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3425 		wrfl();
3426 		msleep(100);
3427 		dev_info(&pdev->dev, "IOV Disabled\n");
3428 
3429 		/* Re-enable DMA Coalescing flag since IOV is turned off */
3430 		adapter->flags |= IGB_FLAG_DMAC;
3431 	}
3432 
3433 	return 0;
3434 }
3435 
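/**
 *  igb_enable_sriov - set up VF data structures and enable SR-IOV
 *  @pdev: PCI device information struct
 *  @num_vfs: number of VFs requested (the driver supports at most 7)
 *
 *  Reuses VFs already enabled by the PCI core if there are any; otherwise
 *  pci_enable_sriov() is called. DMA coalescing is disabled while IOV is
 *  active.
 **/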
3436 static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3437 {
3438 	struct net_device *netdev = pci_get_drvdata(pdev);
3439 	struct igb_adapter *adapter = netdev_priv(netdev);
3440 	int old_vfs = pci_num_vf(pdev);
3441 	struct vf_mac_filter *mac_list;
3442 	int err = 0;
3443 	int num_vf_mac_filters, i;
3444 
3445 	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3446 		err = -EPERM;
3447 		goto out;
3448 	}
3449 	if (!num_vfs)
3450 		goto out;
3451 
3452 	if (old_vfs) {
3453 		dev_info(&pdev->dev, "%d pre-allocated VFs found - overriding max_vfs setting of %d\n",
3454 			 old_vfs, max_vfs);
3455 		adapter->vfs_allocated_count = old_vfs;
3456 	} else
3457 		adapter->vfs_allocated_count = num_vfs;
3458 
3459 	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3460 				sizeof(struct vf_data_storage), GFP_KERNEL);
3461 
3462 	/* if allocation failed then we do not support SR-IOV */
3463 	if (!adapter->vf_data) {
3464 		adapter->vfs_allocated_count = 0;
3465 		err = -ENOMEM;
3466 		goto out;
3467 	}
3468 
3469 	/* Due to the limited number of RAR entries, calculate the potential
3470 	 * number of MAC filters available for the VFs. Reserve entries for
3471 	 * the PF default MAC, the PF MAC filters, and at least one RAR
3472 	 * entry per VF for its MAC address.
3473 	 */
3474 	num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3475 			     (1 + IGB_PF_MAC_FILTERS_RESERVED +
3476 			      adapter->vfs_allocated_count);
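	/* Illustrative arithmetic (example values assumed, not taken from
	 * any datasheet): with 24 RAR entries, IGB_PF_MAC_FILTERS_RESERVED
	 * of 3 and 7 VFs, this leaves 24 - (1 + 3 + 7) = 13 shared VF MAC
	 * filters.
	 */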
3477 
3478 	adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3479 				       sizeof(struct vf_mac_filter),
3480 				       GFP_KERNEL);
3481 
3482 	mac_list = adapter->vf_mac_list;
3483 	INIT_LIST_HEAD(&adapter->vf_macs.l);
3484 
3485 	if (adapter->vf_mac_list) {
3486 		/* Initialize list of VF MAC filters */
3487 		for (i = 0; i < num_vf_mac_filters; i++) {
3488 			mac_list->vf = -1;
3489 			mac_list->free = true;
3490 			list_add(&mac_list->l, &adapter->vf_macs.l);
3491 			mac_list++;
3492 		}
3493 	} else {
3494 		/* If we could not allocate memory for the VF MAC filters
3495 		 * we can continue without this feature but warn user.
3496 		 */
3497 		dev_err(&pdev->dev,
3498 			"Unable to allocate memory for VF MAC filter list\n");
3499 	}
3500 
3501 	/* only call pci_enable_sriov() if no VFs are allocated already */
3502 	if (!old_vfs) {
3503 		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3504 		if (err)
3505 			goto err_out;
3506 	}
3507 	dev_info(&pdev->dev, "%d VFs allocated\n",
3508 		 adapter->vfs_allocated_count);
3509 	for (i = 0; i < adapter->vfs_allocated_count; i++)
3510 		igb_vf_configure(adapter, i);
3511 
3512 	/* DMA Coalescing is not supported in IOV mode. */
3513 	adapter->flags &= ~IGB_FLAG_DMAC;
3514 	goto out;
3515 
3516 err_out:
3517 	kfree(adapter->vf_mac_list);
3518 	adapter->vf_mac_list = NULL;
3519 	kfree(adapter->vf_data);
3520 	adapter->vf_data = NULL;
3521 	adapter->vfs_allocated_count = 0;
3522 out:
3523 	return err;
3524 }
3525 
3526 #endif
3527 /**
3528  *  igb_remove_i2c - Cleanup I2C interface
3529  *  @adapter: pointer to adapter structure
3530  **/
3531 static void igb_remove_i2c(struct igb_adapter *adapter)
3532 {
3533 	/* free the adapter bus structure */
3534 	i2c_del_adapter(&adapter->i2c_adap);
3535 }
3536 
3537 /**
3538  *  igb_remove - Device Removal Routine
3539  *  @pdev: PCI device information struct
3540  *
3541  *  igb_remove is called by the PCI subsystem to alert the driver
3542  *  that it should release a PCI device.  The could be caused by a
3543  *  that it should release a PCI device.  This could be caused by a
3544  *  memory.
3545  **/
3546 static void igb_remove(struct pci_dev *pdev)
3547 {
3548 	struct net_device *netdev = pci_get_drvdata(pdev);
3549 	struct igb_adapter *adapter = netdev_priv(netdev);
3550 	struct e1000_hw *hw = &adapter->hw;
3551 
3552 	pm_runtime_get_noresume(&pdev->dev);
3553 #ifdef CONFIG_IGB_HWMON
3554 	igb_sysfs_exit(adapter);
3555 #endif
3556 	igb_remove_i2c(adapter);
3557 	igb_ptp_stop(adapter);
3558 	/* The watchdog timer may be rescheduled, so explicitly
3559 	 * prevent it from being rescheduled.
3560 	 */
3561 	set_bit(__IGB_DOWN, &adapter->state);
3562 	del_timer_sync(&adapter->watchdog_timer);
3563 	del_timer_sync(&adapter->phy_info_timer);
3564 
3565 	cancel_work_sync(&adapter->reset_task);
3566 	cancel_work_sync(&adapter->watchdog_task);
3567 
3568 #ifdef CONFIG_IGB_DCA
3569 	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3570 		dev_info(&pdev->dev, "DCA disabled\n");
3571 		dca_remove_requester(&pdev->dev);
3572 		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3573 		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3574 	}
3575 #endif
3576 
3577 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
3578 	 * would have already happened in close and is redundant.
3579 	 */
3580 	igb_release_hw_control(adapter);
3581 
3582 #ifdef CONFIG_PCI_IOV
3583 	igb_disable_sriov(pdev);
3584 #endif
3585 
3586 	unregister_netdev(netdev);
3587 
3588 	igb_clear_interrupt_scheme(adapter);
3589 
3590 	pci_iounmap(pdev, adapter->io_addr);
3591 	if (hw->flash_address)
3592 		iounmap(hw->flash_address);
3593 	pci_release_mem_regions(pdev);
3594 
3595 	kfree(adapter->mac_table);
3596 	kfree(adapter->shadow_vfta);
3597 	free_netdev(netdev);
3598 
3599 	pci_disable_pcie_error_reporting(pdev);
3600 
3601 	pci_disable_device(pdev);
3602 }
3603 
3604 /**
3605  *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
3606  *  @adapter: board private structure to initialize
3607  *
3608  *  This function initializes the vf specific data storage and then attempts to
3609  *  allocate the VFs.  The reason for ordering it this way is because it is much
3610  *  more expensive time-wise to disable SR-IOV than it is to allocate and free
3611  *  the memory for the VFs.
3612  **/
3613 static void igb_probe_vfs(struct igb_adapter *adapter)
3614 {
3615 #ifdef CONFIG_PCI_IOV
3616 	struct pci_dev *pdev = adapter->pdev;
3617 	struct e1000_hw *hw = &adapter->hw;
3618 
3619 	/* Virtualization features not supported on i210 family. */
3620 	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3621 		return;
3622 
3623 	/* Of the two calls below we really only want the side effect of getting
3624 	 * IGB_FLAG_HAS_MSIX set (if available), without which
3625 	 * igb_enable_sriov() has no effect.
3626 	 */
3627 	igb_set_interrupt_capability(adapter, true);
3628 	igb_reset_interrupt_capability(adapter);
3629 
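	/* These parts support at most 8 pools; one is reserved for the PF,
	 * hence the cap of 7 VFs here (matching the num_vfs > 7 check in
	 * igb_enable_sriov()).
	 */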
3630 	pci_sriov_set_totalvfs(pdev, 7);
3631 	igb_enable_sriov(pdev, max_vfs);
3632 
3633 #endif /* CONFIG_PCI_IOV */
3634 }
3635 
3636 unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3637 {
3638 	struct e1000_hw *hw = &adapter->hw;
3639 	unsigned int max_rss_queues;
3640 
3641 	/* Determine the maximum number of RSS queues supported. */
3642 	switch (hw->mac.type) {
3643 	case e1000_i211:
3644 		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3645 		break;
3646 	case e1000_82575:
3647 	case e1000_i210:
3648 		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3649 		break;
3650 	case e1000_i350:
3651 		/* I350 cannot do RSS and SR-IOV at the same time */
3652 		if (!!adapter->vfs_allocated_count) {
3653 			max_rss_queues = 1;
3654 			break;
3655 		}
3656 		/* fall through */
3657 	case e1000_82576:
3658 		if (!!adapter->vfs_allocated_count) {
3659 			max_rss_queues = 2;
3660 			break;
3661 		}
3662 		/* fall through */
3663 	case e1000_82580:
3664 	case e1000_i354:
3665 	default:
3666 		max_rss_queues = IGB_MAX_RX_QUEUES;
3667 		break;
3668 	}
3669 
3670 	return max_rss_queues;
3671 }
3672 
3673 static void igb_init_queue_configuration(struct igb_adapter *adapter)
3674 {
3675 	u32 max_rss_queues;
3676 
3677 	max_rss_queues = igb_get_max_rss_queues(adapter);
3678 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
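	/* e.g. (hypothetical system): max_rss_queues = 8 on a 4-CPU box
	 * gives rss_queues = min(8, 4) = 4.
	 */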
3679 
3680 	igb_set_flag_queue_pairs(adapter, max_rss_queues);
3681 }
3682 
3683 void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3684 			      const u32 max_rss_queues)
3685 {
3686 	struct e1000_hw *hw = &adapter->hw;
3687 
3688 	/* Determine if we need to pair queues. */
3689 	switch (hw->mac.type) {
3690 	case e1000_82575:
3691 	case e1000_i211:
3692 		/* Device supports enough interrupts without queue pairing. */
3693 		break;
3694 	case e1000_82576:
3695 	case e1000_82580:
3696 	case e1000_i350:
3697 	case e1000_i354:
3698 	case e1000_i210:
3699 	default:
3700 		/* If rss_queues > half of max_rss_queues, pair the queues in
3701 		 * order to conserve interrupts due to limited supply.
3702 		 */
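		/* Worked example (assumed values): with max_rss_queues = 8,
		 * rss_queues = 5 exceeds 8 / 2 = 4, so Tx/Rx queues share
		 * interrupt vectors; rss_queues = 4 would keep them unpaired.
		 */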
3703 		if (adapter->rss_queues > (max_rss_queues / 2))
3704 			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3705 		else
3706 			adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3707 		break;
3708 	}
3709 }
3710 
3711 /**
3712  *  igb_sw_init - Initialize general software structures (struct igb_adapter)
3713  *  @adapter: board private structure to initialize
3714  *
3715  *  igb_sw_init initializes the Adapter private data structure.
3716  *  Fields are initialized based on PCI device information and
3717  *  OS network device settings (MTU size).
3718  **/
3719 static int igb_sw_init(struct igb_adapter *adapter)
3720 {
3721 	struct e1000_hw *hw = &adapter->hw;
3722 	struct net_device *netdev = adapter->netdev;
3723 	struct pci_dev *pdev = adapter->pdev;
3724 
3725 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3726 
3727 	/* set default ring sizes */
3728 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
3729 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
3730 
3731 	/* set default ITR values */
3732 	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3733 	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3734 
3735 	/* set default work limits */
3736 	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3737 
3738 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3739 				  VLAN_HLEN;
3740 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
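	/* For the standard 1500 byte MTU this evaluates to
	 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522
	 * bytes max, and ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 bytes min.
	 */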
3741 
3742 	spin_lock_init(&adapter->nfc_lock);
3743 	spin_lock_init(&adapter->stats64_lock);
3744 #ifdef CONFIG_PCI_IOV
3745 	switch (hw->mac.type) {
3746 	case e1000_82576:
3747 	case e1000_i350:
3748 		if (max_vfs > 7) {
3749 			dev_warn(&pdev->dev,
3750 				 "Maximum of 7 VFs per PF, using max\n");
3751 			max_vfs = adapter->vfs_allocated_count = 7;
3752 		} else
3753 			adapter->vfs_allocated_count = max_vfs;
3754 		if (adapter->vfs_allocated_count)
3755 			dev_warn(&pdev->dev,
3756 				 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3757 		break;
3758 	default:
3759 		break;
3760 	}
3761 #endif /* CONFIG_PCI_IOV */
3762 
3763 	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
3764 	adapter->flags |= IGB_FLAG_HAS_MSIX;
3765 
3766 	adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3767 				     sizeof(struct igb_mac_addr), GFP_ATOMIC);
3768 	if (!adapter->mac_table)
3769 		return -ENOMEM;
3770 
3771 	igb_probe_vfs(adapter);
3772 
3773 	igb_init_queue_configuration(adapter);
3774 
3775 	/* Setup and initialize a copy of the hw vlan table array */
3776 	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3777 				       GFP_ATOMIC);
3778 	if (!adapter->shadow_vfta)
3779 		return -ENOMEM;
3780 
3781 	/* This call may decrease the number of queues */
3782 	if (igb_init_interrupt_scheme(adapter, true)) {
3783 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3784 		return -ENOMEM;
3785 	}
3786 
3787 	/* Explicitly disable IRQ since the NIC can be in any state. */
3788 	igb_irq_disable(adapter);
3789 
3790 	if (hw->mac.type >= e1000_i350)
3791 		adapter->flags &= ~IGB_FLAG_DMAC;
3792 
3793 	set_bit(__IGB_DOWN, &adapter->state);
3794 	return 0;
3795 }
3796 
3797 /**
3798  *  igb_open - Called when a network interface is made active
3799  *  @netdev: network interface device structure
3800  *
3801  *  Returns 0 on success, negative value on failure
3802  *
3803  *  The open entry point is called when a network interface is made
3804  *  active by the system (IFF_UP).  At this point all resources needed
3805  *  for transmit and receive operations are allocated, the interrupt
3806  *  handler is registered with the OS, the watchdog timer is started,
3807  *  and the stack is notified that the interface is ready.
3808  **/
3809 static int __igb_open(struct net_device *netdev, bool resuming)
3810 {
3811 	struct igb_adapter *adapter = netdev_priv(netdev);
3812 	struct e1000_hw *hw = &adapter->hw;
3813 	struct pci_dev *pdev = adapter->pdev;
3814 	int err;
3815 	int i;
3816 
3817 	/* disallow open during test */
3818 	if (test_bit(__IGB_TESTING, &adapter->state)) {
3819 		WARN_ON(resuming);
3820 		return -EBUSY;
3821 	}
3822 
3823 	if (!resuming)
3824 		pm_runtime_get_sync(&pdev->dev);
3825 
3826 	netif_carrier_off(netdev);
3827 
3828 	/* allocate transmit descriptors */
3829 	err = igb_setup_all_tx_resources(adapter);
3830 	if (err)
3831 		goto err_setup_tx;
3832 
3833 	/* allocate receive descriptors */
3834 	err = igb_setup_all_rx_resources(adapter);
3835 	if (err)
3836 		goto err_setup_rx;
3837 
3838 	igb_power_up_link(adapter);
3839 
3840 	/* before we allocate an interrupt, we must be ready to handle it.
3841 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3842 	 * as soon as we call pci_request_irq, so we have to setup our
3843 	 * clean_rx handler before we do so.
3844 	 */
3845 	igb_configure(adapter);
3846 
3847 	err = igb_request_irq(adapter);
3848 	if (err)
3849 		goto err_req_irq;
3850 
3851 	/* Notify the stack of the actual queue counts. */
3852 	err = netif_set_real_num_tx_queues(adapter->netdev,
3853 					   adapter->num_tx_queues);
3854 	if (err)
3855 		goto err_set_queues;
3856 
3857 	err = netif_set_real_num_rx_queues(adapter->netdev,
3858 					   adapter->num_rx_queues);
3859 	if (err)
3860 		goto err_set_queues;
3861 
3862 	/* From here on the code is the same as igb_up() */
3863 	clear_bit(__IGB_DOWN, &adapter->state);
3864 
3865 	for (i = 0; i < adapter->num_q_vectors; i++)
3866 		napi_enable(&(adapter->q_vector[i]->napi));
3867 
3868 	/* Clear any pending interrupts. */
3869 	rd32(E1000_TSICR);
3870 	rd32(E1000_ICR);
3871 
3872 	igb_irq_enable(adapter);
3873 
3874 	/* notify VFs that reset has been completed */
3875 	if (adapter->vfs_allocated_count) {
3876 		u32 reg_data = rd32(E1000_CTRL_EXT);
3877 
3878 		reg_data |= E1000_CTRL_EXT_PFRSTD;
3879 		wr32(E1000_CTRL_EXT, reg_data);
3880 	}
3881 
3882 	netif_tx_start_all_queues(netdev);
3883 
3884 	if (!resuming)
3885 		pm_runtime_put(&pdev->dev);
3886 
3887 	/* start the watchdog. */
3888 	hw->mac.get_link_status = 1;
3889 	schedule_work(&adapter->watchdog_task);
3890 
3891 	return 0;
3892 
3893 err_set_queues:
3894 	igb_free_irq(adapter);
3895 err_req_irq:
3896 	igb_release_hw_control(adapter);
3897 	igb_power_down_link(adapter);
3898 	igb_free_all_rx_resources(adapter);
3899 err_setup_rx:
3900 	igb_free_all_tx_resources(adapter);
3901 err_setup_tx:
3902 	igb_reset(adapter);
3903 	if (!resuming)
3904 		pm_runtime_put(&pdev->dev);
3905 
3906 	return err;
3907 }
3908 
3909 int igb_open(struct net_device *netdev)
3910 {
3911 	return __igb_open(netdev, false);
3912 }
3913 
3914 /**
3915  *  igb_close - Disables a network interface
3916  *  @netdev: network interface device structure
3917  *
3918  *  Returns 0, this is not allowed to fail
3919  *
3920  *  The close entry point is called when an interface is de-activated
3921  *  by the OS.  The hardware is still under the driver's control, but
3922  *  needs to be disabled.  A global MAC reset is issued to stop the
3923  *  hardware, and all transmit and receive resources are freed.
3924  **/
3925 static int __igb_close(struct net_device *netdev, bool suspending)
3926 {
3927 	struct igb_adapter *adapter = netdev_priv(netdev);
3928 	struct pci_dev *pdev = adapter->pdev;
3929 
3930 	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
3931 
3932 	if (!suspending)
3933 		pm_runtime_get_sync(&pdev->dev);
3934 
3935 	igb_down(adapter);
3936 	igb_free_irq(adapter);
3937 
3938 	igb_free_all_tx_resources(adapter);
3939 	igb_free_all_rx_resources(adapter);
3940 
3941 	if (!suspending)
3942 		pm_runtime_put_sync(&pdev->dev);
3943 	return 0;
3944 }
3945 
3946 int igb_close(struct net_device *netdev)
3947 {
3948 	if (netif_device_present(netdev) || netdev->dismantle)
3949 		return __igb_close(netdev, false);
3950 	return 0;
3951 }
3952 
3953 /**
3954  *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
3955  *  @tx_ring: tx descriptor ring (for a specific queue) to setup
3956  *
3957  *  Return 0 on success, negative on failure
3958  **/
3959 int igb_setup_tx_resources(struct igb_ring *tx_ring)
3960 {
3961 	struct device *dev = tx_ring->dev;
3962 	int size;
3963 
3964 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3965 
3966 	tx_ring->tx_buffer_info = vmalloc(size);
3967 	if (!tx_ring->tx_buffer_info)
3968 		goto err;
3969 
3970 	/* round up to nearest 4K */
3971 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
3972 	tx_ring->size = ALIGN(tx_ring->size, 4096);
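	/* Each advanced Tx descriptor is 16 bytes, so e.g. the default of
	 * 256 descriptors needs exactly 256 * 16 = 4096 bytes (one page),
	 * while an odd count such as 320 (5120 bytes) rounds up to 8192.
	 */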
3973 
3974 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
3975 					   &tx_ring->dma, GFP_KERNEL);
3976 	if (!tx_ring->desc)
3977 		goto err;
3978 
3979 	tx_ring->next_to_use = 0;
3980 	tx_ring->next_to_clean = 0;
3981 
3982 	return 0;
3983 
3984 err:
3985 	vfree(tx_ring->tx_buffer_info);
3986 	tx_ring->tx_buffer_info = NULL;
3987 	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
3988 	return -ENOMEM;
3989 }
3990 
3991 /**
3992  *  igb_setup_all_tx_resources - wrapper to allocate Tx resources
3993  *				 (Descriptors) for all queues
3994  *  @adapter: board private structure
3995  *
3996  *  Return 0 on success, negative on failure
3997  **/
3998 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
3999 {
4000 	struct pci_dev *pdev = adapter->pdev;
4001 	int i, err = 0;
4002 
4003 	for (i = 0; i < adapter->num_tx_queues; i++) {
4004 		err = igb_setup_tx_resources(adapter->tx_ring[i]);
4005 		if (err) {
4006 			dev_err(&pdev->dev,
4007 				"Allocation for Tx Queue %u failed\n", i);
4008 			for (i--; i >= 0; i--)
4009 				igb_free_tx_resources(adapter->tx_ring[i]);
4010 			break;
4011 		}
4012 	}
4013 
4014 	return err;
4015 }
4016 
4017 /**
4018  *  igb_setup_tctl - configure the transmit control registers
4019  *  @adapter: Board private structure
4020  **/
4021 void igb_setup_tctl(struct igb_adapter *adapter)
4022 {
4023 	struct e1000_hw *hw = &adapter->hw;
4024 	u32 tctl;
4025 
4026 	/* disable queue 0 which is enabled by default on 82575 and 82576 */
4027 	wr32(E1000_TXDCTL(0), 0);
4028 
4029 	/* Program the Transmit Control Register */
4030 	tctl = rd32(E1000_TCTL);
4031 	tctl &= ~E1000_TCTL_CT;
4032 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4033 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4034 
4035 	igb_config_collision_dist(hw);
4036 
4037 	/* Enable transmits */
4038 	tctl |= E1000_TCTL_EN;
4039 
4040 	wr32(E1000_TCTL, tctl);
4041 }
4042 
4043 /**
4044  *  igb_configure_tx_ring - Configure transmit ring after Reset
4045  *  @adapter: board private structure
4046  *  @ring: tx ring to configure
4047  *
4048  *  Configure a transmit ring after a reset.
4049  **/
4050 void igb_configure_tx_ring(struct igb_adapter *adapter,
4051 			   struct igb_ring *ring)
4052 {
4053 	struct e1000_hw *hw = &adapter->hw;
4054 	u32 txdctl = 0;
4055 	u64 tdba = ring->dma;
4056 	int reg_idx = ring->reg_idx;
4057 
4058 	wr32(E1000_TDLEN(reg_idx),
4059 	     ring->count * sizeof(union e1000_adv_tx_desc));
4060 	wr32(E1000_TDBAL(reg_idx),
4061 	     tdba & 0x00000000ffffffffULL);
4062 	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4063 
4064 	ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4065 	wr32(E1000_TDH(reg_idx), 0);
4066 	writel(0, ring->tail);
4067 
4068 	txdctl |= IGB_TX_PTHRESH;
4069 	txdctl |= IGB_TX_HTHRESH << 8;
4070 	txdctl |= IGB_TX_WTHRESH << 16;
4071 
4072 	/* reinitialize tx_buffer_info */
4073 	memset(ring->tx_buffer_info, 0,
4074 	       sizeof(struct igb_tx_buffer) * ring->count);
4075 
4076 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4077 	wr32(E1000_TXDCTL(reg_idx), txdctl);
4078 }
4079 
4080 /**
4081  *  igb_configure_tx - Configure transmit Unit after Reset
4082  *  @adapter: board private structure
4083  *
4084  *  Configure the Tx unit of the MAC after a reset.
4085  **/
4086 static void igb_configure_tx(struct igb_adapter *adapter)
4087 {
4088 	struct e1000_hw *hw = &adapter->hw;
4089 	int i;
4090 
4091 	/* disable the queues */
4092 	for (i = 0; i < adapter->num_tx_queues; i++)
4093 		wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4094 
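	/* Flush the queue-disable writes, then wait; the 10-20 ms delay is
	 * assumed to be ample for in-flight Tx DMA to quiesce.
	 */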
4095 	wrfl();
4096 	usleep_range(10000, 20000);
4097 
4098 	for (i = 0; i < adapter->num_tx_queues; i++)
4099 		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4100 }
4101 
4102 /**
4103  *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
4104  *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
4105  *
4106  *  Returns 0 on success, negative on failure
4107  **/
4108 int igb_setup_rx_resources(struct igb_ring *rx_ring)
4109 {
4110 	struct device *dev = rx_ring->dev;
4111 	int size;
4112 
4113 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4114 
4115 	rx_ring->rx_buffer_info = vmalloc(size);
4116 	if (!rx_ring->rx_buffer_info)
4117 		goto err;
4118 
4119 	/* Round up to nearest 4K */
4120 	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4121 	rx_ring->size = ALIGN(rx_ring->size, 4096);
4122 
4123 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4124 					   &rx_ring->dma, GFP_KERNEL);
4125 	if (!rx_ring->desc)
4126 		goto err;
4127 
4128 	rx_ring->next_to_alloc = 0;
4129 	rx_ring->next_to_clean = 0;
4130 	rx_ring->next_to_use = 0;
4131 
4132 	return 0;
4133 
4134 err:
4135 	vfree(rx_ring->rx_buffer_info);
4136 	rx_ring->rx_buffer_info = NULL;
4137 	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4138 	return -ENOMEM;
4139 }
4140 
4141 /**
4142  *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
4143  *				 (Descriptors) for all queues
4144  *  @adapter: board private structure
4145  *
4146  *  Return 0 on success, negative on failure
4147  **/
4148 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4149 {
4150 	struct pci_dev *pdev = adapter->pdev;
4151 	int i, err = 0;
4152 
4153 	for (i = 0; i < adapter->num_rx_queues; i++) {
4154 		err = igb_setup_rx_resources(adapter->rx_ring[i]);
4155 		if (err) {
4156 			dev_err(&pdev->dev,
4157 				"Allocation for Rx Queue %u failed\n", i);
4158 			for (i--; i >= 0; i--)
4159 				igb_free_rx_resources(adapter->rx_ring[i]);
4160 			break;
4161 		}
4162 	}
4163 
4164 	return err;
4165 }
4166 
4167 /**
4168  *  igb_setup_mrqc - configure the multiple receive queue control registers
4169  *  @adapter: Board private structure
4170  **/
4171 static void igb_setup_mrqc(struct igb_adapter *adapter)
4172 {
4173 	struct e1000_hw *hw = &adapter->hw;
4174 	u32 mrqc, rxcsum;
4175 	u32 j, num_rx_queues;
4176 	u32 rss_key[10];
4177 
4178 	netdev_rss_key_fill(rss_key, sizeof(rss_key));
4179 	for (j = 0; j < 10; j++)
4180 		wr32(E1000_RSSRK(j), rss_key[j]);
4181 
4182 	num_rx_queues = adapter->rss_queues;
4183 
4184 	switch (hw->mac.type) {
4185 	case e1000_82576:
4186 		/* 82576 supports 2 RSS queues for SR-IOV */
4187 		if (adapter->vfs_allocated_count)
4188 			num_rx_queues = 2;
4189 		break;
4190 	default:
4191 		break;
4192 	}
4193 
4194 	if (adapter->rss_indir_tbl_init != num_rx_queues) {
4195 		for (j = 0; j < IGB_RETA_SIZE; j++)
4196 			adapter->rss_indir_tbl[j] =
4197 			(j * num_rx_queues) / IGB_RETA_SIZE;
4198 		adapter->rss_indir_tbl_init = num_rx_queues;
4199 	}
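	/* The indirection table spreads entries evenly; e.g. with
	 * num_rx_queues = 4 and IGB_RETA_SIZE = 128, entries 0-31 map to
	 * queue 0, 32-63 to queue 1, and so on.
	 */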
4200 	igb_write_rss_indir_tbl(adapter);
4201 
4202 	/* Disable raw packet checksumming so that RSS hash is placed in
4203 	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
4204 	 * offloads as they are enabled by default
4205 	 */
4206 	rxcsum = rd32(E1000_RXCSUM);
4207 	rxcsum |= E1000_RXCSUM_PCSD;
4208 
4209 	if (adapter->hw.mac.type >= e1000_82576)
4210 		/* Enable Receive Checksum Offload for SCTP */
4211 		rxcsum |= E1000_RXCSUM_CRCOFL;
4212 
4213 	/* Don't need to set TUOFL or IPOFL, they default to 1 */
4214 	wr32(E1000_RXCSUM, rxcsum);
4215 
4216 	/* Generate RSS hash based on packet types, TCP/UDP
4217 	 * port numbers and/or IPv4/v6 src and dst addresses
4218 	 */
4219 	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4220 	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
4221 	       E1000_MRQC_RSS_FIELD_IPV6 |
4222 	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
4223 	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4224 
4225 	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4226 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4227 	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4228 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4229 
4230 	/* If VMDq is enabled then we set the appropriate mode for that, else
4231 	 * we default to RSS so that an RSS hash is calculated per packet even
4232 	 * if we are only using one queue
4233 	 */
4234 	if (adapter->vfs_allocated_count) {
4235 		if (hw->mac.type > e1000_82575) {
4236 			/* Set the default pool for the PF's first queue */
4237 			u32 vtctl = rd32(E1000_VT_CTL);
4238 
4239 			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4240 				   E1000_VT_CTL_DISABLE_DEF_POOL);
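			/* Pools 0..n-1 belong to the VFs; the PF's default
			 * pool is the next index, vfs_allocated_count.
			 */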
4241 			vtctl |= adapter->vfs_allocated_count <<
4242 				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4243 			wr32(E1000_VT_CTL, vtctl);
4244 		}
4245 		if (adapter->rss_queues > 1)
4246 			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4247 		else
4248 			mrqc |= E1000_MRQC_ENABLE_VMDQ;
4249 	} else {
4250 		if (hw->mac.type != e1000_i211)
4251 			mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4252 	}
4253 	igb_vmm_control(adapter);
4254 
4255 	wr32(E1000_MRQC, mrqc);
4256 }
4257 
4258 /**
4259  *  igb_setup_rctl - configure the receive control registers
4260  *  @adapter: Board private structure
4261  **/
4262 void igb_setup_rctl(struct igb_adapter *adapter)
4263 {
4264 	struct e1000_hw *hw = &adapter->hw;
4265 	u32 rctl;
4266 
4267 	rctl = rd32(E1000_RCTL);
4268 
4269 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4270 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4271 
4272 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4273 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4274 
4275 	/* enable stripping of CRC. It's unlikely this will break BMC
4276 	 * redirection as it did with e1000. Newer features require
4277 	 * that the HW strips the CRC.
4278 	 */
4279 	rctl |= E1000_RCTL_SECRC;
4280 
4281 	/* disable store bad packets and clear size bits. */
4282 	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4283 
4284 	/* enable LPE to allow for reception of jumbo frames */
4285 	rctl |= E1000_RCTL_LPE;
4286 
4287 	/* disable queue 0 to prevent tail write w/o re-config */
4288 	wr32(E1000_RXDCTL(0), 0);
4289 
4290 	/* Attention!!!  For SR-IOV PF driver operations you must enable
4291 	 * queue drop for all VF and PF queues to prevent head of line blocking
4292 	 * if an un-trusted VF does not provide descriptors to hardware.
4293 	 */
4294 	if (adapter->vfs_allocated_count) {
4295 		/* set all queue drop enable bits */
4296 		wr32(E1000_QDE, ALL_QUEUES);
4297 	}
4298 
4299 	/* This is useful for sniffing bad packets. */
4300 	if (adapter->netdev->features & NETIF_F_RXALL) {
4301 		/* UPE and MPE will be handled by normal PROMISC logic
4302 		 * in e1000e_set_rx_mode
4303 		 */
4304 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
4305 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
4306 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
4307 
4308 		rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
4309 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
4310 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
4311 		 * and that breaks VLANs.
4312 		 */
4313 	}
4314 
4315 	wr32(E1000_RCTL, rctl);
4316 }
4317 
4318 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4319 				   int vfn)
4320 {
4321 	struct e1000_hw *hw = &adapter->hw;
4322 	u32 vmolr;
4323 
4324 	if (size > MAX_JUMBO_FRAME_SIZE)
4325 		size = MAX_JUMBO_FRAME_SIZE;
4326 
4327 	vmolr = rd32(E1000_VMOLR(vfn));
4328 	vmolr &= ~E1000_VMOLR_RLPML_MASK;
4329 	vmolr |= size | E1000_VMOLR_LPE;
4330 	wr32(E1000_VMOLR(vfn), vmolr);
4331 
4332 	return 0;
4333 }
4334 
4335 static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4336 					 int vfn, bool enable)
4337 {
4338 	struct e1000_hw *hw = &adapter->hw;
4339 	u32 val, reg;
4340 
4341 	if (hw->mac.type < e1000_82576)
4342 		return;
4343 
4344 	if (hw->mac.type == e1000_i350)
4345 		reg = E1000_DVMOLR(vfn);
4346 	else
4347 		reg = E1000_VMOLR(vfn);
4348 
4349 	val = rd32(reg);
4350 	if (enable)
4351 		val |= E1000_VMOLR_STRVLAN;
4352 	else
4353 		val &= ~(E1000_VMOLR_STRVLAN);
4354 	wr32(reg, val);
4355 }
4356 
4357 static inline void igb_set_vmolr(struct igb_adapter *adapter,
4358 				 int vfn, bool aupe)
4359 {
4360 	struct e1000_hw *hw = &adapter->hw;
4361 	u32 vmolr;
4362 
4363 	/* This register exists only on 82576 and newer, so exit and do
4364 	 * nothing on older hardware.
4365 	 */
4366 	if (hw->mac.type < e1000_82576)
4367 		return;
4368 
4369 	vmolr = rd32(E1000_VMOLR(vfn));
4370 	if (aupe)
4371 		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
4372 	else
4373 		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
4374 
4375 	/* clear all bits that might not be set */
4376 	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4377 
4378 	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4379 		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
4380 	/* for VMDq only allow the VFs and pool 0 to accept broadcast and
4381 	 * multicast packets
4382 	 */
4383 	if (vfn <= adapter->vfs_allocated_count)
4384 		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
4385 
4386 	wr32(E1000_VMOLR(vfn), vmolr);
4387 }
4388 
4389 /**
4390  *  igb_configure_rx_ring - Configure a receive ring after Reset
4391  *  @adapter: board private structure
4392  *  @ring: receive ring to be configured
4393  *
4394  *  Configure the Rx unit of the MAC after a reset.
4395  **/
4396 void igb_configure_rx_ring(struct igb_adapter *adapter,
4397 			   struct igb_ring *ring)
4398 {
4399 	struct e1000_hw *hw = &adapter->hw;
4400 	union e1000_adv_rx_desc *rx_desc;
4401 	u64 rdba = ring->dma;
4402 	int reg_idx = ring->reg_idx;
4403 	u32 srrctl = 0, rxdctl = 0;
4404 
4405 	/* disable the queue */
4406 	wr32(E1000_RXDCTL(reg_idx), 0);
4407 
4408 	/* Set DMA base address registers */
4409 	wr32(E1000_RDBAL(reg_idx),
4410 	     rdba & 0x00000000ffffffffULL);
4411 	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4412 	wr32(E1000_RDLEN(reg_idx),
4413 	     ring->count * sizeof(union e1000_adv_rx_desc));
4414 
4415 	/* initialize head and tail */
4416 	ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4417 	wr32(E1000_RDH(reg_idx), 0);
4418 	writel(0, ring->tail);
4419 
4420 	/* set descriptor configuration */
4421 	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4422 	if (ring_uses_large_buffer(ring))
4423 		srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4424 	else
4425 		srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
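	/* BSIZEPKT is programmed in 1 KB units, so the 2048 and 3072 byte
	 * buffers above become field values of 2 and 3 respectively.
	 */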
4426 	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4427 	if (hw->mac.type >= e1000_82580)
4428 		srrctl |= E1000_SRRCTL_TIMESTAMP;
4429 	/* Only set Drop Enable if we are supporting multiple queues */
4430 	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
4431 		srrctl |= E1000_SRRCTL_DROP_EN;
4432 
4433 	wr32(E1000_SRRCTL(reg_idx), srrctl);
4434 
4435 	/* set filtering for VMDQ pools */
4436 	igb_set_vmolr(adapter, reg_idx & 0x7, true);
4437 
4438 	rxdctl |= IGB_RX_PTHRESH;
4439 	rxdctl |= IGB_RX_HTHRESH << 8;
4440 	rxdctl |= IGB_RX_WTHRESH << 16;
4441 
4442 	/* initialize rx_buffer_info */
4443 	memset(ring->rx_buffer_info, 0,
4444 	       sizeof(struct igb_rx_buffer) * ring->count);
4445 
4446 	/* initialize Rx descriptor 0 */
4447 	rx_desc = IGB_RX_DESC(ring, 0);
4448 	rx_desc->wb.upper.length = 0;
4449 
4450 	/* enable receive descriptor fetching */
4451 	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4452 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
4453 }
4454 
4455 static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4456 				  struct igb_ring *rx_ring)
4457 {
4458 	/* set build_skb and buffer size flags */
4459 	clear_ring_build_skb_enabled(rx_ring);
4460 	clear_ring_uses_large_buffer(rx_ring);
4461 
4462 	if (adapter->flags & IGB_FLAG_RX_LEGACY)
4463 		return;
4464 
4465 	set_ring_build_skb_enabled(rx_ring);
4466 
4467 #if (PAGE_SIZE < 8192)
4468 	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4469 		return;
4470 
4471 	set_ring_uses_large_buffer(rx_ring);
4472 #endif
4473 }
4474 
4475 /**
4476  *  igb_configure_rx - Configure receive Unit after Reset
4477  *  @adapter: board private structure
4478  *
4479  *  Configure the Rx unit of the MAC after a reset.
4480  **/
4481 static void igb_configure_rx(struct igb_adapter *adapter)
4482 {
4483 	int i;
4484 
4485 	/* set the correct pool for the PF default MAC address in entry 0 */
4486 	igb_set_default_mac_filter(adapter);
4487 
4488 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
4489 	 * the Base and Length of the Rx Descriptor Ring
4490 	 */
4491 	for (i = 0; i < adapter->num_rx_queues; i++) {
4492 		struct igb_ring *rx_ring = adapter->rx_ring[i];
4493 
4494 		igb_set_rx_buffer_len(adapter, rx_ring);
4495 		igb_configure_rx_ring(adapter, rx_ring);
4496 	}
4497 }
4498 
4499 /**
4500  *  igb_free_tx_resources - Free Tx Resources per Queue
4501  *  @tx_ring: Tx descriptor ring for a specific queue
4502  *
4503  *  Free all transmit software resources
4504  **/
4505 void igb_free_tx_resources(struct igb_ring *tx_ring)
4506 {
4507 	igb_clean_tx_ring(tx_ring);
4508 
4509 	vfree(tx_ring->tx_buffer_info);
4510 	tx_ring->tx_buffer_info = NULL;
4511 
4512 	/* if not set, then don't free */
4513 	if (!tx_ring->desc)
4514 		return;
4515 
4516 	dma_free_coherent(tx_ring->dev, tx_ring->size,
4517 			  tx_ring->desc, tx_ring->dma);
4518 
4519 	tx_ring->desc = NULL;
4520 }
4521 
4522 /**
4523  *  igb_free_all_tx_resources - Free Tx Resources for All Queues
4524  *  @adapter: board private structure
4525  *
4526  *  Free all transmit software resources
4527  **/
4528 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4529 {
4530 	int i;
4531 
4532 	for (i = 0; i < adapter->num_tx_queues; i++)
4533 		if (adapter->tx_ring[i])
4534 			igb_free_tx_resources(adapter->tx_ring[i]);
4535 }
4536 
4537 /**
4538  *  igb_clean_tx_ring - Free Tx Buffers
4539  *  @tx_ring: ring to be cleaned
4540  **/
4541 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4542 {
4543 	u16 i = tx_ring->next_to_clean;
4544 	struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4545 
4546 	while (i != tx_ring->next_to_use) {
4547 		union e1000_adv_tx_desc *eop_desc, *tx_desc;
4548 
4549 		/* Free all the Tx ring sk_buffs */
4550 		dev_kfree_skb_any(tx_buffer->skb);
4551 
4552 		/* unmap skb header data */
4553 		dma_unmap_single(tx_ring->dev,
4554 				 dma_unmap_addr(tx_buffer, dma),
4555 				 dma_unmap_len(tx_buffer, len),
4556 				 DMA_TO_DEVICE);
4557 
4558 		/* check for eop_desc to determine the end of the packet */
4559 		eop_desc = tx_buffer->next_to_watch;
4560 		tx_desc = IGB_TX_DESC(tx_ring, i);
4561 
4562 		/* unmap remaining buffers */
4563 		while (tx_desc != eop_desc) {
4564 			tx_buffer++;
4565 			tx_desc++;
4566 			i++;
4567 			if (unlikely(i == tx_ring->count)) {
4568 				i = 0;
4569 				tx_buffer = tx_ring->tx_buffer_info;
4570 				tx_desc = IGB_TX_DESC(tx_ring, 0);
4571 			}
4572 
4573 			/* unmap any remaining paged data */
4574 			if (dma_unmap_len(tx_buffer, len))
4575 				dma_unmap_page(tx_ring->dev,
4576 					       dma_unmap_addr(tx_buffer, dma),
4577 					       dma_unmap_len(tx_buffer, len),
4578 					       DMA_TO_DEVICE);
4579 		}
4580 
4581 		/* move us one more past the eop_desc for start of next pkt */
4582 		tx_buffer++;
4583 		i++;
4584 		if (unlikely(i == tx_ring->count)) {
4585 			i = 0;
4586 			tx_buffer = tx_ring->tx_buffer_info;
4587 		}
4588 	}
4589 
4590 	/* reset BQL for queue */
4591 	netdev_tx_reset_queue(txring_txq(tx_ring));
4592 
4593 	/* reset next_to_use and next_to_clean */
4594 	tx_ring->next_to_use = 0;
4595 	tx_ring->next_to_clean = 0;
4596 }
4597 
4598 /**
4599  *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
4600  *  @adapter: board private structure
4601  **/
4602 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4603 {
4604 	int i;
4605 
4606 	for (i = 0; i < adapter->num_tx_queues; i++)
4607 		if (adapter->tx_ring[i])
4608 			igb_clean_tx_ring(adapter->tx_ring[i]);
4609 }
4610 
4611 /**
4612  *  igb_free_rx_resources - Free Rx Resources
4613  *  @rx_ring: ring to clean the resources from
4614  *
4615  *  Free all receive software resources
4616  **/
4617 void igb_free_rx_resources(struct igb_ring *rx_ring)
4618 {
4619 	igb_clean_rx_ring(rx_ring);
4620 
4621 	vfree(rx_ring->rx_buffer_info);
4622 	rx_ring->rx_buffer_info = NULL;
4623 
4624 	/* if not set, then don't free */
4625 	if (!rx_ring->desc)
4626 		return;
4627 
4628 	dma_free_coherent(rx_ring->dev, rx_ring->size,
4629 			  rx_ring->desc, rx_ring->dma);
4630 
4631 	rx_ring->desc = NULL;
4632 }
4633 
4634 /**
4635  *  igb_free_all_rx_resources - Free Rx Resources for All Queues
4636  *  @adapter: board private structure
4637  *
4638  *  Free all receive software resources
4639  **/
4640 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4641 {
4642 	int i;
4643 
4644 	for (i = 0; i < adapter->num_rx_queues; i++)
4645 		if (adapter->rx_ring[i])
4646 			igb_free_rx_resources(adapter->rx_ring[i]);
4647 }
4648 
4649 /**
4650  *  igb_clean_rx_ring - Free Rx Buffers per Queue
4651  *  @rx_ring: ring to free buffers from
4652  **/
4653 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4654 {
4655 	u16 i = rx_ring->next_to_clean;
4656 
4657 	if (rx_ring->skb)
4658 		dev_kfree_skb(rx_ring->skb);
4659 	rx_ring->skb = NULL;
4660 
4661 	/* Free all the Rx ring sk_buffs */
4662 	while (i != rx_ring->next_to_alloc) {
4663 		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4664 
4665 		/* Invalidate cache lines that may have been written to by
4666 		 * device so that we avoid corrupting memory.
4667 		 */
4668 		dma_sync_single_range_for_cpu(rx_ring->dev,
4669 					      buffer_info->dma,
4670 					      buffer_info->page_offset,
4671 					      igb_rx_bufsz(rx_ring),
4672 					      DMA_FROM_DEVICE);
4673 
4674 		/* free resources associated with mapping */
4675 		dma_unmap_page_attrs(rx_ring->dev,
4676 				     buffer_info->dma,
4677 				     igb_rx_pg_size(rx_ring),
4678 				     DMA_FROM_DEVICE,
4679 				     IGB_RX_DMA_ATTR);
4680 		__page_frag_cache_drain(buffer_info->page,
4681 					buffer_info->pagecnt_bias);
4682 
4683 		i++;
4684 		if (i == rx_ring->count)
4685 			i = 0;
4686 	}
4687 
4688 	rx_ring->next_to_alloc = 0;
4689 	rx_ring->next_to_clean = 0;
4690 	rx_ring->next_to_use = 0;
4691 }
4692 
4693 /**
4694  *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
4695  *  @adapter: board private structure
4696  **/
4697 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4698 {
4699 	int i;
4700 
4701 	for (i = 0; i < adapter->num_rx_queues; i++)
4702 		if (adapter->rx_ring[i])
4703 			igb_clean_rx_ring(adapter->rx_ring[i]);
4704 }
4705 
4706 /**
4707  *  igb_set_mac - Change the Ethernet Address of the NIC
4708  *  @netdev: network interface device structure
4709  *  @p: pointer to an address structure
4710  *
4711  *  Returns 0 on success, negative on failure
4712  **/
4713 static int igb_set_mac(struct net_device *netdev, void *p)
4714 {
4715 	struct igb_adapter *adapter = netdev_priv(netdev);
4716 	struct e1000_hw *hw = &adapter->hw;
4717 	struct sockaddr *addr = p;
4718 
4719 	if (!is_valid_ether_addr(addr->sa_data))
4720 		return -EADDRNOTAVAIL;
4721 
4722 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4723 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4724 
4725 	/* set the correct pool for the new PF MAC address in entry 0 */
4726 	igb_set_default_mac_filter(adapter);
4727 
4728 	return 0;
4729 }
4730 
4731 /**
4732  *  igb_write_mc_addr_list - write multicast addresses to MTA
4733  *  @netdev: network interface device structure
4734  *
4735  *  Writes multicast address list to the MTA hash table.
4736  *  Returns: -ENOMEM on failure
4737  *           0 on no addresses written
4738  *           X on writing X addresses to MTA
4739  **/
4740 static int igb_write_mc_addr_list(struct net_device *netdev)
4741 {
4742 	struct igb_adapter *adapter = netdev_priv(netdev);
4743 	struct e1000_hw *hw = &adapter->hw;
4744 	struct netdev_hw_addr *ha;
4745 	u8  *mta_list;
4746 	int i;
4747 
4748 	if (netdev_mc_empty(netdev)) {
4749 		/* nothing to program, so clear mc list */
4750 		igb_update_mc_addr_list(hw, NULL, 0);
4751 		igb_restore_vf_multicasts(adapter);
4752 		return 0;
4753 	}
4754 
4755 	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
4756 	if (!mta_list)
4757 		return -ENOMEM;
4758 
4759 	/* The shared function expects a packed array of only addresses. */
4760 	i = 0;
4761 	netdev_for_each_mc_addr(ha, netdev)
4762 		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
4763 
4764 	igb_update_mc_addr_list(hw, mta_list, i);
4765 	kfree(mta_list);
4766 
4767 	return netdev_mc_count(netdev);
4768 }
4769 
4770 static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4771 {
4772 	struct e1000_hw *hw = &adapter->hw;
4773 	u32 i, pf_id;
4774 
4775 	switch (hw->mac.type) {
4776 	case e1000_i210:
4777 	case e1000_i211:
4778 	case e1000_i350:
4779 		/* VLAN filtering needed for VLAN prio filter */
4780 		if (adapter->netdev->features & NETIF_F_NTUPLE)
4781 			break;
4782 		/* fall through */
4783 	case e1000_82576:
4784 	case e1000_82580:
4785 	case e1000_i354:
4786 		/* VLAN filtering needed for pool filtering */
4787 		if (adapter->vfs_allocated_count)
4788 			break;
4789 		/* fall through */
4790 	default:
4791 		return 1;
4792 	}
4793 
4794 	/* We are already in VLAN promisc, nothing to do */
4795 	if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4796 		return 0;
4797 
4798 	if (!adapter->vfs_allocated_count)
4799 		goto set_vfta;
4800 
4801 	/* Add PF to all active pools */
4802 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4803 
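	/* Note the loop bound: the --i in the condition stops at 1, so
	 * VLVF entry 0 is never written by this loop.
	 */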
4804 	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4805 		u32 vlvf = rd32(E1000_VLVF(i));
4806 
4807 		vlvf |= BIT(pf_id);
4808 		wr32(E1000_VLVF(i), vlvf);
4809 	}
4810 
4811 set_vfta:
4812 	/* Set all bits in the VLAN filter table array */
4813 	for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4814 		hw->mac.ops.write_vfta(hw, i, ~0U);
4815 
4816 	/* Set flag so we don't redo unnecessary work */
4817 	adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4818 
4819 	return 0;
4820 }
4821 
4822 #define VFTA_BLOCK_SIZE 8
4823 static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4824 {
4825 	struct e1000_hw *hw = &adapter->hw;
4826 	u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4827 	u32 vid_start = vfta_offset * 32;
4828 	u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4829 	u32 i, vid, word, bits, pf_id;
4830 
4831 	/* guarantee that we don't scrub out management VLAN */
4832 	vid = adapter->mng_vlan_id;
4833 	if (vid >= vid_start && vid < vid_end)
4834 		vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
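	/* e.g. vid = 100 with vfta_offset = 0: word (100 - 0) / 32 = 3 and
	 * bit 100 % 32 = 4, i.e. vfta[3] |= BIT(4).
	 */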
4835 
4836 	if (!adapter->vfs_allocated_count)
4837 		goto set_vfta;
4838 
4839 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4840 
4841 	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4842 		u32 vlvf = rd32(E1000_VLVF(i));
4843 
4844 		/* pull VLAN ID from VLVF */
4845 		vid = vlvf & VLAN_VID_MASK;
4846 
4847 		/* only concern ourselves with a certain range */
4848 		if (vid < vid_start || vid >= vid_end)
4849 			continue;
4850 
4851 		if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4852 			/* record VLAN ID in VFTA */
4853 			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4854 
4855 			/* if PF is part of this then continue */
4856 			if (test_bit(vid, adapter->active_vlans))
4857 				continue;
4858 		}
4859 
4860 		/* remove PF from the pool */
4861 		bits = ~BIT(pf_id);
4862 		bits &= rd32(E1000_VLVF(i));
4863 		wr32(E1000_VLVF(i), bits);
4864 	}
4865 
4866 set_vfta:
4867 	/* extract values from active_vlans and write back to VFTA */
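	/* On a 64-bit build (BITS_PER_LONG == 64), e.g. vfta_offset = 8 and
	 * i = 3: vid = (8 + 3) * 32 = 352, so word = 5, bits = 32, and the
	 * upper half of active_vlans[5] lands in vfta[3].
	 */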
4868 	for (i = VFTA_BLOCK_SIZE; i--;) {
4869 		vid = (vfta_offset + i) * 32;
4870 		word = vid / BITS_PER_LONG;
4871 		bits = vid % BITS_PER_LONG;
4872 
4873 		vfta[i] |= adapter->active_vlans[word] >> bits;
4874 
4875 		hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4876 	}
4877 }
4878 
4879 static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4880 {
4881 	u32 i;
4882 
4883 	/* We are not in VLAN promisc, nothing to do */
4884 	if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4885 		return;
4886 
4887 	/* Set flag so we don't redo unnecessary work */
4888 	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4889 
4890 	for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4891 		igb_scrub_vfta(adapter, i);
4892 }
4893 
4894 /**
4895  *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4896  *  @netdev: network interface device structure
4897  *
4898  *  The set_rx_mode entry point is called whenever the unicast or multicast
4899  *  address lists or the network interface flags are updated.  This routine is
4900  *  responsible for configuring the hardware for proper unicast, multicast,
4901  *  promiscuous mode, and all-multi behavior.
4902  **/
4903 static void igb_set_rx_mode(struct net_device *netdev)
4904 {
4905 	struct igb_adapter *adapter = netdev_priv(netdev);
4906 	struct e1000_hw *hw = &adapter->hw;
4907 	unsigned int vfn = adapter->vfs_allocated_count;
4908 	u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
4909 	int count;
4910 
4911 	/* Check for Promiscuous and All Multicast modes */
4912 	if (netdev->flags & IFF_PROMISC) {
4913 		rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
4914 		vmolr |= E1000_VMOLR_MPME;
4915 
4916 		/* enable use of UTA filter to force packets to default pool */
4917 		if (hw->mac.type == e1000_82576)
4918 			vmolr |= E1000_VMOLR_ROPE;
4919 	} else {
4920 		if (netdev->flags & IFF_ALLMULTI) {
4921 			rctl |= E1000_RCTL_MPE;
4922 			vmolr |= E1000_VMOLR_MPME;
4923 		} else {
4924 			/* Write addresses to the MTA; if the attempt fails,
4925 			 * turn on multicast promiscuous mode so that we can
4926 			 * at least receive multicast traffic
4927 			 */
4928 			count = igb_write_mc_addr_list(netdev);
4929 			if (count < 0) {
4930 				rctl |= E1000_RCTL_MPE;
4931 				vmolr |= E1000_VMOLR_MPME;
4932 			} else if (count) {
4933 				vmolr |= E1000_VMOLR_ROMPE;
4934 			}
4935 		}
4936 	}
4937 
4938 	/* Write addresses to available RAR registers, if there is not
4939 	 * sufficient space to store all the addresses then enable
4940 	 * unicast promiscuous mode
4941 	 */
4942 	if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
4943 		rctl |= E1000_RCTL_UPE;
4944 		vmolr |= E1000_VMOLR_ROPE;
4945 	}
4946 
4947 	/* enable VLAN filtering by default */
4948 	rctl |= E1000_RCTL_VFE;
4949 
4950 	/* disable VLAN filtering for modes that require it */
4951 	if ((netdev->flags & IFF_PROMISC) ||
4952 	    (netdev->features & NETIF_F_RXALL)) {
4953 		/* if we fail to set all rules then just clear VFE */
4954 		if (igb_vlan_promisc_enable(adapter))
4955 			rctl &= ~E1000_RCTL_VFE;
4956 	} else {
4957 		igb_vlan_promisc_disable(adapter);
4958 	}
4959 
4960 	/* update state of unicast, multicast, and VLAN filtering modes */
4961 	rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
4962 				     E1000_RCTL_VFE);
4963 	wr32(E1000_RCTL, rctl);
4964 
4965 #if (PAGE_SIZE < 8192)
4966 	if (!adapter->vfs_allocated_count) {
4967 		if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4968 			rlpml = IGB_MAX_FRAME_BUILD_SKB;
4969 	}
4970 #endif
4971 	wr32(E1000_RLPML, rlpml);
4972 
4973 	/* In order to support SR-IOV and eventually VMDq it is necessary to set
4974 	 * the VMOLR to enable the appropriate modes.  Without this workaround
4975 	 * we will have issues with VLAN tag stripping not being done for frames
4976 	 * that are only arriving because we are the default pool
4977 	 */
4978 	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
4979 		return;
4980 
4981 	/* set UTA to appropriate mode */
4982 	igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
4983 
4984 	vmolr |= rd32(E1000_VMOLR(vfn)) &
4985 		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
4986 
4987 	/* enable Rx jumbo frames, restrict as needed to support build_skb */
4988 	vmolr &= ~E1000_VMOLR_RLPML_MASK;
4989 #if (PAGE_SIZE < 8192)
4990 	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4991 		vmolr |= IGB_MAX_FRAME_BUILD_SKB;
4992 	else
4993 #endif
4994 		vmolr |= MAX_JUMBO_FRAME_SIZE;
4995 	vmolr |= E1000_VMOLR_LPE;
4996 
4997 	wr32(E1000_VMOLR(vfn), vmolr);
4998 
4999 	igb_restore_vf_multicasts(adapter);
5000 }
5001 
5002 static void igb_check_wvbr(struct igb_adapter *adapter)
5003 {
5004 	struct e1000_hw *hw = &adapter->hw;
5005 	u32 wvbr = 0;
5006 
5007 	switch (hw->mac.type) {
5008 	case e1000_82576:
5009 	case e1000_i350:
5010 		wvbr = rd32(E1000_WVBR);
5011 		if (!wvbr)
5012 			return;
5013 		break;
5014 	default:
5015 		break;
5016 	}
5017 
5018 	adapter->wvbr |= wvbr;
5019 }
5020 
5021 #define IGB_STAGGERED_QUEUE_OFFSET 8
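/* WVBR appears to report spoof events per VF queue pair: bit n flags VF n's
 * first queue, bit n + IGB_STAGGERED_QUEUE_OFFSET its second.
 */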
5022 
5023 static void igb_spoof_check(struct igb_adapter *adapter)
5024 {
5025 	int j;
5026 
5027 	if (!adapter->wvbr)
5028 		return;
5029 
5030 	for (j = 0; j < adapter->vfs_allocated_count; j++) {
5031 		if (adapter->wvbr & BIT(j) ||
5032 		    adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5033 			dev_warn(&adapter->pdev->dev,
5034 				"Spoof event(s) detected on VF %d\n", j);
5035 			adapter->wvbr &=
5036 				~(BIT(j) |
5037 				  BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5038 		}
5039 	}
5040 }
5041 
5042 /* Need to wait a few seconds after link up to get diagnostic information from
5043  * the phy
5044  */
5045 static void igb_update_phy_info(struct timer_list *t)
5046 {
5047 	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);

5048 	igb_get_phy_info(&adapter->hw);
5049 }
5050 
5051 /**
5052  *  igb_has_link - check shared code for link and determine up/down
5053  *  @adapter: pointer to driver private info
5054  **/
5055 bool igb_has_link(struct igb_adapter *adapter)
5056 {
5057 	struct e1000_hw *hw = &adapter->hw;
5058 	bool link_active = false;
5059 
5060 	/* get_link_status is set on LSC (link status change) interrupt or
5061 	 * rx sequence error interrupt.  It stays set until
5062 	 * e1000_check_for_link establishes link, for copper adapters
5063 	 * ONLY.
5064 	 */
5065 	switch (hw->phy.media_type) {
5066 	case e1000_media_type_copper:
5067 		if (!hw->mac.get_link_status)
5068 			return true;
		/* fall through */
5069 	case e1000_media_type_internal_serdes:
5070 		hw->mac.ops.check_for_link(hw);
5071 		link_active = !hw->mac.get_link_status;
5072 		break;
5073 	default:
5074 	case e1000_media_type_unknown:
5075 		break;
5076 	}
5077 
5078 	if (((hw->mac.type == e1000_i210) ||
5079 	     (hw->mac.type == e1000_i211)) &&
5080 	     (hw->phy.id == I210_I_PHY_ID)) {
5081 		if (!netif_carrier_ok(adapter->netdev)) {
5082 			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5083 		} else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5084 			adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5085 			adapter->link_check_timeout = jiffies;
5086 		}
5087 	}
5088 
5089 	return link_active;
5090 }
5091 
5092 static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5093 {
5094 	bool ret = false;
5095 	u32 ctrl_ext, thstat;
5096 
5097 	/* check for thermal sensor event on i350 copper only */
5098 	if (hw->mac.type == e1000_i350) {
5099 		thstat = rd32(E1000_THSTAT);
5100 		ctrl_ext = rd32(E1000_CTRL_EXT);
5101 
5102 		if ((hw->phy.media_type == e1000_media_type_copper) &&
5103 		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5104 			ret = !!(thstat & event);
5105 	}
5106 
5107 	return ret;
5108 }
5109 
5110 /**
5111  *  igb_check_lvmmc - check for malformed packets received
5112  *  and indicated in LVMMC register
5113  *  @adapter: pointer to adapter
5114  **/
5115 static void igb_check_lvmmc(struct igb_adapter *adapter)
5116 {
5117 	struct e1000_hw *hw = &adapter->hw;
5118 	u32 lvmmc;
5119 
5120 	lvmmc = rd32(E1000_LVMMC);
5121 	if (lvmmc) {
5122 		if (unlikely(net_ratelimit())) {
5123 			netdev_warn(adapter->netdev,
5124 				    "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5125 				    lvmmc);
5126 		}
5127 	}
5128 }
5129 
5130 /**
5131  *  igb_watchdog - Timer Call-back
5132  *  @t: pointer to the watchdog timer_list embedded in struct igb_adapter
5133  **/
5134 static void igb_watchdog(struct timer_list *t)
5135 {
5136 	struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5137 	/* Do the rest outside of interrupt context */
5138 	schedule_work(&adapter->watchdog_task);
5139 }
5140 
5141 static void igb_watchdog_task(struct work_struct *work)
5142 {
5143 	struct igb_adapter *adapter = container_of(work,
5144 						   struct igb_adapter,
5145 						   watchdog_task);
5146 	struct e1000_hw *hw = &adapter->hw;
5147 	struct e1000_phy_info *phy = &hw->phy;
5148 	struct net_device *netdev = adapter->netdev;
5149 	u32 link;
5150 	int i;
5151 	u32 connsw;
5152 	u16 phy_data, retry_count = 20;
5153 
5154 	link = igb_has_link(adapter);
5155 
5156 	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5157 		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5158 			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5159 		else
5160 			link = false;
5161 	}
5162 
5163 	/* Force link down if we have fiber to swap to */
5164 	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5165 		if (hw->phy.media_type == e1000_media_type_copper) {
5166 			connsw = rd32(E1000_CONNSW);
5167 			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5168 				link = 0;
5169 		}
5170 	}
5171 	if (link) {
5172 		/* Perform a reset if the media type changed. */
5173 		if (hw->dev_spec._82575.media_changed) {
5174 			hw->dev_spec._82575.media_changed = false;
5175 			adapter->flags |= IGB_FLAG_MEDIA_RESET;
5176 			igb_reset(adapter);
5177 		}
5178 		/* Cancel scheduled suspend requests. */
5179 		pm_runtime_resume(netdev->dev.parent);
5180 
5181 		if (!netif_carrier_ok(netdev)) {
5182 			u32 ctrl;
5183 
5184 			hw->mac.ops.get_speed_and_duplex(hw,
5185 							 &adapter->link_speed,
5186 							 &adapter->link_duplex);
5187 
5188 			ctrl = rd32(E1000_CTRL);
5189 			/* Links status message must follow this format */
5190 			/* Link status message must follow this format */
5191 			       "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5192 			       netdev->name,
5193 			       adapter->link_speed,
5194 			       adapter->link_duplex == FULL_DUPLEX ?
5195 			       "Full" : "Half",
5196 			       (ctrl & E1000_CTRL_TFCE) &&
5197 			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5198 			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :
5199 			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
5200 
5201 			/* disable EEE if enabled */
5202 			if ((adapter->flags & IGB_FLAG_EEE) &&
5203 				(adapter->link_duplex == HALF_DUPLEX)) {
5204 				dev_info(&adapter->pdev->dev,
5205 				"EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5206 				adapter->hw.dev_spec._82575.eee_disable = true;
5207 				adapter->flags &= ~IGB_FLAG_EEE;
5208 			}
5209 
5210 			/* check if SmartSpeed worked */
5211 			igb_check_downshift(hw);
5212 			if (phy->speed_downgraded)
5213 				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5214 
5215 			/* check for thermal sensor event */
5216 			if (igb_thermal_sensor_event(hw,
5217 			    E1000_THSTAT_LINK_THROTTLE))
5218 				netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5219 
5220 			/* adjust timeout factor according to speed/duplex */
5221 			adapter->tx_timeout_factor = 1;
5222 			switch (adapter->link_speed) {
5223 			case SPEED_10:
5224 				adapter->tx_timeout_factor = 14;
5225 				break;
5226 			case SPEED_100:
5227 				/* maybe add some timeout factor ? */
5228 				break;
5229 			}
5230 
5231 			if (adapter->link_speed != SPEED_1000)
5232 				goto no_wait;
5233 
5234 			/* wait for Remote receiver status OK */
5235 retry_read_status:
5236 			if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5237 					      &phy_data)) {
5238 				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5239 				    retry_count) {
5240 					msleep(100);
5241 					retry_count--;
5242 					goto retry_read_status;
5243 				} else if (!retry_count) {
5244 					dev_err(&adapter->pdev->dev, "exceeded max wait of 2 seconds for remote receiver status\n");
5245 				}
5246 			} else {
5247 				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status register\n");
5248 			}
5249 no_wait:
5250 			netif_carrier_on(netdev);
5251 
5252 			igb_ping_all_vfs(adapter);
5253 			igb_check_vf_rate_limit(adapter);
5254 
5255 			/* link state has changed, schedule phy info update */
5256 			if (!test_bit(__IGB_DOWN, &adapter->state))
5257 				mod_timer(&adapter->phy_info_timer,
5258 					  round_jiffies(jiffies + 2 * HZ));
5259 		}
5260 	} else {
5261 		if (netif_carrier_ok(netdev)) {
5262 			adapter->link_speed = 0;
5263 			adapter->link_duplex = 0;
5264 
5265 			/* check for thermal sensor event */
5266 			if (igb_thermal_sensor_event(hw,
5267 			    E1000_THSTAT_PWR_DOWN)) {
5268 				netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5269 			}
5270 
5271 			/* Link status message must follow this format */
5272 			netdev_info(netdev, "igb: %s NIC Link is Down\n",
5273 			       netdev->name);
5274 			netif_carrier_off(netdev);
5275 
5276 			igb_ping_all_vfs(adapter);
5277 
5278 			/* link state has changed, schedule phy info update */
5279 			if (!test_bit(__IGB_DOWN, &adapter->state))
5280 				mod_timer(&adapter->phy_info_timer,
5281 					  round_jiffies(jiffies + 2 * HZ));
5282 
5283 			/* link is down, time to check for alternate media */
5284 			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5285 				igb_check_swap_media(adapter);
5286 				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5287 					schedule_work(&adapter->reset_task);
5288 					/* return immediately */
5289 					return;
5290 				}
5291 			}
5292 			pm_schedule_suspend(netdev->dev.parent,
5293 					    MSEC_PER_SEC * 5);
5294 
5295 		/* also check for alternate media here */
5296 		} else if (!netif_carrier_ok(netdev) &&
5297 			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5298 			igb_check_swap_media(adapter);
5299 			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5300 				schedule_work(&adapter->reset_task);
5301 				/* return immediately */
5302 				return;
5303 			}
5304 		}
5305 	}
5306 
5307 	spin_lock(&adapter->stats64_lock);
5308 	igb_update_stats(adapter);
5309 	spin_unlock(&adapter->stats64_lock);
5310 
5311 	for (i = 0; i < adapter->num_tx_queues; i++) {
5312 		struct igb_ring *tx_ring = adapter->tx_ring[i];
5313 		if (!netif_carrier_ok(netdev)) {
5314 			/* We've lost link, so the controller stops DMA,
5315 			 * but we've got queued Tx work that's never going
5316 			 * to get done, so reset controller to flush Tx.
5317 			 * (Do the reset outside of interrupt context).
5318 			 */
5319 			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5320 				adapter->tx_timeout_count++;
5321 				schedule_work(&adapter->reset_task);
5322 				/* return immediately since reset is imminent */
5323 				return;
5324 			}
5325 		}
5326 
5327 		/* Force detection of hung controller every watchdog period */
5328 		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5329 	}
5330 
5331 	/* Cause software interrupt to ensure Rx ring is cleaned */
5332 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5333 		u32 eics = 0;
5334 
5335 		for (i = 0; i < adapter->num_q_vectors; i++)
5336 			eics |= adapter->q_vector[i]->eims_value;
5337 		wr32(E1000_EICS, eics);
5338 	} else {
5339 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
5340 	}
5341 
5342 	igb_spoof_check(adapter);
5343 	igb_ptp_rx_hang(adapter);
5344 	igb_ptp_tx_hang(adapter);
5345 
5346 	/* Check LVMMC register on i350/i354 only */
5347 	if ((adapter->hw.mac.type == e1000_i350) ||
5348 	    (adapter->hw.mac.type == e1000_i354))
5349 		igb_check_lvmmc(adapter);
5350 
5351 	/* Reset the timer */
5352 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
5353 		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5354 			mod_timer(&adapter->watchdog_timer,
5355 				  round_jiffies(jiffies +  HZ));
5356 		else
5357 			mod_timer(&adapter->watchdog_timer,
5358 				  round_jiffies(jiffies + 2 * HZ));
5359 	}
5360 }
5361 
5362 enum latency_range {
5363 	lowest_latency = 0,
5364 	low_latency = 1,
5365 	bulk_latency = 2,
5366 	latency_invalid = 255
5367 };
5368 
5369 /**
5370  *  igb_update_ring_itr - update the dynamic ITR value based on packet size
5371  *  @q_vector: pointer to q_vector
5372  *
5373  *  Stores a new ITR value based strictly on packet size.  This
5374  *  algorithm is less sophisticated than that used in igb_update_itr,
5375  *  due to the difficulty of synchronizing statistics across multiple
5376  *  receive rings.  The divisors and thresholds used by this function
5377  *  were determined based on theoretical maximum wire speed and testing
5378  *  data, in order to minimize response time while increasing bulk
5379  *  throughput.
5380  *  This functionality is controlled by ethtool's coalescing settings.
5381  *  NOTE:  This function is called only when operating in a multiqueue
5382  *         receive environment.
5383  **/
5384 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5385 {
5386 	int new_val = q_vector->itr_val;
5387 	int avg_wire_size = 0;
5388 	struct igb_adapter *adapter = q_vector->adapter;
5389 	unsigned int packets;
5390 
5391 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
5392 	 * ints/sec - ITR timer value of 120 ticks.
5393 	 */
5394 	if (adapter->link_speed != SPEED_1000) {
5395 		new_val = IGB_4K_ITR;
5396 		goto set_itr_val;
5397 	}
5398 
5399 	packets = q_vector->rx.total_packets;
5400 	if (packets)
5401 		avg_wire_size = q_vector->rx.total_bytes / packets;
5402 
5403 	packets = q_vector->tx.total_packets;
5404 	if (packets)
5405 		avg_wire_size = max_t(u32, avg_wire_size,
5406 				      q_vector->tx.total_bytes / packets);
5407 
5408 	/* if avg_wire_size isn't set, no work was done */
5409 	if (!avg_wire_size)
5410 		goto clear_counts;
5411 
5412 	/* Add 24 bytes to size to account for the 4-byte CRC, 8-byte preamble, and 12-byte inter-frame gap */
5413 	avg_wire_size += 24;
5414 
5415 	/* Don't starve jumbo frames */
5416 	avg_wire_size = min(avg_wire_size, 3000);
5417 
5418 	/* Give a little boost to mid-size frames */
5419 	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5420 		new_val = avg_wire_size / 3;
5421 	else
5422 		new_val = avg_wire_size / 2;
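	/* Worked example (illustrative, not from the original source): a
	 * steady stream of 1500-byte frames gives avg_wire_size = 1524
	 * after the +24 adjustment; that falls outside the 300-1200 boost
	 * window, so new_val = 1524 / 2 = 762.
	 */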
5423 
5424 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
5425 	if (new_val < IGB_20K_ITR &&
5426 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5427 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5428 		new_val = IGB_20K_ITR;
5429 
5430 set_itr_val:
5431 	if (new_val != q_vector->itr_val) {
5432 		q_vector->itr_val = new_val;
5433 		q_vector->set_itr = 1;
5434 	}
5435 clear_counts:
5436 	q_vector->rx.total_bytes = 0;
5437 	q_vector->rx.total_packets = 0;
5438 	q_vector->tx.total_bytes = 0;
5439 	q_vector->tx.total_packets = 0;
5440 }
5441 
5442 /**
5443  *  igb_update_itr - update the dynamic ITR value based on statistics
5444  *  @q_vector: pointer to q_vector
5445  *  @ring_container: ring info to update the itr for
5446  *
5447  *  Stores a new ITR value based on packets and byte
5448  *  counts during the last interrupt.  The advantage of per interrupt
5449  *  computation is faster updates and more accurate ITR for the current
5450  *  traffic pattern.  Constants in this function were computed
5451  *  based on theoretical maximum wire speed and thresholds were set based
5452  *  on testing data as well as attempting to minimize response time
5453  *  while increasing bulk throughput.
5454  *  This functionality is controlled by ethtool's coalescing settings.
5455  *  NOTE:  These calculations are only valid when operating in a single-
5456  *         queue environment.
5457  **/
5458 static void igb_update_itr(struct igb_q_vector *q_vector,
5459 			   struct igb_ring_container *ring_container)
5460 {
5461 	unsigned int packets = ring_container->total_packets;
5462 	unsigned int bytes = ring_container->total_bytes;
5463 	u8 itrval = ring_container->itr;
5464 
5465 	/* no packets, exit with status unchanged */
5466 	if (packets == 0)
5467 		return;
5468 
5469 	switch (itrval) {
5470 	case lowest_latency:
5471 		/* handle TSO and jumbo frames */
5472 		if (bytes/packets > 8000)
5473 			itrval = bulk_latency;
5474 		else if ((packets < 5) && (bytes > 512))
5475 			itrval = low_latency;
5476 		break;
5477 	case low_latency:  /* 50 usec aka 20000 ints/s */
5478 		if (bytes > 10000) {
5479 			/* this if handles the TSO accounting */
5480 			if (bytes/packets > 8000)
5481 				itrval = bulk_latency;
5482 			else if ((packets < 10) || ((bytes/packets) > 1200))
5483 				itrval = bulk_latency;
5484 			else if ((packets > 35))
5485 				itrval = lowest_latency;
5486 		} else if (bytes/packets > 2000) {
5487 			itrval = bulk_latency;
5488 		} else if (packets <= 2 && bytes < 512) {
5489 			itrval = lowest_latency;
5490 		}
5491 		break;
5492 	case bulk_latency: /* 250 usec aka 4000 ints/s */
5493 		if (bytes > 25000) {
5494 			if (packets > 35)
5495 				itrval = low_latency;
5496 		} else if (bytes < 1500) {
5497 			itrval = low_latency;
5498 		}
5499 		break;
5500 	}
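	/* Worked example (illustrative): at low_latency with 20 packets
	 * totalling 30000 bytes, bytes > 10000 and bytes/packets = 1500
	 * exceeds 1200, so itrval steps down to bulk_latency.
	 */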
5501 
5502 	/* clear work counters since we have the values we need */
5503 	ring_container->total_bytes = 0;
5504 	ring_container->total_packets = 0;
5505 
5506 	/* write updated itr to ring container */
5507 	ring_container->itr = itrval;
5508 }
5509 
5510 static void igb_set_itr(struct igb_q_vector *q_vector)
5511 {
5512 	struct igb_adapter *adapter = q_vector->adapter;
5513 	u32 new_itr = q_vector->itr_val;
5514 	u8 current_itr = 0;
5515 
5516 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
5517 	if (adapter->link_speed != SPEED_1000) {
5518 		current_itr = 0;
5519 		new_itr = IGB_4K_ITR;
5520 		goto set_itr_now;
5521 	}
5522 
5523 	igb_update_itr(q_vector, &q_vector->tx);
5524 	igb_update_itr(q_vector, &q_vector->rx);
5525 
5526 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5527 
5528 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
5529 	if (current_itr == lowest_latency &&
5530 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5531 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5532 		current_itr = low_latency;
5533 
5534 	switch (current_itr) {
5535 	/* counts and packets in update_itr are dependent on these numbers */
5536 	case lowest_latency:
5537 		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
5538 		break;
5539 	case low_latency:
5540 		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
5541 		break;
5542 	case bulk_latency:
5543 		new_itr = IGB_4K_ITR;  /* 4,000 ints/sec */
5544 		break;
5545 	default:
5546 		break;
5547 	}
5548 
5549 set_itr_now:
5550 	if (new_itr != q_vector->itr_val) {
5551 		/* this attempts to bias the interrupt rate towards Bulk
5552 		 * by adding intermediate steps when interrupt rate is
5553 		 * increasing
5554 		 */
5555 		new_itr = new_itr > q_vector->itr_val ?
5556 			  max((new_itr * q_vector->itr_val) /
5557 			  (new_itr + (q_vector->itr_val >> 2)),
5558 			  new_itr) : new_itr;
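		/* Note: (new * old) / (new + old/4) is strictly less than
		 * old, and old < new on this branch, so the max() above
		 * always evaluates to new_itr; the intermediate-step bias
		 * described in the comment appears to never engage as
		 * written.
		 */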
5559 		/* Don't write the value here; it resets the adapter's
5560 		 * internal timer, and causes us to delay far longer than
5561 		 * we should between interrupts.  Instead, we write the ITR
5562 		 * value at the beginning of the next interrupt so the timing
5563 		 * ends up being correct.
5564 		 */
5565 		q_vector->itr_val = new_itr;
5566 		q_vector->set_itr = 1;
5567 	}
5568 }
5569 
5570 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
5571 			    u32 type_tucmd, u32 mss_l4len_idx)
5572 {
5573 	struct e1000_adv_tx_context_desc *context_desc;
5574 	u16 i = tx_ring->next_to_use;
5575 
5576 	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5577 
5578 	i++;
5579 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5580 
5581 	/* set bits to identify this as an advanced context descriptor */
5582 	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5583 
5584 	/* For 82575, context index must be unique per ring. */
5585 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5586 		mss_l4len_idx |= tx_ring->reg_idx << 4;
5587 
5588 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
5589 	context_desc->seqnum_seed	= 0;
5590 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
5591 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
5592 }
5593 
5594 static int igb_tso(struct igb_ring *tx_ring,
5595 		   struct igb_tx_buffer *first,
5596 		   u8 *hdr_len)
5597 {
5598 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5599 	struct sk_buff *skb = first->skb;
5600 	union {
5601 		struct iphdr *v4;
5602 		struct ipv6hdr *v6;
5603 		unsigned char *hdr;
5604 	} ip;
5605 	union {
5606 		struct tcphdr *tcp;
5607 		unsigned char *hdr;
5608 	} l4;
5609 	u32 paylen, l4_offset;
5610 	int err;
5611 
5612 	if (skb->ip_summed != CHECKSUM_PARTIAL)
5613 		return 0;
5614 
5615 	if (!skb_is_gso(skb))
5616 		return 0;
5617 
5618 	err = skb_cow_head(skb, 0);
5619 	if (err < 0)
5620 		return err;
5621 
5622 	ip.hdr = skb_network_header(skb);
5623 	l4.hdr = skb_checksum_start(skb);
5624 
5625 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5626 	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5627 
5628 	/* initialize outer IP header fields */
5629 	if (ip.v4->version == 4) {
5630 		unsigned char *csum_start = skb_checksum_start(skb);
5631 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5632 
5633 		/* IP header will have to cancel out any data that
5634 		 * is not a part of the outer IP header
5635 		 */
5636 		ip.v4->check = csum_fold(csum_partial(trans_start,
5637 						      csum_start - trans_start,
5638 						      0));
5639 		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5640 
5641 		ip.v4->tot_len = 0;
5642 		first->tx_flags |= IGB_TX_FLAGS_TSO |
5643 				   IGB_TX_FLAGS_CSUM |
5644 				   IGB_TX_FLAGS_IPV4;
5645 	} else {
5646 		ip.v6->payload_len = 0;
5647 		first->tx_flags |= IGB_TX_FLAGS_TSO |
5648 				   IGB_TX_FLAGS_CSUM;
5649 	}
5650 
5651 	/* determine offset of inner transport header */
5652 	l4_offset = l4.hdr - skb->data;
5653 
5654 	/* compute length of segmentation header */
5655 	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
5656 
5657 	/* remove payload length from inner checksum */
5658 	paylen = skb->len - l4_offset;
5659 	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
5660 
5661 	/* update gso size and bytecount with header size */
5662 	first->gso_segs = skb_shinfo(skb)->gso_segs;
5663 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
5664 
5665 	/* MSS L4LEN IDX */
5666 	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5667 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5668 
5669 	/* VLAN MACLEN IPLEN */
5670 	vlan_macip_lens = l4.hdr - ip.hdr;
5671 	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5672 	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5673 
5674 	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
5675 
5676 	return 1;
5677 }
5678 
5679 static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
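/* Helper for igb_tx_csum(): returns true when the skb's checksum start
 * offset lines up with an SCTP header found in the IPv6 header chain,
 * i.e. the packet really is SCTP and may use the SCTP CRC offload.
 */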
5680 {
5681 	unsigned int offset = 0;
5682 
5683 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5684 
5685 	return offset == skb_checksum_start_offset(skb);
5686 }
5687 
5688 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5689 {
5690 	struct sk_buff *skb = first->skb;
5691 	u32 vlan_macip_lens = 0;
5692 	u32 type_tucmd = 0;
5693 
5694 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
5695 csum_failed:
5696 		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
5697 			return;
5698 		goto no_csum;
5699 	}
5700 
5701 	switch (skb->csum_offset) {
5702 	case offsetof(struct tcphdr, check):
5703 		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5704 		/* fall through */
5705 	case offsetof(struct udphdr, check):
5706 		break;
5707 	case offsetof(struct sctphdr, checksum):
5708 		/* validate that this is actually an SCTP request */
5709 		if (((first->protocol == htons(ETH_P_IP)) &&
5710 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
5711 		    ((first->protocol == htons(ETH_P_IPV6)) &&
5712 		     igb_ipv6_csum_is_sctp(skb))) {
5713 			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5714 			break;
5715 		}
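		/* fall through */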
5716 	default:
5717 		skb_checksum_help(skb);
5718 		goto csum_failed;
5719 	}
5720 
5721 	/* update TX checksum flag */
5722 	first->tx_flags |= IGB_TX_FLAGS_CSUM;
5723 	vlan_macip_lens = skb_checksum_start_offset(skb) -
5724 			  skb_network_offset(skb);
5725 no_csum:
5726 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5727 	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5728 
5729 	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
5730 }
5731 
5732 #define IGB_SET_FLAG(_input, _flag, _result) \
5733 	((_flag <= _result) ? \
5734 	 ((u32)(_input & _flag) * (_result / _flag)) : \
5735 	 ((u32)(_input & _flag) / (_flag / _result)))
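/* Example (illustrative): with _flag = 0x02 and _result = 0x40000000,
 * _flag <= _result, so the macro computes (_input & 0x02) * 0x20000000,
 * translating bit 1 of _input into bit 30 of the result without a
 * conditional branch; when _flag > _result it divides instead.
 */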
5736 
5737 static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
5738 {
5739 	/* set type for advanced descriptor with frame checksum insertion */
5740 	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5741 		       E1000_ADVTXD_DCMD_DEXT |
5742 		       E1000_ADVTXD_DCMD_IFCS;
5743 
5744 	/* set HW vlan bit if vlan is present */
5745 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5746 				 (E1000_ADVTXD_DCMD_VLE));
5747 
5748 	/* set segmentation bits for TSO */
5749 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5750 				 (E1000_ADVTXD_DCMD_TSE));
5751 
5752 	/* set timestamp bit if present */
5753 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5754 				 (E1000_ADVTXD_MAC_TSTAMP));
5755 
5756 	/* insert frame checksum (the XOR clears IFCS when skb->no_fcs is set) */
5757 	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
5758 
5759 	return cmd_type;
5760 }
5761 
5762 static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5763 				 union e1000_adv_tx_desc *tx_desc,
5764 				 u32 tx_flags, unsigned int paylen)
5765 {
5766 	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5767 
5768 	/* 82575 requires a unique index per ring */
5769 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5770 		olinfo_status |= tx_ring->reg_idx << 4;
5771 
5772 	/* insert L4 checksum */
5773 	olinfo_status |= IGB_SET_FLAG(tx_flags,
5774 				      IGB_TX_FLAGS_CSUM,
5775 				      (E1000_TXD_POPTS_TXSM << 8));
5776 
5777 	/* insert IPv4 checksum */
5778 	olinfo_status |= IGB_SET_FLAG(tx_flags,
5779 				      IGB_TX_FLAGS_IPV4,
5780 				      (E1000_TXD_POPTS_IXSM << 8));
5781 
5782 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5783 }
5784 
5785 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5786 {
5787 	struct net_device *netdev = tx_ring->netdev;
5788 
5789 	netif_stop_subqueue(netdev, tx_ring->queue_index);
5790 
5791 	/* Herbert's original patch had:
5792 	 *  smp_mb__after_netif_stop_queue();
5793 	 * but since that doesn't exist yet, just open code it.
5794 	 */
5795 	smp_mb();
5796 
5797 	/* We need to check again in case another CPU has just
5798 	 * made room available.
5799 	 */
5800 	if (igb_desc_unused(tx_ring) < size)
5801 		return -EBUSY;
5802 
5803 	/* A reprieve! */
5804 	netif_wake_subqueue(netdev, tx_ring->queue_index);
5805 
5806 	u64_stats_update_begin(&tx_ring->tx_syncp2);
5807 	tx_ring->tx_stats.restart_queue2++;
5808 	u64_stats_update_end(&tx_ring->tx_syncp2);
5809 
5810 	return 0;
5811 }
5812 
5813 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5814 {
5815 	if (igb_desc_unused(tx_ring) >= size)
5816 		return 0;
5817 	return __igb_maybe_stop_tx(tx_ring, size);
5818 }
5819 
5820 static int igb_tx_map(struct igb_ring *tx_ring,
5821 		      struct igb_tx_buffer *first,
5822 		      const u8 hdr_len)
5823 {
5824 	struct sk_buff *skb = first->skb;
5825 	struct igb_tx_buffer *tx_buffer;
5826 	union e1000_adv_tx_desc *tx_desc;
5827 	struct skb_frag_struct *frag;
5828 	dma_addr_t dma;
5829 	unsigned int data_len, size;
5830 	u32 tx_flags = first->tx_flags;
5831 	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
5832 	u16 i = tx_ring->next_to_use;
5833 
5834 	tx_desc = IGB_TX_DESC(tx_ring, i);
5835 
5836 	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
5837 
5838 	size = skb_headlen(skb);
5839 	data_len = skb->data_len;
5840 
5841 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
5842 
5843 	tx_buffer = first;
5844 
5845 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
5846 		if (dma_mapping_error(tx_ring->dev, dma))
5847 			goto dma_error;
5848 
5849 		/* record length, and DMA address */
5850 		dma_unmap_len_set(tx_buffer, len, size);
5851 		dma_unmap_addr_set(tx_buffer, dma, dma);
5852 
5853 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
5854 
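		/* Illustrative note (assuming IGB_MAX_DATA_PER_TXD is 32 KB
		 * as defined in igb.h): a 40 KB fragment is split below into
		 * one 32 KB descriptor followed by one 8 KB descriptor.
		 */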
5855 		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
5856 			tx_desc->read.cmd_type_len =
5857 				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
5858 
5859 			i++;
5860 			tx_desc++;
5861 			if (i == tx_ring->count) {
5862 				tx_desc = IGB_TX_DESC(tx_ring, 0);
5863 				i = 0;
5864 			}
5865 			tx_desc->read.olinfo_status = 0;
5866 
5867 			dma += IGB_MAX_DATA_PER_TXD;
5868 			size -= IGB_MAX_DATA_PER_TXD;
5869 
5870 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
5871 		}
5872 
5873 		if (likely(!data_len))
5874 			break;
5875 
5876 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
5877 
5878 		i++;
5879 		tx_desc++;
5880 		if (i == tx_ring->count) {
5881 			tx_desc = IGB_TX_DESC(tx_ring, 0);
5882 			i = 0;
5883 		}
5884 		tx_desc->read.olinfo_status = 0;
5885 
5886 		size = skb_frag_size(frag);
5887 		data_len -= size;
5888 
5889 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
5890 				       size, DMA_TO_DEVICE);
5891 
5892 		tx_buffer = &tx_ring->tx_buffer_info[i];
5893 	}
5894 
5895 	/* write last descriptor with RS and EOP bits */
5896 	cmd_type |= size | IGB_TXD_DCMD;
5897 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
5898 
5899 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
5900 
5901 	/* set the timestamp */
5902 	first->time_stamp = jiffies;
5903 
5904 	/* Force memory writes to complete before letting h/w know there
5905 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
5906 	 * memory model archs, such as IA-64).
5907 	 *
5908 	 * We also need this memory barrier to make certain all of the
5909 	 * status bits have been updated before next_to_watch is written.
5910 	 */
5911 	wmb();
5912 
5913 	/* set next_to_watch value indicating a packet is present */
5914 	first->next_to_watch = tx_desc;
5915 
5916 	i++;
5917 	if (i == tx_ring->count)
5918 		i = 0;
5919 
5920 	tx_ring->next_to_use = i;
5921 
5922 	/* Make sure there is space in the ring for the next send. */
5923 	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
5924 
5925 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
5926 		writel(i, tx_ring->tail);
5927 
5928 		/* we need this if more than one processor can write to our tail
5929 		 * at a time; it synchronizes IO on IA64/Altix systems
5930 		 */
5931 		mmiowb();
5932 	}
5933 	return 0;
5934 
5935 dma_error:
5936 	dev_err(tx_ring->dev, "TX DMA map failed\n");
5937 	tx_buffer = &tx_ring->tx_buffer_info[i];
5938 
5939 	/* clear dma mappings for failed tx_buffer_info map */
5940 	while (tx_buffer != first) {
5941 		if (dma_unmap_len(tx_buffer, len))
5942 			dma_unmap_page(tx_ring->dev,
5943 				       dma_unmap_addr(tx_buffer, dma),
5944 				       dma_unmap_len(tx_buffer, len),
5945 				       DMA_TO_DEVICE);
5946 		dma_unmap_len_set(tx_buffer, len, 0);
5947 
5948 		if (i-- == 0)
5949 			i += tx_ring->count;
5950 		tx_buffer = &tx_ring->tx_buffer_info[i];
5951 	}
5952 
5953 	if (dma_unmap_len(tx_buffer, len))
5954 		dma_unmap_single(tx_ring->dev,
5955 				 dma_unmap_addr(tx_buffer, dma),
5956 				 dma_unmap_len(tx_buffer, len),
5957 				 DMA_TO_DEVICE);
5958 	dma_unmap_len_set(tx_buffer, len, 0);
5959 
5960 	dev_kfree_skb_any(tx_buffer->skb);
5961 	tx_buffer->skb = NULL;
5962 
5963 	tx_ring->next_to_use = i;
5964 
5965 	return -1;
5966 }
5967 
5968 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
5969 				struct igb_ring *tx_ring)
5970 {
5971 	struct igb_tx_buffer *first;
5972 	int tso;
5973 	u32 tx_flags = 0;
5974 	unsigned short f;
5975 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
5976 	__be16 protocol = vlan_get_protocol(skb);
5977 	u8 hdr_len = 0;
5978 
5979 	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
5980 	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
5981 	 *       + 2 desc gap to keep tail from touching head,
5982 	 *       + 1 desc for context descriptor,
5983 	 * otherwise try next time
5984 	 */
5985 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
5986 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
5987 
5988 	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
5989 		/* this is a hard error */
5990 		return NETDEV_TX_BUSY;
5991 	}
5992 
5993 	/* record the location of the first descriptor for this packet */
5994 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
5995 	first->skb = skb;
5996 	first->bytecount = skb->len;
5997 	first->gso_segs = 1;
5998 
5999 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6000 		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6001 
6002 		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6003 		    !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6004 					   &adapter->state)) {
6005 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6006 			tx_flags |= IGB_TX_FLAGS_TSTAMP;
6007 
6008 			adapter->ptp_tx_skb = skb_get(skb);
6009 			adapter->ptp_tx_start = jiffies;
6010 			if (adapter->hw.mac.type == e1000_82576)
6011 				schedule_work(&adapter->ptp_tx_work);
6012 		} else {
6013 			adapter->tx_hwtstamp_skipped++;
6014 		}
6015 	}
6016 
6017 	skb_tx_timestamp(skb);
6018 
6019 	if (skb_vlan_tag_present(skb)) {
6020 		tx_flags |= IGB_TX_FLAGS_VLAN;
6021 		tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6022 	}
6023 
6024 	/* record initial flags and protocol */
6025 	first->tx_flags = tx_flags;
6026 	first->protocol = protocol;
6027 
6028 	tso = igb_tso(tx_ring, first, &hdr_len);
6029 	if (tso < 0)
6030 		goto out_drop;
6031 	else if (!tso)
6032 		igb_tx_csum(tx_ring, first);
6033 
6034 	if (igb_tx_map(tx_ring, first, hdr_len))
6035 		goto cleanup_tx_tstamp;
6036 
6037 	return NETDEV_TX_OK;
6038 
6039 out_drop:
6040 	dev_kfree_skb_any(first->skb);
6041 	first->skb = NULL;
6042 cleanup_tx_tstamp:
6043 	if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6044 		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6045 
6046 		dev_kfree_skb_any(adapter->ptp_tx_skb);
6047 		adapter->ptp_tx_skb = NULL;
6048 		if (adapter->hw.mac.type == e1000_82576)
6049 			cancel_work_sync(&adapter->ptp_tx_work);
6050 		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6051 	}
6052 
6053 	return NETDEV_TX_OK;
6054 }
6055 
6056 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6057 						    struct sk_buff *skb)
6058 {
6059 	unsigned int r_idx = skb->queue_mapping;
6060 
6061 	if (r_idx >= adapter->num_tx_queues)
6062 		r_idx = r_idx % adapter->num_tx_queues;
6063 
6064 	return adapter->tx_ring[r_idx];
6065 }
6066 
6067 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6068 				  struct net_device *netdev)
6069 {
6070 	struct igb_adapter *adapter = netdev_priv(netdev);
6071 
6072 	/* The minimum packet size with TCTL.PSP set is 17 bytes, so pad the skb
6073 	 * in order to meet this minimum size requirement.
6074 	 */
6075 	if (skb_put_padto(skb, 17))
6076 		return NETDEV_TX_OK;
6077 
6078 	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6079 }
6080 
6081 /**
6082  *  igb_tx_timeout - Respond to a Tx Hang
6083  *  @netdev: network interface device structure
6084  **/
6085 static void igb_tx_timeout(struct net_device *netdev)
6086 {
6087 	struct igb_adapter *adapter = netdev_priv(netdev);
6088 	struct e1000_hw *hw = &adapter->hw;
6089 
6090 	/* Do the reset outside of interrupt context */
6091 	adapter->tx_timeout_count++;
6092 
6093 	if (hw->mac.type >= e1000_82580)
6094 		hw->dev_spec._82575.global_device_reset = true;
6095 
6096 	schedule_work(&adapter->reset_task);
6097 	wr32(E1000_EICS,
6098 	     (adapter->eims_enable_mask & ~adapter->eims_other));
6099 }
6100 
6101 static void igb_reset_task(struct work_struct *work)
6102 {
6103 	struct igb_adapter *adapter;
6104 	adapter = container_of(work, struct igb_adapter, reset_task);
6105 
6106 	igb_dump(adapter);
6107 	netdev_err(adapter->netdev, "Reset adapter\n");
6108 	igb_reinit_locked(adapter);
6109 }
6110 
6111 /**
6112  *  igb_get_stats64 - Get System Network Statistics
6113  *  @netdev: network interface device structure
6114  *  @stats: rtnl_link_stats64 pointer
6115  **/
6116 static void igb_get_stats64(struct net_device *netdev,
6117 			    struct rtnl_link_stats64 *stats)
6118 {
6119 	struct igb_adapter *adapter = netdev_priv(netdev);
6120 
6121 	spin_lock(&adapter->stats64_lock);
6122 	igb_update_stats(adapter);
6123 	memcpy(stats, &adapter->stats64, sizeof(*stats));
6124 	spin_unlock(&adapter->stats64_lock);
6125 }
6126 
6127 /**
6128  *  igb_change_mtu - Change the Maximum Transfer Unit
6129  *  @netdev: network interface device structure
6130  *  @new_mtu: new value for maximum frame size
6131  *
6132  *  Returns 0 on success, negative on failure
6133  **/
6134 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6135 {
6136 	struct igb_adapter *adapter = netdev_priv(netdev);
6137 	struct pci_dev *pdev = adapter->pdev;
6138 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6139 
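	/* For reference: a standard new_mtu of 1500 gives max_frame =
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522.
	 */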
6140 	/* adjust max frame to be at least the size of a standard frame */
6141 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6142 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6143 
6144 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6145 		usleep_range(1000, 2000);
6146 
6147 	/* igb_down has a dependency on max_frame_size */
6148 	adapter->max_frame_size = max_frame;
6149 
6150 	if (netif_running(netdev))
6151 		igb_down(adapter);
6152 
6153 	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
6154 		 netdev->mtu, new_mtu);
6155 	netdev->mtu = new_mtu;
6156 
6157 	if (netif_running(netdev))
6158 		igb_up(adapter);
6159 	else
6160 		igb_reset(adapter);
6161 
6162 	clear_bit(__IGB_RESETTING, &adapter->state);
6163 
6164 	return 0;
6165 }
6166 
6167 /**
6168  *  igb_update_stats - Update the board statistics counters
6169  *  @adapter: board private structure
6170  **/
6171 void igb_update_stats(struct igb_adapter *adapter)
6172 {
6173 	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6174 	struct e1000_hw *hw = &adapter->hw;
6175 	struct pci_dev *pdev = adapter->pdev;
6176 	u32 reg, mpc;
6177 	int i;
6178 	u64 bytes, packets;
6179 	unsigned int start;
6180 	u64 _bytes, _packets;
6181 
6182 	/* Prevent stats update while adapter is being reset, or if the pci
6183 	 * connection is down.
6184 	 */
6185 	if (adapter->link_speed == 0)
6186 		return;
6187 	if (pci_channel_offline(pdev))
6188 		return;
6189 
6190 	bytes = 0;
6191 	packets = 0;
6192 
6193 	rcu_read_lock();
6194 	for (i = 0; i < adapter->num_rx_queues; i++) {
6195 		struct igb_ring *ring = adapter->rx_ring[i];
6196 		u32 rqdpc = rd32(E1000_RQDPC(i));
6197 		if (hw->mac.type >= e1000_i210)
6198 			wr32(E1000_RQDPC(i), 0);
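		/* RQDPC does not appear to be clear-on-read on i210 and
		 * later parts, hence the explicit write of zero above.
		 */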
6199 
6200 		if (rqdpc) {
6201 			ring->rx_stats.drops += rqdpc;
6202 			net_stats->rx_fifo_errors += rqdpc;
6203 		}
6204 
6205 		do {
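		/* u64_stats_fetch_begin_irq/retry_irq loop until the ring's
		 * sequence counter is stable, giving a consistent 64-bit
		 * snapshot even on 32-bit hosts.
		 */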
6206 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6207 			_bytes = ring->rx_stats.bytes;
6208 			_packets = ring->rx_stats.packets;
6209 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6210 		bytes += _bytes;
6211 		packets += _packets;
6212 	}
6213 
6214 	net_stats->rx_bytes = bytes;
6215 	net_stats->rx_packets = packets;
6216 
6217 	bytes = 0;
6218 	packets = 0;
6219 	for (i = 0; i < adapter->num_tx_queues; i++) {
6220 		struct igb_ring *ring = adapter->tx_ring[i];
6221 		do {
6222 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6223 			_bytes = ring->tx_stats.bytes;
6224 			_packets = ring->tx_stats.packets;
6225 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6226 		bytes += _bytes;
6227 		packets += _packets;
6228 	}
6229 	net_stats->tx_bytes = bytes;
6230 	net_stats->tx_packets = packets;
6231 	rcu_read_unlock();
6232 
6233 	/* read stats registers */
6234 	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6235 	adapter->stats.gprc += rd32(E1000_GPRC);
6236 	adapter->stats.gorc += rd32(E1000_GORCL);
6237 	rd32(E1000_GORCH); /* clear GORCL */
6238 	adapter->stats.bprc += rd32(E1000_BPRC);
6239 	adapter->stats.mprc += rd32(E1000_MPRC);
6240 	adapter->stats.roc += rd32(E1000_ROC);
6241 
6242 	adapter->stats.prc64 += rd32(E1000_PRC64);
6243 	adapter->stats.prc127 += rd32(E1000_PRC127);
6244 	adapter->stats.prc255 += rd32(E1000_PRC255);
6245 	adapter->stats.prc511 += rd32(E1000_PRC511);
6246 	adapter->stats.prc1023 += rd32(E1000_PRC1023);
6247 	adapter->stats.prc1522 += rd32(E1000_PRC1522);
6248 	adapter->stats.symerrs += rd32(E1000_SYMERRS);
6249 	adapter->stats.sec += rd32(E1000_SEC);
6250 
6251 	mpc = rd32(E1000_MPC);
6252 	adapter->stats.mpc += mpc;
6253 	net_stats->rx_fifo_errors += mpc;
6254 	adapter->stats.scc += rd32(E1000_SCC);
6255 	adapter->stats.ecol += rd32(E1000_ECOL);
6256 	adapter->stats.mcc += rd32(E1000_MCC);
6257 	adapter->stats.latecol += rd32(E1000_LATECOL);
6258 	adapter->stats.dc += rd32(E1000_DC);
6259 	adapter->stats.rlec += rd32(E1000_RLEC);
6260 	adapter->stats.xonrxc += rd32(E1000_XONRXC);
6261 	adapter->stats.xontxc += rd32(E1000_XONTXC);
6262 	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6263 	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6264 	adapter->stats.fcruc += rd32(E1000_FCRUC);
6265 	adapter->stats.gptc += rd32(E1000_GPTC);
6266 	adapter->stats.gotc += rd32(E1000_GOTCL);
6267 	rd32(E1000_GOTCH); /* clear GOTCL */
6268 	adapter->stats.rnbc += rd32(E1000_RNBC);
6269 	adapter->stats.ruc += rd32(E1000_RUC);
6270 	adapter->stats.rfc += rd32(E1000_RFC);
6271 	adapter->stats.rjc += rd32(E1000_RJC);
6272 	adapter->stats.tor += rd32(E1000_TORH);
6273 	adapter->stats.tot += rd32(E1000_TOTH);
6274 	adapter->stats.tpr += rd32(E1000_TPR);
6275 
6276 	adapter->stats.ptc64 += rd32(E1000_PTC64);
6277 	adapter->stats.ptc127 += rd32(E1000_PTC127);
6278 	adapter->stats.ptc255 += rd32(E1000_PTC255);
6279 	adapter->stats.ptc511 += rd32(E1000_PTC511);
6280 	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6281 	adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6282 
6283 	adapter->stats.mptc += rd32(E1000_MPTC);
6284 	adapter->stats.bptc += rd32(E1000_BPTC);
6285 
6286 	adapter->stats.tpt += rd32(E1000_TPT);
6287 	adapter->stats.colc += rd32(E1000_COLC);
6288 
6289 	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6290 	/* read internal phy specific stats */
6291 	reg = rd32(E1000_CTRL_EXT);
6292 	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6293 		adapter->stats.rxerrc += rd32(E1000_RXERRC);
6294 
6295 		/* this stat has invalid values on i210/i211 */
6296 		if ((hw->mac.type != e1000_i210) &&
6297 		    (hw->mac.type != e1000_i211))
6298 			adapter->stats.tncrs += rd32(E1000_TNCRS);
6299 	}
6300 
6301 	adapter->stats.tsctc += rd32(E1000_TSCTC);
6302 	adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6303 
6304 	adapter->stats.iac += rd32(E1000_IAC);
6305 	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6306 	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6307 	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6308 	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6309 	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6310 	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6311 	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6312 	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6313 
6314 	/* Fill out the OS statistics structure */
6315 	net_stats->multicast = adapter->stats.mprc;
6316 	net_stats->collisions = adapter->stats.colc;
6317 
6318 	/* Rx Errors */
6319 
6320 	/* RLEC on some newer hardware can be incorrect so build
6321 	 * our own version based on RUC and ROC
6322 	 */
6323 	net_stats->rx_errors = adapter->stats.rxerrc +
6324 		adapter->stats.crcerrs + adapter->stats.algnerrc +
6325 		adapter->stats.ruc + adapter->stats.roc +
6326 		adapter->stats.cexterr;
6327 	net_stats->rx_length_errors = adapter->stats.ruc +
6328 				      adapter->stats.roc;
6329 	net_stats->rx_crc_errors = adapter->stats.crcerrs;
6330 	net_stats->rx_frame_errors = adapter->stats.algnerrc;
6331 	net_stats->rx_missed_errors = adapter->stats.mpc;
6332 
6333 	/* Tx Errors */
6334 	net_stats->tx_errors = adapter->stats.ecol +
6335 			       adapter->stats.latecol;
6336 	net_stats->tx_aborted_errors = adapter->stats.ecol;
6337 	net_stats->tx_window_errors = adapter->stats.latecol;
6338 	net_stats->tx_carrier_errors = adapter->stats.tncrs;
6339 
6340 	/* Tx Dropped needs to be maintained elsewhere */
6341 
6342 	/* Management Stats */
6343 	adapter->stats.mgptc += rd32(E1000_MGTPTC);
6344 	adapter->stats.mgprc += rd32(E1000_MGTPRC);
6345 	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6346 
6347 	/* OS2BMC Stats */
6348 	reg = rd32(E1000_MANC);
6349 	if (reg & E1000_MANC_EN_BMC2OS) {
6350 		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6351 		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6352 		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6353 		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6354 	}
6355 }
6356 
6357 static void igb_tsync_interrupt(struct igb_adapter *adapter)
6358 {
6359 	struct e1000_hw *hw = &adapter->hw;
6360 	struct ptp_clock_event event;
6361 	struct timespec64 ts;
6362 	u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6363 
6364 	if (tsicr & TSINTR_SYS_WRAP) {
6365 		event.type = PTP_CLOCK_PPS;
6366 		if (adapter->ptp_caps.pps)
6367 			ptp_clock_event(adapter->ptp_clock, &event);
6368 		ack |= TSINTR_SYS_WRAP;
6369 	}
6370 
6371 	if (tsicr & E1000_TSICR_TXTS) {
6372 		/* retrieve hardware timestamp */
6373 		schedule_work(&adapter->ptp_tx_work);
6374 		ack |= E1000_TSICR_TXTS;
6375 	}
6376 
6377 	if (tsicr & TSINTR_TT0) {
6378 		spin_lock(&adapter->tmreg_lock);
6379 		ts = timespec64_add(adapter->perout[0].start,
6380 				    adapter->perout[0].period);
6381 		/* u32 conversion of tv_sec is safe until y2106 */
6382 		wr32(E1000_TRGTTIML0, ts.tv_nsec);
6383 		wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6384 		tsauxc = rd32(E1000_TSAUXC);
6385 		tsauxc |= TSAUXC_EN_TT0;
6386 		wr32(E1000_TSAUXC, tsauxc);
6387 		adapter->perout[0].start = ts;
6388 		spin_unlock(&adapter->tmreg_lock);
6389 		ack |= TSINTR_TT0;
6390 	}
6391 
6392 	if (tsicr & TSINTR_TT1) {
6393 		spin_lock(&adapter->tmreg_lock);
6394 		ts = timespec64_add(adapter->perout[1].start,
6395 				    adapter->perout[1].period);
6396 		wr32(E1000_TRGTTIML1, ts.tv_nsec);
6397 		wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6398 		tsauxc = rd32(E1000_TSAUXC);
6399 		tsauxc |= TSAUXC_EN_TT1;
6400 		wr32(E1000_TSAUXC, tsauxc);
6401 		adapter->perout[1].start = ts;
6402 		spin_unlock(&adapter->tmreg_lock);
6403 		ack |= TSINTR_TT1;
6404 	}
6405 
6406 	if (tsicr & TSINTR_AUTT0) {
6407 		nsec = rd32(E1000_AUXSTMPL0);
6408 		sec  = rd32(E1000_AUXSTMPH0);
6409 		event.type = PTP_CLOCK_EXTTS;
6410 		event.index = 0;
6411 		event.timestamp = sec * 1000000000ULL + nsec;
6412 		ptp_clock_event(adapter->ptp_clock, &event);
6413 		ack |= TSINTR_AUTT0;
6414 	}
6415 
6416 	if (tsicr & TSINTR_AUTT1) {
6417 		nsec = rd32(E1000_AUXSTMPL1);
6418 		sec  = rd32(E1000_AUXSTMPH1);
6419 		event.type = PTP_CLOCK_EXTTS;
6420 		event.index = 1;
6421 		event.timestamp = sec * 1000000000ULL + nsec;
6422 		ptp_clock_event(adapter->ptp_clock, &event);
6423 		ack |= TSINTR_AUTT1;
6424 	}
6425 
6426 	/* acknowledge the interrupts */
6427 	wr32(E1000_TSICR, ack);
6428 }
6429 
6430 static irqreturn_t igb_msix_other(int irq, void *data)
6431 {
6432 	struct igb_adapter *adapter = data;
6433 	struct e1000_hw *hw = &adapter->hw;
6434 	u32 icr = rd32(E1000_ICR);
6435 	/* reading ICR causes bit 31 of EICR to be cleared */
6436 
6437 	if (icr & E1000_ICR_DRSTA)
6438 		schedule_work(&adapter->reset_task);
6439 
6440 	if (icr & E1000_ICR_DOUTSYNC) {
6441 		/* HW is reporting DMA is out of sync */
6442 		adapter->stats.doosync++;
6443 		/* The DMA Out of Sync is also an indication of a spoof event
6444 		 * in IOV mode. Check the Wrong VM Behavior register to
6445 		 * see if it is really a spoof event.
6446 		 */
6447 		igb_check_wvbr(adapter);
6448 	}
6449 
6450 	/* Check for a mailbox event */
6451 	if (icr & E1000_ICR_VMMB)
6452 		igb_msg_task(adapter);
6453 
6454 	if (icr & E1000_ICR_LSC) {
6455 		hw->mac.get_link_status = 1;
6456 		/* guard against interrupt when we're going down */
6457 		if (!test_bit(__IGB_DOWN, &adapter->state))
6458 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
6459 	}
6460 
6461 	if (icr & E1000_ICR_TS)
6462 		igb_tsync_interrupt(adapter);
6463 
6464 	wr32(E1000_EIMS, adapter->eims_other);
6465 
6466 	return IRQ_HANDLED;
6467 }
6468 
6469 static void igb_write_itr(struct igb_q_vector *q_vector)
6470 {
6471 	struct igb_adapter *adapter = q_vector->adapter;
6472 	u32 itr_val = q_vector->itr_val & 0x7FFC;
6473 
6474 	if (!q_vector->set_itr)
6475 		return;
6476 
6477 	if (!itr_val)
6478 		itr_val = 0x4;
6479 
6480 	if (adapter->hw.mac.type == e1000_82575)
6481 		itr_val |= itr_val << 16;
6482 	else
6483 		itr_val |= E1000_EITR_CNT_IGNR;
6484 
6485 	writel(itr_val, q_vector->itr_register);
6486 	q_vector->set_itr = 0;
6487 }
6488 
6489 static irqreturn_t igb_msix_ring(int irq, void *data)
6490 {
6491 	struct igb_q_vector *q_vector = data;
6492 
6493 	/* Write the ITR value calculated from the previous interrupt. */
6494 	igb_write_itr(q_vector);
6495 
6496 	napi_schedule(&q_vector->napi);
6497 
6498 	return IRQ_HANDLED;
6499 }
6500 
6501 #ifdef CONFIG_IGB_DCA
6502 static void igb_update_tx_dca(struct igb_adapter *adapter,
6503 			      struct igb_ring *tx_ring,
6504 			      int cpu)
6505 {
6506 	struct e1000_hw *hw = &adapter->hw;
6507 	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6508 
6509 	if (hw->mac.type != e1000_82575)
6510 		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6511 
6512 	/* We can enable relaxed ordering for reads, but not writes when
6513 	 * DCA is enabled.  This is due to a known issue in some chipsets
6514 	 * which will cause the DCA tag to be cleared.
6515 	 */
6516 	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6517 		  E1000_DCA_TXCTRL_DATA_RRO_EN |
6518 		  E1000_DCA_TXCTRL_DESC_DCA_EN;
6519 
6520 	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6521 }
6522 
6523 static void igb_update_rx_dca(struct igb_adapter *adapter,
6524 			      struct igb_ring *rx_ring,
6525 			      int cpu)
6526 {
6527 	struct e1000_hw *hw = &adapter->hw;
6528 	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6529 
6530 	if (hw->mac.type != e1000_82575)
6531 		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6532 
6533 	/* We can enable relaxed ordering for reads, but not writes when
6534 	 * DCA is enabled.  This is due to a known issue in some chipsets
6535 	 * which will cause the DCA tag to be cleared.
6536 	 */
6537 	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6538 		  E1000_DCA_RXCTRL_DESC_DCA_EN;
6539 
6540 	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6541 }
6542 
6543 static void igb_update_dca(struct igb_q_vector *q_vector)
6544 {
6545 	struct igb_adapter *adapter = q_vector->adapter;
6546 	int cpu = get_cpu();
6547 
6548 	if (q_vector->cpu == cpu)
6549 		goto out_no_update;
6550 
6551 	if (q_vector->tx.ring)
6552 		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6553 
6554 	if (q_vector->rx.ring)
6555 		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6556 
6557 	q_vector->cpu = cpu;
6558 out_no_update:
6559 	put_cpu();
6560 }
6561 
6562 static void igb_setup_dca(struct igb_adapter *adapter)
6563 {
6564 	struct e1000_hw *hw = &adapter->hw;
6565 	int i;
6566 
6567 	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6568 		return;
6569 
6570 	/* Always use CB2 mode, difference is masked in the CB driver. */
6571 	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6572 
6573 	for (i = 0; i < adapter->num_q_vectors; i++) {
6574 		adapter->q_vector[i]->cpu = -1;
6575 		igb_update_dca(adapter->q_vector[i]);
6576 	}
6577 }
6578 
6579 static int __igb_notify_dca(struct device *dev, void *data)
6580 {
6581 	struct net_device *netdev = dev_get_drvdata(dev);
6582 	struct igb_adapter *adapter = netdev_priv(netdev);
6583 	struct pci_dev *pdev = adapter->pdev;
6584 	struct e1000_hw *hw = &adapter->hw;
6585 	unsigned long event = *(unsigned long *)data;
6586 
6587 	switch (event) {
6588 	case DCA_PROVIDER_ADD:
6589 		/* if already enabled, don't do it again */
6590 		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6591 			break;
6592 		if (dca_add_requester(dev) == 0) {
6593 			adapter->flags |= IGB_FLAG_DCA_ENABLED;
6594 			dev_info(&pdev->dev, "DCA enabled\n");
6595 			igb_setup_dca(adapter);
6596 			break;
6597 		}
6598 		/* Fall Through since DCA is disabled. */
6599 	case DCA_PROVIDER_REMOVE:
6600 		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
6601 			/* without this a class_device is left
6602 			 * hanging around in the sysfs model
6603 			 */
6604 			dca_remove_requester(dev);
6605 			dev_info(&pdev->dev, "DCA disabled\n");
6606 			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
6607 			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
6608 		}
6609 		break;
6610 	}
6611 
6612 	return 0;
6613 }
6614 
6615 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
6616 			  void *p)
6617 {
6618 	int ret_val;
6619 
6620 	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
6621 					 __igb_notify_dca);
6622 
6623 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6624 }
6625 #endif /* CONFIG_IGB_DCA */
6626 
6627 #ifdef CONFIG_PCI_IOV
6628 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6629 {
6630 	unsigned char mac_addr[ETH_ALEN];
6631 
6632 	eth_zero_addr(mac_addr);
6633 	igb_set_vf_mac(adapter, vf, mac_addr);
6634 
6635 	/* By default spoof check is enabled for all VFs */
6636 	adapter->vf_data[vf].spoofchk_enabled = true;
6637 
6638 	/* By default VFs are not trusted */
6639 	adapter->vf_data[vf].trusted = false;
6640 
6641 	return 0;
6642 }
6643 
6644 #endif
6645 static void igb_ping_all_vfs(struct igb_adapter *adapter)
6646 {
6647 	struct e1000_hw *hw = &adapter->hw;
6648 	u32 ping;
6649 	int i;
6650 
6651 	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
6652 		ping = E1000_PF_CONTROL_MSG;
6653 		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
6654 			ping |= E1000_VT_MSGTYPE_CTS;
6655 		igb_write_mbx(hw, &ping, 1, i);
6656 	}
6657 }
6658 
6659 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6660 {
6661 	struct e1000_hw *hw = &adapter->hw;
6662 	u32 vmolr = rd32(E1000_VMOLR(vf));
6663 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6664 
6665 	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
6666 			    IGB_VF_FLAG_MULTI_PROMISC);
6667 	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6668 
6669 	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
6670 		vmolr |= E1000_VMOLR_MPME;
6671 		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
6672 		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
6673 	} else {
6674 		/* if we have hashes and we are clearing a multicast promisc
6675 		 * flag, we need to write the hashes to the MTA, as this step
6676 		 * was previously skipped
6677 		 */
6678 		if (vf_data->num_vf_mc_hashes > 30) {
6679 			vmolr |= E1000_VMOLR_MPME;
6680 		} else if (vf_data->num_vf_mc_hashes) {
6681 			int j;
6682 
6683 			vmolr |= E1000_VMOLR_ROMPE;
6684 			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6685 				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6686 		}
6687 	}
6688 
6689 	wr32(E1000_VMOLR(vf), vmolr);
6690 
6691 	/* there are flags left unprocessed, likely not supported */
6692 	if (*msgbuf & E1000_VT_MSGINFO_MASK)
6693 		return -EINVAL;
6694 
6695 	return 0;
6696 }
6697 
6698 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
6699 				  u32 *msgbuf, u32 vf)
6700 {
6701 	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6702 	u16 *hash_list = (u16 *)&msgbuf[1];
6703 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6704 	int i;
6705 
6706 	/* salt away the number of multicast addresses assigned
6707 	 * to this VF so the list can be restored when the PF
6708 	 * multicast list changes
6709 	 */
6710 	vf_data->num_vf_mc_hashes = n;
6711 
6712 	/* only up to 30 hash values supported */
6713 	if (n > 30)
6714 		n = 30;
6715 
6716 	/* store the hashes for later use */
6717 	for (i = 0; i < n; i++)
6718 		vf_data->vf_mc_hashes[i] = hash_list[i];
6719 
6720 	/* Flush and reset the mta with the new values */
6721 	igb_set_rx_mode(adapter->netdev);
6722 
6723 	return 0;
6724 }
6725 
6726 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6727 {
6728 	struct e1000_hw *hw = &adapter->hw;
6729 	struct vf_data_storage *vf_data;
6730 	int i, j;
6731 
6732 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
6733 		u32 vmolr = rd32(E1000_VMOLR(i));
6734 
6735 		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6736 
6737 		vf_data = &adapter->vf_data[i];
6738 
6739 		if ((vf_data->num_vf_mc_hashes > 30) ||
6740 		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6741 			vmolr |= E1000_VMOLR_MPME;
6742 		} else if (vf_data->num_vf_mc_hashes) {
6743 			vmolr |= E1000_VMOLR_ROMPE;
6744 			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6745 				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6746 		}
6747 		wr32(E1000_VMOLR(i), vmolr);
6748 	}
6749 }
6750 
6751 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6752 {
6753 	struct e1000_hw *hw = &adapter->hw;
6754 	u32 pool_mask, vlvf_mask, i;
6755 
6756 	/* create mask for VF and other pools */
6757 	pool_mask = E1000_VLVF_POOLSEL_MASK;
6758 	vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
6759 
6760 	/* drop PF from pool bits */
6761 	pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6762 			     adapter->vfs_allocated_count);
6763 
6764 	/* Find the vlan filter for this id */
6765 	for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6766 		u32 vlvf = rd32(E1000_VLVF(i));
6767 		u32 vfta_mask, vid, vfta;
6768 
6769 		/* remove the vf from the pool */
6770 		if (!(vlvf & vlvf_mask))
6771 			continue;
6772 
6773 		/* clear out bit from VLVF */
6774 		vlvf ^= vlvf_mask;
6775 
6776 		/* if other pools are present, just remove ourselves */
6777 		if (vlvf & pool_mask)
6778 			goto update_vlvfb;
6779 
6780 		/* if PF is present, leave VFTA */
6781 		if (vlvf & E1000_VLVF_POOLSEL_MASK)
6782 			goto update_vlvf;
6783 
6784 		vid = vlvf & E1000_VLVF_VLANID_MASK;
6785 		vfta_mask = BIT(vid % 32);
6786 
6787 		/* clear bit from VFTA */
6788 		vfta = adapter->shadow_vfta[vid / 32];
6789 		if (vfta & vfta_mask)
6790 			hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6791 update_vlvf:
6792 		/* clear pool selection enable */
6793 		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6794 			vlvf &= E1000_VLVF_POOLSEL_MASK;
6795 		else
6796 			vlvf = 0;
6797 update_vlvfb:
6798 		/* clear pool bits */
6799 		wr32(E1000_VLVF(i), vlvf);
6800 	}
6801 }
6802 
6803 static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
6804 {
6805 	u32 vlvf;
6806 	int idx;
6807 
6808 	/* short cut the special case */
6809 	if (vlan == 0)
6810 		return 0;
6811 
6812 	/* Search for the VLAN id in the VLVF entries */
6813 	for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6814 		vlvf = rd32(E1000_VLVF(idx));
6815 		if ((vlvf & VLAN_VID_MASK) == vlan)
6816 			break;
6817 	}
6818 
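	/* Note: the loop above never tests index 0, so a return value of 0
	 * doubles as "no entry found", matching the vlan == 0 short cut.
	 */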
6819 	return idx;
6820 }
6821 
6822 static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
6823 {
6824 	struct e1000_hw *hw = &adapter->hw;
6825 	u32 bits, pf_id;
6826 	int idx;
6827 
6828 	idx = igb_find_vlvf_entry(hw, vid);
6829 	if (!idx)
6830 		return;
6831 
6832 	/* See if any other pools are set for this VLAN filter
6833 	 * entry other than the PF.
6834 	 */
6835 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
6836 	bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
6837 	bits &= rd32(E1000_VLVF(idx));
6838 
6839 	/* Disable the filter so this falls into the default pool. */
6840 	if (!bits) {
6841 		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6842 			wr32(E1000_VLVF(idx), BIT(pf_id));
6843 		else
6844 			wr32(E1000_VLVF(idx), 0);
6845 	}
6846 }
6847 
6848 static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6849 			   bool add, u32 vf)
6850 {
6851 	int pf_id = adapter->vfs_allocated_count;
6852 	struct e1000_hw *hw = &adapter->hw;
6853 	int err;
6854 
6855 	/* If the VLAN overlaps with one the PF is currently monitoring,
6856 	 * make sure that we are able to allocate a VLVF entry.  This may
6857 	 * be redundant, but it guarantees the PF will maintain visibility to
6858 	 * the VLAN.
6859 	 */
6860 	if (add && test_bit(vid, adapter->active_vlans)) {
6861 		err = igb_vfta_set(hw, vid, pf_id, true, false);
6862 		if (err)
6863 			return err;
6864 	}
6865 
6866 	err = igb_vfta_set(hw, vid, vf, add, false);
6867 
6868 	if (add && !err)
6869 		return err;
6870 
6871 	/* If we failed to add the VF VLAN or we are removing the VF VLAN
6872 	 * we may need to drop the PF pool bit in order to allow us to free
6873 	 * up the VLVF resources.
6874 	 */
6875 	if (test_bit(vid, adapter->active_vlans) ||
6876 	    (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6877 		igb_update_pf_vlvf(adapter, vid);
6878 
6879 	return err;
6880 }
6881 
6882 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
6883 {
6884 	struct e1000_hw *hw = &adapter->hw;
6885 
6886 	if (vid)
6887 		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
6888 	else
6889 		wr32(E1000_VMVIR(vf), 0);
6890 }
6891 
6892 static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
6893 				u16 vlan, u8 qos)
6894 {
6895 	int err;
6896 
6897 	err = igb_set_vf_vlan(adapter, vlan, true, vf);
6898 	if (err)
6899 		return err;
6900 
6901 	igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
6902 	igb_set_vmolr(adapter, vf, !vlan);
6903 
6904 	/* revoke access to previous VLAN */
6905 	if (vlan != adapter->vf_data[vf].pf_vlan)
6906 		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6907 				false, vf);
6908 
6909 	adapter->vf_data[vf].pf_vlan = vlan;
6910 	adapter->vf_data[vf].pf_qos = qos;
6911 	igb_set_vf_vlan_strip(adapter, vf, true);
6912 	dev_info(&adapter->pdev->dev,
6913 		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
6914 	if (test_bit(__IGB_DOWN, &adapter->state)) {
6915 		dev_warn(&adapter->pdev->dev,
6916 			 "The VF VLAN has been set, but the PF device is not up.\n");
6917 		dev_warn(&adapter->pdev->dev,
6918 			 "Bring the PF device up before attempting to use the VF device.\n");
6919 	}
6920 
6921 	return err;
6922 }
6923 
6924 static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
6925 {
6926 	/* Restore tagless access via VLAN 0 */
6927 	igb_set_vf_vlan(adapter, 0, true, vf);
6928 
6929 	igb_set_vmvir(adapter, 0, vf);
6930 	igb_set_vmolr(adapter, vf, true);
6931 
6932 	/* Remove any PF assigned VLAN */
6933 	if (adapter->vf_data[vf].pf_vlan)
6934 		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6935 				false, vf);
6936 
6937 	adapter->vf_data[vf].pf_vlan = 0;
6938 	adapter->vf_data[vf].pf_qos = 0;
6939 	igb_set_vf_vlan_strip(adapter, vf, false);
6940 
6941 	return 0;
6942 }
6943 
6944 static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
6945 			       u16 vlan, u8 qos, __be16 vlan_proto)
6946 {
6947 	struct igb_adapter *adapter = netdev_priv(netdev);
6948 
6949 	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
6950 		return -EINVAL;
6951 
6952 	if (vlan_proto != htons(ETH_P_8021Q))
6953 		return -EPROTONOSUPPORT;
6954 
6955 	return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
6956 			       igb_disable_port_vlan(adapter, vf);
6957 }
6958 
6959 static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6960 {
6961 	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6962 	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
6963 	int ret;
6964 
6965 	if (adapter->vf_data[vf].pf_vlan)
6966 		return -1;
6967 
6968 	/* VLAN 0 is a special case, don't allow it to be removed */
6969 	if (!vid && !add)
6970 		return 0;
6971 
6972 	ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
6973 	if (!ret)
6974 		igb_set_vf_vlan_strip(adapter, vf, !!vid);
6975 	return ret;
6976 }
6977 
6978 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
6979 {
6980 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6981 
6982 	/* clear flags - except flag that indicates PF has set the MAC */
6983 	vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
6984 	vf_data->last_nack = jiffies;
6985 
6986 	/* reset vlans for device */
6987 	igb_clear_vf_vfta(adapter, vf);
6988 	igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
6989 	igb_set_vmvir(adapter, vf_data->pf_vlan |
6990 			       (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
6991 	igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
6992 	igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
6993 
6994 	/* reset multicast table array for vf */
6995 	adapter->vf_data[vf].num_vf_mc_hashes = 0;
6996 
6997 	/* Flush and reset the mta with the new values */
6998 	igb_set_rx_mode(adapter->netdev);
6999 }
7000 
7001 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7002 {
7003 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7004 
7005 	/* clear mac address as we were hotplug removed/added */
7006 	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7007 		eth_zero_addr(vf_mac);
7008 
7009 	/* process remaining reset events */
7010 	igb_vf_reset(adapter, vf);
7011 }
7012 
7013 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7014 {
7015 	struct e1000_hw *hw = &adapter->hw;
7016 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7017 	u32 reg, msgbuf[3];
7018 	u8 *addr = (u8 *)(&msgbuf[1]);
7019 
7020 	/* process all the same items cleared in a function level reset */
7021 	igb_vf_reset(adapter, vf);
7022 
7023 	/* set vf mac address */
7024 	igb_set_vf_mac(adapter, vf, vf_mac);
7025 
7026 	/* enable transmit and receive for vf */
7027 	reg = rd32(E1000_VFTE);
7028 	wr32(E1000_VFTE, reg | BIT(vf));
7029 	reg = rd32(E1000_VFRE);
7030 	wr32(E1000_VFRE, reg | BIT(vf));
7031 
7032 	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7033 
7034 	/* reply to reset with ack and vf mac address */
7035 	if (!is_zero_ether_addr(vf_mac)) {
7036 		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7037 		memcpy(addr, vf_mac, ETH_ALEN);
7038 	} else {
7039 		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7040 	}
7041 	igb_write_mbx(hw, msgbuf, 3, vf);
7042 }
7043 
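/* Invalidate every entry in the software MAC table and push the
 * cleared state out to the hardware RAR registers.
 */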
7044 static void igb_flush_mac_table(struct igb_adapter *adapter)
7045 {
7046 	struct e1000_hw *hw = &adapter->hw;
7047 	int i;
7048 
7049 	for (i = 0; i < hw->mac.rar_entry_count; i++) {
7050 		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7051 		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7052 		adapter->mac_table[i].queue = 0;
7053 		igb_rar_set_index(adapter, i);
7054 	}
7055 }
7056 
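/* Count the RAR entries that could accept a filter steering traffic to
 * 'queue': free entries plus entries already in use by that same queue,
 * excluding the default entry and the slots reserved for VF MACs.
 */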
7057 static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7058 {
7059 	struct e1000_hw *hw = &adapter->hw;
	/* do not count RAR entries reserved for VF MAC addresses */
7061 	int rar_entries = hw->mac.rar_entry_count -
7062 			  adapter->vfs_allocated_count;
7063 	int i, count = 0;
7064 
7065 	for (i = 0; i < rar_entries; i++) {
7066 		/* do not count default entries */
7067 		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7068 			continue;
7069 
7070 		/* do not count "in use" entries for different queues */
7071 		if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7072 		    (adapter->mac_table[i].queue != queue))
7073 			continue;
7074 
7075 		count++;
7076 	}
7077 
7078 	return count;
7079 }
7080 
7081 /* Set default MAC address for the PF in the first RAR entry */
7082 static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7083 {
7084 	struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7085 
7086 	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7087 	mac_table->queue = adapter->vfs_allocated_count;
7088 	mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7089 
7090 	igb_rar_set_index(adapter, 0);
7091 }
7092 
/* If the filter to be added and an already existing filter express
 * the same address and address type, the existing entry can be reused
 * and only its other settings, for example the queue used to steer
 * traffic, need to be overridden.
 */
7098 static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7099 				      const u8 *addr, const u8 flags)
7100 {
7101 	if (!(entry->state & IGB_MAC_STATE_IN_USE))
7102 		return true;
7103 
7104 	if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7105 	    (flags & IGB_MAC_STATE_SRC_ADDR))
7106 		return false;
7107 
7108 	if (!ether_addr_equal(addr, entry->addr))
7109 		return false;
7110 
7111 	return true;
7112 }
7113 
/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
 * 'flags' indicates what kind of match is made: the match is on the
 * destination address by default; pass IGB_MAC_STATE_SRC_ADDR to match
 * on the source address instead.
 */
7119 static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7120 				    const u8 *addr, const u8 queue,
7121 				    const u8 flags)
7122 {
7123 	struct e1000_hw *hw = &adapter->hw;
7124 	int rar_entries = hw->mac.rar_entry_count -
7125 			  adapter->vfs_allocated_count;
7126 	int i;
7127 
7128 	if (is_zero_ether_addr(addr))
7129 		return -EINVAL;
7130 
7131 	/* Search for the first empty entry in the MAC table.
7132 	 * Do not touch entries at the end of the table reserved for the VF MAC
7133 	 * addresses.
7134 	 */
7135 	for (i = 0; i < rar_entries; i++) {
7136 		if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7137 					       addr, flags))
7138 			continue;
7139 
7140 		ether_addr_copy(adapter->mac_table[i].addr, addr);
7141 		adapter->mac_table[i].queue = queue;
7142 		adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7143 
7144 		igb_rar_set_index(adapter, i);
7145 		return i;
7146 	}
7147 
7148 	return -ENOSPC;
7149 }
7150 
7151 static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7152 			      const u8 queue)
7153 {
7154 	return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7155 }
7156 
/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue'. 'flags' indicates what kind of match needs to be removed:
 * the match is on the destination address by default; pass
 * IGB_MAC_STATE_SRC_ADDR to remove a source address match instead.
 */
7163 static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7164 				    const u8 *addr, const u8 queue,
7165 				    const u8 flags)
7166 {
7167 	struct e1000_hw *hw = &adapter->hw;
7168 	int rar_entries = hw->mac.rar_entry_count -
7169 			  adapter->vfs_allocated_count;
7170 	int i;
7171 
7172 	if (is_zero_ether_addr(addr))
7173 		return -EINVAL;
7174 
7175 	/* Search for matching entry in the MAC table based on given address
7176 	 * and queue. Do not touch entries at the end of the table reserved
7177 	 * for the VF MAC addresses.
7178 	 */
7179 	for (i = 0; i < rar_entries; i++) {
7180 		if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7181 			continue;
7182 		if ((adapter->mac_table[i].state & flags) != flags)
7183 			continue;
7184 		if (adapter->mac_table[i].queue != queue)
7185 			continue;
7186 		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7187 			continue;
7188 
7189 		/* When a filter for the default address is "deleted",
7190 		 * we return it to its initial configuration
7191 		 */
7192 		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7193 			adapter->mac_table[i].state =
7194 				IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7195 			adapter->mac_table[i].queue =
7196 				adapter->vfs_allocated_count;
7197 		} else {
7198 			adapter->mac_table[i].state = 0;
7199 			adapter->mac_table[i].queue = 0;
7200 			memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7201 		}
7202 
7203 		igb_rar_set_index(adapter, i);
7204 		return 0;
7205 	}
7206 
7207 	return -ENOENT;
7208 }
7209 
7210 static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7211 			      const u8 queue)
7212 {
7213 	return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7214 }
7215 
7216 int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7217 				const u8 *addr, u8 queue, u8 flags)
7218 {
7219 	struct e1000_hw *hw = &adapter->hw;
7220 
7221 	/* In theory, this should be supported on 82575 as well, but
7222 	 * that part wasn't easily accessible during development.
7223 	 */
7224 	if (hw->mac.type != e1000_i210)
7225 		return -EOPNOTSUPP;
7226 
7227 	return igb_add_mac_filter_flags(adapter, addr, queue,
7228 					IGB_MAC_STATE_QUEUE_STEERING | flags);
7229 }
7230 
7231 int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7232 				const u8 *addr, u8 queue, u8 flags)
7233 {
7234 	return igb_del_mac_filter_flags(adapter, addr, queue,
7235 					IGB_MAC_STATE_QUEUE_STEERING | flags);
7236 }
7237 
7238 static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7239 {
7240 	struct igb_adapter *adapter = netdev_priv(netdev);
7241 	int ret;
7242 
7243 	ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7244 
7245 	return min_t(int, ret, 0);
7246 }
7247 
7248 static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7249 {
7250 	struct igb_adapter *adapter = netdev_priv(netdev);
7251 
7252 	igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7253 
7254 	return 0;
7255 }
7256 
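/* Service a unicast MAC filter request from a VF. E1000_VF_MAC_FILTER_CLR
 * releases every filter owned by the VF; E1000_VF_MAC_FILTER_ADD claims a
 * free slot in the shared vf_macs list and programs a RAR entry for the
 * address. Untrusted VFs whose MAC was administratively set may not add
 * filters.
 */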
7257 static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7258 				 const u32 info, const u8 *addr)
7259 {
7260 	struct pci_dev *pdev = adapter->pdev;
7261 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7262 	struct list_head *pos;
7263 	struct vf_mac_filter *entry = NULL;
7264 	int ret = 0;
7265 
7266 	switch (info) {
7267 	case E1000_VF_MAC_FILTER_CLR:
7268 		/* remove all unicast MAC filters related to the current VF */
7269 		list_for_each(pos, &adapter->vf_macs.l) {
7270 			entry = list_entry(pos, struct vf_mac_filter, l);
7271 			if (entry->vf == vf) {
7272 				entry->vf = -1;
7273 				entry->free = true;
7274 				igb_del_mac_filter(adapter, entry->vf_mac, vf);
7275 			}
7276 		}
7277 		break;
7278 	case E1000_VF_MAC_FILTER_ADD:
7279 		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7280 		    !vf_data->trusted) {
7281 			dev_warn(&pdev->dev,
7282 				 "VF %d requested MAC filter but is administratively denied\n",
7283 				 vf);
7284 			return -EINVAL;
7285 		}
7286 		if (!is_valid_ether_addr(addr)) {
7287 			dev_warn(&pdev->dev,
7288 				 "VF %d attempted to set invalid MAC filter\n",
7289 				 vf);
7290 			return -EINVAL;
7291 		}
7292 
7293 		/* try to find empty slot in the list */
7294 		list_for_each(pos, &adapter->vf_macs.l) {
7295 			entry = list_entry(pos, struct vf_mac_filter, l);
7296 			if (entry->free)
7297 				break;
7298 		}
7299 
7300 		if (entry && entry->free) {
7301 			entry->free = false;
7302 			entry->vf = vf;
7303 			ether_addr_copy(entry->vf_mac, addr);
7304 
7305 			ret = igb_add_mac_filter(adapter, addr, vf);
7306 			ret = min_t(int, ret, 0);
7307 		} else {
7308 			ret = -ENOSPC;
7309 		}
7310 
7311 		if (ret == -ENOSPC)
7312 			dev_warn(&pdev->dev,
7313 				 "VF %d has requested MAC filter but there is no space for it\n",
7314 				 vf);
7315 		break;
7316 	default:
7317 		ret = -EINVAL;
7318 		break;
7319 	}
7320 
7321 	return ret;
7322 }
7323 
7324 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7325 {
7326 	struct pci_dev *pdev = adapter->pdev;
7327 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7328 	u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7329 
7330 	/* The VF MAC Address is stored in a packed array of bytes
7331 	 * starting at the second 32 bit word of the msg array
7332 	 */
7333 	unsigned char *addr = (unsigned char *)&msg[1];
7334 	int ret = 0;
7335 
7336 	if (!info) {
7337 		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7338 		    !vf_data->trusted) {
7339 			dev_warn(&pdev->dev,
7340 				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7341 				 vf);
7342 			return -EINVAL;
7343 		}
7344 
7345 		if (!is_valid_ether_addr(addr)) {
7346 			dev_warn(&pdev->dev,
7347 				 "VF %d attempted to set invalid MAC\n",
7348 				 vf);
7349 			return -EINVAL;
7350 		}
7351 
7352 		ret = igb_set_vf_mac(adapter, vf, addr);
7353 	} else {
7354 		ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7355 	}
7356 
7357 	return ret;
7358 }
7359 
7360 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7361 {
7362 	struct e1000_hw *hw = &adapter->hw;
7363 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7364 	u32 msg = E1000_VT_MSGTYPE_NACK;
7365 
7366 	/* if device isn't clear to send it shouldn't be reading either */
7367 	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7368 	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7369 		igb_write_mbx(hw, &msg, 1, vf);
7370 		vf_data->last_nack = jiffies;
7371 	}
7372 }
7373 
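/* Read and dispatch one mailbox message from a VF. igb_read_mbx() takes
 * the mailbox lock, which must be released on every path: by the final
 * igb_write_mbx() reply, inside igb_vf_reset_msg(), or explicitly via
 * igb_unlock_mbx() when no reply is sent.
 */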
7374 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7375 {
7376 	struct pci_dev *pdev = adapter->pdev;
7377 	u32 msgbuf[E1000_VFMAILBOX_SIZE];
7378 	struct e1000_hw *hw = &adapter->hw;
7379 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7380 	s32 retval;
7381 
7382 	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7383 
7384 	if (retval) {
7385 		/* if receive failed revoke VF CTS stats and restart init */
7386 		dev_err(&pdev->dev, "Error receiving message from VF\n");
7387 		vf_data->flags &= ~IGB_VF_FLAG_CTS;
7388 		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7389 			goto unlock;
7390 		goto out;
7391 	}
7392 
7393 	/* this is a message we already processed, do nothing */
7394 	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7395 		goto unlock;
7396 
7397 	/* until the vf completes a reset it should not be
7398 	 * allowed to start any configuration.
7399 	 */
7400 	if (msgbuf[0] == E1000_VF_RESET) {
7401 		/* unlocks mailbox */
7402 		igb_vf_reset_msg(adapter, vf);
7403 		return;
7404 	}
7405 
7406 	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7407 		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7408 			goto unlock;
7409 		retval = -1;
7410 		goto out;
7411 	}
7412 
7413 	switch ((msgbuf[0] & 0xFFFF)) {
7414 	case E1000_VF_SET_MAC_ADDR:
7415 		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7416 		break;
7417 	case E1000_VF_SET_PROMISC:
7418 		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7419 		break;
7420 	case E1000_VF_SET_MULTICAST:
7421 		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7422 		break;
7423 	case E1000_VF_SET_LPE:
7424 		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7425 		break;
7426 	case E1000_VF_SET_VLAN:
7427 		retval = -1;
7428 		if (vf_data->pf_vlan)
7429 			dev_warn(&pdev->dev,
7430 				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7431 				 vf);
7432 		else
7433 			retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7434 		break;
7435 	default:
7436 		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7437 		retval = -1;
7438 		break;
7439 	}
7440 
7441 	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7442 out:
7443 	/* notify the VF of the results of what it sent us */
7444 	if (retval)
7445 		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7446 	else
7447 		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7448 
7449 	/* unlocks mailbox */
7450 	igb_write_mbx(hw, msgbuf, 1, vf);
7451 	return;
7452 
7453 unlock:
7454 	igb_unlock_mbx(hw, vf);
7455 }
7456 
7457 static void igb_msg_task(struct igb_adapter *adapter)
7458 {
7459 	struct e1000_hw *hw = &adapter->hw;
7460 	u32 vf;
7461 
7462 	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7463 		/* process any reset requests */
7464 		if (!igb_check_for_rst(hw, vf))
7465 			igb_vf_reset_event(adapter, vf);
7466 
7467 		/* process any messages pending */
7468 		if (!igb_check_for_msg(hw, vf))
7469 			igb_rcv_msg_from_vf(adapter, vf);
7470 
7471 		/* process any acks */
7472 		if (!igb_check_for_ack(hw, vf))
7473 			igb_rcv_ack_from_vf(adapter, vf);
7474 	}
7475 }
7476 
7477 /**
7478  *  igb_set_uta - Set unicast filter table address
7479  *  @adapter: board private structure
7480  *  @set: boolean indicating if we are setting or clearing bits
7481  *
7482  *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used;
 *  however, due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow VLAN tag stripping when promiscuous mode is enabled.
7487  **/
7488 static void igb_set_uta(struct igb_adapter *adapter, bool set)
7489 {
7490 	struct e1000_hw *hw = &adapter->hw;
7491 	u32 uta = set ? ~0 : 0;
7492 	int i;
7493 
7494 	/* we only need to do this if VMDq is enabled */
7495 	if (!adapter->vfs_allocated_count)
7496 		return;
7497 
7498 	for (i = hw->mac.uta_reg_count; i--;)
7499 		array_wr32(E1000_UTA, i, uta);
7500 }
7501 
7502 /**
7503  *  igb_intr_msi - Interrupt Handler
7504  *  @irq: interrupt number
7505  *  @data: pointer to a network interface device structure
7506  **/
7507 static irqreturn_t igb_intr_msi(int irq, void *data)
7508 {
7509 	struct igb_adapter *adapter = data;
7510 	struct igb_q_vector *q_vector = adapter->q_vector[0];
7511 	struct e1000_hw *hw = &adapter->hw;
7512 	/* read ICR disables interrupts using IAM */
7513 	u32 icr = rd32(E1000_ICR);
7514 
7515 	igb_write_itr(q_vector);
7516 
7517 	if (icr & E1000_ICR_DRSTA)
7518 		schedule_work(&adapter->reset_task);
7519 
7520 	if (icr & E1000_ICR_DOUTSYNC) {
7521 		/* HW is reporting DMA is out of sync */
7522 		adapter->stats.doosync++;
7523 	}
7524 
7525 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7526 		hw->mac.get_link_status = 1;
7527 		if (!test_bit(__IGB_DOWN, &adapter->state))
7528 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
7529 	}
7530 
7531 	if (icr & E1000_ICR_TS)
7532 		igb_tsync_interrupt(adapter);
7533 
7534 	napi_schedule(&q_vector->napi);
7535 
7536 	return IRQ_HANDLED;
7537 }
7538 
7539 /**
7540  *  igb_intr - Legacy Interrupt Handler
7541  *  @irq: interrupt number
7542  *  @data: pointer to a network interface device structure
7543  **/
7544 static irqreturn_t igb_intr(int irq, void *data)
7545 {
7546 	struct igb_adapter *adapter = data;
7547 	struct igb_q_vector *q_vector = adapter->q_vector[0];
7548 	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask: upon reading ICR, interrupts are masked, so
	 * there is no need for the IMC write.
	 */
7552 	u32 icr = rd32(E1000_ICR);
7553 
7554 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
7555 	 * not set, then the adapter didn't send an interrupt
7556 	 */
7557 	if (!(icr & E1000_ICR_INT_ASSERTED))
7558 		return IRQ_NONE;
7559 
7560 	igb_write_itr(q_vector);
7561 
7562 	if (icr & E1000_ICR_DRSTA)
7563 		schedule_work(&adapter->reset_task);
7564 
7565 	if (icr & E1000_ICR_DOUTSYNC) {
7566 		/* HW is reporting DMA is out of sync */
7567 		adapter->stats.doosync++;
7568 	}
7569 
7570 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7571 		hw->mac.get_link_status = 1;
7572 		/* guard against interrupt when we're going down */
7573 		if (!test_bit(__IGB_DOWN, &adapter->state))
7574 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
7575 	}
7576 
7577 	if (icr & E1000_ICR_TS)
7578 		igb_tsync_interrupt(adapter);
7579 
7580 	napi_schedule(&q_vector->napi);
7581 
7582 	return IRQ_HANDLED;
7583 }
7584 
7585 static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
7586 {
7587 	struct igb_adapter *adapter = q_vector->adapter;
7588 	struct e1000_hw *hw = &adapter->hw;
7589 
7590 	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7591 	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7592 		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7593 			igb_set_itr(q_vector);
7594 		else
7595 			igb_update_ring_itr(q_vector);
7596 	}
7597 
7598 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
7599 		if (adapter->flags & IGB_FLAG_HAS_MSIX)
7600 			wr32(E1000_EIMS, q_vector->eims_value);
7601 		else
7602 			igb_irq_enable(adapter);
7603 	}
7604 }
7605 
7606 /**
7607  *  igb_poll - NAPI Rx polling callback
7608  *  @napi: napi polling structure
7609  *  @budget: count of how many packets we should handle
7610  **/
7611 static int igb_poll(struct napi_struct *napi, int budget)
7612 {
7613 	struct igb_q_vector *q_vector = container_of(napi,
7614 						     struct igb_q_vector,
7615 						     napi);
7616 	bool clean_complete = true;
7617 	int work_done = 0;
7618 
7619 #ifdef CONFIG_IGB_DCA
7620 	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
7621 		igb_update_dca(q_vector);
7622 #endif
7623 	if (q_vector->tx.ring)
7624 		clean_complete = igb_clean_tx_irq(q_vector, budget);
7625 
7626 	if (q_vector->rx.ring) {
7627 		int cleaned = igb_clean_rx_irq(q_vector, budget);
7628 
7629 		work_done += cleaned;
7630 		if (cleaned >= budget)
7631 			clean_complete = false;
7632 	}
7633 
7634 	/* If all work not completed, return budget and keep polling */
7635 	if (!clean_complete)
7636 		return budget;
7637 
7638 	/* If not enough Rx work done, exit the polling mode */
7639 	napi_complete_done(napi, work_done);
7640 	igb_ring_irq_enable(q_vector);
7641 
7642 	return 0;
7643 }
7644 
7645 /**
7646  *  igb_clean_tx_irq - Reclaim resources after transmit completes
7647  *  @q_vector: pointer to q_vector containing needed info
7648  *  @napi_budget: Used to determine if we are in netpoll
7649  *
7650  *  returns true if ring is completely cleaned
7651  **/
7652 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
7653 {
7654 	struct igb_adapter *adapter = q_vector->adapter;
7655 	struct igb_ring *tx_ring = q_vector->tx.ring;
7656 	struct igb_tx_buffer *tx_buffer;
7657 	union e1000_adv_tx_desc *tx_desc;
7658 	unsigned int total_bytes = 0, total_packets = 0;
7659 	unsigned int budget = q_vector->tx.work_limit;
7660 	unsigned int i = tx_ring->next_to_clean;
7661 
7662 	if (test_bit(__IGB_DOWN, &adapter->state))
7663 		return true;
7664 
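	/* The ring index is tracked biased by -count so that the wrap
	 * check in the loop below is a simple test against zero instead
	 * of a compare with tx_ring->count.
	 */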
7665 	tx_buffer = &tx_ring->tx_buffer_info[i];
7666 	tx_desc = IGB_TX_DESC(tx_ring, i);
7667 	i -= tx_ring->count;
7668 
7669 	do {
7670 		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
7671 
7672 		/* if next_to_watch is not set then there is no work pending */
7673 		if (!eop_desc)
7674 			break;
7675 
7676 		/* prevent any other reads prior to eop_desc */
7677 		smp_rmb();
7678 
7679 		/* if DD is not set pending work has not been completed */
7680 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
7681 			break;
7682 
7683 		/* clear next_to_watch to prevent false hangs */
7684 		tx_buffer->next_to_watch = NULL;
7685 
7686 		/* update the statistics for this packet */
7687 		total_bytes += tx_buffer->bytecount;
7688 		total_packets += tx_buffer->gso_segs;
7689 
7690 		/* free the skb */
7691 		napi_consume_skb(tx_buffer->skb, napi_budget);
7692 
7693 		/* unmap skb header data */
7694 		dma_unmap_single(tx_ring->dev,
7695 				 dma_unmap_addr(tx_buffer, dma),
7696 				 dma_unmap_len(tx_buffer, len),
7697 				 DMA_TO_DEVICE);
7698 
7699 		/* clear tx_buffer data */
7700 		dma_unmap_len_set(tx_buffer, len, 0);
7701 
7702 		/* clear last DMA location and unmap remaining buffers */
7703 		while (tx_desc != eop_desc) {
7704 			tx_buffer++;
7705 			tx_desc++;
7706 			i++;
7707 			if (unlikely(!i)) {
7708 				i -= tx_ring->count;
7709 				tx_buffer = tx_ring->tx_buffer_info;
7710 				tx_desc = IGB_TX_DESC(tx_ring, 0);
7711 			}
7712 
7713 			/* unmap any remaining paged data */
7714 			if (dma_unmap_len(tx_buffer, len)) {
7715 				dma_unmap_page(tx_ring->dev,
7716 					       dma_unmap_addr(tx_buffer, dma),
7717 					       dma_unmap_len(tx_buffer, len),
7718 					       DMA_TO_DEVICE);
7719 				dma_unmap_len_set(tx_buffer, len, 0);
7720 			}
7721 		}
7722 
7723 		/* move us one more past the eop_desc for start of next pkt */
7724 		tx_buffer++;
7725 		tx_desc++;
7726 		i++;
7727 		if (unlikely(!i)) {
7728 			i -= tx_ring->count;
7729 			tx_buffer = tx_ring->tx_buffer_info;
7730 			tx_desc = IGB_TX_DESC(tx_ring, 0);
7731 		}
7732 
7733 		/* issue prefetch for next Tx descriptor */
7734 		prefetch(tx_desc);
7735 
7736 		/* update budget accounting */
7737 		budget--;
7738 	} while (likely(budget));
7739 
7740 	netdev_tx_completed_queue(txring_txq(tx_ring),
7741 				  total_packets, total_bytes);
7742 	i += tx_ring->count;
7743 	tx_ring->next_to_clean = i;
7744 	u64_stats_update_begin(&tx_ring->tx_syncp);
7745 	tx_ring->tx_stats.bytes += total_bytes;
7746 	tx_ring->tx_stats.packets += total_packets;
7747 	u64_stats_update_end(&tx_ring->tx_syncp);
7748 	q_vector->tx.total_bytes += total_bytes;
7749 	q_vector->tx.total_packets += total_packets;
7750 
7751 	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
7752 		struct e1000_hw *hw = &adapter->hw;
7753 
		/* Detect a transmit hang in hardware; this serializes the
		 * check with the clearing of time_stamp and the movement of i.
		 */
7757 		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
7758 		if (tx_buffer->next_to_watch &&
7759 		    time_after(jiffies, tx_buffer->time_stamp +
7760 			       (adapter->tx_timeout_factor * HZ)) &&
7761 		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
7762 
7763 			/* detected Tx unit hang */
7764 			dev_err(tx_ring->dev,
7765 				"Detected Tx Unit Hang\n"
7766 				"  Tx Queue             <%d>\n"
7767 				"  TDH                  <%x>\n"
7768 				"  TDT                  <%x>\n"
7769 				"  next_to_use          <%x>\n"
7770 				"  next_to_clean        <%x>\n"
7771 				"buffer_info[next_to_clean]\n"
7772 				"  time_stamp           <%lx>\n"
7773 				"  next_to_watch        <%p>\n"
7774 				"  jiffies              <%lx>\n"
7775 				"  desc.status          <%x>\n",
7776 				tx_ring->queue_index,
7777 				rd32(E1000_TDH(tx_ring->reg_idx)),
7778 				readl(tx_ring->tail),
7779 				tx_ring->next_to_use,
7780 				tx_ring->next_to_clean,
7781 				tx_buffer->time_stamp,
7782 				tx_buffer->next_to_watch,
7783 				jiffies,
7784 				tx_buffer->next_to_watch->wb.status);
7785 			netif_stop_subqueue(tx_ring->netdev,
7786 					    tx_ring->queue_index);
7787 
7788 			/* we are about to reset, no point in enabling stuff */
7789 			return true;
7790 		}
7791 	}
7792 
7793 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
7794 	if (unlikely(total_packets &&
7795 	    netif_carrier_ok(tx_ring->netdev) &&
7796 	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
7797 		/* Make sure that anybody stopping the queue after this
7798 		 * sees the new next_to_clean.
7799 		 */
7800 		smp_mb();
7801 		if (__netif_subqueue_stopped(tx_ring->netdev,
7802 					     tx_ring->queue_index) &&
7803 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
7804 			netif_wake_subqueue(tx_ring->netdev,
7805 					    tx_ring->queue_index);
7806 
7807 			u64_stats_update_begin(&tx_ring->tx_syncp);
7808 			tx_ring->tx_stats.restart_queue++;
7809 			u64_stats_update_end(&tx_ring->tx_syncp);
7810 		}
7811 	}
7812 
7813 	return !!budget;
7814 }
7815 
7816 /**
7817  *  igb_reuse_rx_page - page flip buffer and store it back on the ring
7818  *  @rx_ring: rx descriptor ring to store buffers on
7819  *  @old_buff: donor buffer to have page reused
7820  *
7821  *  Synchronizes page for reuse by the adapter
7822  **/
7823 static void igb_reuse_rx_page(struct igb_ring *rx_ring,
7824 			      struct igb_rx_buffer *old_buff)
7825 {
7826 	struct igb_rx_buffer *new_buff;
7827 	u16 nta = rx_ring->next_to_alloc;
7828 
7829 	new_buff = &rx_ring->rx_buffer_info[nta];
7830 
7831 	/* update, and store next to alloc */
7832 	nta++;
7833 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
7834 
7835 	/* Transfer page from old buffer to new buffer.
7836 	 * Move each member individually to avoid possible store
7837 	 * forwarding stalls.
7838 	 */
7839 	new_buff->dma		= old_buff->dma;
7840 	new_buff->page		= old_buff->page;
7841 	new_buff->page_offset	= old_buff->page_offset;
7842 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
7843 }
7844 
7845 static inline bool igb_page_is_reserved(struct page *page)
7846 {
7847 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
7848 }
7849 
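/* Decide whether a half-page Rx buffer can be flipped back onto the
 * ring. pagecnt_bias tracks the references the driver has handed out;
 * the page is reusable only if the driver is its sole owner (small
 * pages) or there is still room to advance the offset (large pages).
 */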
7850 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
7851 {
7852 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
7853 	struct page *page = rx_buffer->page;
7854 
7855 	/* avoid re-using remote pages */
7856 	if (unlikely(igb_page_is_reserved(page)))
7857 		return false;
7858 
7859 #if (PAGE_SIZE < 8192)
7860 	/* if we are only owner of page we can reuse it */
7861 	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
7862 		return false;
7863 #else
7864 #define IGB_LAST_OFFSET \
7865 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
7866 
7867 	if (rx_buffer->page_offset > IGB_LAST_OFFSET)
7868 		return false;
7869 #endif
7870 
7871 	/* If we have drained the page fragment pool we need to update
7872 	 * the pagecnt_bias and page count so that we fully restock the
7873 	 * number of references the driver holds.
7874 	 */
7875 	if (unlikely(!pagecnt_bias)) {
7876 		page_ref_add(page, USHRT_MAX);
7877 		rx_buffer->pagecnt_bias = USHRT_MAX;
7878 	}
7879 
7880 	return true;
7881 }
7882 
7883 /**
7884  *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
7885  *  @rx_ring: rx descriptor ring to transact packets on
7886  *  @rx_buffer: buffer containing page to add
7887  *  @skb: sk_buff to place the data into
7888  *  @size: size of buffer to be added
7889  *
7890  *  This function will add the data contained in rx_buffer->page to the skb.
7891  **/
7892 static void igb_add_rx_frag(struct igb_ring *rx_ring,
7893 			    struct igb_rx_buffer *rx_buffer,
7894 			    struct sk_buff *skb,
7895 			    unsigned int size)
7896 {
7897 #if (PAGE_SIZE < 8192)
7898 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7899 #else
7900 	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
7901 				SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
7902 				SKB_DATA_ALIGN(size);
7903 #endif
7904 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
7905 			rx_buffer->page_offset, size, truesize);
7906 #if (PAGE_SIZE < 8192)
7907 	rx_buffer->page_offset ^= truesize;
7908 #else
7909 	rx_buffer->page_offset += truesize;
7910 #endif
7911 }
7912 
7913 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
7914 					 struct igb_rx_buffer *rx_buffer,
7915 					 union e1000_adv_rx_desc *rx_desc,
7916 					 unsigned int size)
7917 {
7918 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
7919 #if (PAGE_SIZE < 8192)
7920 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7921 #else
7922 	unsigned int truesize = SKB_DATA_ALIGN(size);
7923 #endif
7924 	unsigned int headlen;
7925 	struct sk_buff *skb;
7926 
7927 	/* prefetch first cache line of first page */
7928 	prefetch(va);
7929 #if L1_CACHE_BYTES < 128
7930 	prefetch(va + L1_CACHE_BYTES);
7931 #endif
7932 
7933 	/* allocate a skb to store the frags */
7934 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
7935 	if (unlikely(!skb))
7936 		return NULL;
7937 
7938 	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
7939 		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
7940 		va += IGB_TS_HDR_LEN;
7941 		size -= IGB_TS_HDR_LEN;
7942 	}
7943 
7944 	/* Determine available headroom for copy */
7945 	headlen = size;
7946 	if (headlen > IGB_RX_HDR_LEN)
7947 		headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
7948 
7949 	/* align pull length to size of long to optimize memcpy performance */
7950 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
7951 
7952 	/* update all of the pointers */
7953 	size -= headlen;
7954 	if (size) {
7955 		skb_add_rx_frag(skb, 0, rx_buffer->page,
7956 				(va + headlen) - page_address(rx_buffer->page),
7957 				size, truesize);
7958 #if (PAGE_SIZE < 8192)
7959 		rx_buffer->page_offset ^= truesize;
7960 #else
7961 		rx_buffer->page_offset += truesize;
7962 #endif
7963 	} else {
7964 		rx_buffer->pagecnt_bias++;
7965 	}
7966 
7967 	return skb;
7968 }
7969 
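/* Build an skb directly around the existing page buffer instead of
 * copying headers out, relying on the IGB_SKB_PAD headroom and the
 * tailroom reserved for the shared info when the ring uses build_skb.
 */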
7970 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
7971 				     struct igb_rx_buffer *rx_buffer,
7972 				     union e1000_adv_rx_desc *rx_desc,
7973 				     unsigned int size)
7974 {
7975 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
7976 #if (PAGE_SIZE < 8192)
7977 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7978 #else
7979 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
7980 				SKB_DATA_ALIGN(IGB_SKB_PAD + size);
7981 #endif
7982 	struct sk_buff *skb;
7983 
7984 	/* prefetch first cache line of first page */
7985 	prefetch(va);
7986 #if L1_CACHE_BYTES < 128
7987 	prefetch(va + L1_CACHE_BYTES);
7988 #endif
7989 
7990 	/* build an skb around the page buffer */
7991 	skb = build_skb(va - IGB_SKB_PAD, truesize);
7992 	if (unlikely(!skb))
7993 		return NULL;
7994 
7995 	/* update pointers within the skb to store the data */
7996 	skb_reserve(skb, IGB_SKB_PAD);
7997 	__skb_put(skb, size);
7998 
7999 	/* pull timestamp out of packet data */
8000 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8001 		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
8002 		__skb_pull(skb, IGB_TS_HDR_LEN);
8003 	}
8004 
8005 	/* update buffer offset */
8006 #if (PAGE_SIZE < 8192)
8007 	rx_buffer->page_offset ^= truesize;
8008 #else
8009 	rx_buffer->page_offset += truesize;
8010 #endif
8011 
8012 	return skb;
8013 }
8014 
8015 static inline void igb_rx_checksum(struct igb_ring *ring,
8016 				   union e1000_adv_rx_desc *rx_desc,
8017 				   struct sk_buff *skb)
8018 {
8019 	skb_checksum_none_assert(skb);
8020 
8021 	/* Ignore Checksum bit is set */
8022 	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8023 		return;
8024 
8025 	/* Rx checksum disabled via ethtool */
8026 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
8027 		return;
8028 
8029 	/* TCP/UDP checksum error bit is set */
8030 	if (igb_test_staterr(rx_desc,
8031 			     E1000_RXDEXT_STATERR_TCPE |
8032 			     E1000_RXDEXT_STATERR_IPE)) {
		/* work around an erratum with SCTP packets where the TCPE
		 * (aka L4E) bit is set incorrectly on 64 byte (60 byte w/o
		 * CRC) packets; let the stack check the crc32c instead
		 */
8037 		if (!((skb->len == 60) &&
8038 		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8039 			u64_stats_update_begin(&ring->rx_syncp);
8040 			ring->rx_stats.csum_err++;
8041 			u64_stats_update_end(&ring->rx_syncp);
8042 		}
8043 		/* let the stack verify checksum errors */
8044 		return;
8045 	}
8046 	/* It must be a TCP or UDP packet with a valid checksum */
8047 	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8048 				      E1000_RXD_STAT_UDPCS))
8049 		skb->ip_summed = CHECKSUM_UNNECESSARY;
8050 
8051 	dev_dbg(ring->dev, "cksum success: bits %08X\n",
8052 		le32_to_cpu(rx_desc->wb.upper.status_error));
8053 }
8054 
8055 static inline void igb_rx_hash(struct igb_ring *ring,
8056 			       union e1000_adv_rx_desc *rx_desc,
8057 			       struct sk_buff *skb)
8058 {
8059 	if (ring->netdev->features & NETIF_F_RXHASH)
8060 		skb_set_hash(skb,
8061 			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8062 			     PKT_HASH_TYPE_L3);
8063 }
8064 
8065 /**
 *  igb_is_non_eop - process handling of non-EOP buffers
 *  @rx_ring: Rx ring being processed
 *  @rx_desc: Rx descriptor for current buffer
 *
 *  This function updates next_to_clean and prefetches the next
 *  descriptor.  If the buffer is an EOP buffer this function exits
 *  returning false, otherwise it returns true indicating that this is
 *  in fact a non-EOP buffer whose data continues in the next buffer.
8075  **/
8076 static bool igb_is_non_eop(struct igb_ring *rx_ring,
8077 			   union e1000_adv_rx_desc *rx_desc)
8078 {
8079 	u32 ntc = rx_ring->next_to_clean + 1;
8080 
8081 	/* fetch, update, and store next to clean */
8082 	ntc = (ntc < rx_ring->count) ? ntc : 0;
8083 	rx_ring->next_to_clean = ntc;
8084 
8085 	prefetch(IGB_RX_DESC(rx_ring, ntc));
8086 
8087 	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8088 		return false;
8089 
8090 	return true;
8091 }
8092 
8093 /**
8094  *  igb_cleanup_headers - Correct corrupted or empty headers
8095  *  @rx_ring: rx descriptor ring packet is being transacted on
8096  *  @rx_desc: pointer to the EOP Rx descriptor
8097  *  @skb: pointer to current skb being fixed
8098  *
8099  *  Address the case where we are pulling data in on pages only
8100  *  and as such no data is present in the skb header.
8101  *
8102  *  In addition if skb is not at least 60 bytes we need to pad it so that
8103  *  it is large enough to qualify as a valid Ethernet frame.
8104  *
8105  *  Returns true if an error was encountered and skb was freed.
8106  **/
8107 static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8108 				union e1000_adv_rx_desc *rx_desc,
8109 				struct sk_buff *skb)
8110 {
8111 	if (unlikely((igb_test_staterr(rx_desc,
8112 				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8113 		struct net_device *netdev = rx_ring->netdev;
8114 		if (!(netdev->features & NETIF_F_RXALL)) {
8115 			dev_kfree_skb_any(skb);
8116 			return true;
8117 		}
8118 	}
8119 
8120 	/* if eth_skb_pad returns an error the skb was freed */
8121 	if (eth_skb_pad(skb))
8122 		return true;
8123 
8124 	return false;
8125 }
8126 
8127 /**
8128  *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
8129  *  @rx_ring: rx descriptor ring packet is being transacted on
8130  *  @rx_desc: pointer to the EOP Rx descriptor
8131  *  @skb: pointer to current skb being populated
8132  *
8133  *  This function checks the ring, descriptor, and packet information in
8134  *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
8135  *  other fields within the skb.
8136  **/
8137 static void igb_process_skb_fields(struct igb_ring *rx_ring,
8138 				   union e1000_adv_rx_desc *rx_desc,
8139 				   struct sk_buff *skb)
8140 {
8141 	struct net_device *dev = rx_ring->netdev;
8142 
8143 	igb_rx_hash(rx_ring, rx_desc, skb);
8144 
8145 	igb_rx_checksum(rx_ring, rx_desc, skb);
8146 
8147 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8148 	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8149 		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8150 
8151 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8152 	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8153 		u16 vid;
8154 
8155 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8156 		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8157 			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
8158 		else
8159 			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8160 
8161 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8162 	}
8163 
8164 	skb_record_rx_queue(skb, rx_ring->queue_index);
8165 
8166 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8167 }
8168 
8169 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8170 					       const unsigned int size)
8171 {
8172 	struct igb_rx_buffer *rx_buffer;
8173 
8174 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8175 	prefetchw(rx_buffer->page);
8176 
8177 	/* we are reusing so sync this buffer for CPU use */
8178 	dma_sync_single_range_for_cpu(rx_ring->dev,
8179 				      rx_buffer->dma,
8180 				      rx_buffer->page_offset,
8181 				      size,
8182 				      DMA_FROM_DEVICE);
8183 
8184 	rx_buffer->pagecnt_bias--;
8185 
8186 	return rx_buffer;
8187 }
8188 
8189 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8190 			      struct igb_rx_buffer *rx_buffer)
8191 {
8192 	if (igb_can_reuse_rx_page(rx_buffer)) {
8193 		/* hand second half of page back to the ring */
8194 		igb_reuse_rx_page(rx_ring, rx_buffer);
8195 	} else {
8196 		/* We are not reusing the buffer so unmap it and free
8197 		 * any references we are holding to it
8198 		 */
8199 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8200 				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8201 				     IGB_RX_DMA_ATTR);
8202 		__page_frag_cache_drain(rx_buffer->page,
8203 					rx_buffer->pagecnt_bias);
8204 	}
8205 
8206 	/* clear contents of rx_buffer */
8207 	rx_buffer->page = NULL;
8208 }
8209 
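/* Main Rx polling loop: pull completed descriptors, assemble (possibly
 * multi-buffer) frames into skbs, recycle or unmap the page buffers,
 * and hand finished packets to GRO. Returns the number of packets
 * cleaned, capped at the NAPI budget.
 */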
8210 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8211 {
8212 	struct igb_ring *rx_ring = q_vector->rx.ring;
8213 	struct sk_buff *skb = rx_ring->skb;
8214 	unsigned int total_bytes = 0, total_packets = 0;
8215 	u16 cleaned_count = igb_desc_unused(rx_ring);
8216 
8217 	while (likely(total_packets < budget)) {
8218 		union e1000_adv_rx_desc *rx_desc;
8219 		struct igb_rx_buffer *rx_buffer;
8220 		unsigned int size;
8221 
		/* return some buffers to hardware; one at a time is too slow */
8223 		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8224 			igb_alloc_rx_buffers(rx_ring, cleaned_count);
8225 			cleaned_count = 0;
8226 		}
8227 
8228 		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8229 		size = le16_to_cpu(rx_desc->wb.upper.length);
8230 		if (!size)
8231 			break;
8232 
8233 		/* This memory barrier is needed to keep us from reading
8234 		 * any other fields out of the rx_desc until we know the
8235 		 * descriptor has been written back
8236 		 */
8237 		dma_rmb();
8238 
8239 		rx_buffer = igb_get_rx_buffer(rx_ring, size);
8240 
8241 		/* retrieve a buffer from the ring */
8242 		if (skb)
8243 			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8244 		else if (ring_uses_build_skb(rx_ring))
8245 			skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
8246 		else
8247 			skb = igb_construct_skb(rx_ring, rx_buffer,
8248 						rx_desc, size);
8249 
8250 		/* exit if we failed to retrieve a buffer */
8251 		if (!skb) {
8252 			rx_ring->rx_stats.alloc_failed++;
8253 			rx_buffer->pagecnt_bias++;
8254 			break;
8255 		}
8256 
8257 		igb_put_rx_buffer(rx_ring, rx_buffer);
8258 		cleaned_count++;
8259 
8260 		/* fetch next buffer in frame if non-eop */
8261 		if (igb_is_non_eop(rx_ring, rx_desc))
8262 			continue;
8263 
8264 		/* verify the packet layout is correct */
8265 		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8266 			skb = NULL;
8267 			continue;
8268 		}
8269 
8270 		/* probably a little skewed due to removing CRC */
8271 		total_bytes += skb->len;
8272 
8273 		/* populate checksum, timestamp, VLAN, and protocol */
8274 		igb_process_skb_fields(rx_ring, rx_desc, skb);
8275 
8276 		napi_gro_receive(&q_vector->napi, skb);
8277 
8278 		/* reset skb pointer */
8279 		skb = NULL;
8280 
8281 		/* update budget accounting */
8282 		total_packets++;
8283 	}
8284 
8285 	/* place incomplete frames back on ring for completion */
8286 	rx_ring->skb = skb;
8287 
8288 	u64_stats_update_begin(&rx_ring->rx_syncp);
8289 	rx_ring->rx_stats.packets += total_packets;
8290 	rx_ring->rx_stats.bytes += total_bytes;
8291 	u64_stats_update_end(&rx_ring->rx_syncp);
8292 	q_vector->rx.total_packets += total_packets;
8293 	q_vector->rx.total_bytes += total_bytes;
8294 
8295 	if (cleaned_count)
8296 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
8297 
8298 	return total_packets;
8299 }
8300 
8301 static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8302 {
8303 	return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8304 }
8305 
8306 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8307 				  struct igb_rx_buffer *bi)
8308 {
8309 	struct page *page = bi->page;
8310 	dma_addr_t dma;
8311 
8312 	/* since we are recycling buffers we should seldom need to alloc */
8313 	if (likely(page))
8314 		return true;
8315 
8316 	/* alloc new page for storage */
8317 	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8318 	if (unlikely(!page)) {
8319 		rx_ring->rx_stats.alloc_failed++;
8320 		return false;
8321 	}
8322 
8323 	/* map page for use */
8324 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8325 				 igb_rx_pg_size(rx_ring),
8326 				 DMA_FROM_DEVICE,
8327 				 IGB_RX_DMA_ATTR);
8328 
8329 	/* if mapping failed free memory back to system since
8330 	 * there isn't much point in holding memory we can't use
8331 	 */
8332 	if (dma_mapping_error(rx_ring->dev, dma)) {
8333 		__free_pages(page, igb_rx_pg_order(rx_ring));
8334 
8335 		rx_ring->rx_stats.alloc_failed++;
8336 		return false;
8337 	}
8338 
8339 	bi->dma = dma;
8340 	bi->page = page;
8341 	bi->page_offset = igb_rx_offset(rx_ring);
8342 	bi->pagecnt_bias = 1;
8343 
8344 	return true;
8345 }
8346 
8347 /**
 *  igb_alloc_rx_buffers - Replace used receive buffers
 *  @rx_ring: Rx descriptor ring to refill with new buffers
 *  @cleaned_count: number of buffers to replace
8350  **/
8351 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8352 {
8353 	union e1000_adv_rx_desc *rx_desc;
8354 	struct igb_rx_buffer *bi;
8355 	u16 i = rx_ring->next_to_use;
8356 	u16 bufsz;
8357 
8358 	/* nothing to do */
8359 	if (!cleaned_count)
8360 		return;
8361 
8362 	rx_desc = IGB_RX_DESC(rx_ring, i);
8363 	bi = &rx_ring->rx_buffer_info[i];
8364 	i -= rx_ring->count;
8365 
8366 	bufsz = igb_rx_bufsz(rx_ring);
8367 
8368 	do {
8369 		if (!igb_alloc_mapped_page(rx_ring, bi))
8370 			break;
8371 
8372 		/* sync the buffer for use by the device */
8373 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8374 						 bi->page_offset, bufsz,
8375 						 DMA_FROM_DEVICE);
8376 
8377 		/* Refresh the desc even if buffer_addrs didn't change
8378 		 * because each write-back erases this info.
8379 		 */
8380 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8381 
8382 		rx_desc++;
8383 		bi++;
8384 		i++;
8385 		if (unlikely(!i)) {
8386 			rx_desc = IGB_RX_DESC(rx_ring, 0);
8387 			bi = rx_ring->rx_buffer_info;
8388 			i -= rx_ring->count;
8389 		}
8390 
8391 		/* clear the length for the next_to_use descriptor */
8392 		rx_desc->wb.upper.length = 0;
8393 
8394 		cleaned_count--;
8395 	} while (cleaned_count);
8396 
8397 	i += rx_ring->count;
8398 
8399 	if (rx_ring->next_to_use != i) {
8400 		/* record the next descriptor to use */
8401 		rx_ring->next_to_use = i;
8402 
8403 		/* update next to alloc since we have filled the ring */
8404 		rx_ring->next_to_alloc = i;
8405 
8406 		/* Force memory writes to complete before letting h/w
8407 		 * know there are new descriptors to fetch.  (Only
8408 		 * applicable for weak-ordered memory model archs,
8409 		 * such as IA-64).
8410 		 */
8411 		wmb();
8412 		writel(i, rx_ring->tail);
8413 	}
8414 }
8415 
8416 /**
 * igb_mii_ioctl - handle MII ioctls on copper PHYs
 * @netdev: pointer to the net device
 * @ifr: interface request structure carrying the MII data
 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
8421  **/
8422 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8423 {
8424 	struct igb_adapter *adapter = netdev_priv(netdev);
8425 	struct mii_ioctl_data *data = if_mii(ifr);
8426 
8427 	if (adapter->hw.phy.media_type != e1000_media_type_copper)
8428 		return -EOPNOTSUPP;
8429 
8430 	switch (cmd) {
8431 	case SIOCGMIIPHY:
8432 		data->phy_id = adapter->hw.phy.addr;
8433 		break;
8434 	case SIOCGMIIREG:
8435 		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8436 				     &data->val_out))
8437 			return -EIO;
8438 		break;
8439 	case SIOCSMIIREG:
8440 	default:
8441 		return -EOPNOTSUPP;
8442 	}
8443 	return 0;
8444 }
8445 
8446 /**
 * igb_ioctl - dispatch device-specific ioctls
 * @netdev: pointer to the net device
 * @ifr: interface request structure
 * @cmd: ioctl command (MII or hardware timestamping requests)
8451  **/
8452 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8453 {
8454 	switch (cmd) {
8455 	case SIOCGMIIPHY:
8456 	case SIOCGMIIREG:
8457 	case SIOCSMIIREG:
8458 		return igb_mii_ioctl(netdev, ifr, cmd);
8459 	case SIOCGHWTSTAMP:
8460 		return igb_ptp_get_ts_config(netdev, ifr);
8461 	case SIOCSHWTSTAMP:
8462 		return igb_ptp_set_ts_config(netdev, ifr);
8463 	default:
8464 		return -EOPNOTSUPP;
8465 	}
8466 }
8467 
8468 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8469 {
8470 	struct igb_adapter *adapter = hw->back;
8471 
8472 	pci_read_config_word(adapter->pdev, reg, value);
8473 }
8474 
8475 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8476 {
8477 	struct igb_adapter *adapter = hw->back;
8478 
8479 	pci_write_config_word(adapter->pdev, reg, *value);
8480 }
8481 
8482 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8483 {
8484 	struct igb_adapter *adapter = hw->back;
8485 
8486 	if (pcie_capability_read_word(adapter->pdev, reg, value))
8487 		return -E1000_ERR_CONFIG;
8488 
8489 	return 0;
8490 }
8491 
8492 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8493 {
8494 	struct igb_adapter *adapter = hw->back;
8495 
8496 	if (pcie_capability_write_word(adapter->pdev, reg, *value))
8497 		return -E1000_ERR_CONFIG;
8498 
8499 	return 0;
8500 }
8501 
8502 static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
8503 {
8504 	struct igb_adapter *adapter = netdev_priv(netdev);
8505 	struct e1000_hw *hw = &adapter->hw;
8506 	u32 ctrl, rctl;
8507 	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
8508 
8509 	if (enable) {
8510 		/* enable VLAN tag insert/strip */
8511 		ctrl = rd32(E1000_CTRL);
8512 		ctrl |= E1000_CTRL_VME;
8513 		wr32(E1000_CTRL, ctrl);
8514 
8515 		/* Disable CFI check */
8516 		rctl = rd32(E1000_RCTL);
8517 		rctl &= ~E1000_RCTL_CFIEN;
8518 		wr32(E1000_RCTL, rctl);
8519 	} else {
8520 		/* disable VLAN tag insert/strip */
8521 		ctrl = rd32(E1000_CTRL);
8522 		ctrl &= ~E1000_CTRL_VME;
8523 		wr32(E1000_CTRL, ctrl);
8524 	}
8525 
8526 	igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
8527 }
8528 
8529 static int igb_vlan_rx_add_vid(struct net_device *netdev,
8530 			       __be16 proto, u16 vid)
8531 {
8532 	struct igb_adapter *adapter = netdev_priv(netdev);
8533 	struct e1000_hw *hw = &adapter->hw;
8534 	int pf_id = adapter->vfs_allocated_count;
8535 
8536 	/* add the filter since PF can receive vlans w/o entry in vlvf */
8537 	if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8538 		igb_vfta_set(hw, vid, pf_id, true, !!vid);
8539 
8540 	set_bit(vid, adapter->active_vlans);
8541 
8542 	return 0;
8543 }
8544 
8545 static int igb_vlan_rx_kill_vid(struct net_device *netdev,
8546 				__be16 proto, u16 vid)
8547 {
8548 	struct igb_adapter *adapter = netdev_priv(netdev);
8549 	int pf_id = adapter->vfs_allocated_count;
8550 	struct e1000_hw *hw = &adapter->hw;
8551 
8552 	/* remove VID from filter table */
8553 	if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8554 		igb_vfta_set(hw, vid, pf_id, false, true);
8555 
8556 	clear_bit(vid, adapter->active_vlans);
8557 
8558 	return 0;
8559 }
8560 
8561 static void igb_restore_vlan(struct igb_adapter *adapter)
8562 {
8563 	u16 vid = 1;
8564 
8565 	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
8566 	igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
8567 
8568 	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
8569 		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
8570 }
8571 
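/* Translate a forced speed/duplex request into the MAC's forced mode
 * encoding. 1000 Mbps is only supported at full duplex and only via
 * autonegotiation, so autoneg is re-enabled for that case.
 */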
8572 int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
8573 {
8574 	struct pci_dev *pdev = adapter->pdev;
8575 	struct e1000_mac_info *mac = &adapter->hw.mac;
8576 
8577 	mac->autoneg = 0;
8578 
8579 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
8580 	 * for the switch() below to work
8581 	 */
8582 	if ((spd & 1) || (dplx & ~1))
8583 		goto err_inval;
8584 
	/* Fiber NICs only allow 1000 Mbps full duplex,
	 * plus 100 Mbps full duplex for 100BaseFX SFPs.
	 */
8588 	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
8589 		switch (spd + dplx) {
8590 		case SPEED_10 + DUPLEX_HALF:
8591 		case SPEED_10 + DUPLEX_FULL:
8592 		case SPEED_100 + DUPLEX_HALF:
8593 			goto err_inval;
8594 		default:
8595 			break;
8596 		}
8597 	}
8598 
8599 	switch (spd + dplx) {
8600 	case SPEED_10 + DUPLEX_HALF:
8601 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
8602 		break;
8603 	case SPEED_10 + DUPLEX_FULL:
8604 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
8605 		break;
8606 	case SPEED_100 + DUPLEX_HALF:
8607 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
8608 		break;
8609 	case SPEED_100 + DUPLEX_FULL:
8610 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
8611 		break;
8612 	case SPEED_1000 + DUPLEX_FULL:
8613 		mac->autoneg = 1;
8614 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
8615 		break;
8616 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
8617 	default:
8618 		goto err_inval;
8619 	}
8620 
8621 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
8622 	adapter->hw.phy.mdix = AUTO_ALL_MODES;
8623 
8624 	return 0;
8625 
8626 err_inval:
8627 	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
8628 	return -EINVAL;
8629 }
8630 
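/* Common suspend/shutdown path: detach and close the interface, program
 * the wake-up filter control (WUFC) when Wake-on-LAN is requested, and
 * report through *enable_wake whether PME should be armed on the way
 * down to D3.
 */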
8631 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8632 			  bool runtime)
8633 {
8634 	struct net_device *netdev = pci_get_drvdata(pdev);
8635 	struct igb_adapter *adapter = netdev_priv(netdev);
8636 	struct e1000_hw *hw = &adapter->hw;
8637 	u32 ctrl, rctl, status;
8638 	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8639 #ifdef CONFIG_PM
8640 	int retval = 0;
8641 #endif
8642 
8643 	rtnl_lock();
8644 	netif_device_detach(netdev);
8645 
8646 	if (netif_running(netdev))
8647 		__igb_close(netdev, true);
8648 
8649 	igb_ptp_suspend(adapter);
8650 
8651 	igb_clear_interrupt_scheme(adapter);
8652 	rtnl_unlock();
8653 
8654 #ifdef CONFIG_PM
8655 	retval = pci_save_state(pdev);
8656 	if (retval)
8657 		return retval;
8658 #endif
8659 
8660 	status = rd32(E1000_STATUS);
8661 	if (status & E1000_STATUS_LU)
8662 		wufc &= ~E1000_WUFC_LNKC;
8663 
8664 	if (wufc) {
8665 		igb_setup_rctl(adapter);
8666 		igb_set_rx_mode(netdev);
8667 
8668 		/* turn on all-multi mode if wake on multicast is enabled */
8669 		if (wufc & E1000_WUFC_MC) {
8670 			rctl = rd32(E1000_RCTL);
8671 			rctl |= E1000_RCTL_MPE;
8672 			wr32(E1000_RCTL, rctl);
8673 		}
8674 
8675 		ctrl = rd32(E1000_CTRL);
8676 		/* advertise wake from D3Cold */
8677 		#define E1000_CTRL_ADVD3WUC 0x00100000
8678 		/* phy power management enable */
8679 		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
8680 		ctrl |= E1000_CTRL_ADVD3WUC;
8681 		wr32(E1000_CTRL, ctrl);
8682 
8683 		/* Allow time for pending master requests to run */
8684 		igb_disable_pcie_master(hw);
8685 
8686 		wr32(E1000_WUC, E1000_WUC_PME_EN);
8687 		wr32(E1000_WUFC, wufc);
8688 	} else {
8689 		wr32(E1000_WUC, 0);
8690 		wr32(E1000_WUFC, 0);
8691 	}
8692 
8693 	*enable_wake = wufc || adapter->en_mng_pt;
8694 	if (!*enable_wake)
8695 		igb_power_down_link(adapter);
8696 	else
8697 		igb_power_up_link(adapter);
8698 
8699 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
8700 	 * would have already happened in close and is redundant.
8701 	 */
8702 	igb_release_hw_control(adapter);
8703 
8704 	pci_disable_device(pdev);
8705 
8706 	return 0;
8707 }
8708 
8709 static void igb_deliver_wake_packet(struct net_device *netdev)
8710 {
8711 	struct igb_adapter *adapter = netdev_priv(netdev);
8712 	struct e1000_hw *hw = &adapter->hw;
8713 	struct sk_buff *skb;
8714 	u32 wupl;
8715 
8716 	wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
8717 
8718 	/* WUPM stores only the first 128 bytes of the wake packet.
8719 	 * Read the packet only if we have the whole thing.
8720 	 */
8721 	if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
8722 		return;
8723 
8724 	skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
8725 	if (!skb)
8726 		return;
8727 
8728 	skb_put(skb, wupl);
8729 
8730 	/* Ensure reads are 32-bit aligned */
8731 	wupl = roundup(wupl, 4);
8732 
8733 	memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
8734 
8735 	skb->protocol = eth_type_trans(skb, netdev);
8736 	netif_rx(skb);
8737 }
8738 
8739 static int __maybe_unused igb_suspend(struct device *dev)
8740 {
8741 	int retval;
8742 	bool wake;
8743 	struct pci_dev *pdev = to_pci_dev(dev);
8744 
8745 	retval = __igb_shutdown(pdev, &wake, 0);
8746 	if (retval)
8747 		return retval;
8748 
8749 	if (wake) {
8750 		pci_prepare_to_sleep(pdev);
8751 	} else {
8752 		pci_wake_from_d3(pdev, false);
8753 		pci_set_power_state(pdev, PCI_D3hot);
8754 	}
8755 
8756 	return 0;
8757 }
8758 
8759 static int __maybe_unused igb_resume(struct device *dev)
8760 {
8761 	struct pci_dev *pdev = to_pci_dev(dev);
8762 	struct net_device *netdev = pci_get_drvdata(pdev);
8763 	struct igb_adapter *adapter = netdev_priv(netdev);
8764 	struct e1000_hw *hw = &adapter->hw;
8765 	u32 err, val;
8766 
8767 	pci_set_power_state(pdev, PCI_D0);
8768 	pci_restore_state(pdev);
8769 	pci_save_state(pdev);
8770 
8771 	if (!pci_device_is_present(pdev))
8772 		return -ENODEV;
8773 	err = pci_enable_device_mem(pdev);
8774 	if (err) {
8775 		dev_err(&pdev->dev,
8776 			"igb: Cannot enable PCI device from suspend\n");
8777 		return err;
8778 	}
8779 	pci_set_master(pdev);
8780 
8781 	pci_enable_wake(pdev, PCI_D3hot, 0);
8782 	pci_enable_wake(pdev, PCI_D3cold, 0);
8783 
8784 	if (igb_init_interrupt_scheme(adapter, true)) {
8785 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8786 		return -ENOMEM;
8787 	}
8788 
8789 	igb_reset(adapter);
8790 
8791 	/* let the f/w know that the h/w is now under the control of the
8792 	 * driver.
8793 	 */
8794 	igb_get_hw_control(adapter);
8795 
8796 	val = rd32(E1000_WUS);
8797 	if (val & WAKE_PKT_WUS)
8798 		igb_deliver_wake_packet(netdev);
8799 
8800 	wr32(E1000_WUS, ~0);
8801 
8802 	rtnl_lock();
8803 	if (!err && netif_running(netdev))
8804 		err = __igb_open(netdev, true);
8805 
8806 	if (!err)
8807 		netif_device_attach(netdev);
8808 	rtnl_unlock();
8809 
8810 	return err;
8811 }
8812 
8813 static int __maybe_unused igb_runtime_idle(struct device *dev)
8814 {
8815 	struct pci_dev *pdev = to_pci_dev(dev);
8816 	struct net_device *netdev = pci_get_drvdata(pdev);
8817 	struct igb_adapter *adapter = netdev_priv(netdev);
8818 
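	/* Never suspend directly from the idle callback: when the link
	 * is down, schedule a delayed suspend and report busy so the PM
	 * core leaves the device alone for now.
	 */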
8819 	if (!igb_has_link(adapter))
8820 		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
8821 
8822 	return -EBUSY;
8823 }
8824 
8825 static int __maybe_unused igb_runtime_suspend(struct device *dev)
8826 {
8827 	struct pci_dev *pdev = to_pci_dev(dev);
8828 	int retval;
8829 	bool wake;
8830 
8831 	retval = __igb_shutdown(pdev, &wake, 1);
8832 	if (retval)
8833 		return retval;
8834 
8835 	if (wake) {
8836 		pci_prepare_to_sleep(pdev);
8837 	} else {
8838 		pci_wake_from_d3(pdev, false);
8839 		pci_set_power_state(pdev, PCI_D3hot);
8840 	}
8841 
8842 	return 0;
8843 }
8844 
8845 static int __maybe_unused igb_runtime_resume(struct device *dev)
8846 {
8847 	return igb_resume(dev);
8848 }
8849 
8850 static void igb_shutdown(struct pci_dev *pdev)
8851 {
8852 	bool wake;
8853 
8854 	__igb_shutdown(pdev, &wake, 0);
8855 
8856 	if (system_state == SYSTEM_POWER_OFF) {
8857 		pci_wake_from_d3(pdev, wake);
8858 		pci_set_power_state(pdev, PCI_D3hot);
8859 	}
8860 }
8861 
8862 #ifdef CONFIG_PCI_IOV
8863 static int igb_sriov_reinit(struct pci_dev *dev)
8864 {
8865 	struct net_device *netdev = pci_get_drvdata(dev);
8866 	struct igb_adapter *adapter = netdev_priv(netdev);
8867 	struct pci_dev *pdev = adapter->pdev;
8868 
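	/* Changing the VF count redistributes queues, so tear down and
	 * rebuild the interrupt scheme under RTNL.
	 */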
8869 	rtnl_lock();
8870 
8871 	if (netif_running(netdev))
8872 		igb_close(netdev);
8873 	else
8874 		igb_reset(adapter);
8875 
8876 	igb_clear_interrupt_scheme(adapter);
8877 
8878 	igb_init_queue_configuration(adapter);
8879 
8880 	if (igb_init_interrupt_scheme(adapter, true)) {
8881 		rtnl_unlock();
8882 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8883 		return -ENOMEM;
8884 	}
8885 
8886 	if (netif_running(netdev))
8887 		igb_open(netdev);
8888 
8889 	rtnl_unlock();
8890 
8891 	return 0;
8892 }
8893 
8894 static int igb_pci_disable_sriov(struct pci_dev *dev)
8895 {
8896 	int err = igb_disable_sriov(dev);
8897 
8898 	if (!err)
8899 		err = igb_sriov_reinit(dev);
8900 
8901 	return err;
8902 }
8903 
8904 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
8905 {
8906 	int err = igb_enable_sriov(dev, num_vfs);
8907 
8908 	if (err)
8909 		goto out;
8910 
8911 	err = igb_sriov_reinit(dev);
8912 	if (!err)
8913 		return num_vfs;
8914 
8915 out:
8916 	return err;
8917 }
8918 
8919 #endif
8920 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
8921 {
8922 #ifdef CONFIG_PCI_IOV
8923 	if (num_vfs == 0)
8924 		return igb_pci_disable_sriov(dev);
8925 	else
8926 		return igb_pci_enable_sriov(dev, num_vfs);
8927 #endif
8928 	return 0;
8929 }
8930 
8931 #ifdef CONFIG_NET_POLL_CONTROLLER
8932 /* Polling 'interrupt' - used by things like netconsole to send skbs
8933  * without having to re-enable interrupts. It's not called while
8934  * the interrupt routine is executing.
8935  */
8936 static void igb_netpoll(struct net_device *netdev)
8937 {
8938 	struct igb_adapter *adapter = netdev_priv(netdev);
8939 	struct e1000_hw *hw = &adapter->hw;
8940 	struct igb_q_vector *q_vector;
8941 	int i;
8942 
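	/* Mask only this vector's interrupt under MSI-X; legacy and MSI
	 * modes share a single interrupt, so disable it entirely.
	 */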
8943 	for (i = 0; i < adapter->num_q_vectors; i++) {
8944 		q_vector = adapter->q_vector[i];
8945 		if (adapter->flags & IGB_FLAG_HAS_MSIX)
8946 			wr32(E1000_EIMC, q_vector->eims_value);
8947 		else
8948 			igb_irq_disable(adapter);
8949 		napi_schedule(&q_vector->napi);
8950 	}
8951 }
8952 #endif /* CONFIG_NET_POLL_CONTROLLER */
8953 
8954 /**
8955  *  igb_io_error_detected - called when PCI error is detected
8956  *  @pdev: Pointer to PCI device
8957  *  @state: The current pci connection state
8958  *
8959  *  This function is called after a PCI bus error affecting
8960  *  this device has been detected.
8961  **/
8962 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
8963 					      pci_channel_state_t state)
8964 {
8965 	struct net_device *netdev = pci_get_drvdata(pdev);
8966 	struct igb_adapter *adapter = netdev_priv(netdev);
8967 
8968 	netif_device_detach(netdev);
8969 
8970 	if (state == pci_channel_io_perm_failure)
8971 		return PCI_ERS_RESULT_DISCONNECT;
8972 
8973 	if (netif_running(netdev))
8974 		igb_down(adapter);
8975 	pci_disable_device(pdev);
8976 
	/* Request a slot reset. */
8978 	return PCI_ERS_RESULT_NEED_RESET;
8979 }
8980 
8981 /**
8982  *  igb_io_slot_reset - called after the pci bus has been reset.
8983  *  @pdev: Pointer to PCI device
8984  *
8985  *  Restart the card from scratch, as if from a cold-boot. Implementation
8986  *  resembles the first-half of the igb_resume routine.
8987  **/
8988 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
8989 {
8990 	struct net_device *netdev = pci_get_drvdata(pdev);
8991 	struct igb_adapter *adapter = netdev_priv(netdev);
8992 	struct e1000_hw *hw = &adapter->hw;
8993 	pci_ers_result_t result;
8994 	int err;
8995 
8996 	if (pci_enable_device_mem(pdev)) {
8997 		dev_err(&pdev->dev,
8998 			"Cannot re-enable PCI device after reset.\n");
8999 		result = PCI_ERS_RESULT_DISCONNECT;
9000 	} else {
9001 		pci_set_master(pdev);
9002 		pci_restore_state(pdev);
9003 		pci_save_state(pdev);
9004 
9005 		pci_enable_wake(pdev, PCI_D3hot, 0);
9006 		pci_enable_wake(pdev, PCI_D3cold, 0);
9007 
		/* In case of PCI error the adapter loses its HW address,
		 * so we should re-assign it here.
		 */
9011 		hw->hw_addr = adapter->io_addr;
9012 
9013 		igb_reset(adapter);
9014 		wr32(E1000_WUS, ~0);
9015 		result = PCI_ERS_RESULT_RECOVERED;
9016 	}
9017 
9018 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
9019 	if (err) {
9020 		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
9022 			err);
9023 		/* non-fatal, continue */
9024 	}
9025 
9026 	return result;
9027 }
9028 
9029 /**
9030  *  igb_io_resume - called when traffic can start flowing again.
9031  *  @pdev: Pointer to PCI device
9032  *
9033  *  This callback is called when the error recovery driver tells us that
 *  it's OK to resume normal operation. Implementation resembles the
9035  *  second-half of the igb_resume routine.
9036  */
9037 static void igb_io_resume(struct pci_dev *pdev)
9038 {
9039 	struct net_device *netdev = pci_get_drvdata(pdev);
9040 	struct igb_adapter *adapter = netdev_priv(netdev);
9041 
9042 	if (netif_running(netdev)) {
9043 		if (igb_up(adapter)) {
9044 			dev_err(&pdev->dev, "igb_up failed after reset\n");
9045 			return;
9046 		}
9047 	}
9048 
9049 	netif_device_attach(netdev);
9050 
9051 	/* let the f/w know that the h/w is now under the control of the
9052 	 * driver.
9053 	 */
9054 	igb_get_hw_control(adapter);
9055 }
9056 
9057 /**
9058  *  igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
9059  *  @adapter: Pointer to adapter structure
 *  @index: Index of the RAR entry which needs to be synced with MAC table
9061  **/
9062 static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9063 {
9064 	struct e1000_hw *hw = &adapter->hw;
9065 	u32 rar_low, rar_high;
9066 	u8 *addr = adapter->mac_table[index].addr;
9067 
	/* HW expects these to be in network order when they are plugged
	 * into the registers, which are little endian.  To guarantee that
	 * ordering we do an leXX_to_cpup here so the value is ready for
	 * the byteswap that occurs with writel.
	 */
9073 	rar_low = le32_to_cpup((__le32 *)(addr));
9074 	rar_high = le16_to_cpup((__le16 *)(addr + 4));
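	/* e.g. MAC 00:11:22:33:44:55 yields rar_low = 0x33221100 and
	 * rar_high = 0x00005544 before any control bits are set
	 */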
9075 
9076 	/* Indicate to hardware the Address is Valid. */
9077 	if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9078 		if (is_valid_ether_addr(addr))
9079 			rar_high |= E1000_RAH_AV;
9080 
9081 		if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9082 			rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9083 
9084 		switch (hw->mac.type) {
9085 		case e1000_82575:
9086 		case e1000_i210:
9087 			if (adapter->mac_table[index].state &
9088 			    IGB_MAC_STATE_QUEUE_STEERING)
9089 				rar_high |= E1000_RAH_QSEL_ENABLE;
9090 
9091 			rar_high |= E1000_RAH_POOL_1 *
9092 				    adapter->mac_table[index].queue;
9093 			break;
9094 		default:
9095 			rar_high |= E1000_RAH_POOL_1 <<
9096 				    adapter->mac_table[index].queue;
9097 			break;
9098 		}
9099 	}
9100 
9101 	wr32(E1000_RAL(index), rar_low);
9102 	wrfl();
9103 	wr32(E1000_RAH(index), rar_high);
9104 	wrfl();
9105 }
9106 
9107 static int igb_set_vf_mac(struct igb_adapter *adapter,
9108 			  int vf, unsigned char *mac_addr)
9109 {
9110 	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive address
	 * registers and move towards the first, so a collision should
	 * not be possible.
	 */
9114 	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9115 	unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9116 
9117 	ether_addr_copy(vf_mac_addr, mac_addr);
9118 	ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9119 	adapter->mac_table[rar_entry].queue = vf;
9120 	adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9121 	igb_rar_set_index(adapter, rar_entry);
9122 
9123 	return 0;
9124 }
9125 
9126 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9127 {
9128 	struct igb_adapter *adapter = netdev_priv(netdev);
9129 
9130 	if (vf >= adapter->vfs_allocated_count)
9131 		return -EINVAL;
9132 
9133 	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
9134 	 * flag and allows to overwrite the MAC via VF netdev.  This
9135 	 * is necessary to allow libvirt a way to restore the original
9136 	 * MAC after unbinding vfio-pci and reloading igbvf after shutting
9137 	 * down a VM.
9138 	 */
9139 	if (is_zero_ether_addr(mac)) {
9140 		adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9141 		dev_info(&adapter->pdev->dev,
9142 			 "remove administratively set MAC on VF %d\n",
9143 			 vf);
9144 	} else if (is_valid_ether_addr(mac)) {
9145 		adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9146 		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9147 			 mac, vf);
9148 		dev_info(&adapter->pdev->dev,
			 "Reload the VF driver to make this change effective.\n");
9150 		/* Generate additional warning if PF is down */
9151 		if (test_bit(__IGB_DOWN, &adapter->state)) {
9152 			dev_warn(&adapter->pdev->dev,
9153 				 "The VF MAC address has been set, but the PF device is not up.\n");
9154 			dev_warn(&adapter->pdev->dev,
9155 				 "Bring the PF device up before attempting to use the VF device.\n");
9156 		}
9157 	} else {
9158 		return -EINVAL;
9159 	}
9160 	return igb_set_vf_mac(adapter, vf, mac);
9161 }
9162 
9163 static int igb_link_mbps(int internal_link_speed)
9164 {
9165 	switch (internal_link_speed) {
9166 	case SPEED_100:
9167 		return 100;
9168 	case SPEED_1000:
9169 		return 1000;
9170 	default:
9171 		return 0;
9172 	}
9173 }
9174 
9175 static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9176 				  int link_speed)
9177 {
9178 	int rf_dec, rf_int;
9179 	u32 bcnrc_val;
9180 
9181 	if (tx_rate != 0) {
9182 		/* Calculate the rate factor values to set */
9183 		rf_int = link_speed / tx_rate;
9184 		rf_dec = (link_speed - (rf_int * tx_rate));
9185 		rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9186 			 tx_rate;
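		/* rf_int/rf_dec encode link_speed / tx_rate in fixed point,
		 * e.g. 1000 Mbps with a 300 Mbps cap gives rf_int = 3 and
		 * rf_dec = BIT(E1000_RTTBCNRC_RF_INT_SHIFT) / 3.
		 */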
9187 
9188 		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9189 		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9190 			      E1000_RTTBCNRC_RF_INT_MASK);
9191 		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9192 	} else {
9193 		bcnrc_val = 0;
9194 	}
9195 
9196 	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
9197 	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
9198 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
9199 	 */
9200 	wr32(E1000_RTTBCNRM, 0x14);
9201 	wr32(E1000_RTTBCNRC, bcnrc_val);
9202 }
9203 
9204 static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9205 {
9206 	int actual_link_speed, i;
9207 	bool reset_rate = false;
9208 
9209 	/* VF TX rate limit was not set or not supported */
9210 	if ((adapter->vf_rate_link_speed == 0) ||
9211 	    (adapter->hw.mac.type != e1000_82576))
9212 		return;
9213 
9214 	actual_link_speed = igb_link_mbps(adapter->link_speed);
9215 	if (actual_link_speed != adapter->vf_rate_link_speed) {
9216 		reset_rate = true;
9217 		adapter->vf_rate_link_speed = 0;
9218 		dev_info(&adapter->pdev->dev,
9219 			 "Link speed has been changed. VF Transmit rate is disabled\n");
9220 	}
9221 
9222 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
9223 		if (reset_rate)
9224 			adapter->vf_data[i].tx_rate = 0;
9225 
9226 		igb_set_vf_rate_limit(&adapter->hw, i,
9227 				      adapter->vf_data[i].tx_rate,
9228 				      actual_link_speed);
9229 	}
9230 }
9231 
9232 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9233 			     int min_tx_rate, int max_tx_rate)
9234 {
9235 	struct igb_adapter *adapter = netdev_priv(netdev);
9236 	struct e1000_hw *hw = &adapter->hw;
9237 	int actual_link_speed;
9238 
9239 	if (hw->mac.type != e1000_82576)
9240 		return -EOPNOTSUPP;
9241 
9242 	if (min_tx_rate)
9243 		return -EINVAL;
9244 
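	/* The 82576 rate limiter only enforces a maximum cap, and the
	 * cap must fit within the currently negotiated link speed.
	 */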
9245 	actual_link_speed = igb_link_mbps(adapter->link_speed);
9246 	if ((vf >= adapter->vfs_allocated_count) ||
9247 	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9248 	    (max_tx_rate < 0) ||
9249 	    (max_tx_rate > actual_link_speed))
9250 		return -EINVAL;
9251 
9252 	adapter->vf_rate_link_speed = actual_link_speed;
9253 	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9254 	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9255 
9256 	return 0;
9257 }
9258 
9259 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9260 				   bool setting)
9261 {
9262 	struct igb_adapter *adapter = netdev_priv(netdev);
9263 	struct e1000_hw *hw = &adapter->hw;
9264 	u32 reg_val, reg_offset;
9265 
9266 	if (!adapter->vfs_allocated_count)
9267 		return -EOPNOTSUPP;
9268 
9269 	if (vf >= adapter->vfs_allocated_count)
9270 		return -EINVAL;
9271 
9272 	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9273 	reg_val = rd32(reg_offset);
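	/* Bit (vf) gates MAC anti-spoof checking and bit
	 * (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT) gates VLAN anti-spoof
	 * checking; the two are always toggled together.
	 */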
9274 	if (setting)
9275 		reg_val |= (BIT(vf) |
9276 			    BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9277 	else
9278 		reg_val &= ~(BIT(vf) |
9279 			     BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9280 	wr32(reg_offset, reg_val);
9281 
9282 	adapter->vf_data[vf].spoofchk_enabled = setting;
9283 	return 0;
9284 }
9285 
9286 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9287 {
9288 	struct igb_adapter *adapter = netdev_priv(netdev);
9289 
9290 	if (vf >= adapter->vfs_allocated_count)
9291 		return -EINVAL;
9292 	if (adapter->vf_data[vf].trusted == setting)
9293 		return 0;
9294 
9295 	adapter->vf_data[vf].trusted = setting;
9296 
9297 	dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9298 		 vf, setting ? "" : "not ");
9299 	return 0;
9300 }
9301 
9302 static int igb_ndo_get_vf_config(struct net_device *netdev,
9303 				 int vf, struct ifla_vf_info *ivi)
9304 {
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
9307 		return -EINVAL;
9308 	ivi->vf = vf;
9309 	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9310 	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9311 	ivi->min_tx_rate = 0;
9312 	ivi->vlan = adapter->vf_data[vf].pf_vlan;
9313 	ivi->qos = adapter->vf_data[vf].pf_qos;
9314 	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9315 	ivi->trusted = adapter->vf_data[vf].trusted;
9316 	return 0;
9317 }
9318 
9319 static void igb_vmm_control(struct igb_adapter *adapter)
9320 {
9321 	struct e1000_hw *hw = &adapter->hw;
9322 	u32 reg;
9323 
9324 	switch (hw->mac.type) {
9325 	case e1000_82575:
9326 	case e1000_i210:
9327 	case e1000_i211:
9328 	case e1000_i354:
9329 	default:
		/* replication is not supported on 82575, i210/i211 or i354 */
9331 		return;
9332 	case e1000_82576:
9333 		/* notify HW that the MAC is adding vlan tags */
9334 		reg = rd32(E1000_DTXCTL);
9335 		reg |= E1000_DTXCTL_VLAN_ADDED;
9336 		wr32(E1000_DTXCTL, reg);
9337 		/* Fall through */
9338 	case e1000_82580:
9339 		/* enable replication vlan tag stripping */
9340 		reg = rd32(E1000_RPLOLR);
9341 		reg |= E1000_RPLOLR_STRVLAN;
9342 		wr32(E1000_RPLOLR, reg);
9343 		/* Fall through */
9344 	case e1000_i350:
9345 		/* none of the above registers are supported by i350 */
9346 		break;
9347 	}
9348 
9349 	if (adapter->vfs_allocated_count) {
9350 		igb_vmdq_set_loopback_pf(hw, true);
9351 		igb_vmdq_set_replication_pf(hw, true);
9352 		igb_vmdq_set_anti_spoofing_pf(hw, true,
9353 					      adapter->vfs_allocated_count);
9354 	} else {
9355 		igb_vmdq_set_loopback_pf(hw, false);
9356 		igb_vmdq_set_replication_pf(hw, false);
9357 	}
9358 }
9359 
9360 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9361 {
9362 	struct e1000_hw *hw = &adapter->hw;
9363 	u32 dmac_thr;
9364 	u16 hwm;
9365 
9366 	if (hw->mac.type > e1000_82580) {
9367 		if (adapter->flags & IGB_FLAG_DMAC) {
9368 			u32 reg;
9369 
9370 			/* force threshold to 0. */
9371 			wr32(E1000_DMCTXTH, 0);
9372 
			/* The DMA Coalescing high water mark needs to be
			 * greater than the Rx threshold.  Set hwm to
			 * PBA - 6KB, expressed in 16B units (pba is in KB,
			 * so one KB is 64 units).
			 */
9377 			hwm = 64 * (pba - 6);
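			/* e.g. pba = 34 (KB) gives hwm = 64 * 28 = 1792
			 * sixteen-byte units, i.e. a 28KB watermark
			 */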
9378 			reg = rd32(E1000_FCRTC);
9379 			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9380 			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9381 				& E1000_FCRTC_RTH_COAL_MASK);
9382 			wr32(E1000_FCRTC, reg);
9383 
			/* Set the DMA Coalescing Rx threshold to its capped
			 * value of PBA - 10KB (dmac_thr is in KB units).
			 */
9387 			dmac_thr = pba - 10;
9388 			reg = rd32(E1000_DMACR);
9389 			reg &= ~E1000_DMACR_DMACTHR_MASK;
9390 			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9391 				& E1000_DMACR_DMACTHR_MASK);
9392 
			/* transition to L0s or L1 if available */
9394 			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9395 
			/* watchdog timer = ~1000 usec, expressed in 32 usec
			 * intervals (1000 >> 5 = 31)
			 */
9397 			reg |= (1000 >> 5);
9398 
9399 			/* Disable BMC-to-OS Watchdog Enable */
9400 			if (hw->mac.type != e1000_i354)
9401 				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9402 
9403 			wr32(E1000_DMACR, reg);
9404 
			/* no lower threshold at which to disable
			 * coalescing (smart FIFO): UTRESH = 0
			 */
9408 			wr32(E1000_DMCRTRH, 0);
9409 
9410 			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9411 
9412 			wr32(E1000_DMCTLX, reg);
9413 
9414 			/* free space in tx packet buffer to wake from
9415 			 * DMA coal
9416 			 */
9417 			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9418 			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9419 
9420 			/* make low power state decision controlled
9421 			 * by DMA coal
9422 			 */
9423 			reg = rd32(E1000_PCIEMISC);
9424 			reg &= ~E1000_PCIEMISC_LX_DECISION;
9425 			wr32(E1000_PCIEMISC, reg);
		} /* endif IGB_FLAG_DMAC is set */
9427 	} else if (hw->mac.type == e1000_82580) {
9428 		u32 reg = rd32(E1000_PCIEMISC);
9429 
9430 		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9431 		wr32(E1000_DMACR, 0);
9432 	}
9433 }
9434 
9435 /**
 *  igb_read_i2c_byte - Reads an 8-bit value over I2C
9437  *  @hw: pointer to hardware structure
9438  *  @byte_offset: byte offset to read
9439  *  @dev_addr: device address
9440  *  @data: value read
9441  *
9442  *  Performs byte read operation over I2C interface at
9443  *  a specified device address.
9444  **/
9445 s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9446 		      u8 dev_addr, u8 *data)
9447 {
9448 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9449 	struct i2c_client *this_client = adapter->i2c_client;
9450 	s32 status;
9451 	u16 swfw_mask = 0;
9452 
9453 	if (!this_client)
9454 		return E1000_ERR_I2C;
9455 
9456 	swfw_mask = E1000_SWFW_PHY0_SM;
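	/* The PHY0 software/firmware semaphore serializes this I2C
	 * access with firmware so neither side interrupts a transaction
	 * the other has in flight.
	 */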
9457 
9458 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9459 		return E1000_ERR_SWFW_SYNC;
9460 
9461 	status = i2c_smbus_read_byte_data(this_client, byte_offset);
9462 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9463 
	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return 0;
9470 }
9471 
9472 /**
 *  igb_write_i2c_byte - Writes an 8-bit value over I2C
9474  *  @hw: pointer to hardware structure
9475  *  @byte_offset: byte offset to write
9476  *  @dev_addr: device address
9477  *  @data: value to write
9478  *
9479  *  Performs byte write operation over I2C interface at
9480  *  a specified device address.
9481  **/
9482 s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9483 		       u8 dev_addr, u8 data)
9484 {
9485 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9486 	struct i2c_client *this_client = adapter->i2c_client;
9487 	s32 status;
9488 	u16 swfw_mask = E1000_SWFW_PHY0_SM;
9489 
9490 	if (!this_client)
9491 		return E1000_ERR_I2C;
9492 
9493 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9494 		return E1000_ERR_SWFW_SYNC;
9495 	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9496 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9497 
	if (status)
		return E1000_ERR_I2C;

	return 0;
9503 }
9504 
9505 int igb_reinit_queues(struct igb_adapter *adapter)
9506 {
9507 	struct net_device *netdev = adapter->netdev;
9508 	struct pci_dev *pdev = adapter->pdev;
9509 	int err = 0;
9510 
9511 	if (netif_running(netdev))
9512 		igb_close(netdev);
9513 
9514 	igb_reset_interrupt_capability(adapter);
9515 
9516 	if (igb_init_interrupt_scheme(adapter, true)) {
9517 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9518 		return -ENOMEM;
9519 	}
9520 
9521 	if (netif_running(netdev))
9522 		err = igb_open(netdev);
9523 
9524 	return err;
9525 }
9526 
9527 static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9528 {
9529 	struct igb_nfc_filter *rule;
9530 
9531 	spin_lock(&adapter->nfc_lock);
9532 
9533 	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9534 		igb_erase_filter(adapter, rule);
9535 
9536 	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
9537 		igb_erase_filter(adapter, rule);
9538 
9539 	spin_unlock(&adapter->nfc_lock);
9540 }
9541 
9542 static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9543 {
9544 	struct igb_nfc_filter *rule;
9545 
9546 	spin_lock(&adapter->nfc_lock);
9547 
9548 	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9549 		igb_add_filter(adapter, rule);
9550 
9551 	spin_unlock(&adapter->nfc_lock);
9552 }
9553 /* igb_main.c */
9554