1 /*******************************************************************************
2 
3   Intel 10 Gigabit PCI Express Linux driver
4   Copyright(c) 1999 - 2013 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 
26 *******************************************************************************/
27 
28 #include <linux/types.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/vmalloc.h>
33 #include <linux/string.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/sctp.h>
39 #include <linux/pkt_sched.h>
40 #include <linux/ipv6.h>
41 #include <linux/slab.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <linux/ethtool.h>
45 #include <linux/if.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_macvlan.h>
48 #include <linux/if_bridge.h>
49 #include <linux/prefetch.h>
50 #include <scsi/fc/fc_fcoe.h>
51 
52 #include "ixgbe.h"
53 #include "ixgbe_common.h"
54 #include "ixgbe_dcb_82599.h"
55 #include "ixgbe_sriov.h"
56 
57 char ixgbe_driver_name[] = "ixgbe";
58 static const char ixgbe_driver_string[] =
59 			      "Intel(R) 10 Gigabit PCI Express Network Driver";
60 #ifdef IXGBE_FCOE
61 char ixgbe_default_device_descr[] =
62 			      "Intel(R) 10 Gigabit Network Connection";
63 #else
64 static char ixgbe_default_device_descr[] =
65 			      "Intel(R) 10 Gigabit Network Connection";
66 #endif
67 #define DRV_VERSION "3.15.1-k"
68 const char ixgbe_driver_version[] = DRV_VERSION;
69 static const char ixgbe_copyright[] =
70 				"Copyright (c) 1999-2013 Intel Corporation.";
71 
72 static const struct ixgbe_info *ixgbe_info_tbl[] = {
73 	[board_82598] = &ixgbe_82598_info,
74 	[board_82599] = &ixgbe_82599_info,
75 	[board_X540] = &ixgbe_X540_info,
76 };
77 
78 /* ixgbe_pci_tbl - PCI Device ID Table
79  *
80  * Wildcard entries (PCI_ANY_ID) should come last
81  * Last entry must be all 0s
82  *
83  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
84  *   Class, Class Mask, private data (not used) }
85  */
86 static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
87 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
88 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
89 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
90 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
91 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
92 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
93 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
94 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
95 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
96 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
97 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
98 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
99 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
100 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
101 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
102 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
103 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
104 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
105 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
106 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
107 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
108 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
109 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
110 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
111 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
112 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
113 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
114 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
115 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
116 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
117 	/* required last entry */
118 	{0, }
119 };
120 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
121 
122 #ifdef CONFIG_IXGBE_DCA
123 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
124 			    void *p);
125 static struct notifier_block dca_notifier = {
126 	.notifier_call = ixgbe_notify_dca,
127 	.next          = NULL,
128 	.priority      = 0
129 };
130 #endif
131 
132 #ifdef CONFIG_PCI_IOV
133 static unsigned int max_vfs;
134 module_param(max_vfs, uint, 0);
135 MODULE_PARM_DESC(max_vfs,
136 		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
137 #endif /* CONFIG_PCI_IOV */
138 
139 static unsigned int allow_unsupported_sfp;
140 module_param(allow_unsupported_sfp, uint, 0);
141 MODULE_PARM_DESC(allow_unsupported_sfp,
142 		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
143 
144 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
145 static int debug = -1;
146 module_param(debug, int, 0);
147 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
148 
149 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
150 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
151 MODULE_LICENSE("GPL");
152 MODULE_VERSION(DRV_VERSION);
153 
154 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
155 					  u32 reg, u16 *value)
156 {
157 	struct pci_dev *parent_dev;
158 	struct pci_bus *parent_bus;
159 
160 	parent_bus = adapter->pdev->bus->parent;
161 	if (!parent_bus)
162 		return -1;
163 
164 	parent_dev = parent_bus->self;
165 	if (!parent_dev)
166 		return -1;
167 
168 	if (!pci_is_pcie(parent_dev))
169 		return -1;
170 
171 	pcie_capability_read_word(parent_dev, reg, value);
172 	return 0;
173 }
174 
175 static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
176 {
177 	struct ixgbe_hw *hw = &adapter->hw;
178 	u16 link_status = 0;
179 	int err;
180 
181 	hw->bus.type = ixgbe_bus_type_pci_express;
182 
183 	/* Get the negotiated link width and speed from PCI config space of the
184 	 * parent, as this device is behind a switch
185 	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, PCI_EXP_LNKSTA,
					     &link_status);
187 
188 	/* assume caller will handle error case */
189 	if (err)
190 		return err;
191 
192 	hw->bus.width = ixgbe_convert_bus_width(link_status);
193 	hw->bus.speed = ixgbe_convert_bus_speed(link_status);
194 
195 	return 0;
196 }
197 
198 /**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
200  * @hw: hw specific details
201  *
202  * This function is used by probe to determine whether a device's PCI-Express
203  * bandwidth details should be gathered from the parent bus instead of from the
204  * device. Used to ensure that various locations all have the correct device ID
205  * checks.
206  */
207 static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
208 {
209 	switch (hw->device_id) {
210 	case IXGBE_DEV_ID_82599_SFP_SF_QP:
211 	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
212 		return true;
213 	default:
214 		return false;
215 	}
216 }
217 
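/**
 * ixgbe_check_minimum_link - warn if the PCIe slot limits performance
 * @adapter: pointer to adapter struct
 * @expected_gts: PCIe bandwidth, in GT/s, needed for full performance
 *
 * Determines the PCI Express bandwidth available to the adapter (using the
 * parent device when the NIC sits behind an internal PCIe switch), reports
 * it, and warns if it is below @expected_gts.
 **/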
218 static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
219 				     int expected_gts)
220 {
221 	int max_gts = 0;
222 	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
223 	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
224 	struct pci_dev *pdev;
225 
	/* determine whether to use the parent device */
228 	if (ixgbe_pcie_from_parent(&adapter->hw))
229 		pdev = adapter->pdev->bus->parent->self;
230 	else
231 		pdev = adapter->pdev;
232 
233 	if (pcie_get_minimum_link(pdev, &speed, &width) ||
234 	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
235 		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
236 		return;
237 	}
238 
239 	switch (speed) {
240 	case PCIE_SPEED_2_5GT:
241 		/* 8b/10b encoding reduces max throughput by 20% */
242 		max_gts = 2 * width;
243 		break;
244 	case PCIE_SPEED_5_0GT:
245 		/* 8b/10b encoding reduces max throughput by 20% */
246 		max_gts = 4 * width;
247 		break;
248 	case PCIE_SPEED_8_0GT:
249 		/* 128b/130b encoding reduces throughput by less than 2% */
250 		max_gts = 8 * width;
251 		break;
252 	default:
253 		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
254 		return;
255 	}
256 
257 	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
258 		   max_gts);
259 	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
260 		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
261 		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
262 		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
263 		    "Unknown"),
264 		   width,
265 		   (speed == PCIE_SPEED_2_5GT ? "20%" :
266 		    speed == PCIE_SPEED_5_0GT ? "20%" :
267 		    speed == PCIE_SPEED_8_0GT ? "<2%" :
268 		    "Unknown"));
269 
270 	if (max_gts < expected_gts) {
271 		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
272 		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
273 			expected_gts);
274 		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
275 	}
276 }
277 
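/* Schedule the service task unless the adapter is down or the task is
 * already scheduled.
 */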
278 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
279 {
280 	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
281 	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
282 		schedule_work(&adapter->service_task);
283 }
284 
285 static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
286 {
287 	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
288 
289 	/* flush memory to make sure state is correct before next watchdog */
290 	smp_mb__before_clear_bit();
291 	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
292 }
293 
294 struct ixgbe_reg_info {
295 	u32 ofs;
296 	char *name;
297 };
298 
299 static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
300 
301 	/* General Registers */
302 	{IXGBE_CTRL, "CTRL"},
303 	{IXGBE_STATUS, "STATUS"},
304 	{IXGBE_CTRL_EXT, "CTRL_EXT"},
305 
306 	/* Interrupt Registers */
307 	{IXGBE_EICR, "EICR"},
308 
309 	/* RX Registers */
310 	{IXGBE_SRRCTL(0), "SRRCTL"},
311 	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
312 	{IXGBE_RDLEN(0), "RDLEN"},
313 	{IXGBE_RDH(0), "RDH"},
314 	{IXGBE_RDT(0), "RDT"},
315 	{IXGBE_RXDCTL(0), "RXDCTL"},
316 	{IXGBE_RDBAL(0), "RDBAL"},
317 	{IXGBE_RDBAH(0), "RDBAH"},
318 
319 	/* TX Registers */
320 	{IXGBE_TDBAL(0), "TDBAL"},
321 	{IXGBE_TDBAH(0), "TDBAH"},
322 	{IXGBE_TDLEN(0), "TDLEN"},
323 	{IXGBE_TDH(0), "TDH"},
324 	{IXGBE_TDT(0), "TDT"},
325 	{IXGBE_TXDCTL(0), "TXDCTL"},
326 
327 	/* List Terminator */
328 	{}
329 };
330 
331 
332 /*
333  * ixgbe_regdump - register printout routine
334  */
static void ixgbe_regdump(struct ixgbe_hw *hw,
			  const struct ixgbe_reg_info *reginfo)
336 {
337 	int i = 0, j = 0;
338 	char rname[16];
339 	u32 regs[64];
340 
341 	switch (reginfo->ofs) {
342 	case IXGBE_SRRCTL(0):
343 		for (i = 0; i < 64; i++)
344 			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
345 		break;
346 	case IXGBE_DCA_RXCTRL(0):
347 		for (i = 0; i < 64; i++)
348 			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
349 		break;
350 	case IXGBE_RDLEN(0):
351 		for (i = 0; i < 64; i++)
352 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
353 		break;
354 	case IXGBE_RDH(0):
355 		for (i = 0; i < 64; i++)
356 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
357 		break;
358 	case IXGBE_RDT(0):
359 		for (i = 0; i < 64; i++)
360 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
361 		break;
362 	case IXGBE_RXDCTL(0):
363 		for (i = 0; i < 64; i++)
364 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
365 		break;
366 	case IXGBE_RDBAL(0):
367 		for (i = 0; i < 64; i++)
368 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
369 		break;
370 	case IXGBE_RDBAH(0):
371 		for (i = 0; i < 64; i++)
372 			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
373 		break;
374 	case IXGBE_TDBAL(0):
375 		for (i = 0; i < 64; i++)
376 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
377 		break;
378 	case IXGBE_TDBAH(0):
379 		for (i = 0; i < 64; i++)
380 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
381 		break;
382 	case IXGBE_TDLEN(0):
383 		for (i = 0; i < 64; i++)
384 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
385 		break;
386 	case IXGBE_TDH(0):
387 		for (i = 0; i < 64; i++)
388 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
389 		break;
390 	case IXGBE_TDT(0):
391 		for (i = 0; i < 64; i++)
392 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
393 		break;
394 	case IXGBE_TXDCTL(0):
395 		for (i = 0; i < 64; i++)
396 			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
397 		break;
398 	default:
399 		pr_info("%-15s %08x\n", reginfo->name,
400 			IXGBE_READ_REG(hw, reginfo->ofs));
401 		return;
402 	}
403 
404 	for (i = 0; i < 8; i++) {
405 		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
406 		pr_err("%-15s", rname);
407 		for (j = 0; j < 8; j++)
408 			pr_cont(" %08x", regs[i*8+j]);
409 		pr_cont("\n");
410 	}
411 
412 }
413 
414 /*
415  * ixgbe_dump - Print registers, tx-rings and rx-rings
416  */
417 static void ixgbe_dump(struct ixgbe_adapter *adapter)
418 {
419 	struct net_device *netdev = adapter->netdev;
420 	struct ixgbe_hw *hw = &adapter->hw;
	const struct ixgbe_reg_info *reginfo;
422 	int n = 0;
423 	struct ixgbe_ring *tx_ring;
424 	struct ixgbe_tx_buffer *tx_buffer;
425 	union ixgbe_adv_tx_desc *tx_desc;
426 	struct my_u0 { u64 a; u64 b; } *u0;
427 	struct ixgbe_ring *rx_ring;
428 	union ixgbe_adv_rx_desc *rx_desc;
429 	struct ixgbe_rx_buffer *rx_buffer_info;
430 	u32 staterr;
431 	int i = 0;
432 
433 	if (!netif_msg_hw(adapter))
434 		return;
435 
436 	/* Print netdevice Info */
437 	if (netdev) {
438 		dev_info(&adapter->pdev->dev, "Net device Info\n");
439 		pr_info("Device Name     state            "
440 			"trans_start      last_rx\n");
441 		pr_info("%-15s %016lX %016lX %016lX\n",
442 			netdev->name,
443 			netdev->state,
444 			netdev->trans_start,
445 			netdev->last_rx);
446 	}
447 
448 	/* Print Registers */
449 	dev_info(&adapter->pdev->dev, "Register Dump\n");
450 	pr_info(" Register Name   Value\n");
	for (reginfo = ixgbe_reg_info_tbl; reginfo->name; reginfo++) {
453 		ixgbe_regdump(hw, reginfo);
454 	}
455 
456 	/* Print TX Ring Summary */
457 	if (!netdev || !netif_running(netdev))
458 		goto exit;
459 
460 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
461 	pr_info(" %s     %s              %s        %s\n",
462 		"Queue [NTU] [NTC] [bi(ntc)->dma  ]",
463 		"leng", "ntw", "timestamp");
464 	for (n = 0; n < adapter->num_tx_queues; n++) {
465 		tx_ring = adapter->tx_ring[n];
466 		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
467 		pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
468 			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
469 			   (u64)dma_unmap_addr(tx_buffer, dma),
470 			   dma_unmap_len(tx_buffer, len),
471 			   tx_buffer->next_to_watch,
472 			   (u64)tx_buffer->time_stamp);
473 	}
474 
475 	/* Print TX Rings */
476 	if (!netif_msg_tx_done(adapter))
477 		goto rx_ring_summary;
478 
479 	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
480 
481 	/* Transmit Descriptor Formats
482 	 *
483 	 * 82598 Advanced Transmit Descriptor
484 	 *   +--------------------------------------------------------------+
485 	 * 0 |         Buffer Address [63:0]                                |
486 	 *   +--------------------------------------------------------------+
487 	 * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
488 	 *   +--------------------------------------------------------------+
489 	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
490 	 *
491 	 * 82598 Advanced Transmit Descriptor (Write-Back Format)
492 	 *   +--------------------------------------------------------------+
493 	 * 0 |                          RSV [63:0]                          |
494 	 *   +--------------------------------------------------------------+
495 	 * 8 |            RSV           |  STA  |          NXTSEQ           |
496 	 *   +--------------------------------------------------------------+
497 	 *   63                       36 35   32 31                         0
498 	 *
499 	 * 82599+ Advanced Transmit Descriptor
500 	 *   +--------------------------------------------------------------+
501 	 * 0 |         Buffer Address [63:0]                                |
502 	 *   +--------------------------------------------------------------+
503 	 * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
504 	 *   +--------------------------------------------------------------+
505 	 *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
506 	 *
507 	 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
508 	 *   +--------------------------------------------------------------+
509 	 * 0 |                          RSV [63:0]                          |
510 	 *   +--------------------------------------------------------------+
511 	 * 8 |            RSV           |  STA  |           RSV             |
512 	 *   +--------------------------------------------------------------+
513 	 *   63                       36 35   32 31                         0
514 	 */
515 
516 	for (n = 0; n < adapter->num_tx_queues; n++) {
517 		tx_ring = adapter->tx_ring[n];
518 		pr_info("------------------------------------\n");
519 		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
520 		pr_info("------------------------------------\n");
521 		pr_info("%s%s    %s              %s        %s          %s\n",
522 			"T [desc]     [address 63:0  ] ",
523 			"[PlPOIdStDDt Ln] [bi->dma       ] ",
524 			"leng", "ntw", "timestamp", "bi->skb");
525 
526 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
527 			tx_desc = IXGBE_TX_DESC(tx_ring, i);
528 			tx_buffer = &tx_ring->tx_buffer_info[i];
529 			u0 = (struct my_u0 *)tx_desc;
530 			if (dma_unmap_len(tx_buffer, len) > 0) {
531 				pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p",
532 					i,
533 					le64_to_cpu(u0->a),
534 					le64_to_cpu(u0->b),
535 					(u64)dma_unmap_addr(tx_buffer, dma),
536 					dma_unmap_len(tx_buffer, len),
537 					tx_buffer->next_to_watch,
538 					(u64)tx_buffer->time_stamp,
539 					tx_buffer->skb);
540 				if (i == tx_ring->next_to_use &&
541 					i == tx_ring->next_to_clean)
542 					pr_cont(" NTC/U\n");
543 				else if (i == tx_ring->next_to_use)
544 					pr_cont(" NTU\n");
545 				else if (i == tx_ring->next_to_clean)
546 					pr_cont(" NTC\n");
547 				else
548 					pr_cont("\n");
549 
550 				if (netif_msg_pktdata(adapter) &&
551 				    tx_buffer->skb)
552 					print_hex_dump(KERN_INFO, "",
553 						DUMP_PREFIX_ADDRESS, 16, 1,
554 						tx_buffer->skb->data,
555 						dma_unmap_len(tx_buffer, len),
556 						true);
557 			}
558 		}
559 	}
560 
561 	/* Print RX Rings Summary */
562 rx_ring_summary:
563 	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
564 	pr_info("Queue [NTU] [NTC]\n");
565 	for (n = 0; n < adapter->num_rx_queues; n++) {
566 		rx_ring = adapter->rx_ring[n];
567 		pr_info("%5d %5X %5X\n",
568 			n, rx_ring->next_to_use, rx_ring->next_to_clean);
569 	}
570 
571 	/* Print RX Rings */
572 	if (!netif_msg_rx_status(adapter))
573 		goto exit;
574 
575 	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
576 
577 	/* Receive Descriptor Formats
578 	 *
579 	 * 82598 Advanced Receive Descriptor (Read) Format
580 	 *    63                                           1        0
581 	 *    +-----------------------------------------------------+
582 	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
583 	 *    +----------------------------------------------+------+
584 	 *  8 |       Header Buffer Address [63:1]           |  DD  |
585 	 *    +-----------------------------------------------------+
586 	 *
587 	 *
588 	 * 82598 Advanced Receive Descriptor (Write-Back) Format
589 	 *
590 	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
591 	 *   +------------------------------------------------------+
592 	 * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
593 	 *   | Packet   | IP     |   |          |     | Type | Type |
594 	 *   | Checksum | Ident  |   |          |     |      |      |
595 	 *   +------------------------------------------------------+
596 	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
597 	 *   +------------------------------------------------------+
598 	 *   63       48 47    32 31            20 19               0
599 	 *
600 	 * 82599+ Advanced Receive Descriptor (Read) Format
601 	 *    63                                           1        0
602 	 *    +-----------------------------------------------------+
603 	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
604 	 *    +----------------------------------------------+------+
605 	 *  8 |       Header Buffer Address [63:1]           |  DD  |
606 	 *    +-----------------------------------------------------+
607 	 *
608 	 *
609 	 * 82599+ Advanced Receive Descriptor (Write-Back) Format
610 	 *
611 	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
612 	 *   +------------------------------------------------------+
613 	 * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS |
614 	 *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type |
615 	 *   |/ Flow Dir Flt ID  |   |          |     |      |      |
616 	 *   +------------------------------------------------------+
617 	 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
618 	 *   +------------------------------------------------------+
619 	 *   63       48 47    32 31          20 19                 0
620 	 */
621 
622 	for (n = 0; n < adapter->num_rx_queues; n++) {
623 		rx_ring = adapter->rx_ring[n];
624 		pr_info("------------------------------------\n");
625 		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
626 		pr_info("------------------------------------\n");
627 		pr_info("%s%s%s",
628 			"R  [desc]      [ PktBuf     A0] ",
629 			"[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
630 			"<-- Adv Rx Read format\n");
631 		pr_info("%s%s%s",
632 			"RWB[desc]      [PcsmIpSHl PtRs] ",
633 			"[vl er S cks ln] ---------------- [bi->skb       ] ",
634 			"<-- Adv Rx Write-Back format\n");
635 
636 		for (i = 0; i < rx_ring->count; i++) {
637 			rx_buffer_info = &rx_ring->rx_buffer_info[i];
638 			rx_desc = IXGBE_RX_DESC(rx_ring, i);
639 			u0 = (struct my_u0 *)rx_desc;
640 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
641 			if (staterr & IXGBE_RXD_STAT_DD) {
642 				/* Descriptor Done */
643 				pr_info("RWB[0x%03X]     %016llX "
644 					"%016llX ---------------- %p", i,
645 					le64_to_cpu(u0->a),
646 					le64_to_cpu(u0->b),
647 					rx_buffer_info->skb);
648 			} else {
649 				pr_info("R  [0x%03X]     %016llX "
650 					"%016llX %016llX %p", i,
651 					le64_to_cpu(u0->a),
652 					le64_to_cpu(u0->b),
653 					(u64)rx_buffer_info->dma,
654 					rx_buffer_info->skb);
655 
656 				if (netif_msg_pktdata(adapter) &&
657 				    rx_buffer_info->dma) {
658 					print_hex_dump(KERN_INFO, "",
659 					   DUMP_PREFIX_ADDRESS, 16, 1,
660 					   page_address(rx_buffer_info->page) +
661 						    rx_buffer_info->page_offset,
662 					   ixgbe_rx_bufsz(rx_ring), true);
663 				}
664 			}
665 
666 			if (i == rx_ring->next_to_use)
667 				pr_cont(" NTU\n");
668 			else if (i == rx_ring->next_to_clean)
669 				pr_cont(" NTC\n");
670 			else
671 				pr_cont("\n");
672 
673 		}
674 	}
675 
676 exit:
677 	return;
678 }
679 
680 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
681 {
682 	u32 ctrl_ext;
683 
684 	/* Let firmware take over control of h/w */
685 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
686 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
687 			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
688 }
689 
690 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
691 {
692 	u32 ctrl_ext;
693 
694 	/* Let firmware know the driver has taken over */
695 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
696 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
697 			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
698 }
699 
700 /**
701  * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
702  * @adapter: pointer to adapter struct
703  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
704  * @queue: queue to map the corresponding interrupt to
705  * @msix_vector: the vector to map to the corresponding queue
706  *
707  */
708 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
709 			   u8 queue, u8 msix_vector)
710 {
711 	u32 ivar, index;
712 	struct ixgbe_hw *hw = &adapter->hw;
713 	switch (hw->mac.type) {
714 	case ixgbe_mac_82598EB:
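		/* Each 82598 IVAR register packs four 8-bit entries; Rx
		 * queues use entries 0-63 and Tx queues entries 64-127,
		 * so derive the register index and byte lane from the
		 * direction and queue number.
		 */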
715 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
716 		if (direction == -1)
717 			direction = 0;
718 		index = (((direction * 64) + queue) >> 2) & 0x1F;
719 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
720 		ivar &= ~(0xFF << (8 * (queue & 0x3)));
721 		ivar |= (msix_vector << (8 * (queue & 0x3)));
722 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
723 		break;
724 	case ixgbe_mac_82599EB:
725 	case ixgbe_mac_X540:
726 		if (direction == -1) {
727 			/* other causes */
728 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
729 			index = ((queue & 1) * 8);
730 			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
731 			ivar &= ~(0xFF << index);
732 			ivar |= (msix_vector << index);
733 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
734 			break;
735 		} else {
736 			/* tx or rx causes */
737 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
738 			index = ((16 * (queue & 1)) + (8 * direction));
739 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
740 			ivar &= ~(0xFF << index);
741 			ivar |= (msix_vector << index);
742 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
743 			break;
744 		}
745 	default:
746 		break;
747 	}
748 }
749 
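/* Set the given queue bits in EICS to force an interrupt on those
 * queue vectors.
 */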
750 static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
751 					  u64 qmask)
752 {
753 	u32 mask;
754 
755 	switch (adapter->hw.mac.type) {
756 	case ixgbe_mac_82598EB:
757 		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
758 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
759 		break;
760 	case ixgbe_mac_82599EB:
761 	case ixgbe_mac_X540:
762 		mask = (qmask & 0xFFFFFFFF);
763 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
764 		mask = (qmask >> 32);
765 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
766 		break;
767 	default:
768 		break;
769 	}
770 }
771 
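/**
 * ixgbe_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: ring that holds the buffer
 * @tx_buffer: buffer to free
 *
 * Frees the skb, if any, and unmaps the associated DMA region (a single
 * mapping for skb heads, a page mapping for paged fragments).
 **/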
772 void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
773 				      struct ixgbe_tx_buffer *tx_buffer)
774 {
775 	if (tx_buffer->skb) {
776 		dev_kfree_skb_any(tx_buffer->skb);
777 		if (dma_unmap_len(tx_buffer, len))
778 			dma_unmap_single(ring->dev,
779 					 dma_unmap_addr(tx_buffer, dma),
780 					 dma_unmap_len(tx_buffer, len),
781 					 DMA_TO_DEVICE);
782 	} else if (dma_unmap_len(tx_buffer, len)) {
783 		dma_unmap_page(ring->dev,
784 			       dma_unmap_addr(tx_buffer, dma),
785 			       dma_unmap_len(tx_buffer, len),
786 			       DMA_TO_DEVICE);
787 	}
788 	tx_buffer->next_to_watch = NULL;
789 	tx_buffer->skb = NULL;
790 	dma_unmap_len_set(tx_buffer, len, 0);
791 	/* tx_buffer must be completely set up in the transmit path */
792 }
793 
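/* Count link-level XOFF (802.3x pause) frames received and, if any were
 * seen, disarm the Tx hang check on all queues since transmit may simply
 * be paused rather than hung.
 */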
794 static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
795 {
796 	struct ixgbe_hw *hw = &adapter->hw;
797 	struct ixgbe_hw_stats *hwstats = &adapter->stats;
798 	int i;
799 	u32 data;
800 
801 	if ((hw->fc.current_mode != ixgbe_fc_full) &&
802 	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
803 		return;
804 
805 	switch (hw->mac.type) {
806 	case ixgbe_mac_82598EB:
807 		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
808 		break;
809 	default:
810 		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
811 	}
812 	hwstats->lxoffrxc += data;
813 
814 	/* refill credits (no tx hang) if we received xoff */
815 	if (!data)
816 		return;
817 
818 	for (i = 0; i < adapter->num_tx_queues; i++)
819 		clear_bit(__IXGBE_HANG_CHECK_ARMED,
820 			  &adapter->tx_ring[i]->state);
821 }
822 
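/* Per-priority (PFC) variant of the XOFF accounting: update the per-TC
 * pause counters and disarm the hang check on any Tx queue whose traffic
 * class received XOFF frames.  Falls back to link flow control accounting
 * when DCB/PFC is not enabled.
 */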
823 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
824 {
825 	struct ixgbe_hw *hw = &adapter->hw;
826 	struct ixgbe_hw_stats *hwstats = &adapter->stats;
827 	u32 xoff[8] = {0};
828 	u8 tc;
829 	int i;
830 	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
831 
832 	if (adapter->ixgbe_ieee_pfc)
833 		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
834 
835 	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
836 		ixgbe_update_xoff_rx_lfc(adapter);
837 		return;
838 	}
839 
840 	/* update stats for each tc, only valid with PFC enabled */
841 	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
842 		u32 pxoffrxc;
843 
844 		switch (hw->mac.type) {
845 		case ixgbe_mac_82598EB:
846 			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
847 			break;
848 		default:
849 			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
850 		}
851 		hwstats->pxoffrxc[i] += pxoffrxc;
852 		/* Get the TC for given UP */
853 		tc = netdev_get_prio_tc_map(adapter->netdev, i);
854 		xoff[tc] += pxoffrxc;
855 	}
856 
857 	/* disarm tx queues that have received xoff frames */
858 	for (i = 0; i < adapter->num_tx_queues; i++) {
859 		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
860 
861 		tc = tx_ring->dcb_tc;
862 		if (xoff[tc])
863 			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
864 	}
865 }
866 
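/* Number of packets the driver has completed on this Tx ring */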
867 static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
868 {
869 	return ring->stats.packets;
870 }
871 
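/* Number of descriptors still outstanding between the hardware head and
 * tail pointers of this Tx ring.
 */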
872 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
873 {
874 	struct ixgbe_adapter *adapter;
875 	struct ixgbe_hw *hw;
876 	u32 head, tail;
877 
878 	if (ring->l2_accel_priv)
879 		adapter = ring->l2_accel_priv->real_adapter;
880 	else
881 		adapter = netdev_priv(ring->netdev);
882 
883 	hw = &adapter->hw;
884 	head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
885 	tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
886 
887 	if (head != tail)
888 		return (head < tail) ?
889 			tail - head : (tail + ring->count - head);
890 
891 	return 0;
892 }
893 
894 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
895 {
896 	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
897 	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
898 	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
899 	bool ret = false;
900 
901 	clear_check_for_tx_hang(tx_ring);
902 
903 	/*
904 	 * Check for a hung queue, but be thorough. This verifies
905 	 * that a transmit has been completed since the previous
906 	 * check AND there is at least one packet pending. The
907 	 * ARMED bit is set to indicate a potential hang. The
908 	 * bit is cleared if a pause frame is received to remove
909 	 * false hang detection due to PFC or 802.3x frames. By
910 	 * requiring this to fail twice we avoid races with
911 	 * pfc clearing the ARMED bit and conditions where we
912 	 * run the check_tx_hang logic with a transmit completion
913 	 * pending but without time to complete it yet.
914 	 */
915 	if ((tx_done_old == tx_done) && tx_pending) {
916 		/* make sure it is true for two checks in a row */
917 		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
918 				       &tx_ring->state);
919 	} else {
920 		/* update completed stats and continue */
921 		tx_ring->tx_stats.tx_done_old = tx_done;
922 		/* reset the countdown */
923 		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
924 	}
925 
926 	return ret;
927 }
928 
929 /**
930  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
931  * @adapter: driver private struct
932  **/
933 static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
934 {
935 
936 	/* Do the reset outside of interrupt context */
937 	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
938 		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
939 		e_warn(drv, "initiating reset due to tx timeout\n");
940 		ixgbe_service_event_schedule(adapter);
941 	}
942 }
943 
944 /**
945  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
946  * @q_vector: structure containing interrupt and ring information
947  * @tx_ring: tx ring to clean
948  **/
949 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
950 			       struct ixgbe_ring *tx_ring)
951 {
952 	struct ixgbe_adapter *adapter = q_vector->adapter;
953 	struct ixgbe_tx_buffer *tx_buffer;
954 	union ixgbe_adv_tx_desc *tx_desc;
955 	unsigned int total_bytes = 0, total_packets = 0;
956 	unsigned int budget = q_vector->tx.work_limit;
957 	unsigned int i = tx_ring->next_to_clean;
958 
959 	if (test_bit(__IXGBE_DOWN, &adapter->state))
960 		return true;
961 
962 	tx_buffer = &tx_ring->tx_buffer_info[i];
963 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
964 	i -= tx_ring->count;
965 
966 	do {
967 		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
968 
969 		/* if next_to_watch is not set then there is no work pending */
970 		if (!eop_desc)
971 			break;
972 
973 		/* prevent any other reads prior to eop_desc */
974 		read_barrier_depends();
975 
976 		/* if DD is not set pending work has not been completed */
977 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
978 			break;
979 
980 		/* clear next_to_watch to prevent false hangs */
981 		tx_buffer->next_to_watch = NULL;
982 
983 		/* update the statistics for this packet */
984 		total_bytes += tx_buffer->bytecount;
985 		total_packets += tx_buffer->gso_segs;
986 
987 		/* free the skb */
988 		dev_kfree_skb_any(tx_buffer->skb);
989 
990 		/* unmap skb header data */
991 		dma_unmap_single(tx_ring->dev,
992 				 dma_unmap_addr(tx_buffer, dma),
993 				 dma_unmap_len(tx_buffer, len),
994 				 DMA_TO_DEVICE);
995 
996 		/* clear tx_buffer data */
997 		tx_buffer->skb = NULL;
998 		dma_unmap_len_set(tx_buffer, len, 0);
999 
1000 		/* unmap remaining buffers */
1001 		while (tx_desc != eop_desc) {
1002 			tx_buffer++;
1003 			tx_desc++;
1004 			i++;
1005 			if (unlikely(!i)) {
1006 				i -= tx_ring->count;
1007 				tx_buffer = tx_ring->tx_buffer_info;
1008 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1009 			}
1010 
1011 			/* unmap any remaining paged data */
1012 			if (dma_unmap_len(tx_buffer, len)) {
1013 				dma_unmap_page(tx_ring->dev,
1014 					       dma_unmap_addr(tx_buffer, dma),
1015 					       dma_unmap_len(tx_buffer, len),
1016 					       DMA_TO_DEVICE);
1017 				dma_unmap_len_set(tx_buffer, len, 0);
1018 			}
1019 		}
1020 
1021 		/* move us one more past the eop_desc for start of next pkt */
1022 		tx_buffer++;
1023 		tx_desc++;
1024 		i++;
1025 		if (unlikely(!i)) {
1026 			i -= tx_ring->count;
1027 			tx_buffer = tx_ring->tx_buffer_info;
1028 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1029 		}
1030 
1031 		/* issue prefetch for next Tx descriptor */
1032 		prefetch(tx_desc);
1033 
1034 		/* update budget accounting */
1035 		budget--;
1036 	} while (likely(budget));
1037 
1038 	i += tx_ring->count;
1039 	tx_ring->next_to_clean = i;
1040 	u64_stats_update_begin(&tx_ring->syncp);
1041 	tx_ring->stats.bytes += total_bytes;
1042 	tx_ring->stats.packets += total_packets;
1043 	u64_stats_update_end(&tx_ring->syncp);
1044 	q_vector->tx.total_bytes += total_bytes;
1045 	q_vector->tx.total_packets += total_packets;
1046 
1047 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1048 		/* schedule immediate reset if we believe we hung */
1049 		struct ixgbe_hw *hw = &adapter->hw;
1050 		e_err(drv, "Detected Tx Unit Hang\n"
1051 			"  Tx Queue             <%d>\n"
1052 			"  TDH, TDT             <%x>, <%x>\n"
1053 			"  next_to_use          <%x>\n"
1054 			"  next_to_clean        <%x>\n"
1055 			"tx_buffer_info[next_to_clean]\n"
1056 			"  time_stamp           <%lx>\n"
1057 			"  jiffies              <%lx>\n",
1058 			tx_ring->queue_index,
1059 			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1060 			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1061 			tx_ring->next_to_use, i,
1062 			tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1063 
1064 		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1065 
1066 		e_info(probe,
1067 		       "tx hang %d detected on queue %d, resetting adapter\n",
1068 			adapter->tx_timeout_count + 1, tx_ring->queue_index);
1069 
1070 		/* schedule immediate reset if we believe we hung */
1071 		ixgbe_tx_timeout_reset(adapter);
1072 
1073 		/* the adapter is about to reset, no point in enabling stuff */
1074 		return true;
1075 	}
1076 
1077 	netdev_tx_completed_queue(txring_txq(tx_ring),
1078 				  total_packets, total_bytes);
1079 
1080 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1081 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1082 		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1083 		/* Make sure that anybody stopping the queue after this
1084 		 * sees the new next_to_clean.
1085 		 */
1086 		smp_mb();
1087 		if (__netif_subqueue_stopped(tx_ring->netdev,
1088 					     tx_ring->queue_index)
1089 		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1090 			netif_wake_subqueue(tx_ring->netdev,
1091 					    tx_ring->queue_index);
1092 			++tx_ring->tx_stats.restart_queue;
1093 		}
1094 	}
1095 
1096 	return !!budget;
1097 }
1098 
1099 #ifdef CONFIG_IXGBE_DCA
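/* Program the per-queue DCA_TXCTRL register so descriptor write-backs
 * for this Tx ring are tagged for @cpu.
 */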
1100 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1101 				struct ixgbe_ring *tx_ring,
1102 				int cpu)
1103 {
1104 	struct ixgbe_hw *hw = &adapter->hw;
1105 	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1106 	u16 reg_offset;
1107 
1108 	switch (hw->mac.type) {
1109 	case ixgbe_mac_82598EB:
1110 		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1111 		break;
1112 	case ixgbe_mac_82599EB:
1113 	case ixgbe_mac_X540:
1114 		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1115 		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1116 		break;
1117 	default:
1118 		/* for unknown hardware do not write register */
1119 		return;
1120 	}
1121 
1122 	/*
1123 	 * We can enable relaxed ordering for reads, but not writes when
1124 	 * DCA is enabled.  This is due to a known issue in some chipsets
1125 	 * which will cause the DCA tag to be cleared.
1126 	 */
1127 	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1128 		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1129 		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1130 
1131 	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1132 }
1133 
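/* Program the per-queue DCA_RXCTRL register so descriptor write-backs
 * for this Rx ring are tagged for @cpu.
 */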
1134 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1135 				struct ixgbe_ring *rx_ring,
1136 				int cpu)
1137 {
1138 	struct ixgbe_hw *hw = &adapter->hw;
1139 	u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
	u8 reg_idx = rx_ring->reg_idx;

1143 	switch (hw->mac.type) {
1144 	case ixgbe_mac_82599EB:
1145 	case ixgbe_mac_X540:
1146 		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1147 		break;
1148 	default:
1149 		break;
1150 	}
1151 
1152 	/*
1153 	 * We can enable relaxed ordering for reads, but not writes when
1154 	 * DCA is enabled.  This is due to a known issue in some chipsets
1155 	 * which will cause the DCA tag to be cleared.
1156 	 */
1157 	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1158 		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1159 
1160 	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1161 }
1162 
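/* Re-tag all rings owned by this vector when it has moved to a new CPU */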
1163 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1164 {
1165 	struct ixgbe_adapter *adapter = q_vector->adapter;
1166 	struct ixgbe_ring *ring;
1167 	int cpu = get_cpu();
1168 
1169 	if (q_vector->cpu == cpu)
1170 		goto out_no_update;
1171 
1172 	ixgbe_for_each_ring(ring, q_vector->tx)
1173 		ixgbe_update_tx_dca(adapter, ring, cpu);
1174 
1175 	ixgbe_for_each_ring(ring, q_vector->rx)
1176 		ixgbe_update_rx_dca(adapter, ring, cpu);
1177 
1178 	q_vector->cpu = cpu;
1179 out_no_update:
1180 	put_cpu();
1181 }
1182 
1183 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1184 {
1185 	int i;
1186 
1187 	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
1188 		return;
1189 
1190 	/* always use CB2 mode, difference is masked in the CB driver */
1191 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
1192 
1193 	for (i = 0; i < adapter->num_q_vectors; i++) {
1194 		adapter->q_vector[i]->cpu = -1;
1195 		ixgbe_update_dca(adapter->q_vector[i]);
1196 	}
1197 }
1198 
1199 static int __ixgbe_notify_dca(struct device *dev, void *data)
1200 {
1201 	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1202 	unsigned long event = *(unsigned long *)data;
1203 
1204 	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1205 		return 0;
1206 
1207 	switch (event) {
1208 	case DCA_PROVIDER_ADD:
1209 		/* if we're already enabled, don't do it again */
1210 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1211 			break;
1212 		if (dca_add_requester(dev) == 0) {
1213 			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1214 			ixgbe_setup_dca(adapter);
1215 			break;
1216 		}
1217 		/* Fall Through since DCA is disabled. */
1218 	case DCA_PROVIDER_REMOVE:
1219 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1220 			dca_remove_requester(dev);
1221 			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1222 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
1223 		}
1224 		break;
1225 	}
1226 
1227 	return 0;
1228 }
1229 
1230 #endif /* CONFIG_IXGBE_DCA */
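/* Copy the hardware RSS hash from the Rx descriptor into the skb */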
1231 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1232 				 union ixgbe_adv_rx_desc *rx_desc,
1233 				 struct sk_buff *skb)
1234 {
1235 	if (ring->netdev->features & NETIF_F_RXHASH)
1236 		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
1237 }
1238 
1239 #ifdef IXGBE_FCOE
1240 /**
1241  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1242  * @ring: structure containing ring specific data
1243  * @rx_desc: advanced rx descriptor
1244  *
1245  * Returns : true if it is FCoE pkt
1246  */
1247 static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1248 				    union ixgbe_adv_rx_desc *rx_desc)
1249 {
1250 	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1251 
1252 	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1253 	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1254 		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1255 			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1256 }
1257 
1258 #endif /* IXGBE_FCOE */
1259 /**
1260  * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
1261  * @ring: structure containing ring specific data
1262  * @rx_desc: current Rx descriptor being processed
1263  * @skb: skb currently being received and modified
1264  **/
1265 static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1266 				     union ixgbe_adv_rx_desc *rx_desc,
1267 				     struct sk_buff *skb)
1268 {
1269 	skb_checksum_none_assert(skb);
1270 
1271 	/* Rx csum disabled */
1272 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
1273 		return;
1274 
1275 	/* if IP and error */
1276 	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1277 	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1278 		ring->rx_stats.csum_err++;
1279 		return;
1280 	}
1281 
1282 	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1283 		return;
1284 
1285 	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1286 		__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1287 
1288 		/*
1289 		 * 82599 errata, UDP frames with a 0 checksum can be marked as
1290 		 * checksum errors.
1291 		 */
1292 		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1293 		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1294 			return;
1295 
1296 		ring->rx_stats.csum_err++;
1297 		return;
1298 	}
1299 
1300 	/* It must be a TCP or UDP packet with a valid checksum */
1301 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1302 }
1303 
1304 static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1305 {
1306 	rx_ring->next_to_use = val;
1307 
1308 	/* update next to alloc since we have filled the ring */
1309 	rx_ring->next_to_alloc = val;
1310 	/*
1311 	 * Force memory writes to complete before letting h/w
1312 	 * know there are new descriptors to fetch.  (Only
1313 	 * applicable for weak-ordered memory model archs,
1314 	 * such as IA-64).
1315 	 */
1316 	wmb();
1317 	writel(val, rx_ring->tail);
1318 }
1319 
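/* Allocate a receive page (if the buffer does not already hold one) and
 * map it for DMA.  Returns false, and counts the failure, if either the
 * allocation or the mapping fails.
 */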
1320 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1321 				    struct ixgbe_rx_buffer *bi)
1322 {
1323 	struct page *page = bi->page;
1324 	dma_addr_t dma = bi->dma;
1325 
1326 	/* since we are recycling buffers we should seldom need to alloc */
1327 	if (likely(dma))
1328 		return true;
1329 
1330 	/* alloc new page for storage */
1331 	if (likely(!page)) {
1332 		page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
1333 					 bi->skb, ixgbe_rx_pg_order(rx_ring));
1334 		if (unlikely(!page)) {
1335 			rx_ring->rx_stats.alloc_rx_page_failed++;
1336 			return false;
1337 		}
1338 		bi->page = page;
1339 	}
1340 
1341 	/* map page for use */
1342 	dma = dma_map_page(rx_ring->dev, page, 0,
1343 			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1344 
1345 	/*
1346 	 * if mapping failed free memory back to system since
1347 	 * there isn't much point in holding memory we can't use
1348 	 */
1349 	if (dma_mapping_error(rx_ring->dev, dma)) {
1350 		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
1351 		bi->page = NULL;
1352 
1353 		rx_ring->rx_stats.alloc_rx_page_failed++;
1354 		return false;
1355 	}
1356 
1357 	bi->dma = dma;
1358 	bi->page_offset = 0;
1359 
1360 	return true;
1361 }
1362 
1363 /**
1364  * ixgbe_alloc_rx_buffers - Replace used receive buffers
1365  * @rx_ring: ring to place buffers on
1366  * @cleaned_count: number of buffers to replace
1367  **/
1368 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1369 {
1370 	union ixgbe_adv_rx_desc *rx_desc;
1371 	struct ixgbe_rx_buffer *bi;
1372 	u16 i = rx_ring->next_to_use;
1373 
1374 	/* nothing to do */
1375 	if (!cleaned_count)
1376 		return;
1377 
1378 	rx_desc = IXGBE_RX_DESC(rx_ring, i);
1379 	bi = &rx_ring->rx_buffer_info[i];
1380 	i -= rx_ring->count;
1381 
1382 	do {
1383 		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1384 			break;
1385 
1386 		/*
1387 		 * Refresh the desc even if buffer_addrs didn't change
1388 		 * because each write-back erases this info.
1389 		 */
1390 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1391 
1392 		rx_desc++;
1393 		bi++;
1394 		i++;
1395 		if (unlikely(!i)) {
1396 			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1397 			bi = rx_ring->rx_buffer_info;
1398 			i -= rx_ring->count;
1399 		}
1400 
1401 		/* clear the hdr_addr for the next_to_use descriptor */
1402 		rx_desc->read.hdr_addr = 0;
1403 
1404 		cleaned_count--;
1405 	} while (cleaned_count);
1406 
1407 	i += rx_ring->count;
1408 
1409 	if (rx_ring->next_to_use != i)
1410 		ixgbe_release_rx_desc(rx_ring, i);
1411 }
1412 
1413 /**
1414  * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
1415  * @data: pointer to the start of the headers
1416  * @max_len: total length of section to find headers in
1417  *
1418  * This function is meant to determine the length of headers that will
1419  * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
1420  * motivation of doing this is to only perform one pull for IPv4 TCP
1421  * packets so that we can do basic things like calculating the gso_size
1422  * based on the average data per packet.
1423  **/
1424 static unsigned int ixgbe_get_headlen(unsigned char *data,
1425 				      unsigned int max_len)
1426 {
1427 	union {
1428 		unsigned char *network;
1429 		/* l2 headers */
1430 		struct ethhdr *eth;
1431 		struct vlan_hdr *vlan;
1432 		/* l3 headers */
1433 		struct iphdr *ipv4;
1434 		struct ipv6hdr *ipv6;
1435 	} hdr;
1436 	__be16 protocol;
1437 	u8 nexthdr = 0;	/* default to not TCP */
1438 	u8 hlen;
1439 
1440 	/* this should never happen, but better safe than sorry */
1441 	if (max_len < ETH_HLEN)
1442 		return max_len;
1443 
1444 	/* initialize network frame pointer */
1445 	hdr.network = data;
1446 
1447 	/* set first protocol and move network header forward */
1448 	protocol = hdr.eth->h_proto;
1449 	hdr.network += ETH_HLEN;
1450 
1451 	/* handle any vlan tag if present */
1452 	if (protocol == __constant_htons(ETH_P_8021Q)) {
1453 		if ((hdr.network - data) > (max_len - VLAN_HLEN))
1454 			return max_len;
1455 
1456 		protocol = hdr.vlan->h_vlan_encapsulated_proto;
1457 		hdr.network += VLAN_HLEN;
1458 	}
1459 
1460 	/* handle L3 protocols */
1461 	if (protocol == __constant_htons(ETH_P_IP)) {
1462 		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
1463 			return max_len;
1464 
1465 		/* access ihl as a u8 to avoid unaligned access on ia64 */
1466 		hlen = (hdr.network[0] & 0x0F) << 2;
1467 
1468 		/* verify hlen meets minimum size requirements */
1469 		if (hlen < sizeof(struct iphdr))
1470 			return hdr.network - data;
1471 
1472 		/* record next protocol if header is present */
1473 		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
1474 			nexthdr = hdr.ipv4->protocol;
1475 	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
1476 		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
1477 			return max_len;
1478 
1479 		/* record next protocol */
1480 		nexthdr = hdr.ipv6->nexthdr;
1481 		hlen = sizeof(struct ipv6hdr);
1482 #ifdef IXGBE_FCOE
1483 	} else if (protocol == __constant_htons(ETH_P_FCOE)) {
1484 		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
1485 			return max_len;
1486 		hlen = FCOE_HEADER_LEN;
1487 #endif
1488 	} else {
1489 		return hdr.network - data;
1490 	}
1491 
1492 	/* relocate pointer to start of L4 header */
1493 	hdr.network += hlen;
1494 
1495 	/* finally sort out TCP/UDP */
1496 	if (nexthdr == IPPROTO_TCP) {
1497 		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
1498 			return max_len;
1499 
1500 		/* access doff as a u8 to avoid unaligned access on ia64 */
1501 		hlen = (hdr.network[12] & 0xF0) >> 2;
1502 
1503 		/* verify hlen meets minimum size requirements */
1504 		if (hlen < sizeof(struct tcphdr))
1505 			return hdr.network - data;
1506 
1507 		hdr.network += hlen;
1508 	} else if (nexthdr == IPPROTO_UDP) {
1509 		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
1510 			return max_len;
1511 
1512 		hdr.network += sizeof(struct udphdr);
1513 	}
1514 
1515 	/*
1516 	 * If everything has gone correctly hdr.network should be the
1517 	 * data section of the packet and will be the end of the header.
1518 	 * If not then it probably represents the end of the last recognized
1519 	 * header.
1520 	 */
1521 	if ((hdr.network - data) < max_len)
1522 		return hdr.network - data;
1523 	else
1524 		return max_len;
1525 }
1526 
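/* Derive gso_size for an RSC frame from the payload length and the number
 * of coalesced descriptors so the stack sees a sensible MSS.
 */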
1527 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1528 				   struct sk_buff *skb)
1529 {
1530 	u16 hdr_len = skb_headlen(skb);
1531 
1532 	/* set gso_size to avoid messing up TCP MSS */
1533 	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1534 						 IXGBE_CB(skb)->append_cnt);
1535 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1536 }
1537 
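/* Update RSC statistics and gso fields for a coalesced frame; does nothing
 * for frames that were not assembled by RSC.
 */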
1538 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1539 				   struct sk_buff *skb)
1540 {
1541 	/* if append_cnt is 0 then frame is not RSC */
1542 	if (!IXGBE_CB(skb)->append_cnt)
1543 		return;
1544 
1545 	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1546 	rx_ring->rx_stats.rsc_flush++;
1547 
1548 	ixgbe_set_rsc_gso_size(rx_ring, skb);
1549 
1550 	/* gso_size is computed using append_cnt so always clear it last */
1551 	IXGBE_CB(skb)->append_cnt = 0;
1552 }
1553 
1554 /**
1555  * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
1556  * @rx_ring: rx descriptor ring packet is being transacted on
1557  * @rx_desc: pointer to the EOP Rx descriptor
1558  * @skb: pointer to current skb being populated
1559  *
1560  * This function checks the ring, descriptor, and packet information in
1561  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
1562  * other fields within the skb.
1563  **/
1564 static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1565 				     union ixgbe_adv_rx_desc *rx_desc,
1566 				     struct sk_buff *skb)
1567 {
1568 	struct net_device *dev = rx_ring->netdev;
1569 
1570 	ixgbe_update_rsc_stats(rx_ring, skb);
1571 
1572 	ixgbe_rx_hash(rx_ring, rx_desc, skb);
1573 
1574 	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1575 
1576 	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1577 
1578 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1579 	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1580 		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1581 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1582 	}
1583 
1584 	skb_record_rx_queue(skb, rx_ring->queue_index);
1585 
1586 	skb->protocol = eth_type_trans(skb, dev);
1587 }
1588 
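/* Hand a completed skb to the stack: directly when busy polling, through
 * GRO in the normal case, or via netif_rx under netpoll.
 */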
1589 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1590 			 struct sk_buff *skb)
1591 {
1592 	struct ixgbe_adapter *adapter = q_vector->adapter;
1593 
1594 	if (ixgbe_qv_busy_polling(q_vector))
1595 		netif_receive_skb(skb);
1596 	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1597 		napi_gro_receive(&q_vector->napi, skb);
1598 	else
1599 		netif_rx(skb);
1600 }
1601 
1602 /**
1603  * ixgbe_is_non_eop - process handling of non-EOP buffers
1604  * @rx_ring: Rx ring being processed
1605  * @rx_desc: Rx descriptor for current buffer
1606  * @skb: Current socket buffer containing buffer in progress
1607  *
1608  * This function updates next to clean.  If the buffer is an EOP buffer
1609  * this function exits returning false, otherwise it will place the
1610  * sk_buff in the next buffer to be chained and return true indicating
1611  * that this is in fact a non-EOP buffer.
1612  **/
1613 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1614 			     union ixgbe_adv_rx_desc *rx_desc,
1615 			     struct sk_buff *skb)
1616 {
1617 	u32 ntc = rx_ring->next_to_clean + 1;
1618 
1619 	/* fetch, update, and store next to clean */
1620 	ntc = (ntc < rx_ring->count) ? ntc : 0;
1621 	rx_ring->next_to_clean = ntc;
1622 
1623 	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1624 
1625 	/* update RSC append count if present */
1626 	if (ring_is_rsc_enabled(rx_ring)) {
1627 		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1628 				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1629 
1630 		if (unlikely(rsc_enabled)) {
1631 			u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1632 
1633 			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1634 			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1635 
1636 			/* update ntc based on RSC value */
1637 			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1638 			ntc &= IXGBE_RXDADV_NEXTP_MASK;
1639 			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1640 		}
1641 	}
1642 
1643 	/* if we are the last buffer then there is nothing else to do */
1644 	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1645 		return false;
1646 
1647 	/* place skb in next buffer to be received */
1648 	rx_ring->rx_buffer_info[ntc].skb = skb;
1649 	rx_ring->rx_stats.non_eop_descs++;
1650 
1651 	return true;
1652 }
1653 
1654 /**
1655  * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1656  * @rx_ring: rx descriptor ring packet is being transacted on
1657  * @skb: pointer to current skb being adjusted
1658  *
1659  * This function is an ixgbe specific version of __pskb_pull_tail.  The
1660  * main difference between this version and the original function is that
1661  * this function can make several assumptions about the state of things
1662  * that allow for significant optimizations versus the standard function.
1663  * As a result we can do things like drop a frag and maintain an accurate
1664  * truesize for the skb.
1665  */
1666 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1667 			    struct sk_buff *skb)
1668 {
1669 	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1670 	unsigned char *va;
1671 	unsigned int pull_len;
1672 
	/*
	 * it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
1678 	va = skb_frag_address(frag);
1679 
	/*
	 * we need the linear header to contain at least ETH_HLEN bytes, or
	 * 60 bytes if skb->len is less than 60, so that skb_pad can work.
	 */
1684 	pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
1685 
1686 	/* align pull length to size of long to optimize memcpy performance */
1687 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1688 
1689 	/* update all of the pointers */
1690 	skb_frag_size_sub(frag, pull_len);
1691 	frag->page_offset += pull_len;
1692 	skb->data_len -= pull_len;
1693 	skb->tail += pull_len;
1694 }
1695 
1696 /**
1697  * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
1698  * @rx_ring: rx descriptor ring packet is being transacted on
1699  * @skb: pointer to current skb being updated
1700  *
1701  * This function provides a basic DMA sync up for the first fragment of an
1702  * skb.  The reason for doing this is that the first fragment cannot be
1703  * unmapped until we have reached the end of packet descriptor for a buffer
1704  * chain.
1705  */
1706 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1707 				struct sk_buff *skb)
1708 {
1709 	/* if the page was released unmap it, else just sync our portion */
1710 	if (unlikely(IXGBE_CB(skb)->page_released)) {
1711 		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1712 			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1713 		IXGBE_CB(skb)->page_released = false;
1714 	} else {
1715 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1716 
1717 		dma_sync_single_range_for_cpu(rx_ring->dev,
1718 					      IXGBE_CB(skb)->dma,
1719 					      frag->page_offset,
1720 					      ixgbe_rx_bufsz(rx_ring),
1721 					      DMA_FROM_DEVICE);
1722 	}
1723 	IXGBE_CB(skb)->dma = 0;
1724 }
1725 
1726 /**
1727  * ixgbe_cleanup_headers - Correct corrupted or empty headers
1728  * @rx_ring: rx descriptor ring packet is being transacted on
1729  * @rx_desc: pointer to the EOP Rx descriptor
1730  * @skb: pointer to current skb being fixed
1731  *
1732  * Check for corrupted packet headers caused by senders on the local L2
1733  * embedded NIC switch not setting up their Tx Descriptors right.  These
1734  * should be very rare.
1735  *
1736  * Also address the case where we are pulling data in on pages only
1737  * and as such no data is present in the skb header.
1738  *
1739  * In addition if skb is not at least 60 bytes we need to pad it so that
1740  * it is large enough to qualify as a valid Ethernet frame.
1741  *
1742  * Returns true if an error was encountered and skb was freed.
1743  **/
1744 static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1745 				  union ixgbe_adv_rx_desc *rx_desc,
1746 				  struct sk_buff *skb)
1747 {
1748 	struct net_device *netdev = rx_ring->netdev;
1749 
1750 	/* verify that the packet does not have any known errors */
1751 	if (unlikely(ixgbe_test_staterr(rx_desc,
1752 					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1753 	    !(netdev->features & NETIF_F_RXALL))) {
1754 		dev_kfree_skb_any(skb);
1755 		return true;
1756 	}
1757 
1758 	/* place header in linear portion of buffer */
1759 	if (skb_is_nonlinear(skb))
1760 		ixgbe_pull_tail(rx_ring, skb);
1761 
1762 #ifdef IXGBE_FCOE
1763 	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
1764 	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1765 		return false;
1766 
1767 #endif
1768 	/* if skb_pad returns an error the skb was freed */
1769 	if (unlikely(skb->len < 60)) {
1770 		int pad_len = 60 - skb->len;
1771 
1772 		if (skb_pad(skb, pad_len))
1773 			return true;
1774 		__skb_put(skb, pad_len);
1775 	}
1776 
1777 	return false;
1778 }
1779 
1780 /**
1781  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
1782  * @rx_ring: rx descriptor ring to store buffers on
1783  * @old_buff: donor buffer to have page reused
1784  *
1785  * Synchronizes page for reuse by the adapter
1786  **/
1787 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1788 				struct ixgbe_rx_buffer *old_buff)
1789 {
1790 	struct ixgbe_rx_buffer *new_buff;
1791 	u16 nta = rx_ring->next_to_alloc;
1792 
1793 	new_buff = &rx_ring->rx_buffer_info[nta];
1794 
1795 	/* update, and store next to alloc */
1796 	nta++;
1797 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1798 
1799 	/* transfer page from old buffer to new buffer */
1800 	new_buff->page = old_buff->page;
1801 	new_buff->dma = old_buff->dma;
1802 	new_buff->page_offset = old_buff->page_offset;
1803 
1804 	/* sync the buffer for use by the device */
1805 	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
1806 					 new_buff->page_offset,
1807 					 ixgbe_rx_bufsz(rx_ring),
1808 					 DMA_FROM_DEVICE);
1809 }
1810 
1811 /**
1812  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
1813  * @rx_ring: rx descriptor ring to transact packets on
1814  * @rx_buffer: buffer containing page to add
1815  * @rx_desc: descriptor containing length of buffer written by hardware
1816  * @skb: sk_buff to place the data into
1817  *
1818  * This function will add the data contained in rx_buffer->page to the skb.
1819  * This is done either through a direct copy if the data in the buffer is
1820  * less than the skb header size, otherwise it will just attach the page as
1821  * a frag to the skb.
1822  *
1823  * The function will then update the page offset if necessary and return
1824  * true if the buffer can be reused by the adapter.
1825  **/
1826 static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1827 			      struct ixgbe_rx_buffer *rx_buffer,
1828 			      union ixgbe_adv_rx_desc *rx_desc,
1829 			      struct sk_buff *skb)
1830 {
1831 	struct page *page = rx_buffer->page;
1832 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
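	/*
	 * with PAGE_SIZE < 8192 the page is split into two fixed-size
	 * buffers that are simply flipped between; with larger pages we
	 * instead walk forward through the page in cache-line aligned
	 * chunks until the usable space is exhausted
	 */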
1833 #if (PAGE_SIZE < 8192)
1834 	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
1835 #else
1836 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1837 	unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
1838 				   ixgbe_rx_bufsz(rx_ring);
1839 #endif
1840 
1841 	if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1842 		unsigned char *va = page_address(page) + rx_buffer->page_offset;
1843 
1844 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1845 
1846 		/* we can reuse buffer as-is, just make sure it is local */
1847 		if (likely(page_to_nid(page) == numa_node_id()))
1848 			return true;
1849 
1850 		/* this page cannot be reused so discard it */
1851 		put_page(page);
1852 		return false;
1853 	}
1854 
1855 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1856 			rx_buffer->page_offset, size, truesize);
1857 
1858 	/* avoid re-using remote pages */
1859 	if (unlikely(page_to_nid(page) != numa_node_id()))
1860 		return false;
1861 
1862 #if (PAGE_SIZE < 8192)
1863 	/* if we are only owner of page we can reuse it */
1864 	if (unlikely(page_count(page) != 1))
1865 		return false;
1866 
1867 	/* flip page offset to other buffer */
1868 	rx_buffer->page_offset ^= truesize;
1869 
	/*
	 * since we are the only owner of the page and we need to
	 * increment its reference count, just set the value to 2 in
	 * order to avoid an unnecessary locked operation
	 */
1875 	atomic_set(&page->_count, 2);
1876 #else
1877 	/* move offset up to the next cache line */
1878 	rx_buffer->page_offset += truesize;
1879 
1880 	if (rx_buffer->page_offset > last_offset)
1881 		return false;
1882 
1883 	/* bump ref count on page before it is given to the stack */
1884 	get_page(page);
1885 #endif
1886 
1887 	return true;
1888 }
1889 
1890 static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
1891 					     union ixgbe_adv_rx_desc *rx_desc)
1892 {
1893 	struct ixgbe_rx_buffer *rx_buffer;
1894 	struct sk_buff *skb;
1895 	struct page *page;
1896 
1897 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1898 	page = rx_buffer->page;
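	/* prefetch struct page for write; we may update its refcount */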
1899 	prefetchw(page);
1900 
1901 	skb = rx_buffer->skb;
1902 
1903 	if (likely(!skb)) {
1904 		void *page_addr = page_address(page) +
1905 				  rx_buffer->page_offset;
1906 
1907 		/* prefetch first cache line of first page */
1908 		prefetch(page_addr);
1909 #if L1_CACHE_BYTES < 128
1910 		prefetch(page_addr + L1_CACHE_BYTES);
1911 #endif
1912 
1913 		/* allocate a skb to store the frags */
1914 		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1915 						IXGBE_RX_HDR_SIZE);
1916 		if (unlikely(!skb)) {
1917 			rx_ring->rx_stats.alloc_rx_buff_failed++;
1918 			return NULL;
1919 		}
1920 
		/*
		 * we will be copying the header into skb->data in
		 * ixgbe_pull_tail so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
1926 		prefetchw(skb->data);
1927 
1928 		/*
1929 		 * Delay unmapping of the first packet. It carries the
1930 		 * header information, HW may still access the header
1931 		 * after the writeback.  Only unmap it when EOP is
1932 		 * reached
1933 		 */
1934 		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1935 			goto dma_sync;
1936 
1937 		IXGBE_CB(skb)->dma = rx_buffer->dma;
1938 	} else {
1939 		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
1940 			ixgbe_dma_sync_frag(rx_ring, skb);
1941 
1942 dma_sync:
1943 		/* we are reusing so sync this buffer for CPU use */
1944 		dma_sync_single_range_for_cpu(rx_ring->dev,
1945 					      rx_buffer->dma,
1946 					      rx_buffer->page_offset,
1947 					      ixgbe_rx_bufsz(rx_ring),
1948 					      DMA_FROM_DEVICE);
1949 	}
1950 
1951 	/* pull page into skb */
1952 	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
1953 		/* hand second half of page back to the ring */
1954 		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
1955 	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
1956 		/* the page has been released from the ring */
1957 		IXGBE_CB(skb)->page_released = true;
1958 	} else {
1959 		/* we are not reusing the buffer so unmap it */
1960 		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
1961 			       ixgbe_rx_pg_size(rx_ring),
1962 			       DMA_FROM_DEVICE);
1963 	}
1964 
1965 	/* clear contents of buffer_info */
1966 	rx_buffer->skb = NULL;
1967 	rx_buffer->dma = 0;
1968 	rx_buffer->page = NULL;
1969 
1970 	return skb;
1971 }
1972 
1973 /**
1974  * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1975  * @q_vector: structure containing interrupt and ring information
1976  * @rx_ring: rx descriptor ring to transact packets on
1977  * @budget: Total limit on number of packets to process
1978  *
1979  * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage of this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
1983  *
1984  * Returns amount of work completed
1985  **/
1986 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1987 			       struct ixgbe_ring *rx_ring,
1988 			       const int budget)
1989 {
1990 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1991 #ifdef IXGBE_FCOE
1992 	struct ixgbe_adapter *adapter = q_vector->adapter;
1993 	int ddp_bytes;
1994 	unsigned int mss = 0;
1995 #endif /* IXGBE_FCOE */
1996 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
1997 
1998 	do {
1999 		union ixgbe_adv_rx_desc *rx_desc;
2000 		struct sk_buff *skb;
2001 
2002 		/* return some buffers to hardware, one at a time is too slow */
2003 		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2004 			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2005 			cleaned_count = 0;
2006 		}
2007 
2008 		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2009 
2010 		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
2011 			break;
2012 
2013 		/*
2014 		 * This memory barrier is needed to keep us from reading
2015 		 * any other fields out of the rx_desc until we know the
2016 		 * RXD_STAT_DD bit is set
2017 		 */
2018 		rmb();
2019 
2020 		/* retrieve a buffer from the ring */
2021 		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
2022 
2023 		/* exit if we failed to retrieve a buffer */
2024 		if (!skb)
2025 			break;
2026 
2027 		cleaned_count++;
2028 
2029 		/* place incomplete frames back on ring for completion */
2030 		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2031 			continue;
2032 
2033 		/* verify the packet layout is correct */
2034 		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2035 			continue;
2036 
2037 		/* probably a little skewed due to removing CRC */
2038 		total_rx_bytes += skb->len;
2039 
2040 		/* populate checksum, timestamp, VLAN, and protocol */
2041 		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2042 
2043 #ifdef IXGBE_FCOE
2044 		/* if ddp, not passing to ULD unless for FCP_RSP or error */
2045 		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2046 			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2047 			/* include DDPed FCoE data */
2048 			if (ddp_bytes > 0) {
2049 				if (!mss) {
2050 					mss = rx_ring->netdev->mtu -
2051 						sizeof(struct fcoe_hdr) -
2052 						sizeof(struct fc_frame_header) -
2053 						sizeof(struct fcoe_crc_eof);
2054 					if (mss > 512)
2055 						mss &= ~511;
2056 				}
2057 				total_rx_bytes += ddp_bytes;
2058 				total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2059 								 mss);
2060 			}
2061 			if (!ddp_bytes) {
2062 				dev_kfree_skb_any(skb);
2063 				continue;
2064 			}
2065 		}
2066 
2067 #endif /* IXGBE_FCOE */
2068 		skb_mark_napi_id(skb, &q_vector->napi);
2069 		ixgbe_rx_skb(q_vector, skb);
2070 
2071 		/* update budget accounting */
2072 		total_rx_packets++;
2073 	} while (likely(total_rx_packets < budget));
2074 
2075 	u64_stats_update_begin(&rx_ring->syncp);
2076 	rx_ring->stats.packets += total_rx_packets;
2077 	rx_ring->stats.bytes += total_rx_bytes;
2078 	u64_stats_update_end(&rx_ring->syncp);
2079 	q_vector->rx.total_packets += total_rx_packets;
2080 	q_vector->rx.total_bytes += total_rx_bytes;
2081 
2082 	if (cleaned_count)
2083 		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2084 
2085 	return total_rx_packets;
2086 }
2087 
2088 #ifdef CONFIG_NET_RX_BUSY_POLL
2089 /* must be called with local_bh_disable()d */
2090 static int ixgbe_low_latency_recv(struct napi_struct *napi)
2091 {
2092 	struct ixgbe_q_vector *q_vector =
2093 			container_of(napi, struct ixgbe_q_vector, napi);
2094 	struct ixgbe_adapter *adapter = q_vector->adapter;
2095 	struct ixgbe_ring  *ring;
2096 	int found = 0;
2097 
2098 	if (test_bit(__IXGBE_DOWN, &adapter->state))
2099 		return LL_FLUSH_FAILED;
2100 
2101 	if (!ixgbe_qv_lock_poll(q_vector))
2102 		return LL_FLUSH_BUSY;
2103 
2104 	ixgbe_for_each_ring(ring, q_vector->rx) {
2105 		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
2106 #ifdef BP_EXTENDED_STATS
2107 		if (found)
2108 			ring->stats.cleaned += found;
2109 		else
2110 			ring->stats.misses++;
2111 #endif
2112 		if (found)
2113 			break;
2114 	}
2115 
2116 	ixgbe_qv_unlock_poll(q_vector);
2117 
2118 	return found;
2119 }
2120 #endif	/* CONFIG_NET_RX_BUSY_POLL */
2121 
2122 /**
2123  * ixgbe_configure_msix - Configure MSI-X hardware
2124  * @adapter: board private structure
2125  *
2126  * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
2127  * interrupts.
2128  **/
2129 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2130 {
2131 	struct ixgbe_q_vector *q_vector;
2132 	int v_idx;
2133 	u32 mask;
2134 
2135 	/* Populate MSIX to EITR Select */
2136 	if (adapter->num_vfs > 32) {
2137 		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2138 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2139 	}
2140 
2141 	/*
2142 	 * Populate the IVAR table and set the ITR values to the
2143 	 * corresponding register.
2144 	 */
2145 	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2146 		struct ixgbe_ring *ring;
2147 		q_vector = adapter->q_vector[v_idx];
2148 
2149 		ixgbe_for_each_ring(ring, q_vector->rx)
2150 			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2151 
2152 		ixgbe_for_each_ring(ring, q_vector->tx)
2153 			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2154 
2155 		ixgbe_write_eitr(q_vector);
2156 	}
2157 
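	/*
	 * v_idx is now one past the last queue vector, i.e. the index of
	 * the slow-path "other" vector; route the non-queue causes to it
	 */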
2158 	switch (adapter->hw.mac.type) {
2159 	case ixgbe_mac_82598EB:
2160 		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2161 			       v_idx);
2162 		break;
2163 	case ixgbe_mac_82599EB:
2164 	case ixgbe_mac_X540:
2165 		ixgbe_set_ivar(adapter, -1, 1, v_idx);
2166 		break;
2167 	default:
2168 		break;
2169 	}
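	/* throttle the "other" vector to roughly 2000 interrupts/sec */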
2170 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2171 
2172 	/* set up to autoclear timer, and the vectors */
2173 	mask = IXGBE_EIMS_ENABLE_MASK;
2174 	mask &= ~(IXGBE_EIMS_OTHER |
2175 		  IXGBE_EIMS_MAILBOX |
2176 		  IXGBE_EIMS_LSC);
2177 
2178 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2179 }
2180 
2181 enum latency_range {
2182 	lowest_latency = 0,
2183 	low_latency = 1,
2184 	bulk_latency = 2,
2185 	latency_invalid = 255
2186 };
2187 
2188 /**
2189  * ixgbe_update_itr - update the dynamic ITR value based on statistics
2190  * @q_vector: structure containing interrupt and ring information
2191  * @ring_container: structure containing ring performance data
2192  *
2193  *      Stores a new ITR value based on packets and byte
2194  *      counts during the last interrupt.  The advantage of per interrupt
2195  *      computation is faster updates and more accurate ITR for the current
2196  *      traffic pattern.  Constants in this function were computed
2197  *      based on theoretical maximum wire speed and thresholds were set based
2198  *      on testing data as well as attempting to minimize response time
2199  *      while increasing bulk throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c).
2202  **/
2203 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2204 			     struct ixgbe_ring_container *ring_container)
2205 {
2206 	int bytes = ring_container->total_bytes;
2207 	int packets = ring_container->total_packets;
2208 	u32 timepassed_us;
2209 	u64 bytes_perint;
2210 	u8 itr_setting = ring_container->itr;
2211 
2212 	if (packets == 0)
2213 		return;
2214 
2215 	/* simple throttlerate management
2216 	 *   0-10MB/s   lowest (100000 ints/s)
2217 	 *  10-20MB/s   low    (20000 ints/s)
2218 	 *  20-1249MB/s bulk   (8000 ints/s)
2219 	 */
2220 	/* what was last interrupt timeslice? */
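	/* the stored EITR-format value is roughly 4x the interval in usecs */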
2221 	timepassed_us = q_vector->itr >> 2;
2222 	if (timepassed_us == 0)
2223 		return;
2224 
2225 	bytes_perint = bytes / timepassed_us; /* bytes/usec */
2226 
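	/* 10 and 20 bytes/usec match the 10MB/s and 20MB/s boundaries above */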
2227 	switch (itr_setting) {
2228 	case lowest_latency:
2229 		if (bytes_perint > 10)
2230 			itr_setting = low_latency;
2231 		break;
2232 	case low_latency:
2233 		if (bytes_perint > 20)
2234 			itr_setting = bulk_latency;
2235 		else if (bytes_perint <= 10)
2236 			itr_setting = lowest_latency;
2237 		break;
2238 	case bulk_latency:
2239 		if (bytes_perint <= 20)
2240 			itr_setting = low_latency;
2241 		break;
2242 	}
2243 
2244 	/* clear work counters since we have the values we need */
2245 	ring_container->total_bytes = 0;
2246 	ring_container->total_packets = 0;
2247 
2248 	/* write updated itr to ring container */
2249 	ring_container->itr = itr_setting;
2250 }
2251 
2252 /**
2253  * ixgbe_write_eitr - write EITR register in hardware specific way
2254  * @q_vector: structure containing interrupt and ring information
2255  *
2256  * This function is made to be called by ethtool and by the driver
2257  * when it needs to update EITR registers at runtime.  Hardware
2258  * specific quirks/differences are taken care of here.
2259  */
2260 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2261 {
2262 	struct ixgbe_adapter *adapter = q_vector->adapter;
2263 	struct ixgbe_hw *hw = &adapter->hw;
2264 	int v_idx = q_vector->v_idx;
2265 	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2266 
2267 	switch (adapter->hw.mac.type) {
2268 	case ixgbe_mac_82598EB:
2269 		/* must write high and low 16 bits to reset counter */
2270 		itr_reg |= (itr_reg << 16);
2271 		break;
2272 	case ixgbe_mac_82599EB:
2273 	case ixgbe_mac_X540:
		/*
		 * set the WDIS bit so this write does not clear the timer
		 * bits and cause an immediate assertion of the interrupt
		 */
2278 		itr_reg |= IXGBE_EITR_CNT_WDIS;
2279 		break;
2280 	default:
2281 		break;
2282 	}
2283 	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2284 }
2285 
2286 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2287 {
2288 	u32 new_itr = q_vector->itr;
2289 	u8 current_itr;
2290 
2291 	ixgbe_update_itr(q_vector, &q_vector->tx);
2292 	ixgbe_update_itr(q_vector, &q_vector->rx);
2293 
2294 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2295 
2296 	switch (current_itr) {
2297 	/* counts and packets in update_itr are dependent on these numbers */
2298 	case lowest_latency:
2299 		new_itr = IXGBE_100K_ITR;
2300 		break;
2301 	case low_latency:
2302 		new_itr = IXGBE_20K_ITR;
2303 		break;
2304 	case bulk_latency:
2305 		new_itr = IXGBE_8K_ITR;
2306 		break;
2307 	default:
2308 		break;
2309 	}
2310 
2311 	if (new_itr != q_vector->itr) {
2312 		/* do an exponential smoothing */
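		/* (weights the previous interrupt rate 9:1 over the new one) */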
2313 		new_itr = (10 * new_itr * q_vector->itr) /
2314 			  ((9 * new_itr) + q_vector->itr);
2315 
2316 		/* save the algorithm value here */
2317 		q_vector->itr = new_itr;
2318 
2319 		ixgbe_write_eitr(q_vector);
2320 	}
2321 }
2322 
2323 /**
2324  * ixgbe_check_overtemp_subtask - check for over temperature
2325  * @adapter: pointer to adapter
2326  **/
2327 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2328 {
2329 	struct ixgbe_hw *hw = &adapter->hw;
2330 	u32 eicr = adapter->interrupt_event;
2331 
2332 	if (test_bit(__IXGBE_DOWN, &adapter->state))
2333 		return;
2334 
2335 	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2336 	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2337 		return;
2338 
2339 	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2340 
2341 	switch (hw->device_id) {
2342 	case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * Since the warning interrupt is shared by both ports we
		 * cannot tell whether it was meant for this port, and we
		 * may have missed the interrupt entirely, so always check
		 * whether we also got an LSC.
		 */
2350 		if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
2351 		    !(eicr & IXGBE_EICR_LSC))
2352 			return;
2353 
2354 		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2355 			u32 speed;
2356 			bool link_up = false;
2357 
2358 			hw->mac.ops.check_link(hw, &speed, &link_up, false);
2359 
2360 			if (link_up)
2361 				return;
2362 		}
2363 
2364 		/* Check if this is not due to overtemp */
2365 		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2366 			return;
2367 
2368 		break;
2369 	default:
2370 		if (!(eicr & IXGBE_EICR_GPI_SDP0))
2371 			return;
2372 		break;
2373 	}
2374 	e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
2376 	       "Restart the computer. If the problem persists, "
2377 	       "power off the system and replace the adapter\n");
2378 
2379 	adapter->interrupt_event = 0;
2380 }
2381 
2382 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2383 {
2384 	struct ixgbe_hw *hw = &adapter->hw;
2385 
2386 	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2387 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
2388 		e_crit(probe, "Fan has stopped, replace the adapter\n");
2389 		/* write to clear the interrupt */
2390 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2391 	}
2392 }
2393 
2394 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2395 {
2396 	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2397 		return;
2398 
2399 	switch (adapter->hw.mac.type) {
2400 	case ixgbe_mac_82599EB:
2401 		/*
2402 		 * Need to check link state so complete overtemp check
2403 		 * on service task
2404 		 */
2405 		if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
2406 		    (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2407 			adapter->interrupt_event = eicr;
2408 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2409 			ixgbe_service_event_schedule(adapter);
2410 			return;
2411 		}
2412 		return;
2413 	case ixgbe_mac_X540:
2414 		if (!(eicr & IXGBE_EICR_TS))
2415 			return;
2416 		break;
2417 	default:
2418 		return;
2419 	}
2420 
2421 	e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
2423 	       "Restart the computer. If the problem persists, "
2424 	       "power off the system and replace the adapter\n");
2425 }
2426 
2427 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2428 {
2429 	struct ixgbe_hw *hw = &adapter->hw;
2430 
2431 	if (eicr & IXGBE_EICR_GPI_SDP2) {
2432 		/* Clear the interrupt */
2433 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
2434 		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2435 			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2436 			ixgbe_service_event_schedule(adapter);
2437 		}
2438 	}
2439 
2440 	if (eicr & IXGBE_EICR_GPI_SDP1) {
2441 		/* Clear the interrupt */
2442 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2443 		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2444 			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2445 			ixgbe_service_event_schedule(adapter);
2446 		}
2447 	}
2448 }
2449 
2450 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2451 {
2452 	struct ixgbe_hw *hw = &adapter->hw;
2453 
2454 	adapter->lsc_int++;
2455 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2456 	adapter->link_check_timeout = jiffies;
2457 	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2458 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2459 		IXGBE_WRITE_FLUSH(hw);
2460 		ixgbe_service_event_schedule(adapter);
2461 	}
2462 }
2463 
2464 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2465 					   u64 qmask)
2466 {
2467 	u32 mask;
2468 	struct ixgbe_hw *hw = &adapter->hw;
2469 
2470 	switch (hw->mac.type) {
2471 	case ixgbe_mac_82598EB:
2472 		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2473 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2474 		break;
2475 	case ixgbe_mac_82599EB:
2476 	case ixgbe_mac_X540:
2477 		mask = (qmask & 0xFFFFFFFF);
2478 		if (mask)
2479 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2480 		mask = (qmask >> 32);
2481 		if (mask)
2482 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2483 		break;
2484 	default:
2485 		break;
2486 	}
2487 	/* skip the flush */
2488 }
2489 
2490 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2491 					    u64 qmask)
2492 {
2493 	u32 mask;
2494 	struct ixgbe_hw *hw = &adapter->hw;
2495 
2496 	switch (hw->mac.type) {
2497 	case ixgbe_mac_82598EB:
2498 		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2499 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2500 		break;
2501 	case ixgbe_mac_82599EB:
2502 	case ixgbe_mac_X540:
2503 		mask = (qmask & 0xFFFFFFFF);
2504 		if (mask)
2505 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2506 		mask = (qmask >> 32);
2507 		if (mask)
2508 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2509 		break;
2510 	default:
2511 		break;
2512 	}
2513 	/* skip the flush */
2514 }
2515 
2516 /**
2517  * ixgbe_irq_enable - Enable default interrupt generation settings
2518  * @adapter: board private structure
2519  **/
2520 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2521 				    bool flush)
2522 {
2523 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2524 
2525 	/* don't reenable LSC while waiting for link */
2526 	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2527 		mask &= ~IXGBE_EIMS_LSC;
2528 
2529 	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2530 		switch (adapter->hw.mac.type) {
2531 		case ixgbe_mac_82599EB:
2532 			mask |= IXGBE_EIMS_GPI_SDP0;
2533 			break;
2534 		case ixgbe_mac_X540:
2535 			mask |= IXGBE_EIMS_TS;
2536 			break;
2537 		default:
2538 			break;
2539 		}
2540 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2541 		mask |= IXGBE_EIMS_GPI_SDP1;
2542 	switch (adapter->hw.mac.type) {
2543 	case ixgbe_mac_82599EB:
2544 		mask |= IXGBE_EIMS_GPI_SDP1;
2545 		mask |= IXGBE_EIMS_GPI_SDP2;
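		/* Fall through */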
2546 	case ixgbe_mac_X540:
2547 		mask |= IXGBE_EIMS_ECC;
2548 		mask |= IXGBE_EIMS_MAILBOX;
2549 		break;
2550 	default:
2551 		break;
2552 	}
2553 
2554 	if (adapter->hw.mac.type == ixgbe_mac_X540)
2555 		mask |= IXGBE_EIMS_TIMESYNC;
2556 
2557 	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2558 	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2559 		mask |= IXGBE_EIMS_FLOW_DIR;
2560 
2561 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2562 	if (queues)
2563 		ixgbe_irq_enable_queues(adapter, ~0);
2564 	if (flush)
2565 		IXGBE_WRITE_FLUSH(&adapter->hw);
2566 }
2567 
2568 static irqreturn_t ixgbe_msix_other(int irq, void *data)
2569 {
2570 	struct ixgbe_adapter *adapter = data;
2571 	struct ixgbe_hw *hw = &adapter->hw;
2572 	u32 eicr;
2573 
2574 	/*
2575 	 * Workaround for Silicon errata.  Use clear-by-write instead
2576 	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which will later be done
	 * with the write to EICR.
2579 	 */
2580 	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2581 
2582 	/* The lower 16bits of the EICR register are for the queue interrupts
	 * which should be masked here in order to not accidentally clear them if
2584 	 * the bits are high when ixgbe_msix_other is called. There is a race
2585 	 * condition otherwise which results in possible performance loss
2586 	 * especially if the ixgbe_msix_other interrupt is triggering
2587 	 * consistently (as it would when PPS is turned on for the X540 device)
2588 	 */
2589 	eicr &= 0xFFFF0000;
2590 
2591 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2592 
2593 	if (eicr & IXGBE_EICR_LSC)
2594 		ixgbe_check_lsc(adapter);
2595 
2596 	if (eicr & IXGBE_EICR_MAILBOX)
2597 		ixgbe_msg_task(adapter);
2598 
2599 	switch (hw->mac.type) {
2600 	case ixgbe_mac_82599EB:
2601 	case ixgbe_mac_X540:
2602 		if (eicr & IXGBE_EICR_ECC)
2603 			e_info(link, "Received unrecoverable ECC Err, please "
2604 			       "reboot\n");
2605 		/* Handle Flow Director Full threshold interrupt */
2606 		if (eicr & IXGBE_EICR_FLOW_DIR) {
2607 			int reinit_count = 0;
2608 			int i;
2609 			for (i = 0; i < adapter->num_tx_queues; i++) {
2610 				struct ixgbe_ring *ring = adapter->tx_ring[i];
2611 				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2612 						       &ring->state))
2613 					reinit_count++;
2614 			}
2615 			if (reinit_count) {
2616 				/* no more flow director interrupts until after init */
2617 				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2618 				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2619 				ixgbe_service_event_schedule(adapter);
2620 			}
2621 		}
2622 		ixgbe_check_sfp_event(adapter, eicr);
2623 		ixgbe_check_overtemp_event(adapter, eicr);
2624 		break;
2625 	default:
2626 		break;
2627 	}
2628 
2629 	ixgbe_check_fan_failure(adapter, eicr);
2630 
2631 	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2632 		ixgbe_ptp_check_pps_event(adapter, eicr);
2633 
2634 	/* re-enable the original interrupt state, no lsc, no queues */
2635 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
2636 		ixgbe_irq_enable(adapter, false, false);
2637 
2638 	return IRQ_HANDLED;
2639 }
2640 
2641 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2642 {
2643 	struct ixgbe_q_vector *q_vector = data;
2644 
2645 	/* EIAM disabled interrupts (on this vector) for us */
2646 
2647 	if (q_vector->rx.ring || q_vector->tx.ring)
2648 		napi_schedule(&q_vector->napi);
2649 
2650 	return IRQ_HANDLED;
2651 }
2652 
2653 /**
2654  * ixgbe_poll - NAPI Rx polling callback
2655  * @napi: structure for representing this polling device
2656  * @budget: how many packets driver is allowed to clean
2657  *
2658  * This function is used for legacy and MSI, NAPI mode
2659  **/
2660 int ixgbe_poll(struct napi_struct *napi, int budget)
2661 {
2662 	struct ixgbe_q_vector *q_vector =
2663 				container_of(napi, struct ixgbe_q_vector, napi);
2664 	struct ixgbe_adapter *adapter = q_vector->adapter;
2665 	struct ixgbe_ring *ring;
2666 	int per_ring_budget;
2667 	bool clean_complete = true;
2668 
2669 #ifdef CONFIG_IXGBE_DCA
2670 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2671 		ixgbe_update_dca(q_vector);
2672 #endif
2673 
2674 	ixgbe_for_each_ring(ring, q_vector->tx)
2675 		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2676 
2677 	if (!ixgbe_qv_lock_napi(q_vector))
2678 		return budget;
2679 
2680 	/* attempt to distribute budget to each queue fairly, but don't allow
2681 	 * the budget to go below 1 because we'll exit polling */
2682 	if (q_vector->rx.count > 1)
2683 		per_ring_budget = max(budget/q_vector->rx.count, 1);
2684 	else
2685 		per_ring_budget = budget;
2686 
2687 	ixgbe_for_each_ring(ring, q_vector->rx)
2688 		clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
2689 				   per_ring_budget) < per_ring_budget);
2690 
2691 	ixgbe_qv_unlock_napi(q_vector);
2692 	/* If all work not completed, return budget and keep polling */
2693 	if (!clean_complete)
2694 		return budget;
2695 
2696 	/* all work done, exit the polling mode */
2697 	napi_complete(napi);
2698 	if (adapter->rx_itr_setting & 1)
2699 		ixgbe_set_itr(q_vector);
2700 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
2701 		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2702 
2703 	return 0;
2704 }
2705 
2706 /**
2707  * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2708  * @adapter: board private structure
2709  *
2710  * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2711  * interrupts from the kernel.
2712  **/
2713 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2714 {
2715 	struct net_device *netdev = adapter->netdev;
2716 	int vector, err;
2717 	int ri = 0, ti = 0;
2718 
2719 	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2720 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2721 		struct msix_entry *entry = &adapter->msix_entries[vector];
2722 
2723 		if (q_vector->tx.ring && q_vector->rx.ring) {
2724 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2725 				 "%s-%s-%d", netdev->name, "TxRx", ri++);
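			/* advance ti as well so Tx-only vectors stay in sync */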
2726 			ti++;
2727 		} else if (q_vector->rx.ring) {
2728 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2729 				 "%s-%s-%d", netdev->name, "rx", ri++);
2730 		} else if (q_vector->tx.ring) {
2731 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2732 				 "%s-%s-%d", netdev->name, "tx", ti++);
2733 		} else {
2734 			/* skip this unused q_vector */
2735 			continue;
2736 		}
2737 		err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2738 				  q_vector->name, q_vector);
2739 		if (err) {
2740 			e_err(probe, "request_irq failed for MSIX interrupt "
2741 			      "Error: %d\n", err);
2742 			goto free_queue_irqs;
2743 		}
2744 		/* If Flow Director is enabled, set interrupt affinity */
2745 		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2746 			/* assign the mask for this irq */
2747 			irq_set_affinity_hint(entry->vector,
2748 					      &q_vector->affinity_mask);
2749 		}
2750 	}
2751 
2752 	err = request_irq(adapter->msix_entries[vector].vector,
2753 			  ixgbe_msix_other, 0, netdev->name, adapter);
2754 	if (err) {
2755 		e_err(probe, "request_irq for msix_other failed: %d\n", err);
2756 		goto free_queue_irqs;
2757 	}
2758 
2759 	return 0;
2760 
2761 free_queue_irqs:
2762 	while (vector) {
2763 		vector--;
2764 		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2765 				      NULL);
2766 		free_irq(adapter->msix_entries[vector].vector,
2767 			 adapter->q_vector[vector]);
2768 	}
2769 	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2770 	pci_disable_msix(adapter->pdev);
2771 	kfree(adapter->msix_entries);
2772 	adapter->msix_entries = NULL;
2773 	return err;
2774 }
2775 
2776 /**
2777  * ixgbe_intr - legacy mode Interrupt Handler
2778  * @irq: interrupt number
2779  * @data: pointer to a network interface device structure
2780  **/
2781 static irqreturn_t ixgbe_intr(int irq, void *data)
2782 {
2783 	struct ixgbe_adapter *adapter = data;
2784 	struct ixgbe_hw *hw = &adapter->hw;
2785 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2786 	u32 eicr;
2787 
2788 	/*
2789 	 * Workaround for silicon errata #26 on 82598.  Mask the interrupt
2790 	 * before the read of EICR.
2791 	 */
2792 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2793 
2794 	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
2795 	 * therefore no explicit interrupt disable is necessary */
2796 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2797 	if (!eicr) {
2798 		/*
2799 		 * shared interrupt alert!
2800 		 * make sure interrupts are enabled because the read will
2801 		 * have disabled interrupts due to EIAM
2802 		 * finish the workaround of silicon errata on 82598.  Unmask
2803 		 * the interrupt that we masked before the EICR read.
2804 		 */
2805 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
2806 			ixgbe_irq_enable(adapter, true, true);
2807 		return IRQ_NONE;	/* Not our interrupt */
2808 	}
2809 
2810 	if (eicr & IXGBE_EICR_LSC)
2811 		ixgbe_check_lsc(adapter);
2812 
2813 	switch (hw->mac.type) {
2814 	case ixgbe_mac_82599EB:
2815 		ixgbe_check_sfp_event(adapter, eicr);
2816 		/* Fall through */
2817 	case ixgbe_mac_X540:
2818 		if (eicr & IXGBE_EICR_ECC)
2819 			e_info(link, "Received unrecoverable ECC err, please "
2820 				     "reboot\n");
2821 		ixgbe_check_overtemp_event(adapter, eicr);
2822 		break;
2823 	default:
2824 		break;
2825 	}
2826 
2827 	ixgbe_check_fan_failure(adapter, eicr);
2828 	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2829 		ixgbe_ptp_check_pps_event(adapter, eicr);
2830 
2831 	/* would disable interrupts here but EIAM disabled it */
2832 	napi_schedule(&q_vector->napi);
2833 
2834 	/*
	 * re-enable link (maybe) and non-queue interrupts, no flush.
2836 	 * ixgbe_poll will re-enable the queue interrupts
2837 	 */
2838 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
2839 		ixgbe_irq_enable(adapter, false, false);
2840 
2841 	return IRQ_HANDLED;
2842 }
2843 
2844 /**
2845  * ixgbe_request_irq - initialize interrupts
2846  * @adapter: board private structure
2847  *
2848  * Attempts to configure interrupts using the best available
2849  * capabilities of the hardware and kernel.
2850  **/
2851 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2852 {
2853 	struct net_device *netdev = adapter->netdev;
2854 	int err;
2855 
2856 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2857 		err = ixgbe_request_msix_irqs(adapter);
2858 	else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
2859 		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2860 				  netdev->name, adapter);
2861 	else
2862 		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2863 				  netdev->name, adapter);
2864 
2865 	if (err)
2866 		e_err(probe, "request_irq failed, Error %d\n", err);
2867 
2868 	return err;
2869 }
2870 
2871 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2872 {
2873 	int vector;
2874 
2875 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2876 		free_irq(adapter->pdev->irq, adapter);
2877 		return;
2878 	}
2879 
2880 	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2881 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2882 		struct msix_entry *entry = &adapter->msix_entries[vector];
2883 
2884 		/* free only the irqs that were actually requested */
2885 		if (!q_vector->rx.ring && !q_vector->tx.ring)
2886 			continue;
2887 
2888 		/* clear the affinity_mask in the IRQ descriptor */
2889 		irq_set_affinity_hint(entry->vector, NULL);
2890 
2891 		free_irq(entry->vector, q_vector);
2892 	}
2893 
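	/* vector now indexes the extra entry used for the "other" interrupt */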
2894 	free_irq(adapter->msix_entries[vector++].vector, adapter);
2895 }
2896 
2897 /**
2898  * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2899  * @adapter: board private structure
2900  **/
2901 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2902 {
2903 	switch (adapter->hw.mac.type) {
2904 	case ixgbe_mac_82598EB:
2905 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2906 		break;
2907 	case ixgbe_mac_82599EB:
2908 	case ixgbe_mac_X540:
2909 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2910 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2911 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2912 		break;
2913 	default:
2914 		break;
2915 	}
2916 	IXGBE_WRITE_FLUSH(&adapter->hw);
2917 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2918 		int vector;
2919 
2920 		for (vector = 0; vector < adapter->num_q_vectors; vector++)
2921 			synchronize_irq(adapter->msix_entries[vector].vector);
2922 
2923 		synchronize_irq(adapter->msix_entries[vector++].vector);
2924 	} else {
2925 		synchronize_irq(adapter->pdev->irq);
2926 	}
2927 }
2928 
2929 /**
2930  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
2932  **/
2933 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2934 {
2935 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2936 
2937 	ixgbe_write_eitr(q_vector);
2938 
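	/* map Rx queue 0 and Tx queue 0 to vector 0 */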
2939 	ixgbe_set_ivar(adapter, 0, 0, 0);
2940 	ixgbe_set_ivar(adapter, 1, 0, 0);
2941 
2942 	e_info(hw, "Legacy interrupt IVAR setup done\n");
2943 }
2944 
2945 /**
2946  * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2947  * @adapter: board private structure
2948  * @ring: structure containing ring specific data
2949  *
2950  * Configure the Tx descriptor ring after a reset.
2951  **/
2952 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2953 			     struct ixgbe_ring *ring)
2954 {
2955 	struct ixgbe_hw *hw = &adapter->hw;
2956 	u64 tdba = ring->dma;
2957 	int wait_loop = 10;
2958 	u32 txdctl = IXGBE_TXDCTL_ENABLE;
2959 	u8 reg_idx = ring->reg_idx;
2960 
2961 	/* disable queue to avoid issues while updating state */
2962 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
2963 	IXGBE_WRITE_FLUSH(hw);
2964 
2965 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2966 			(tdba & DMA_BIT_MASK(32)));
2967 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2968 	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2969 			ring->count * sizeof(union ixgbe_adv_tx_desc));
2970 	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2971 	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
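	/* cache the tail register address for use in the Tx fast path */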
2972 	ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2973 
2974 	/*
2975 	 * set WTHRESH to encourage burst writeback, it should not be set
2976 	 * higher than 1 when:
2977 	 * - ITR is 0 as it could cause false TX hangs
2978 	 * - ITR is set to > 100k int/sec and BQL is enabled
2979 	 *
2980 	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
2981 	 * to or less than the number of on chip descriptors, which is
2982 	 * currently 40.
2983 	 */
2984 #if IS_ENABLED(CONFIG_BQL)
2985 	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
2986 #else
2987 	if (!ring->q_vector || (ring->q_vector->itr < 8))
2988 #endif
2989 		txdctl |= (1 << 16);	/* WTHRESH = 1 */
2990 	else
2991 		txdctl |= (8 << 16);	/* WTHRESH = 8 */
2992 
2993 	/*
2994 	 * Setting PTHRESH to 32 both improves performance
2995 	 * and avoids a TX hang with DFP enabled
2996 	 */
2997 	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
2998 		   32;		/* PTHRESH = 32 */
2999 
3000 	/* reinitialize flowdirector state */
3001 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3002 		ring->atr_sample_rate = adapter->atr_sample_rate;
3003 		ring->atr_count = 0;
3004 		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3005 	} else {
3006 		ring->atr_sample_rate = 0;
3007 	}
3008 
3009 	/* initialize XPS */
3010 	if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3011 		struct ixgbe_q_vector *q_vector = ring->q_vector;
3012 
3013 		if (q_vector)
3014 			netif_set_xps_queue(ring->netdev,
3015 					    &q_vector->affinity_mask,
3016 					    ring->queue_index);
3017 	}
3018 
3019 	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3020 
3021 	/* enable queue */
3022 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3023 
3024 	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3025 	if (hw->mac.type == ixgbe_mac_82598EB &&
3026 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3027 		return;
3028 
3029 	/* poll to verify queue is enabled */
3030 	do {
3031 		usleep_range(1000, 2000);
3032 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3033 	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3034 	if (!wait_loop)
3035 		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
3036 }
3037 
3038 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3039 {
3040 	struct ixgbe_hw *hw = &adapter->hw;
3041 	u32 rttdcs, mtqc;
3042 	u8 tcs = netdev_get_num_tc(adapter->netdev);
3043 
3044 	if (hw->mac.type == ixgbe_mac_82598EB)
3045 		return;
3046 
3047 	/* disable the arbiter while setting MTQC */
3048 	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3049 	rttdcs |= IXGBE_RTTDCS_ARBDIS;
3050 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3051 
3052 	/* set transmit pool layout */
3053 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3054 		mtqc = IXGBE_MTQC_VT_ENA;
3055 		if (tcs > 4)
3056 			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3057 		else if (tcs > 1)
3058 			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3059 		else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3060 			mtqc |= IXGBE_MTQC_32VF;
3061 		else
3062 			mtqc |= IXGBE_MTQC_64VF;
3063 	} else {
3064 		if (tcs > 4)
3065 			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3066 		else if (tcs > 1)
3067 			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3068 		else
3069 			mtqc = IXGBE_MTQC_64Q_1PB;
3070 	}
3071 
3072 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3073 
	/* Enable Security TX Buffer IFG for multiple packet buffers */
3075 	if (tcs) {
3076 		u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3077 		sectx |= IXGBE_SECTX_DCB;
3078 		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3079 	}
3080 
3081 	/* re-enable the arbiter */
3082 	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3083 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3084 }
3085 
3086 /**
3087  * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
3088  * @adapter: board private structure
3089  *
3090  * Configure the Tx unit of the MAC after a reset.
3091  **/
3092 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3093 {
3094 	struct ixgbe_hw *hw = &adapter->hw;
3095 	u32 dmatxctl;
3096 	u32 i;
3097 
3098 	ixgbe_setup_mtqc(adapter);
3099 
3100 	if (hw->mac.type != ixgbe_mac_82598EB) {
3101 		/* DMATXCTL.EN must be before Tx queues are enabled */
3102 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3103 		dmatxctl |= IXGBE_DMATXCTL_TE;
3104 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3105 	}
3106 
3107 	/* Setup the HW Tx Head and Tail descriptor pointers */
3108 	for (i = 0; i < adapter->num_tx_queues; i++)
3109 		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3110 }
3111 
3112 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3113 				 struct ixgbe_ring *ring)
3114 {
3115 	struct ixgbe_hw *hw = &adapter->hw;
3116 	u8 reg_idx = ring->reg_idx;
3117 	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3118 
3119 	srrctl |= IXGBE_SRRCTL_DROP_EN;
3120 
3121 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3122 }
3123 
3124 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3125 				  struct ixgbe_ring *ring)
3126 {
3127 	struct ixgbe_hw *hw = &adapter->hw;
3128 	u8 reg_idx = ring->reg_idx;
3129 	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3130 
3131 	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3132 
3133 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3134 }
3135 
3136 #ifdef CONFIG_IXGBE_DCB
3137 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3138 #else
3139 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3140 #endif
3141 {
3142 	int i;
3143 	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3144 
3145 	if (adapter->ixgbe_ieee_pfc)
3146 		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3147 
3148 	/*
3149 	 * We should set the drop enable bit if:
3150 	 *  SR-IOV is enabled
3151 	 *   or
3152 	 *  Number of Rx queues > 1 and flow control is disabled
3153 	 *
3154 	 *  This allows us to avoid head of line blocking for security
3155 	 *  and performance reasons.
3156 	 */
3157 	if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3158 	    !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3159 		for (i = 0; i < adapter->num_rx_queues; i++)
3160 			ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3161 	} else {
3162 		for (i = 0; i < adapter->num_rx_queues; i++)
3163 			ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3164 	}
3165 }
3166 
3167 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3168 
3169 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3170 				   struct ixgbe_ring *rx_ring)
3171 {
3172 	struct ixgbe_hw *hw = &adapter->hw;
3173 	u32 srrctl;
3174 	u8 reg_idx = rx_ring->reg_idx;
3175 
3176 	if (hw->mac.type == ixgbe_mac_82598EB) {
3177 		u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3178 
3179 		/*
3180 		 * if VMDq is not active we must program one srrctl register
3181 		 * per RSS queue since we have enabled RDRXCTL.MVMEN
3182 		 */
3183 		reg_idx &= mask;
3184 	}
3185 
3186 	/* configure header buffer length, needed for RSC */
3187 	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3188 
3189 	/* configure the packet buffer length */
3190 	srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3191 
3192 	/* configure descriptor type */
3193 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3194 
3195 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3196 }
3197 
3198 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3199 {
3200 	struct ixgbe_hw *hw = &adapter->hw;
3201 	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
3202 			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
3203 			  0x6A3E67EA, 0x14364D17, 0x3BED200D};
3204 	u32 mrqc = 0, reta = 0;
3205 	u32 rxcsum;
3206 	int i, j;
3207 	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3208 
3209 	/*
3210 	 * Program table for at least 2 queues w/ SR-IOV so that VFs can
3211 	 * make full use of any rings they may have.  We will use the
3212 	 * PSRTYPE register to control how many rings we use within the PF.
3213 	 */
3214 	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
3215 		rss_i = 2;
3216 
3217 	/* Fill out hash function seeds */
3218 	for (i = 0; i < 10; i++)
3219 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
3220 
3221 	/* Fill out redirection table */
3222 	for (i = 0, j = 0; i < 128; i++, j++) {
3223 		if (j == rss_i)
3224 			j = 0;
3225 		/* reta = 4-byte sliding window of
3226 		 * 0x00..(indices-1)(indices-1)00..etc. */
3227 		reta = (reta << 8) | (j * 0x11);
3228 		if ((i & 3) == 3)
3229 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3230 	}
3231 
3232 	/* Disable indicating checksum in descriptor, enables RSS hash */
3233 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3234 	rxcsum |= IXGBE_RXCSUM_PCSD;
3235 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3236 
3237 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3238 		if (adapter->ring_feature[RING_F_RSS].mask)
3239 			mrqc = IXGBE_MRQC_RSSEN;
3240 	} else {
3241 		u8 tcs = netdev_get_num_tc(adapter->netdev);
3242 
3243 		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3244 			if (tcs > 4)
3245 				mrqc = IXGBE_MRQC_VMDQRT8TCEN;	/* 8 TCs */
3246 			else if (tcs > 1)
3247 				mrqc = IXGBE_MRQC_VMDQRT4TCEN;	/* 4 TCs */
3248 			else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3249 				mrqc = IXGBE_MRQC_VMDQRSS32EN;
3250 			else
3251 				mrqc = IXGBE_MRQC_VMDQRSS64EN;
3252 		} else {
3253 			if (tcs > 4)
3254 				mrqc = IXGBE_MRQC_RTRSS8TCEN;
3255 			else if (tcs > 1)
3256 				mrqc = IXGBE_MRQC_RTRSS4TCEN;
3257 			else
3258 				mrqc = IXGBE_MRQC_RSSEN;
3259 		}
3260 	}
3261 
3262 	/* Perform hash on these packet types */
3263 	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3264 		IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3265 		IXGBE_MRQC_RSS_FIELD_IPV6 |
3266 		IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3267 
3268 	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3269 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3270 	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3271 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3272 
3273 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3274 }
3275 
3276 /**
3277  * ixgbe_configure_rscctl - enable RSC for the indicated ring
3278  * @adapter:    address of board private structure
3279  * @index:      index of ring to set
3280  **/
3281 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3282 				   struct ixgbe_ring *ring)
3283 {
3284 	struct ixgbe_hw *hw = &adapter->hw;
3285 	u32 rscctrl;
3286 	u8 reg_idx = ring->reg_idx;
3287 
3288 	if (!ring_is_rsc_enabled(ring))
3289 		return;
3290 
3291 	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3292 	rscctrl |= IXGBE_RSCCTL_RSCEN;
3293 	/*
3294 	 * we must limit the number of descriptors so that the
3295 	 * total size of max desc * buf_len is not greater
3296 	 * than 65536
3297 	 */
3298 	rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3299 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3300 }
3301 
3302 #define IXGBE_MAX_RX_DESC_POLL 10
3303 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3304 				       struct ixgbe_ring *ring)
3305 {
3306 	struct ixgbe_hw *hw = &adapter->hw;
3307 	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3308 	u32 rxdctl;
3309 	u8 reg_idx = ring->reg_idx;
3310 
3311 	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3312 	if (hw->mac.type == ixgbe_mac_82598EB &&
3313 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3314 		return;
3315 
3316 	do {
3317 		usleep_range(1000, 2000);
3318 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3319 	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3320 
3321 	if (!wait_loop) {
3322 		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3323 		      "the polling period\n", reg_idx);
3324 	}
3325 }
3326 
3327 void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3328 			    struct ixgbe_ring *ring)
3329 {
3330 	struct ixgbe_hw *hw = &adapter->hw;
3331 	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3332 	u32 rxdctl;
3333 	u8 reg_idx = ring->reg_idx;
3334 
3335 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3336 	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3337 
3338 	/* write value back with RXDCTL.ENABLE bit cleared */
3339 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3340 
3341 	if (hw->mac.type == ixgbe_mac_82598EB &&
3342 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3343 		return;
3344 
3345 	/* the hardware may take up to 100us to really disable the rx queue */
3346 	do {
3347 		udelay(10);
3348 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3349 	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3350 
3351 	if (!wait_loop) {
3352 		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3353 		      "the polling period\n", reg_idx);
3354 	}
3355 }
3356 
3357 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3358 			     struct ixgbe_ring *ring)
3359 {
3360 	struct ixgbe_hw *hw = &adapter->hw;
3361 	u64 rdba = ring->dma;
3362 	u32 rxdctl;
3363 	u8 reg_idx = ring->reg_idx;
3364 
3365 	/* disable queue to avoid issues while updating state */
3366 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3367 	ixgbe_disable_rx_queue(adapter, ring);
3368 
3369 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3370 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3371 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3372 			ring->count * sizeof(union ixgbe_adv_rx_desc));
3373 	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3374 	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3375 	ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
3376 
3377 	ixgbe_configure_srrctl(adapter, ring);
3378 	ixgbe_configure_rscctl(adapter, ring);
3379 
3380 	if (hw->mac.type == ixgbe_mac_82598EB) {
3381 		/*
3382 		 * enable cache line friendly hardware writes:
3383 		 * PTHRESH=32 descriptors (half the internal cache),
3384 		 * this also removes ugly rx_no_buffer_count increment
3385 		 * HTHRESH=4 descriptors (to minimize latency on fetch)
3386 		 * WTHRESH=8 burst writeback up to two cache lines
3387 		 */
3388 		rxdctl &= ~0x3FFFFF;
3389 		rxdctl |=  0x080420;
3390 	}
3391 
3392 	/* enable receive descriptor ring */
3393 	rxdctl |= IXGBE_RXDCTL_ENABLE;
3394 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3395 
3396 	ixgbe_rx_desc_queue_enable(adapter, ring);
3397 	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3398 }
3399 
3400 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3401 {
3402 	struct ixgbe_hw *hw = &adapter->hw;
3403 	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3404 	u16 pool;
3405 
	/* PSRTYPE must be initialized on non-82598 adapters */
3407 	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3408 		      IXGBE_PSRTYPE_UDPHDR |
3409 		      IXGBE_PSRTYPE_IPV4HDR |
3410 		      IXGBE_PSRTYPE_L2HDR |
3411 		      IXGBE_PSRTYPE_IPV6HDR;
3412 
3413 	if (hw->mac.type == ixgbe_mac_82598EB)
3414 		return;
3415 
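	/* The uppermost PSRTYPE bits tell the hardware how many RSS queues
	 * each pool uses, encoded as a power of two: 2 << 29 selects four
	 * queues per pool, 1 << 29 selects two.
	 */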
3416 	if (rss_i > 3)
3417 		psrtype |= 2 << 29;
3418 	else if (rss_i > 1)
3419 		psrtype |= 1 << 29;
3420 
3421 	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
3422 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
3423 }
3424 
3425 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3426 {
3427 	struct ixgbe_hw *hw = &adapter->hw;
3428 	u32 reg_offset, vf_shift;
3429 	u32 gcr_ext, vmdctl;
3430 	int i;
3431 
3432 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3433 		return;
3434 
3435 	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3436 	vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3437 	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3438 	vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3439 	vmdctl |= IXGBE_VT_CTL_REPLEN;
3440 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3441 
3442 	vf_shift = VMDQ_P(0) % 32;
3443 	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3444 
3445 	/* Enable only the PF's pool for Tx/Rx */
3446 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3447 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3448 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3449 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3450 	if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
3451 		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3452 
3453 	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3454 	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3455 
3456 	/*
3457 	 * Set up VF register offsets for selected VT Mode,
3458 	 * i.e. 32 or 64 VFs for SR-IOV
3459 	 */
3460 	switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3461 	case IXGBE_82599_VMDQ_8Q_MASK:
3462 		gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3463 		break;
3464 	case IXGBE_82599_VMDQ_4Q_MASK:
3465 		gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3466 		break;
3467 	default:
3468 		gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3469 		break;
3470 	}
3471 
3472 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3473 
3475 	/* Enable MAC Anti-Spoofing */
3476 	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3477 					  adapter->num_vfs);
3478 	/* For VFs that have spoof checking turned off */
3479 	for (i = 0; i < adapter->num_vfs; i++) {
3480 		if (!adapter->vfinfo[i].spoofchk_enabled)
3481 			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3482 	}
3483 }
3484 
3485 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3486 {
3487 	struct ixgbe_hw *hw = &adapter->hw;
3488 	struct net_device *netdev = adapter->netdev;
3489 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3490 	struct ixgbe_ring *rx_ring;
3491 	int i;
3492 	u32 mhadd, hlreg0;
3493 
3494 #ifdef IXGBE_FCOE
3495 	/* adjust max frame to be able to do baby jumbo for FCoE */
3496 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3497 	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3498 		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3499 
3500 #endif /* IXGBE_FCOE */
3501 
3502 	/* adjust max frame to be at least the size of a standard frame */
3503 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3504 		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3505 
3506 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3507 	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3508 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
3509 		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3510 
3511 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3512 	}
3513 
3514 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3515 	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
3516 	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3517 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3518 
	/*
	 * Enable or disable RSC on each Rx ring to match the adapter-wide
	 * RSC setting
	 */
3523 	for (i = 0; i < adapter->num_rx_queues; i++) {
3524 		rx_ring = adapter->rx_ring[i];
3525 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3526 			set_ring_rsc_enabled(rx_ring);
3527 		else
3528 			clear_ring_rsc_enabled(rx_ring);
3529 	}
3530 }
3531 
3532 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3533 {
3534 	struct ixgbe_hw *hw = &adapter->hw;
3535 	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3536 
3537 	switch (hw->mac.type) {
3538 	case ixgbe_mac_82598EB:
3539 		/*
3540 		 * For VMDq support of different descriptor types or
3541 		 * buffer sizes through the use of multiple SRRCTL
3542 		 * registers, RDRXCTL.MVMEN must be set to 1
3543 		 *
3544 		 * also, the manual doesn't mention it clearly but DCA hints
3545 		 * will only use queue 0's tags unless this bit is set.  Side
3546 		 * effects of setting this bit are only that SRRCTL must be
3547 		 * fully programmed [0..15]
3548 		 */
3549 		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3550 		break;
3551 	case ixgbe_mac_82599EB:
3552 	case ixgbe_mac_X540:
3553 		/* Disable RSC for ACK packets */
3554 		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3555 		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3556 		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3557 		/* hardware requires some bits to be set by default */
3558 		rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3559 		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3560 		break;
3561 	default:
3562 		/* We should do nothing since we don't know this hardware */
3563 		return;
3564 	}
3565 
3566 	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3567 }
3568 
3569 /**
3570  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3571  * @adapter: board private structure
3572  *
3573  * Configure the Rx unit of the MAC after a reset.
3574  **/
3575 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3576 {
3577 	struct ixgbe_hw *hw = &adapter->hw;
3578 	int i;
3579 	u32 rxctrl, rfctl;
3580 
3581 	/* disable receives while setting up the descriptors */
3582 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3583 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3584 
3585 	ixgbe_setup_psrtype(adapter);
3586 	ixgbe_setup_rdrxctl(adapter);
3587 
3588 	/* RSC Setup */
3589 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
3590 	rfctl &= ~IXGBE_RFCTL_RSC_DIS;
3591 	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
3592 		rfctl |= IXGBE_RFCTL_RSC_DIS;
3593 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
3594 
3595 	/* Program registers for the distribution of queues */
3596 	ixgbe_setup_mrqc(adapter);
3597 
3598 	/* set_rx_buffer_len must be called before ring initialization */
3599 	ixgbe_set_rx_buffer_len(adapter);
3600 
3601 	/*
3602 	 * Setup the HW Rx Head and Tail Descriptor Pointers and
3603 	 * the Base and Length of the Rx Descriptor Ring
3604 	 */
3605 	for (i = 0; i < adapter->num_rx_queues; i++)
3606 		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3607 
3608 	/* disable drop enable for 82598 parts */
3609 	if (hw->mac.type == ixgbe_mac_82598EB)
3610 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
3611 
3612 	/* enable all receives */
3613 	rxctrl |= IXGBE_RXCTRL_RXEN;
3614 	hw->mac.ops.enable_rx_dma(hw, rxctrl);
3615 }
3616 
3617 static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
3618 				 __be16 proto, u16 vid)
3619 {
3620 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3621 	struct ixgbe_hw *hw = &adapter->hw;
3622 
3623 	/* add VID to filter table */
3624 	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
3625 	set_bit(vid, adapter->active_vlans);
3626 
3627 	return 0;
3628 }
3629 
3630 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3631 				  __be16 proto, u16 vid)
3632 {
3633 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3634 	struct ixgbe_hw *hw = &adapter->hw;
3635 
3636 	/* remove VID from filter table */
3637 	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
3638 	clear_bit(vid, adapter->active_vlans);
3639 
3640 	return 0;
3641 }
3642 
3643 /**
3644  * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3645  * @adapter: driver data
3646  */
3647 static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3648 {
3649 	struct ixgbe_hw *hw = &adapter->hw;
3650 	u32 vlnctrl;
3651 
3652 	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3653 	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3654 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3655 }
3656 
3657 /**
3658  * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3659  * @adapter: driver data
3660  */
3661 static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3662 {
3663 	struct ixgbe_hw *hw = &adapter->hw;
3664 	u32 vlnctrl;
3665 
3666 	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3667 	vlnctrl |= IXGBE_VLNCTRL_VFE;
3668 	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3669 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3670 }
3671 
3672 /**
3673  * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3674  * @adapter: driver data
3675  */
3676 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3677 {
3678 	struct ixgbe_hw *hw = &adapter->hw;
3679 	u32 vlnctrl;
3680 	int i, j;
3681 
3682 	switch (hw->mac.type) {
3683 	case ixgbe_mac_82598EB:
3684 		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3685 		vlnctrl &= ~IXGBE_VLNCTRL_VME;
3686 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3687 		break;
3688 	case ixgbe_mac_82599EB:
3689 	case ixgbe_mac_X540:
3690 		for (i = 0; i < adapter->num_rx_queues; i++) {
3691 			struct ixgbe_ring *ring = adapter->rx_ring[i];
3692 
3693 			if (ring->l2_accel_priv)
3694 				continue;
3695 			j = ring->reg_idx;
3696 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3697 			vlnctrl &= ~IXGBE_RXDCTL_VME;
3698 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3699 		}
3700 		break;
3701 	default:
3702 		break;
3703 	}
3704 }
3705 
3706 /**
3707  * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
3708  * @adapter: driver data
3709  */
3710 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3711 {
3712 	struct ixgbe_hw *hw = &adapter->hw;
3713 	u32 vlnctrl;
3714 	int i, j;
3715 
3716 	switch (hw->mac.type) {
3717 	case ixgbe_mac_82598EB:
3718 		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3719 		vlnctrl |= IXGBE_VLNCTRL_VME;
3720 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3721 		break;
3722 	case ixgbe_mac_82599EB:
3723 	case ixgbe_mac_X540:
3724 		for (i = 0; i < adapter->num_rx_queues; i++) {
3725 			struct ixgbe_ring *ring = adapter->rx_ring[i];
3726 
3727 			if (ring->l2_accel_priv)
3728 				continue;
3729 			j = ring->reg_idx;
3730 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3731 			vlnctrl |= IXGBE_RXDCTL_VME;
3732 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3733 		}
3734 		break;
3735 	default:
3736 		break;
3737 	}
3738 }
3739 
3740 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3741 {
3742 	u16 vid;
3743 
3744 	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
3745 
3746 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3747 		ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
3748 }
3749 
3750 /**
3751  * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3752  * @netdev: network interface device structure
3753  *
3754  * Writes unicast address list to the RAR table.
3755  * Returns: -ENOMEM on failure/insufficient address space
3756  *                0 on no addresses written
3757  *                X on writing X addresses to the RAR table
3758  **/
3759 static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3760 {
3761 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3762 	struct ixgbe_hw *hw = &adapter->hw;
3763 	unsigned int rar_entries = hw->mac.num_rar_entries - 1;
3764 	int count = 0;
3765 
	/* In SR-IOV/VMDQ modes significantly fewer RAR entries are available */
3767 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3768 		rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
3769 
3770 	/* return ENOMEM indicating insufficient memory for addresses */
3771 	if (netdev_uc_count(netdev) > rar_entries)
3772 		return -ENOMEM;
3773 
3774 	if (!netdev_uc_empty(netdev)) {
3775 		struct netdev_hw_addr *ha;
3776 		/* return error if we do not support writing to RAR table */
3777 		if (!hw->mac.ops.set_rar)
3778 			return -ENOMEM;
3779 
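		/* Entries are written from the highest usable RAR index
		 * downward; RAR[0] stays reserved for the PF's own MAC
		 * address.
		 */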
3780 		netdev_for_each_uc_addr(ha, netdev) {
3781 			if (!rar_entries)
3782 				break;
3783 			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3784 					    VMDQ_P(0), IXGBE_RAH_AV);
3785 			count++;
3786 		}
3787 	}
	/* clear any RAR entries that were not rewritten above */
3789 	for (; rar_entries > 0 ; rar_entries--)
3790 		hw->mac.ops.clear_rar(hw, rar_entries);
3791 
3792 	return count;
3793 }
3794 
3795 /**
3796  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
3797  * @netdev: network interface device structure
3798  *
3799  * The set_rx_method entry point is called whenever the unicast/multicast
3800  * address list or the network interface flags are updated.  This routine is
3801  * responsible for configuring the hardware for proper unicast, multicast and
3802  * promiscuous mode.
3803  **/
3804 void ixgbe_set_rx_mode(struct net_device *netdev)
3805 {
3806 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3807 	struct ixgbe_hw *hw = &adapter->hw;
3808 	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3809 	int count;
3810 
3811 	/* Check for Promiscuous and All Multicast modes */
3812 
3813 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3814 
3815 	/* set all bits that we expect to always be set */
3816 	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
3817 	fctrl |= IXGBE_FCTRL_BAM;
3818 	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3819 	fctrl |= IXGBE_FCTRL_PMCF;
3820 
3821 	/* clear the bits we are changing the status of */
3822 	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3823 
3824 	if (netdev->flags & IFF_PROMISC) {
3825 		hw->addr_ctrl.user_set_promisc = true;
3826 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3827 		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3828 		/* Only disable hardware filter vlans in promiscuous mode
3829 		 * if SR-IOV and VMDQ are disabled - otherwise ensure
3830 		 * that hardware VLAN filters remain enabled.
3831 		 */
3832 		if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
3833 					IXGBE_FLAG_SRIOV_ENABLED)))
3834 			ixgbe_vlan_filter_disable(adapter);
3835 		else
3836 			ixgbe_vlan_filter_enable(adapter);
3837 	} else {
3838 		if (netdev->flags & IFF_ALLMULTI) {
3839 			fctrl |= IXGBE_FCTRL_MPE;
3840 			vmolr |= IXGBE_VMOLR_MPE;
3841 		}
3842 		ixgbe_vlan_filter_enable(adapter);
3843 		hw->addr_ctrl.user_set_promisc = false;
3844 	}
3845 
3846 	/*
3847 	 * Write addresses to available RAR registers, if there is not
3848 	 * sufficient space to store all the addresses then enable
3849 	 * unicast promiscuous mode
3850 	 */
3851 	count = ixgbe_write_uc_addr_list(netdev);
3852 	if (count < 0) {
3853 		fctrl |= IXGBE_FCTRL_UPE;
3854 		vmolr |= IXGBE_VMOLR_ROPE;
3855 	}
3856 
	/* Write addresses to the MTA; if the attempt fails
3858 	 * then we should just turn on promiscuous mode so
3859 	 * that we can at least receive multicast traffic
3860 	 */
3861 	hw->mac.ops.update_mc_addr_list(hw, netdev);
3862 	vmolr |= IXGBE_VMOLR_ROMPE;
3863 
3864 	if (adapter->num_vfs)
3865 		ixgbe_restore_vf_multicasts(adapter);
3866 
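	/* Merge the bits managed here into the PF pool's existing VMOLR
	 * value so that unrelated per-pool settings are preserved.
	 */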
3867 	if (hw->mac.type != ixgbe_mac_82598EB) {
3868 		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
3869 			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3870 			   IXGBE_VMOLR_ROPE);
3871 		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
3872 	}
3873 
3874 	/* This is useful for sniffing bad packets. */
3875 	if (adapter->netdev->features & NETIF_F_RXALL) {
3876 		/* UPE and MPE will be handled by normal PROMISC logic
		 * in ixgbe_set_rx_mode */
3878 		fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
3879 			  IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
3880 			  IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
3881 
3882 		fctrl &= ~(IXGBE_FCTRL_DPF);
3883 		/* NOTE:  VLAN filtering is disabled by setting PROMISC */
3884 	}
3885 
3886 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3887 
3888 	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3889 		ixgbe_vlan_strip_enable(adapter);
3890 	else
3891 		ixgbe_vlan_strip_disable(adapter);
3892 }
3893 
3894 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3895 {
3896 	int q_idx;
3897 
3898 	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
3899 		ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
3900 		napi_enable(&adapter->q_vector[q_idx]->napi);
3901 	}
3902 }
3903 
3904 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3905 {
3906 	int q_idx;
3907 
3908 	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
3909 		napi_disable(&adapter->q_vector[q_idx]->napi);
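		/* ixgbe_qv_disable() fails while the q_vector lock is still
		 * held (e.g. by a busy-poll user), so wait for it to be
		 * released before tearing the vector down.
		 */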
3910 		while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
3911 			pr_info("QV %d locked\n", q_idx);
3912 			usleep_range(1000, 20000);
3913 		}
3914 	}
3915 }
3916 
3917 #ifdef CONFIG_IXGBE_DCB
3918 /**
3919  * ixgbe_configure_dcb - Configure DCB hardware
3920  * @adapter: ixgbe adapter struct
3921  *
3922  * This is called by the driver on open to configure the DCB hardware.
3923  * This is also called by the gennetlink interface when reconfiguring
3924  * the DCB state.
3925  */
3926 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3927 {
3928 	struct ixgbe_hw *hw = &adapter->hw;
3929 	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3930 
3931 	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3932 		if (hw->mac.type == ixgbe_mac_82598EB)
3933 			netif_set_gso_max_size(adapter->netdev, 65536);
3934 		return;
3935 	}
3936 
3937 	if (hw->mac.type == ixgbe_mac_82598EB)
3938 		netif_set_gso_max_size(adapter->netdev, 32768);
3939 
3940 #ifdef IXGBE_FCOE
3941 	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3942 		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3943 #endif
3944 
3945 	/* reconfigure the hardware */
3946 	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
3947 		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3948 						DCB_TX_CONFIG);
3949 		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3950 						DCB_RX_CONFIG);
3951 		ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3952 	} else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
3953 		ixgbe_dcb_hw_ets(&adapter->hw,
3954 				 adapter->ixgbe_ieee_ets,
3955 				 max_frame);
3956 		ixgbe_dcb_hw_pfc_config(&adapter->hw,
3957 					adapter->ixgbe_ieee_pfc->pfc_en,
3958 					adapter->ixgbe_ieee_ets->prio_tc);
3959 	}
3960 
3961 	/* Enable RSS Hash per TC */
3962 	if (hw->mac.type != ixgbe_mac_82598EB) {
3963 		u32 msb = 0;
3964 		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
3965 
3966 		while (rss_i) {
3967 			msb++;
3968 			rss_i >>= 1;
3969 		}
3970 
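		/* msb ends up as the number of bits needed to index the RSS
		 * queues (e.g. 4 when there are 16 indices); RQTC carries one
		 * 4-bit field per TC, so multiplying by 0x11111111 replicates
		 * that value into all eight nibbles.
		 */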
3971 		/* write msb to all 8 TCs in one write */
3972 		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
3973 	}
3974 }
3975 #endif
3976 
3977 /* Additional bittime to account for IXGBE framing */
3978 #define IXGBE_ETH_FRAMING 20
3979 
3980 /**
3981  * ixgbe_hpbthresh - calculate high water mark for flow control
3982  *
3983  * @adapter: board private structure to calculate for
3984  * @pb: packet buffer to calculate
3985  */
3986 static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3987 {
3988 	struct ixgbe_hw *hw = &adapter->hw;
3989 	struct net_device *dev = adapter->netdev;
3990 	int link, tc, kb, marker;
3991 	u32 dv_id, rx_pba;
3992 
3993 	/* Calculate max LAN frame size */
3994 	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
3995 
3996 #ifdef IXGBE_FCOE
3997 	/* FCoE traffic class uses FCOE jumbo frames */
3998 	if ((dev->features & NETIF_F_FCOE_MTU) &&
3999 	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4000 	    (pb == ixgbe_fcoe_get_tc(adapter)))
4001 		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4002 
4003 #endif
4004 	/* Calculate delay value for device */
4005 	switch (hw->mac.type) {
4006 	case ixgbe_mac_X540:
4007 		dv_id = IXGBE_DV_X540(link, tc);
4008 		break;
4009 	default:
4010 		dv_id = IXGBE_DV(link, tc);
4011 		break;
4012 	}
4013 
4014 	/* Loopback switch introduces additional latency */
4015 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4016 		dv_id += IXGBE_B2BT(tc);
4017 
	/* Delay value is calculated in bit times; convert to KB */
4019 	kb = IXGBE_BT2KB(dv_id);
4020 	rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
4021 
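	/* High water mark = packet buffer size minus the required headroom,
	 * both in KB (RXPBSIZE is in bytes, hence the >> 10 above).
	 */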
4022 	marker = rx_pba - kb;
4023 
	/* It is possible that the packet buffer is not large enough
	 * to provide the required headroom. In this case warn the user
	 * and do the best we can.
	 */
4028 	if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) cannot provide enough "
			    "headroom to support flow control. "
			    "Decrease MTU or number of traffic classes\n", pb);
4032 		marker = tc + 1;
4033 	}
4034 
4035 	return marker;
4036 }
4037 
4038 /**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
4043  */
4044 static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
4045 {
4046 	struct ixgbe_hw *hw = &adapter->hw;
4047 	struct net_device *dev = adapter->netdev;
4048 	int tc;
4049 	u32 dv_id;
4050 
4051 	/* Calculate max LAN frame size */
4052 	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4053 
4054 	/* Calculate delay value for device */
4055 	switch (hw->mac.type) {
4056 	case ixgbe_mac_X540:
4057 		dv_id = IXGBE_LOW_DV_X540(tc);
4058 		break;
4059 	default:
4060 		dv_id = IXGBE_LOW_DV(tc);
4061 		break;
4062 	}
4063 
	/* Delay value is calculated in bit times; convert to KB */
4065 	return IXGBE_BT2KB(dv_id);
4066 }
4067 
4068 /*
 * ixgbe_pbthresh_setup - calculate and set up the high and low water marks
4070  */
4071 static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
4072 {
4073 	struct ixgbe_hw *hw = &adapter->hw;
4074 	int num_tc = netdev_get_num_tc(adapter->netdev);
4075 	int i;
4076 
4077 	if (!num_tc)
4078 		num_tc = 1;
4079 
4080 	hw->fc.low_water = ixgbe_lpbthresh(adapter);
4081 
4082 	for (i = 0; i < num_tc; i++) {
4083 		hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
4084 
4085 		/* Low water marks must not be larger than high water marks */
4086 		if (hw->fc.low_water > hw->fc.high_water[i])
4087 			hw->fc.low_water = 0;
4088 	}
4089 }
4090 
4091 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
4092 {
4093 	struct ixgbe_hw *hw = &adapter->hw;
4094 	int hdrm;
4095 	u8 tc = netdev_get_num_tc(adapter->netdev);
4096 
4097 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4098 	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
4099 		hdrm = 32 << adapter->fdir_pballoc;
4100 	else
4101 		hdrm = 0;
4102 
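	/* Carve the Rx packet buffer equally among the traffic classes,
	 * leaving hdrm KB of headroom for the Flow Director filter tables
	 * when ATR or perfect filters are enabled.
	 */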
4103 	hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
4104 	ixgbe_pbthresh_setup(adapter);
4105 }
4106 
4107 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
4108 {
4109 	struct ixgbe_hw *hw = &adapter->hw;
4110 	struct hlist_node *node2;
4111 	struct ixgbe_fdir_filter *filter;
4112 
4113 	spin_lock(&adapter->fdir_perfect_lock);
4114 
4115 	if (!hlist_empty(&adapter->fdir_filter_list))
4116 		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
4117 
4118 	hlist_for_each_entry_safe(filter, node2,
4119 				  &adapter->fdir_filter_list, fdir_node) {
4120 		ixgbe_fdir_write_perfect_filter_82599(hw,
4121 				&filter->filter,
4122 				filter->sw_idx,
4123 				(filter->action == IXGBE_FDIR_DROP_QUEUE) ?
4124 				IXGBE_FDIR_DROP_QUEUE :
4125 				adapter->rx_ring[filter->action]->reg_idx);
4126 	}
4127 
4128 	spin_unlock(&adapter->fdir_perfect_lock);
4129 }
4130 
4131 static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
4132 				      struct ixgbe_adapter *adapter)
4133 {
4134 	struct ixgbe_hw *hw = &adapter->hw;
4135 	u32 vmolr;
4136 
4137 	/* No unicast promiscuous support for VMDQ devices. */
4138 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4139 	vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
4140 
4141 	/* clear the affected bit */
4142 	vmolr &= ~IXGBE_VMOLR_MPE;
4143 
4144 	if (dev->flags & IFF_ALLMULTI) {
4145 		vmolr |= IXGBE_VMOLR_MPE;
4146 	} else {
4147 		vmolr |= IXGBE_VMOLR_ROMPE;
4148 		hw->mac.ops.update_mc_addr_list(hw, dev);
4149 	}
4150 	ixgbe_write_uc_addr_list(adapter->netdev);
4151 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4152 }
4153 
4154 static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4155 				 u8 *addr, u16 pool)
4156 {
4157 	struct ixgbe_hw *hw = &adapter->hw;
4158 	unsigned int entry;
4159 
4160 	entry = hw->mac.num_rar_entries - pool;
4161 	hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
4162 }
4163 
4164 static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
4165 {
4166 	struct ixgbe_adapter *adapter = vadapter->real_adapter;
4167 	int rss_i = adapter->num_rx_queues_per_pool;
4168 	struct ixgbe_hw *hw = &adapter->hw;
4169 	u16 pool = vadapter->pool;
4170 	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4171 		      IXGBE_PSRTYPE_UDPHDR |
4172 		      IXGBE_PSRTYPE_IPV4HDR |
4173 		      IXGBE_PSRTYPE_L2HDR |
4174 		      IXGBE_PSRTYPE_IPV6HDR;
4175 
4176 	if (hw->mac.type == ixgbe_mac_82598EB)
4177 		return;
4178 
4179 	if (rss_i > 3)
4180 		psrtype |= 2 << 29;
4181 	else if (rss_i > 1)
4182 		psrtype |= 1 << 29;
4183 
4184 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4185 }
4186 
4187 /**
4188  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
4189  * @rx_ring: ring to free buffers from
4190  **/
4191 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4192 {
4193 	struct device *dev = rx_ring->dev;
4194 	unsigned long size;
4195 	u16 i;
4196 
4197 	/* ring already cleared, nothing to do */
4198 	if (!rx_ring->rx_buffer_info)
4199 		return;
4200 
4201 	/* Free all the Rx ring sk_buffs */
4202 	for (i = 0; i < rx_ring->count; i++) {
4203 		struct ixgbe_rx_buffer *rx_buffer;
4204 
4205 		rx_buffer = &rx_ring->rx_buffer_info[i];
4206 		if (rx_buffer->skb) {
4207 			struct sk_buff *skb = rx_buffer->skb;
4208 			if (IXGBE_CB(skb)->page_released) {
4209 				dma_unmap_page(dev,
4210 					       IXGBE_CB(skb)->dma,
4211 					       ixgbe_rx_bufsz(rx_ring),
4212 					       DMA_FROM_DEVICE);
4213 				IXGBE_CB(skb)->page_released = false;
4214 			}
4215 			dev_kfree_skb(skb);
4216 		}
4217 		rx_buffer->skb = NULL;
4218 		if (rx_buffer->dma)
4219 			dma_unmap_page(dev, rx_buffer->dma,
4220 				       ixgbe_rx_pg_size(rx_ring),
4221 				       DMA_FROM_DEVICE);
4222 		rx_buffer->dma = 0;
4223 		if (rx_buffer->page)
4224 			__free_pages(rx_buffer->page,
4225 				     ixgbe_rx_pg_order(rx_ring));
4226 		rx_buffer->page = NULL;
4227 	}
4228 
4229 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4230 	memset(rx_ring->rx_buffer_info, 0, size);
4231 
4232 	/* Zero out the descriptor ring */
4233 	memset(rx_ring->desc, 0, rx_ring->size);
4234 
4235 	rx_ring->next_to_alloc = 0;
4236 	rx_ring->next_to_clean = 0;
4237 	rx_ring->next_to_use = 0;
4238 }
4239 
4240 static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
4241 				   struct ixgbe_ring *rx_ring)
4242 {
4243 	struct ixgbe_adapter *adapter = vadapter->real_adapter;
4244 	int index = rx_ring->queue_index + vadapter->rx_base_queue;
4245 
4246 	/* shutdown specific queue receive and wait for dma to settle */
4247 	ixgbe_disable_rx_queue(adapter, rx_ring);
4248 	usleep_range(10000, 20000);
4249 	ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
4250 	ixgbe_clean_rx_ring(rx_ring);
4251 	rx_ring->l2_accel_priv = NULL;
4252 }
4253 
4254 static int ixgbe_fwd_ring_down(struct net_device *vdev,
4255 			       struct ixgbe_fwd_adapter *accel)
4256 {
4257 	struct ixgbe_adapter *adapter = accel->real_adapter;
4258 	unsigned int rxbase = accel->rx_base_queue;
4259 	unsigned int txbase = accel->tx_base_queue;
4260 	int i;
4261 
4262 	netif_tx_stop_all_queues(vdev);
4263 
4264 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4265 		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4266 		adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
4267 	}
4268 
4269 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4270 		adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
4271 		adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
4272 	}
4273 
4275 	return 0;
4276 }
4277 
4278 static int ixgbe_fwd_ring_up(struct net_device *vdev,
4279 			     struct ixgbe_fwd_adapter *accel)
4280 {
4281 	struct ixgbe_adapter *adapter = accel->real_adapter;
4282 	unsigned int rxbase, txbase, queues;
4283 	int i, baseq, err = 0;
4284 
4285 	if (!test_bit(accel->pool, &adapter->fwd_bitmask))
4286 		return 0;
4287 
4288 	baseq = accel->pool * adapter->num_rx_queues_per_pool;
4289 	netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
4290 		   accel->pool, adapter->num_rx_pools,
4291 		   baseq, baseq + adapter->num_rx_queues_per_pool,
4292 		   adapter->fwd_bitmask);
4293 
4294 	accel->netdev = vdev;
4295 	accel->rx_base_queue = rxbase = baseq;
4296 	accel->tx_base_queue = txbase = baseq;
4297 
4298 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
4299 		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4300 
4301 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4302 		adapter->rx_ring[rxbase + i]->netdev = vdev;
4303 		adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
4304 		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
4305 	}
4306 
4307 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4308 		adapter->tx_ring[txbase + i]->netdev = vdev;
4309 		adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
4310 	}
4311 
4312 	queues = min_t(unsigned int,
4313 		       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
4314 	err = netif_set_real_num_tx_queues(vdev, queues);
4315 	if (err)
4316 		goto fwd_queue_err;
4317 
4318 	err = netif_set_real_num_rx_queues(vdev, queues);
4319 	if (err)
4320 		goto fwd_queue_err;
4321 
4322 	if (is_valid_ether_addr(vdev->dev_addr))
4323 		ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
4324 
4325 	ixgbe_fwd_psrtype(accel);
4326 	ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
4327 	return err;
4328 fwd_queue_err:
4329 	ixgbe_fwd_ring_down(vdev, accel);
4330 	return err;
4331 }
4332 
4333 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
4334 {
4335 	struct net_device *upper;
4336 	struct list_head *iter;
4337 	int err;
4338 
4339 	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4340 		if (netif_is_macvlan(upper)) {
4341 			struct macvlan_dev *dfwd = netdev_priv(upper);
4342 			struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
4343 
4344 			if (dfwd->fwd_priv) {
4345 				err = ixgbe_fwd_ring_up(upper, vadapter);
4346 				if (err)
4347 					continue;
4348 			}
4349 		}
4350 	}
4351 }
4352 
4353 static void ixgbe_configure(struct ixgbe_adapter *adapter)
4354 {
4355 	struct ixgbe_hw *hw = &adapter->hw;
4356 
4357 	ixgbe_configure_pb(adapter);
4358 #ifdef CONFIG_IXGBE_DCB
4359 	ixgbe_configure_dcb(adapter);
4360 #endif
4361 	/*
4362 	 * We must restore virtualization before VLANs or else
4363 	 * the VLVF registers will not be populated
4364 	 */
4365 	ixgbe_configure_virtualization(adapter);
4366 
4367 	ixgbe_set_rx_mode(adapter->netdev);
4368 	ixgbe_restore_vlan(adapter);
4369 
4370 	switch (hw->mac.type) {
4371 	case ixgbe_mac_82599EB:
4372 	case ixgbe_mac_X540:
4373 		hw->mac.ops.disable_rx_buff(hw);
4374 		break;
4375 	default:
4376 		break;
4377 	}
4378 
4379 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
4380 		ixgbe_init_fdir_signature_82599(&adapter->hw,
4381 						adapter->fdir_pballoc);
4382 	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
4383 		ixgbe_init_fdir_perfect_82599(&adapter->hw,
4384 					      adapter->fdir_pballoc);
4385 		ixgbe_fdir_filter_restore(adapter);
4386 	}
4387 
4388 	switch (hw->mac.type) {
4389 	case ixgbe_mac_82599EB:
4390 	case ixgbe_mac_X540:
4391 		hw->mac.ops.enable_rx_buff(hw);
4392 		break;
4393 	default:
4394 		break;
4395 	}
4396 
4397 #ifdef IXGBE_FCOE
4398 	/* configure FCoE L2 filters, redirection table, and Rx control */
4399 	ixgbe_configure_fcoe(adapter);
4400 
4401 #endif /* IXGBE_FCOE */
4402 	ixgbe_configure_tx(adapter);
4403 	ixgbe_configure_rx(adapter);
4404 	ixgbe_configure_dfwd(adapter);
4405 }
4406 
4407 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
4408 {
4409 	switch (hw->phy.type) {
4410 	case ixgbe_phy_sfp_avago:
4411 	case ixgbe_phy_sfp_ftl:
4412 	case ixgbe_phy_sfp_intel:
4413 	case ixgbe_phy_sfp_unknown:
4414 	case ixgbe_phy_sfp_passive_tyco:
4415 	case ixgbe_phy_sfp_passive_unknown:
4416 	case ixgbe_phy_sfp_active_unknown:
4417 	case ixgbe_phy_sfp_ftl_active:
4418 	case ixgbe_phy_qsfp_passive_unknown:
4419 	case ixgbe_phy_qsfp_active_unknown:
4420 	case ixgbe_phy_qsfp_intel:
4421 	case ixgbe_phy_qsfp_unknown:
4422 		return true;
4423 	case ixgbe_phy_nl:
4424 		if (hw->mac.type == ixgbe_mac_82598EB)
4425 			return true;
4426 	default:
4427 		return false;
4428 	}
4429 }
4430 
4431 /**
4432  * ixgbe_sfp_link_config - set up SFP+ link
4433  * @adapter: pointer to private adapter struct
4434  **/
4435 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
4436 {
4437 	/*
4438 	 * We are assuming the worst case scenario here, and that
4439 	 * is that an SFP was inserted/removed after the reset
4440 	 * but before SFP detection was enabled.  As such the best
	 * solution is to just start searching as soon as we start up.
4442 	 */
4443 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
4444 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
4445 
4446 	adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
4447 }
4448 
4449 /**
4450  * ixgbe_non_sfp_link_config - set up non-SFP+ link
4451  * @hw: pointer to private hardware struct
4452  *
4453  * Returns 0 on success, negative on failure
4454  **/
4455 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
4456 {
4457 	u32 speed;
4458 	bool autoneg, link_up = false;
4459 	u32 ret = IXGBE_ERR_LINK_SETUP;
4460 
4461 	if (hw->mac.ops.check_link)
4462 		ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
4463 
4464 	if (ret)
4465 		goto link_cfg_out;
4466 
4467 	speed = hw->phy.autoneg_advertised;
4468 	if ((!speed) && (hw->mac.ops.get_link_capabilities))
4469 		ret = hw->mac.ops.get_link_capabilities(hw, &speed,
4470 							&autoneg);
4471 	if (ret)
4472 		goto link_cfg_out;
4473 
4474 	if (hw->mac.ops.setup_link)
4475 		ret = hw->mac.ops.setup_link(hw, speed, link_up);
4476 link_cfg_out:
4477 	return ret;
4478 }
4479 
4480 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
4481 {
4482 	struct ixgbe_hw *hw = &adapter->hw;
4483 	u32 gpie = 0;
4484 
4485 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4486 		gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4487 		       IXGBE_GPIE_OCD;
4488 		gpie |= IXGBE_GPIE_EIAME;
4489 		/*
4490 		 * use EIAM to auto-mask when MSI-X interrupt is asserted
4491 		 * this saves a register write for every interrupt
4492 		 */
4493 		switch (hw->mac.type) {
4494 		case ixgbe_mac_82598EB:
4495 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4496 			break;
4497 		case ixgbe_mac_82599EB:
4498 		case ixgbe_mac_X540:
4499 		default:
4500 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4501 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4502 			break;
4503 		}
4504 	} else {
4505 		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
4506 		 * specifically only auto mask tx and rx interrupts */
4507 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4508 	}
4509 
4510 	/* XXX: to interrupt immediately for EICS writes, enable this */
4511 	/* gpie |= IXGBE_GPIE_EIMEN; */
4512 
4513 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
4514 		gpie &= ~IXGBE_GPIE_VTMODE_MASK;
4515 
4516 		switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4517 		case IXGBE_82599_VMDQ_8Q_MASK:
4518 			gpie |= IXGBE_GPIE_VTMODE_16;
4519 			break;
4520 		case IXGBE_82599_VMDQ_4Q_MASK:
4521 			gpie |= IXGBE_GPIE_VTMODE_32;
4522 			break;
4523 		default:
4524 			gpie |= IXGBE_GPIE_VTMODE_64;
4525 			break;
4526 		}
4527 	}
4528 
	/* Enable the thermal overheat sensor interrupt */
4530 	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
4531 		switch (adapter->hw.mac.type) {
4532 		case ixgbe_mac_82599EB:
4533 			gpie |= IXGBE_SDP0_GPIEN;
4534 			break;
4535 		case ixgbe_mac_X540:
4536 			gpie |= IXGBE_EIMS_TS;
4537 			break;
4538 		default:
4539 			break;
4540 		}
4541 	}
4542 
4543 	/* Enable fan failure interrupt */
4544 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
4545 		gpie |= IXGBE_SDP1_GPIEN;
4546 
4547 	if (hw->mac.type == ixgbe_mac_82599EB) {
4548 		gpie |= IXGBE_SDP1_GPIEN;
4549 		gpie |= IXGBE_SDP2_GPIEN;
4550 	}
4551 
4552 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4553 }
4554 
4555 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4556 {
4557 	struct ixgbe_hw *hw = &adapter->hw;
4558 	struct net_device *upper;
4559 	struct list_head *iter;
4560 	int err;
4561 	u32 ctrl_ext;
4562 
4563 	ixgbe_get_hw_control(adapter);
4564 	ixgbe_setup_gpie(adapter);
4565 
4566 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4567 		ixgbe_configure_msix(adapter);
4568 	else
4569 		ixgbe_configure_msi_and_legacy(adapter);
4570 
4571 	/* enable the optics for 82599 SFP+ fiber */
4572 	if (hw->mac.ops.enable_tx_laser)
4573 		hw->mac.ops.enable_tx_laser(hw);
4574 
4575 	clear_bit(__IXGBE_DOWN, &adapter->state);
4576 	ixgbe_napi_enable_all(adapter);
4577 
4578 	if (ixgbe_is_sfp(hw)) {
4579 		ixgbe_sfp_link_config(adapter);
4580 	} else {
4581 		err = ixgbe_non_sfp_link_config(hw);
4582 		if (err)
4583 			e_err(probe, "link_config FAILED %d\n", err);
4584 	}
4585 
4586 	/* clear any pending interrupts, may auto mask */
4587 	IXGBE_READ_REG(hw, IXGBE_EICR);
4588 	ixgbe_irq_enable(adapter, true, true);
4589 
4590 	/*
4591 	 * If this adapter has a fan, check to see if we had a failure
4592 	 * before we enabled the interrupt.
4593 	 */
4594 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
4595 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4596 		if (esdp & IXGBE_ESDP_SDP1)
4597 			e_crit(drv, "Fan has stopped, replace the adapter\n");
4598 	}
4599 
4600 	/* enable transmits */
4601 	netif_tx_start_all_queues(adapter->netdev);
4602 
4603 	/* enable any upper devices */
4604 	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4605 		if (netif_is_macvlan(upper)) {
4606 			struct macvlan_dev *vlan = netdev_priv(upper);
4607 
4608 			if (vlan->fwd_priv)
4609 				netif_tx_start_all_queues(upper);
4610 		}
4611 	}
4612 
	/* bring the link up in the watchdog; this could race with our first
4614 	 * link up interrupt but shouldn't be a problem */
4615 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4616 	adapter->link_check_timeout = jiffies;
4617 	mod_timer(&adapter->service_timer, jiffies);
4618 
4619 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
4620 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4621 	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4622 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4623 }
4624 
4625 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
4626 {
4627 	WARN_ON(in_interrupt());
4628 	/* put off any impending NetWatchDogTimeout */
4629 	adapter->netdev->trans_start = jiffies;
4630 
4631 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
4632 		usleep_range(1000, 2000);
4633 	ixgbe_down(adapter);
4634 	/*
4635 	 * If SR-IOV enabled then wait a bit before bringing the adapter
4636 	 * back up to give the VFs time to respond to the reset.  The
4637 	 * two second wait is based upon the watchdog timer cycle in
4638 	 * the VF driver.
4639 	 */
4640 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4641 		msleep(2000);
4642 	ixgbe_up(adapter);
4643 	clear_bit(__IXGBE_RESETTING, &adapter->state);
4644 }
4645 
4646 void ixgbe_up(struct ixgbe_adapter *adapter)
4647 {
4648 	/* hardware has been reset, we need to reload some things */
4649 	ixgbe_configure(adapter);
4650 
4651 	ixgbe_up_complete(adapter);
4652 }
4653 
4654 void ixgbe_reset(struct ixgbe_adapter *adapter)
4655 {
4656 	struct ixgbe_hw *hw = &adapter->hw;
4657 	int err;
4658 
4659 	/* lock SFP init bit to prevent race conditions with the watchdog */
4660 	while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
4661 		usleep_range(1000, 2000);
4662 
4663 	/* clear all SFP and link config related flags while holding SFP_INIT */
4664 	adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
4665 			     IXGBE_FLAG2_SFP_NEEDS_RESET);
4666 	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4667 
4668 	err = hw->mac.ops.init_hw(hw);
4669 	switch (err) {
4670 	case 0:
4671 	case IXGBE_ERR_SFP_NOT_PRESENT:
4672 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
4673 		break;
4674 	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
4675 		e_dev_err("master disable timed out\n");
4676 		break;
4677 	case IXGBE_ERR_EEPROM_VERSION:
4678 		/* We are running on a pre-production device, log a warning */
4679 		e_dev_warn("This device is a pre-production adapter/LOM. "
4680 			   "Please be aware there may be issues associated with "
4681 			   "your hardware.  If you are experiencing problems "
4682 			   "please contact your Intel or hardware "
4683 			   "representative who provided you with this "
4684 			   "hardware.\n");
4685 		break;
4686 	default:
4687 		e_dev_err("Hardware Error: %d\n", err);
4688 	}
4689 
4690 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4691 
4692 	/* reprogram the RAR[0] in case user changed it. */
4693 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
4694 
4695 	/* update SAN MAC vmdq pool selection */
4696 	if (hw->mac.san_mac_rar_index)
4697 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
4698 
4699 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
4700 		ixgbe_ptp_reset(adapter);
4701 }
4702 
4703 /**
4704  * ixgbe_clean_tx_ring - Free Tx Buffers
4705  * @tx_ring: ring to be cleaned
4706  **/
4707 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
4708 {
4709 	struct ixgbe_tx_buffer *tx_buffer_info;
4710 	unsigned long size;
4711 	u16 i;
4712 
4713 	/* ring already cleared, nothing to do */
4714 	if (!tx_ring->tx_buffer_info)
4715 		return;
4716 
4717 	/* Free all the Tx ring sk_buffs */
4718 	for (i = 0; i < tx_ring->count; i++) {
4719 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
4720 		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4721 	}
4722 
4723 	netdev_tx_reset_queue(txring_txq(tx_ring));
4724 
4725 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4726 	memset(tx_ring->tx_buffer_info, 0, size);
4727 
4728 	/* Zero out the descriptor ring */
4729 	memset(tx_ring->desc, 0, tx_ring->size);
4730 
4731 	tx_ring->next_to_use = 0;
4732 	tx_ring->next_to_clean = 0;
4733 }
4734 
4735 /**
4736  * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
4737  * @adapter: board private structure
4738  **/
4739 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
4740 {
4741 	int i;
4742 
4743 	for (i = 0; i < adapter->num_rx_queues; i++)
4744 		ixgbe_clean_rx_ring(adapter->rx_ring[i]);
4745 }
4746 
4747 /**
4748  * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
4749  * @adapter: board private structure
4750  **/
4751 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
4752 {
4753 	int i;
4754 
4755 	for (i = 0; i < adapter->num_tx_queues; i++)
4756 		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
4757 }
4758 
4759 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
4760 {
4761 	struct hlist_node *node2;
4762 	struct ixgbe_fdir_filter *filter;
4763 
4764 	spin_lock(&adapter->fdir_perfect_lock);
4765 
4766 	hlist_for_each_entry_safe(filter, node2,
4767 				  &adapter->fdir_filter_list, fdir_node) {
4768 		hlist_del(&filter->fdir_node);
4769 		kfree(filter);
4770 	}
4771 	adapter->fdir_filter_count = 0;
4772 
4773 	spin_unlock(&adapter->fdir_perfect_lock);
4774 }
4775 
4776 void ixgbe_down(struct ixgbe_adapter *adapter)
4777 {
4778 	struct net_device *netdev = adapter->netdev;
4779 	struct ixgbe_hw *hw = &adapter->hw;
4780 	struct net_device *upper;
4781 	struct list_head *iter;
4782 	u32 rxctrl;
4783 	int i;
4784 
4785 	/* signal that we are down to the interrupt handler */
4786 	set_bit(__IXGBE_DOWN, &adapter->state);
4787 
4788 	/* disable receives */
4789 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4790 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4791 
4792 	/* disable all enabled rx queues */
4793 	for (i = 0; i < adapter->num_rx_queues; i++)
4794 		/* this call also flushes the previous write */
4795 		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
4796 
4797 	usleep_range(10000, 20000);
4798 
4799 	netif_tx_stop_all_queues(netdev);
4800 
4801 	/* call carrier off first to avoid false dev_watchdog timeouts */
4802 	netif_carrier_off(netdev);
4803 	netif_tx_disable(netdev);
4804 
4805 	/* disable any upper devices */
4806 	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4807 		if (netif_is_macvlan(upper)) {
4808 			struct macvlan_dev *vlan = netdev_priv(upper);
4809 
4810 			if (vlan->fwd_priv) {
4811 				netif_tx_stop_all_queues(upper);
4812 				netif_carrier_off(upper);
4813 				netif_tx_disable(upper);
4814 			}
4815 		}
4816 	}
4817 
4818 	ixgbe_irq_disable(adapter);
4819 
4820 	ixgbe_napi_disable_all(adapter);
4821 
4822 	adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
4823 			     IXGBE_FLAG2_RESET_REQUESTED);
4824 	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4825 
4826 	del_timer_sync(&adapter->service_timer);
4827 
4828 	if (adapter->num_vfs) {
4829 		/* Clear EITR Select mapping */
4830 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
4831 
4832 		/* Mark all the VFs as inactive */
4833 		for (i = 0 ; i < adapter->num_vfs; i++)
4834 			adapter->vfinfo[i].clear_to_send = false;
4835 
4836 		/* ping all the active vfs to let them know we are going down */
4837 		ixgbe_ping_all_vfs(adapter);
4838 
4839 		/* Disable all VFTE/VFRE TX/RX */
4840 		ixgbe_disable_tx_rx(adapter);
4841 	}
4842 
4843 	/* disable transmits in the hardware now that interrupts are off */
4844 	for (i = 0; i < adapter->num_tx_queues; i++) {
4845 		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
4846 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
4847 	}
4848 
4849 	/* Disable the Tx DMA engine on 82599 and X540 */
4850 	switch (hw->mac.type) {
4851 	case ixgbe_mac_82599EB:
4852 	case ixgbe_mac_X540:
4853 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
4854 				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4855 				 ~IXGBE_DMATXCTL_TE));
4856 		break;
4857 	default:
4858 		break;
4859 	}
4860 
4861 	if (!pci_channel_offline(adapter->pdev))
4862 		ixgbe_reset(adapter);
4863 
4864 	/* power down the optics for 82599 SFP+ fiber */
4865 	if (hw->mac.ops.disable_tx_laser)
4866 		hw->mac.ops.disable_tx_laser(hw);
4867 
4868 	ixgbe_clean_all_tx_rings(adapter);
4869 	ixgbe_clean_all_rx_rings(adapter);
4870 
4871 #ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware, DCA settings were cleared */
4873 	ixgbe_setup_dca(adapter);
4874 #endif
4875 }
4876 
4877 /**
4878  * ixgbe_tx_timeout - Respond to a Tx Hang
4879  * @netdev: network interface device structure
4880  **/
4881 static void ixgbe_tx_timeout(struct net_device *netdev)
4882 {
4883 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4884 
4885 	/* Do the reset outside of interrupt context */
4886 	ixgbe_tx_timeout_reset(adapter);
4887 }
4888 
4889 /**
4890  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
4891  * @adapter: board private structure to initialize
4892  *
4893  * ixgbe_sw_init initializes the Adapter private data structure.
4894  * Fields are initialized based on PCI device information and
4895  * OS network device settings (MTU size).
4896  **/
4897 static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
4898 {
4899 	struct ixgbe_hw *hw = &adapter->hw;
4900 	struct pci_dev *pdev = adapter->pdev;
4901 	unsigned int rss, fdir;
4902 	u32 fwsm;
4903 #ifdef CONFIG_IXGBE_DCB
4904 	int j;
4905 	struct tc_configuration *tc;
4906 #endif
4907 
4908 	/* PCI config space info */
4909 
4910 	hw->vendor_id = pdev->vendor;
4911 	hw->device_id = pdev->device;
4912 	hw->revision_id = pdev->revision;
4913 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4914 	hw->subsystem_device_id = pdev->subsystem_device;
4915 
4916 	/* Set common capability flags and settings */
4917 	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
4918 	adapter->ring_feature[RING_F_RSS].limit = rss;
4919 	adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4920 	adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4921 	adapter->max_q_vectors = MAX_Q_VECTORS_82599;
4922 	adapter->atr_sample_rate = 20;
4923 	fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
4924 	adapter->ring_feature[RING_F_FDIR].limit = fdir;
4925 	adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
4926 #ifdef CONFIG_IXGBE_DCA
4927 	adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
4928 #endif
4929 #ifdef IXGBE_FCOE
4930 	adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4931 	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4932 #ifdef CONFIG_IXGBE_DCB
4933 	/* Default traffic class to use for FCoE */
4934 	adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4935 #endif /* CONFIG_IXGBE_DCB */
4936 #endif /* IXGBE_FCOE */
4937 
4938 	/* Set MAC specific capability flags and exceptions */
4939 	switch (hw->mac.type) {
4940 	case ixgbe_mac_82598EB:
4941 		adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
4942 		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
4943 
4944 		if (hw->device_id == IXGBE_DEV_ID_82598AT)
4945 			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4946 
4947 		adapter->max_q_vectors = MAX_Q_VECTORS_82598;
4948 		adapter->ring_feature[RING_F_FDIR].limit = 0;
4949 		adapter->atr_sample_rate = 0;
4950 		adapter->fdir_pballoc = 0;
4951 #ifdef IXGBE_FCOE
4952 		adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
4953 		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4954 #ifdef CONFIG_IXGBE_DCB
4955 		adapter->fcoe.up = 0;
#endif /* CONFIG_IXGBE_DCB */
4957 #endif /* IXGBE_FCOE */
4958 		break;
4959 	case ixgbe_mac_82599EB:
4960 		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4961 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4962 		break;
4963 	case ixgbe_mac_X540:
4964 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4965 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
4966 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4967 		break;
4968 	default:
4969 		break;
4970 	}
4971 
4972 #ifdef IXGBE_FCOE
4973 	/* FCoE support exists, always init the FCoE lock */
4974 	spin_lock_init(&adapter->fcoe.lock);
4975 
4976 #endif
4977 	/* n-tuple support exists, always init our spinlock */
4978 	spin_lock_init(&adapter->fdir_perfect_lock);
4979 
4980 #ifdef CONFIG_IXGBE_DCB
4981 	switch (hw->mac.type) {
4982 	case ixgbe_mac_X540:
4983 		adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
4984 		adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
4985 		break;
4986 	default:
4987 		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
4988 		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
4989 		break;
4990 	}
4991 
4992 	/* Configure DCB traffic classes */
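	/* Alternating 12/13 percent across the eight traffic classes adds
	 * up to 4 * (12 + 13) = 100 percent of the bandwidth.
	 */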
4993 	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4994 		tc = &adapter->dcb_cfg.tc_config[j];
4995 		tc->path[DCB_TX_CONFIG].bwg_id = 0;
4996 		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4997 		tc->path[DCB_RX_CONFIG].bwg_id = 0;
4998 		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4999 		tc->dcb_pfc = pfc_disabled;
5000 	}
5001 
5002 	/* Initialize default user to priority mapping, UPx->TC0 */
5003 	tc = &adapter->dcb_cfg.tc_config[0];
5004 	tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
5005 	tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
5006 
5007 	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
5008 	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5009 	adapter->dcb_cfg.pfc_mode_enable = false;
5010 	adapter->dcb_set_bitmap = 0x00;
5011 	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5012 	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
5013 	       sizeof(adapter->temp_dcb_cfg));
5014 
5015 #endif
5016 
5017 	/* default flow control settings */
5018 	hw->fc.requested_mode = ixgbe_fc_full;
5019 	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
5020 	ixgbe_pbthresh_setup(adapter);
5021 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
5022 	hw->fc.send_xon = true;
5023 	hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
5024 
5025 #ifdef CONFIG_PCI_IOV
5026 	if (max_vfs > 0)
5027 		e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
5028 
5029 	/* assign number of SR-IOV VFs */
5030 	if (hw->mac.type != ixgbe_mac_82598EB) {
5031 		if (max_vfs > 63) {
5032 			adapter->num_vfs = 0;
5033 			e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
5034 		} else {
5035 			adapter->num_vfs = max_vfs;
5036 		}
5037 	}
5038 #endif /* CONFIG_PCI_IOV */
5039 
5040 	/* enable itr by default in dynamic mode */
5041 	adapter->rx_itr_setting = 1;
5042 	adapter->tx_itr_setting = 1;
5043 
5044 	/* set default ring sizes */
5045 	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
5046 	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
5047 
5048 	/* set default work limits */
5049 	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
5050 
5051 	/* initialize eeprom parameters */
5052 	if (ixgbe_init_eeprom_params_generic(hw)) {
5053 		e_dev_err("EEPROM initialization failed\n");
5054 		return -EIO;
5055 	}
5056 
5057 	/* PF holds first pool slot */
5058 	set_bit(0, &adapter->fwd_bitmask);
5059 	set_bit(__IXGBE_DOWN, &adapter->state);
5060 
5061 	return 0;
5062 }
5063 
5064 /**
5065  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
5066  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
5067  *
5068  * Return 0 on success, negative on failure
5069  **/
5070 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
5071 {
5072 	struct device *dev = tx_ring->dev;
5073 	int orig_node = dev_to_node(dev);
5074 	int numa_node = -1;
5075 	int size;
5076 
5077 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5078 
5079 	if (tx_ring->q_vector)
5080 		numa_node = tx_ring->q_vector->numa_node;
5081 
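	/* Prefer memory on the q_vector's NUMA node; fall back to an
	 * allocation from any node if the node-local attempt fails.
	 */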
5082 	tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
5083 	if (!tx_ring->tx_buffer_info)
5084 		tx_ring->tx_buffer_info = vzalloc(size);
5085 	if (!tx_ring->tx_buffer_info)
5086 		goto err;
5087 
5088 	u64_stats_init(&tx_ring->syncp);
5089 
5090 	/* round up to nearest 4K */
5091 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
5092 	tx_ring->size = ALIGN(tx_ring->size, 4096);
5093 
5094 	set_dev_node(dev, numa_node);
5095 	tx_ring->desc = dma_alloc_coherent(dev,
5096 					   tx_ring->size,
5097 					   &tx_ring->dma,
5098 					   GFP_KERNEL);
5099 	set_dev_node(dev, orig_node);
5100 	if (!tx_ring->desc)
5101 		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
5102 						   &tx_ring->dma, GFP_KERNEL);
5103 	if (!tx_ring->desc)
5104 		goto err;
5105 
5106 	tx_ring->next_to_use = 0;
5107 	tx_ring->next_to_clean = 0;
5108 	return 0;
5109 
5110 err:
5111 	vfree(tx_ring->tx_buffer_info);
5112 	tx_ring->tx_buffer_info = NULL;
5113 	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
5114 	return -ENOMEM;
5115 }
5116 
5117 /**
5118  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
5119  * @adapter: board private structure
5120  *
5121  * If this function returns with an error, then it's possible one or
5122  * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
5124  *
5125  * Return 0 on success, negative on failure
5126  **/
5127 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5128 {
5129 	int i, err = 0;
5130 
5131 	for (i = 0; i < adapter->num_tx_queues; i++) {
5132 		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
5133 		if (!err)
5134 			continue;
5135 
5136 		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
5137 		goto err_setup_tx;
5138 	}
5139 
5140 	return 0;
5141 err_setup_tx:
5142 	/* rewind the index freeing the rings as we go */
5143 	while (i--)
5144 		ixgbe_free_tx_resources(adapter->tx_ring[i]);
5145 	return err;
5146 }
5147 
5148 /**
5149  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5150  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
5151  *
5152  * Returns 0 on success, negative on failure
5153  **/
5154 int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5155 {
5156 	struct device *dev = rx_ring->dev;
5157 	int orig_node = dev_to_node(dev);
5158 	int numa_node = -1;
5159 	int size;
5160 
5161 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5162 
5163 	if (rx_ring->q_vector)
5164 		numa_node = rx_ring->q_vector->numa_node;
5165 
5166 	rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
5167 	if (!rx_ring->rx_buffer_info)
5168 		rx_ring->rx_buffer_info = vzalloc(size);
5169 	if (!rx_ring->rx_buffer_info)
5170 		goto err;
5171 
5172 	u64_stats_init(&rx_ring->syncp);
5173 
5174 	/* Round up to nearest 4K */
5175 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5176 	rx_ring->size = ALIGN(rx_ring->size, 4096);
5177 
5178 	set_dev_node(dev, numa_node);
5179 	rx_ring->desc = dma_alloc_coherent(dev,
5180 					   rx_ring->size,
5181 					   &rx_ring->dma,
5182 					   GFP_KERNEL);
5183 	set_dev_node(dev, orig_node);
5184 	if (!rx_ring->desc)
5185 		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5186 						   &rx_ring->dma, GFP_KERNEL);
5187 	if (!rx_ring->desc)
5188 		goto err;
5189 
5190 	rx_ring->next_to_clean = 0;
5191 	rx_ring->next_to_use = 0;
5192 
5193 	return 0;
5194 err:
5195 	vfree(rx_ring->rx_buffer_info);
5196 	rx_ring->rx_buffer_info = NULL;
5197 	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5198 	return -ENOMEM;
5199 }
5200 
5201 /**
5202  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5203  * @adapter: board private structure
5204  *
5205  * If this function returns with an error, then it's possible one or
5206  * more of the rings is populated (while the rest are not).  It is the
5207  * caller's duty to clean up those orphaned rings.
5208  *
5209  * Return 0 on success, negative on failure
5210  **/
5211 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5212 {
5213 	int i, err = 0;
5214 
5215 	for (i = 0; i < adapter->num_rx_queues; i++) {
5216 		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5217 		if (!err)
5218 			continue;
5219 
5220 		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
5221 		goto err_setup_rx;
5222 	}
5223 
5224 #ifdef IXGBE_FCOE
5225 	err = ixgbe_setup_fcoe_ddp_resources(adapter);
5226 	if (!err)
5227 #endif
5228 		return 0;
5229 err_setup_rx:
5230 	/* rewind the index freeing the rings as we go */
5231 	while (i--)
5232 		ixgbe_free_rx_resources(adapter->rx_ring[i]);
5233 	return err;
5234 }
5235 
5236 /**
5237  * ixgbe_free_tx_resources - Free Tx Resources per Queue
5238  * @tx_ring: Tx descriptor ring for a specific queue
5239  *
5240  * Free all transmit software resources
5241  **/
5242 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5243 {
5244 	ixgbe_clean_tx_ring(tx_ring);
5245 
5246 	vfree(tx_ring->tx_buffer_info);
5247 	tx_ring->tx_buffer_info = NULL;
5248 
5249 	/* if not set, then don't free */
5250 	if (!tx_ring->desc)
5251 		return;
5252 
5253 	dma_free_coherent(tx_ring->dev, tx_ring->size,
5254 			  tx_ring->desc, tx_ring->dma);
5255 
5256 	tx_ring->desc = NULL;
5257 }
5258 
5259 /**
5260  * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
5261  * @adapter: board private structure
5262  *
5263  * Free all transmit software resources
5264  **/
5265 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5266 {
5267 	int i;
5268 
5269 	for (i = 0; i < adapter->num_tx_queues; i++)
5270 		if (adapter->tx_ring[i]->desc)
5271 			ixgbe_free_tx_resources(adapter->tx_ring[i]);
5272 }
5273 
5274 /**
5275  * ixgbe_free_rx_resources - Free Rx Resources
5276  * @rx_ring: ring to clean the resources from
5277  *
5278  * Free all receive software resources
5279  **/
5280 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5281 {
5282 	ixgbe_clean_rx_ring(rx_ring);
5283 
5284 	vfree(rx_ring->rx_buffer_info);
5285 	rx_ring->rx_buffer_info = NULL;
5286 
5287 	/* if not set, then don't free */
5288 	if (!rx_ring->desc)
5289 		return;
5290 
5291 	dma_free_coherent(rx_ring->dev, rx_ring->size,
5292 			  rx_ring->desc, rx_ring->dma);
5293 
5294 	rx_ring->desc = NULL;
5295 }
5296 
5297 /**
5298  * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
5299  * @adapter: board private structure
5300  *
5301  * Free all receive software resources
5302  **/
5303 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5304 {
5305 	int i;
5306 
5307 #ifdef IXGBE_FCOE
5308 	ixgbe_free_fcoe_ddp_resources(adapter);
5309 
5310 #endif
5311 	for (i = 0; i < adapter->num_rx_queues; i++)
5312 		if (adapter->rx_ring[i]->desc)
5313 			ixgbe_free_rx_resources(adapter->rx_ring[i]);
5314 }
5315 
5316 /**
5317  * ixgbe_change_mtu - Change the Maximum Transfer Unit
5318  * @netdev: network interface device structure
5319  * @new_mtu: new value for maximum frame size
5320  *
5321  * Returns 0 on success, negative on failure
5322  **/
5323 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5324 {
5325 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
5326 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5327 
5328 	/* MTU < 68 is an error and causes problems on some kernels */
5329 	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5330 		return -EINVAL;
5331 
5332 	/*
5333 	 * For 82599EB we cannot allow legacy VFs to enable their receive
5334 	 * paths when MTU greater than 1500 is configured.  So display a
5335 	 * warning that legacy VFs will be disabled.
5336 	 */
5337 	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
5338 	    (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
5339 	    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
5340 		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
5341 
5342 	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5343 
5344 	/* must set new MTU before calling down or up */
5345 	netdev->mtu = new_mtu;
5346 
5347 	if (netif_running(netdev))
5348 		ixgbe_reinit_locked(adapter);
5349 
5350 	return 0;
5351 }
5352 
5353 /**
5354  * ixgbe_open - Called when a network interface is made active
5355  * @netdev: network interface device structure
5356  *
5357  * Returns 0 on success, negative value on failure
5358  *
5359  * The open entry point is called when a network interface is made
5360  * active by the system (IFF_UP).  At this point all resources needed
5361  * for transmit and receive operations are allocated, the interrupt
5362  * handler is registered with the OS, the watchdog timer is started,
5363  * and the stack is notified that the interface is ready.
5364  **/
5365 static int ixgbe_open(struct net_device *netdev)
5366 {
5367 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
5368 	int err, queues;
5369 
5370 	/* disallow open during test */
5371 	if (test_bit(__IXGBE_TESTING, &adapter->state))
5372 		return -EBUSY;
5373 
5374 	netif_carrier_off(netdev);
5375 
5376 	/* allocate transmit descriptors */
5377 	err = ixgbe_setup_all_tx_resources(adapter);
5378 	if (err)
5379 		goto err_setup_tx;
5380 
5381 	/* allocate receive descriptors */
5382 	err = ixgbe_setup_all_rx_resources(adapter);
5383 	if (err)
5384 		goto err_setup_rx;
5385 
5386 	ixgbe_configure(adapter);
5387 
5388 	err = ixgbe_request_irq(adapter);
5389 	if (err)
5390 		goto err_req_irq;
5391 
5392 	/* Notify the stack of the actual queue counts. */
5393 	if (adapter->num_rx_pools > 1)
5394 		queues = adapter->num_rx_queues_per_pool;
5395 	else
5396 		queues = adapter->num_tx_queues;
5397 
5398 	err = netif_set_real_num_tx_queues(netdev, queues);
5399 	if (err)
5400 		goto err_set_queues;
5401 
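	/* cap the reported Rx queues when multiple pools (L2 fwd offload) are in use */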
5402 	if (adapter->num_rx_pools > 1 &&
5403 	    adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
5404 		queues = IXGBE_MAX_L2A_QUEUES;
5405 	else
5406 		queues = adapter->num_rx_queues;
5407 	err = netif_set_real_num_rx_queues(netdev, queues);
5408 	if (err)
5409 		goto err_set_queues;
5410 
5411 	ixgbe_ptp_init(adapter);
5412 
5413 	ixgbe_up_complete(adapter);
5414 
5415 	return 0;
5416 
5417 err_set_queues:
5418 	ixgbe_free_irq(adapter);
5419 err_req_irq:
5420 	ixgbe_free_all_rx_resources(adapter);
5421 err_setup_rx:
5422 	ixgbe_free_all_tx_resources(adapter);
5423 err_setup_tx:
5424 	ixgbe_reset(adapter);
5425 
5426 	return err;
5427 }
5428 
5429 /**
5430  * ixgbe_close - Disables a network interface
5431  * @netdev: network interface device structure
5432  *
5433  * Returns 0, this is not allowed to fail
5434  *
5435  * The close entry point is called when an interface is de-activated
5436  * by the OS.  The hardware is still under the drivers control, but
5437  * needs to be disabled.  A global MAC reset is issued to stop the
5438  * hardware, and all transmit and receive resources are freed.
5439  **/
5440 static int ixgbe_close(struct net_device *netdev)
5441 {
5442 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
5443 
5444 	ixgbe_ptp_stop(adapter);
5445 
5446 	ixgbe_down(adapter);
5447 	ixgbe_free_irq(adapter);
5448 
5449 	ixgbe_fdir_filter_exit(adapter);
5450 
5451 	ixgbe_free_all_tx_resources(adapter);
5452 	ixgbe_free_all_rx_resources(adapter);
5453 
5454 	ixgbe_release_hw_control(adapter);
5455 
5456 	return 0;
5457 }
5458 
5459 #ifdef CONFIG_PM
5460 static int ixgbe_resume(struct pci_dev *pdev)
5461 {
5462 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5463 	struct net_device *netdev = adapter->netdev;
5464 	int err;
5465 
5466 	pci_set_power_state(pdev, PCI_D0);
5467 	pci_restore_state(pdev);
5468 	/*
5469 	 * pci_restore_state clears dev->state_saved so call
5470 	 * pci_save_state to restore it.
5471 	 */
5472 	pci_save_state(pdev);
5473 
5474 	err = pci_enable_device_mem(pdev);
5475 	if (err) {
5476 		e_dev_err("Cannot enable PCI device from suspend\n");
5477 		return err;
5478 	}
5479 	pci_set_master(pdev);
5480 
5481 	pci_wake_from_d3(pdev, false);
5482 
5483 	ixgbe_reset(adapter);
5484 
5485 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5486 
5487 	rtnl_lock();
5488 	err = ixgbe_init_interrupt_scheme(adapter);
5489 	if (!err && netif_running(netdev))
5490 		err = ixgbe_open(netdev);
5491 
5492 	rtnl_unlock();
5493 
5494 	if (err)
5495 		return err;
5496 
5497 	netif_device_attach(netdev);
5498 
5499 	return 0;
5500 }
5501 #endif /* CONFIG_PM */
5502 
5503 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5504 {
5505 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5506 	struct net_device *netdev = adapter->netdev;
5507 	struct ixgbe_hw *hw = &adapter->hw;
5508 	u32 ctrl, fctrl;
5509 	u32 wufc = adapter->wol;
5510 #ifdef CONFIG_PM
5511 	int retval = 0;
5512 #endif
5513 
5514 	netif_device_detach(netdev);
5515 
5516 	rtnl_lock();
5517 	if (netif_running(netdev)) {
5518 		ixgbe_down(adapter);
5519 		ixgbe_free_irq(adapter);
5520 		ixgbe_free_all_tx_resources(adapter);
5521 		ixgbe_free_all_rx_resources(adapter);
5522 	}
5523 	rtnl_unlock();
5524 
5525 	ixgbe_clear_interrupt_scheme(adapter);
5526 
5527 #ifdef CONFIG_PM
5528 	retval = pci_save_state(pdev);
5529 	if (retval)
5530 		return retval;
5531 
5532 #endif
5533 	if (hw->mac.ops.stop_link_on_d3)
5534 		hw->mac.ops.stop_link_on_d3(hw);
5535 
5536 	if (wufc) {
5537 		ixgbe_set_rx_mode(netdev);
5538 
5539 		/* enable the optics for 82599 SFP+ fiber so that WoL can work */
5540 		if (hw->mac.ops.enable_tx_laser)
5541 			hw->mac.ops.enable_tx_laser(hw);
5542 
5543 		/* turn on all-multi mode if wake on multicast is enabled */
5544 		if (wufc & IXGBE_WUFC_MC) {
5545 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5546 			fctrl |= IXGBE_FCTRL_MPE;
5547 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5548 		}
5549 
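		/* request PCIe master (GIO) disable before arming the wake-up filters */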
5550 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5551 		ctrl |= IXGBE_CTRL_GIO_DIS;
5552 		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5553 
5554 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5555 	} else {
5556 		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5557 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5558 	}
5559 
5560 	switch (hw->mac.type) {
5561 	case ixgbe_mac_82598EB:
5562 		pci_wake_from_d3(pdev, false);
5563 		break;
5564 	case ixgbe_mac_82599EB:
5565 	case ixgbe_mac_X540:
5566 		pci_wake_from_d3(pdev, !!wufc);
5567 		break;
5568 	default:
5569 		break;
5570 	}
5571 
5572 	*enable_wake = !!wufc;
5573 
5574 	ixgbe_release_hw_control(adapter);
5575 
5576 	pci_disable_device(pdev);
5577 
5578 	return 0;
5579 }
5580 
5581 #ifdef CONFIG_PM
5582 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5583 {
5584 	int retval;
5585 	bool wake;
5586 
5587 	retval = __ixgbe_shutdown(pdev, &wake);
5588 	if (retval)
5589 		return retval;
5590 
5591 	if (wake) {
5592 		pci_prepare_to_sleep(pdev);
5593 	} else {
5594 		pci_wake_from_d3(pdev, false);
5595 		pci_set_power_state(pdev, PCI_D3hot);
5596 	}
5597 
5598 	return 0;
5599 }
5600 #endif /* CONFIG_PM */
5601 
5602 static void ixgbe_shutdown(struct pci_dev *pdev)
5603 {
5604 	bool wake;
5605 
5606 	__ixgbe_shutdown(pdev, &wake);
5607 
5608 	if (system_state == SYSTEM_POWER_OFF) {
5609 		pci_wake_from_d3(pdev, wake);
5610 		pci_set_power_state(pdev, PCI_D3hot);
5611 	}
5612 }
5613 
5614 /**
5615  * ixgbe_update_stats - Update the board statistics counters.
5616  * @adapter: board private structure
5617  **/
5618 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5619 {
5620 	struct net_device *netdev = adapter->netdev;
5621 	struct ixgbe_hw *hw = &adapter->hw;
5622 	struct ixgbe_hw_stats *hwstats = &adapter->stats;
5623 	u64 total_mpc = 0;
5624 	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5625 	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5626 	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5627 	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
5628 
5629 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5630 	    test_bit(__IXGBE_RESETTING, &adapter->state))
5631 		return;
5632 
5633 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
5634 		u64 rsc_count = 0;
5635 		u64 rsc_flush = 0;
5636 		for (i = 0; i < adapter->num_rx_queues; i++) {
5637 			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5638 			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
5639 		}
5640 		adapter->rsc_total_count = rsc_count;
5641 		adapter->rsc_total_flush = rsc_flush;
5642 	}
5643 
5644 	for (i = 0; i < adapter->num_rx_queues; i++) {
5645 		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5646 		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5647 		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5648 		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5649 		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
5650 		bytes += rx_ring->stats.bytes;
5651 		packets += rx_ring->stats.packets;
5652 	}
5653 	adapter->non_eop_descs = non_eop_descs;
5654 	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5655 	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5656 	adapter->hw_csum_rx_error = hw_csum_rx_error;
5657 	netdev->stats.rx_bytes = bytes;
5658 	netdev->stats.rx_packets = packets;
5659 
5660 	bytes = 0;
5661 	packets = 0;
5662 	/* gather some stats to the adapter struct that are per queue */
5663 	for (i = 0; i < adapter->num_tx_queues; i++) {
5664 		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5665 		restart_queue += tx_ring->tx_stats.restart_queue;
5666 		tx_busy += tx_ring->tx_stats.tx_busy;
5667 		bytes += tx_ring->stats.bytes;
5668 		packets += tx_ring->stats.packets;
5669 	}
5670 	adapter->restart_queue = restart_queue;
5671 	adapter->tx_busy = tx_busy;
5672 	netdev->stats.tx_bytes = bytes;
5673 	netdev->stats.tx_packets = packets;
5674 
5675 	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5676 
5677 	/* 8 register reads */
5678 	for (i = 0; i < 8; i++) {
5679 		/* for packet buffers not used, the register should read 0 */
5680 		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5681 		missed_rx += mpc;
5682 		hwstats->mpc[i] += mpc;
5683 		total_mpc += hwstats->mpc[i];
5684 		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5685 		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
5686 		switch (hw->mac.type) {
5687 		case ixgbe_mac_82598EB:
5688 			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5689 			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5690 			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5691 			hwstats->pxonrxc[i] +=
5692 				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5693 			break;
5694 		case ixgbe_mac_82599EB:
5695 		case ixgbe_mac_X540:
5696 			hwstats->pxonrxc[i] +=
5697 				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5698 			break;
5699 		default:
5700 			break;
5701 		}
5702 	}
5703 
5704 	/* 16 register reads */
5705 	for (i = 0; i < 16; i++) {
5706 		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5707 		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5708 		if ((hw->mac.type == ixgbe_mac_82599EB) ||
5709 		    (hw->mac.type == ixgbe_mac_X540)) {
5710 			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
5711 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
5712 			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
5713 			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
5714 		}
5715 	}
5716 
5717 	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
5718 	/* work around hardware counting issue */
5719 	hwstats->gprc -= missed_rx;
5720 
5721 	ixgbe_update_xoff_received(adapter);
5722 
5723 	/* 82598 hardware only has a 32 bit counter in the high register */
5724 	switch (hw->mac.type) {
5725 	case ixgbe_mac_82598EB:
5726 		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5727 		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5728 		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5729 		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5730 		break;
5731 	case ixgbe_mac_X540:
5732 		/* OS2BMC stats are X540 only */
5733 		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
5734 		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
5735 		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5736 		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
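		/* fall through - X540 shares the remaining counters with 82599 */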
5737 	case ixgbe_mac_82599EB:
5738 		for (i = 0; i < 16; i++)
5739 			adapter->hw_rx_no_dma_resources +=
5740 					     IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5741 		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5742 		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
5743 		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
5744 		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
5745 		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5746 		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5747 		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5748 		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5749 		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5750 #ifdef IXGBE_FCOE
5751 		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5752 		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5753 		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5754 		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5755 		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5756 		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5757 		/* Add up per-CPU counters for total DDP allocation failures */
5758 		if (adapter->fcoe.ddp_pool) {
5759 			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
5760 			struct ixgbe_fcoe_ddp_pool *ddp_pool;
5761 			unsigned int cpu;
5762 			u64 noddp = 0, noddp_ext_buff = 0;
5763 			for_each_possible_cpu(cpu) {
5764 				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
5765 				noddp += ddp_pool->noddp;
5766 				noddp_ext_buff += ddp_pool->noddp_ext_buff;
5767 			}
5768 			hwstats->fcoe_noddp = noddp;
5769 			hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
5770 		}
5771 #endif /* IXGBE_FCOE */
5772 		break;
5773 	default:
5774 		break;
5775 	}
5776 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5777 	hwstats->bprc += bprc;
5778 	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
5779 	if (hw->mac.type == ixgbe_mac_82598EB)
5780 		hwstats->mprc -= bprc;
5781 	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5782 	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5783 	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5784 	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5785 	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5786 	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5787 	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5788 	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
5789 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
5790 	hwstats->lxontxc += lxon;
5791 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
5792 	hwstats->lxofftxc += lxoff;
5793 	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5794 	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
5795 	/*
5796 	 * 82598 errata - tx of flow control packets is included in tx counters
5797 	 */
5798 	xon_off_tot = lxon + lxoff;
5799 	hwstats->gptc -= xon_off_tot;
5800 	hwstats->mptc -= xon_off_tot;
5801 	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
5802 	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5803 	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5804 	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5805 	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5806 	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5807 	hwstats->ptc64 -= xon_off_tot;
5808 	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5809 	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5810 	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5811 	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5812 	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5813 	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
5814 
5815 	/* Fill out the OS statistics structure */
5816 	netdev->stats.multicast = hwstats->mprc;
5817 
5818 	/* Rx Errors */
5819 	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
5820 	netdev->stats.rx_dropped = 0;
5821 	netdev->stats.rx_length_errors = hwstats->rlec;
5822 	netdev->stats.rx_crc_errors = hwstats->crcerrs;
5823 	netdev->stats.rx_missed_errors = total_mpc;
5824 }
5825 
5826 /**
5827  * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
5828  * @adapter: pointer to the device adapter structure
5829  **/
5830 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5831 {
5832 	struct ixgbe_hw *hw = &adapter->hw;
5833 	int i;
5834 
5835 	if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
5836 		return;
5837 
5838 	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5839 
5840 	/* if interface is down do nothing */
5841 	if (test_bit(__IXGBE_DOWN, &adapter->state))
5842 		return;
5843 
5844 	/* do nothing if we are not using signature filters */
5845 	if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
5846 		return;
5847 
5848 	adapter->fdir_overflow++;
5849 
5850 	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5851 		for (i = 0; i < adapter->num_tx_queues; i++)
5852 			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5853 			        &(adapter->tx_ring[i]->state));
5854 		/* re-enable flow director interrupts */
5855 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
5856 	} else {
5857 		e_err(probe, "failed to finish FDIR re-initialization, "
5858 		      "ignored adding FDIR ATR filters\n");
5859 	}
5860 }
5861 
5862 /**
5863  * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
5864  * @adapter: pointer to the device adapter structure
5865  *
5866  * This function serves two purposes.  First, it strobes the interrupt lines
5867  * in order to make certain that interrupts are occurring.  Second, it sets
5868  * the bits needed to check for TX hangs.  As a result we should immediately
5869  * determine if a hang has occurred.
5870  **/
5871 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5872 {
5873 	struct ixgbe_hw *hw = &adapter->hw;
5874 	u64 eics = 0;
5875 	int i;
5876 
5877 	/* If we're down or resetting, just bail */
5878 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5879 	    test_bit(__IXGBE_RESETTING, &adapter->state))
5880 		return;
5881 
5882 	/* Force detection of hung controller */
5883 	if (netif_carrier_ok(adapter->netdev)) {
5884 		for (i = 0; i < adapter->num_tx_queues; i++)
5885 			set_check_for_tx_hang(adapter->tx_ring[i]);
5886 	}
5887 
5888 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
5889 		/*
5890 		 * for legacy and MSI interrupts don't set any bits
5891 		 * that are enabled for EIAM, because this operation
5892 		 * would set *both* EIMS and EICS for any bit in EIAM
5893 		 */
5894 		IXGBE_WRITE_REG(hw, IXGBE_EICS,
5895 			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5896 	} else {
5897 		/* get one bit for every active tx/rx interrupt vector */
5898 		for (i = 0; i < adapter->num_q_vectors; i++) {
5899 			struct ixgbe_q_vector *qv = adapter->q_vector[i];
5900 			if (qv->rx.ring || qv->tx.ring)
5901 				eics |= ((u64)1 << i);
5902 		}
5903 	}
5904 
5905 	/* Cause software interrupt to ensure rings are cleaned */
5906 	ixgbe_irq_rearm_queues(adapter, eics);
5907 
5908 }
5909 
5910 /**
5911  * ixgbe_watchdog_update_link - update the link status
5912  * @adapter: pointer to the device adapter structure
5914  **/
5915 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5916 {
5917 	struct ixgbe_hw *hw = &adapter->hw;
5918 	u32 link_speed = adapter->link_speed;
5919 	bool link_up = adapter->link_up;
5920 	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
5921 
5922 	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5923 		return;
5924 
5925 	if (hw->mac.ops.check_link) {
5926 		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
5927 	} else {
5928 		/* always assume link is up, if no check link function */
5929 		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5930 		link_up = true;
5931 	}
5932 
5933 	if (adapter->ixgbe_ieee_pfc)
5934 		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
5935 
5936 	if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
5937 		hw->mac.ops.fc_enable(hw);
5938 		ixgbe_set_rx_drop_en(adapter);
5939 	}
5940 
5941 	if (link_up ||
5942 	    time_after(jiffies, (adapter->link_check_timeout +
5943 				 IXGBE_TRY_LINK_TIMEOUT))) {
5944 		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5945 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5946 		IXGBE_WRITE_FLUSH(hw);
5947 	}
5948 
5949 	adapter->link_up = link_up;
5950 	adapter->link_speed = link_speed;
5951 }
5952 
5953 static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
5954 {
5955 #ifdef CONFIG_IXGBE_DCB
5956 	struct net_device *netdev = adapter->netdev;
5957 	struct dcb_app app = {
5958 			      .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
5959 			      .protocol = 0,
5960 			     };
5961 	u8 up = 0;
5962 
5963 	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
5964 		up = dcb_ieee_getapp_mask(netdev, &app);
5965 
5966 	adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
5967 #endif
5968 }
5969 
5970 /**
5971  * ixgbe_watchdog_link_is_up - update netif_carrier status and
5972  *                             print link up message
5973  * @adapter: pointer to the device adapter structure
5974  **/
5975 static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5976 {
5977 	struct net_device *netdev = adapter->netdev;
5978 	struct ixgbe_hw *hw = &adapter->hw;
5979 	u32 link_speed = adapter->link_speed;
5980 	bool flow_rx, flow_tx;
5981 
5982 	/* only continue if link was previously down */
5983 	if (netif_carrier_ok(netdev))
5984 		return;
5985 
5986 	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
5987 
5988 	switch (hw->mac.type) {
5989 	case ixgbe_mac_82598EB: {
5990 		u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5991 		u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5992 		flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5993 		flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5994 	}
5995 		break;
5996 	case ixgbe_mac_X540:
5997 	case ixgbe_mac_82599EB: {
5998 		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5999 		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6000 		flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6001 		flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6002 	}
6003 		break;
6004 	default:
6005 		flow_tx = false;
6006 		flow_rx = false;
6007 		break;
6008 	}
6009 
6010 	adapter->last_rx_ptp_check = jiffies;
6011 
6012 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6013 		ixgbe_ptp_start_cyclecounter(adapter);
6014 
6015 	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
6016 	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6017 	       "10 Gbps" :
6018 	       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6019 	       "1 Gbps" :
6020 	       (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6021 	       "100 Mbps" :
6022 	       "unknown speed"))),
6023 	       ((flow_rx && flow_tx) ? "RX/TX" :
6024 	       (flow_rx ? "RX" :
6025 	       (flow_tx ? "TX" : "None"))));
6026 
6027 	netif_carrier_on(netdev);
6028 	ixgbe_check_vf_rate_limit(adapter);
6029 
6030 	/* update the default user priority for VFs */
6031 	ixgbe_update_default_up(adapter);
6032 
6033 	/* ping all the active vfs to let them know link has changed */
6034 	ixgbe_ping_all_vfs(adapter);
6035 }
6036 
6037 /**
6038  * ixgbe_watchdog_link_is_down - update netif_carrier status and
6039  *                               print link down message
6040  * @adapter: pointer to the adapter structure
6041  **/
6042 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
6043 {
6044 	struct net_device *netdev = adapter->netdev;
6045 	struct ixgbe_hw *hw = &adapter->hw;
6046 
6047 	adapter->link_up = false;
6048 	adapter->link_speed = 0;
6049 
6050 	/* only continue if link was up previously */
6051 	if (!netif_carrier_ok(netdev))
6052 		return;
6053 
6054 	/* poll for SFP+ cable when link is down */
6055 	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
6056 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
6057 
6058 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6059 		ixgbe_ptp_start_cyclecounter(adapter);
6060 
6061 	e_info(drv, "NIC Link is Down\n");
6062 	netif_carrier_off(netdev);
6063 
6064 	/* ping all the active vfs to let them know link has changed */
6065 	ixgbe_ping_all_vfs(adapter);
6066 }
6067 
6068 /**
6069  * ixgbe_watchdog_flush_tx - flush queues on link down
6070  * @adapter: pointer to the device adapter structure
6071  **/
6072 static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
6073 {
6074 	int i;
6075 	int some_tx_pending = 0;
6076 
6077 	if (!netif_carrier_ok(adapter->netdev)) {
6078 		for (i = 0; i < adapter->num_tx_queues; i++) {
6079 			struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6080 			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
6081 				some_tx_pending = 1;
6082 				break;
6083 			}
6084 		}
6085 
6086 		if (some_tx_pending) {
6087 			/* We've lost link, so the controller stops DMA,
6088 			 * but we've got queued Tx work that's never going
6089 			 * to get done, so reset controller to flush Tx.
6090 			 * (Do the reset outside of interrupt context).
6091 			 */
6092 			e_warn(drv, "initiating reset to clear Tx work after link loss\n");
6093 			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
6094 		}
6095 	}
6096 }
6097 
6098 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6099 {
6100 	u32 ssvpc;
6101 
6102 	/* Do not perform spoof check for 82598 or if not in IOV mode */
6103 	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
6104 	    adapter->num_vfs == 0)
6105 		return;
6106 
6107 	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
6108 
6109 	/*
6110 	 * The SSVPC register is cleared on read; if it reads zero, no
6111 	 * spoofed packets were seen in the last interval.
6112 	 */
6113 	if (!ssvpc)
6114 		return;
6115 
6116 	e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
6117 }
6118 
6119 /**
6120  * ixgbe_watchdog_subtask - check and bring link up
6121  * @adapter: pointer to the device adapter structure
6122  **/
6123 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
6124 {
6125 	/* if interface is down do nothing */
6126 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6127 	    test_bit(__IXGBE_RESETTING, &adapter->state))
6128 		return;
6129 
6130 	ixgbe_watchdog_update_link(adapter);
6131 
6132 	if (adapter->link_up)
6133 		ixgbe_watchdog_link_is_up(adapter);
6134 	else
6135 		ixgbe_watchdog_link_is_down(adapter);
6136 
6137 	ixgbe_spoof_check(adapter);
6138 	ixgbe_update_stats(adapter);
6139 
6140 	ixgbe_watchdog_flush_tx(adapter);
6141 }
6142 
6143 /**
6144  * ixgbe_sfp_detection_subtask - poll for SFP+ cable
6145  * @adapter: the ixgbe adapter structure
6146  **/
6147 static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
6148 {
6149 	struct ixgbe_hw *hw = &adapter->hw;
6150 	s32 err;
6151 
6152 	/* not searching for SFP so there is nothing to do here */
6153 	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
6154 	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6155 		return;
6156 
6157 	/* someone else is in init, wait until next service event */
6158 	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6159 		return;
6160 
6161 	err = hw->phy.ops.identify_sfp(hw);
6162 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6163 		goto sfp_out;
6164 
6165 	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
6166 		/* If no cable is present, then we need to reset
6167 		 * the next time we find a good cable. */
6168 		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
6169 	}
6170 
6171 	/* exit on error */
6172 	if (err)
6173 		goto sfp_out;
6174 
6175 	/* exit if reset not needed */
6176 	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6177 		goto sfp_out;
6178 
6179 	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
6180 
6181 	/*
6182 	 * A module may be identified correctly, but the EEPROM may not have
6183 	 * support for that module.  setup_sfp() will fail in that case, so
6184 	 * we should not allow that module to load.
6185 	 */
6186 	if (hw->mac.type == ixgbe_mac_82598EB)
6187 		err = hw->phy.ops.reset(hw);
6188 	else
6189 		err = hw->mac.ops.setup_sfp(hw);
6190 
6191 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6192 		goto sfp_out;
6193 
6194 	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
6195 	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
6196 
6197 sfp_out:
6198 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6199 
6200 	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
6201 	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
6202 		e_dev_err("failed to initialize because an unsupported "
6203 			  "SFP+ module type was detected.\n");
6204 		e_dev_err("Reload the driver after installing a "
6205 			  "supported module.\n");
6206 		unregister_netdev(adapter->netdev);
6207 	}
6208 }
6209 
6210 /**
6211  * ixgbe_sfp_link_config_subtask - set up link SFP after module install
6212  * @adapter: the ixgbe adapter structure
6213  **/
6214 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
6215 {
6216 	struct ixgbe_hw *hw = &adapter->hw;
6217 	u32 speed;
6218 	bool autoneg = false;
6219 
6220 	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
6221 		return;
6222 
6223 	/* someone else is in init, wait until next service event */
6224 	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6225 		return;
6226 
6227 	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
6228 
6229 	speed = hw->phy.autoneg_advertised;
6230 	if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
6231 		hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
6232 
6233 		/* setup the highest link when no autoneg */
6234 		if (!autoneg) {
6235 			if (speed & IXGBE_LINK_SPEED_10GB_FULL)
6236 				speed = IXGBE_LINK_SPEED_10GB_FULL;
6237 		}
6238 	}
6239 
6240 	if (hw->mac.ops.setup_link)
6241 		hw->mac.ops.setup_link(hw, speed, true);
6242 
6243 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
6244 	adapter->link_check_timeout = jiffies;
6245 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6246 }
6247 
6248 #ifdef CONFIG_PCI_IOV
6249 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
6250 {
6251 	int vf;
6252 	struct ixgbe_hw *hw = &adapter->hw;
6253 	struct net_device *netdev = adapter->netdev;
6254 	u32 gpc;
6255 	u32 ciaa, ciad;
6256 
6257 	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
6258 	if (gpc) /* If incrementing then no need for the check below */
6259 		return;
6260 	/*
6261 	 * Check to see if a bad DMA write target from an errant or
6262 	 * malicious VF has caused a PCIe error.  If so then we can
6263 	 * issue a VFLR to the offending VF(s) and then resume without
6264 	 * requesting a full slot reset.
6265 	 */
6266 
6267 	for (vf = 0; vf < adapter->num_vfs; vf++) {
6268 		ciaa = (vf << 16) | 0x80000000;
6269 		/* align the 32-bit read; the status register we want is at offset 6 */
6270 		ciaa |= PCI_COMMAND;
6271 		IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
6272 		ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
6273 		ciaa &= 0x7FFFFFFF;
6274 		/* disable debug mode asap after reading data */
6275 		IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
6276 		/* Get the upper 16 bits which will be the PCI status reg */
6277 		ciad >>= 16;
6278 		if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
6279 			netdev_err(netdev, "VF %d Hung DMA\n", vf);
6280 			/* Issue VFLR */
6281 			ciaa = (vf << 16) | 0x80000000;
6282 			ciaa |= 0xA8;
6283 			IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
6284 			ciad = 0x00008000;  /* VFLR */
6285 			IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
6286 			ciaa &= 0x7FFFFFFF;
6287 			IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
6288 		}
6289 	}
6290 }
6291 
6292 #endif
6293 /**
6294  * ixgbe_service_timer - Timer Call-back
6295  * @data: pointer to adapter cast into an unsigned long
6296  **/
6297 static void ixgbe_service_timer(unsigned long data)
6298 {
6299 	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
6300 	unsigned long next_event_offset;
6301 	bool ready = true;
6302 
6303 	/* poll faster when waiting for link */
6304 	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
6305 		next_event_offset = HZ / 10;
6306 	else
6307 		next_event_offset = HZ * 2;
6308 
6309 #ifdef CONFIG_PCI_IOV
6310 	/*
6311 	 * don't bother with SR-IOV VF DMA hang check if there are
6312 	 * no VFs or the link is down
6313 	 */
6314 	if (!adapter->num_vfs ||
6315 	    (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6316 		goto normal_timer_service;
6317 
6318 	/* If we have VFs allocated then we must check for DMA hangs */
6319 	ixgbe_check_for_bad_vf(adapter);
6320 	next_event_offset = HZ / 50;
6321 	adapter->timer_event_accumulator++;
6322 
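	/* only run the full service task once every 100 fast ticks (~2 seconds) */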
6323 	if (adapter->timer_event_accumulator >= 100)
6324 		adapter->timer_event_accumulator = 0;
6325 	else
6326 		ready = false;
6327 
6328 normal_timer_service:
6329 #endif
6330 	/* Reset the timer */
6331 	mod_timer(&adapter->service_timer, next_event_offset + jiffies);
6332 
6333 	if (ready)
6334 		ixgbe_service_event_schedule(adapter);
6335 }
6336 
6337 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
6338 {
6339 	if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
6340 		return;
6341 
6342 	adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
6343 
6344 	/* If we're already down or resetting, just bail */
6345 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6346 	    test_bit(__IXGBE_RESETTING, &adapter->state))
6347 		return;
6348 
6349 	ixgbe_dump(adapter);
6350 	netdev_err(adapter->netdev, "Reset adapter\n");
6351 	adapter->tx_timeout_count++;
6352 
6353 	ixgbe_reinit_locked(adapter);
6354 }
6355 
6356 /**
6357  * ixgbe_service_task - manages and runs subtasks
6358  * @work: pointer to work_struct containing our data
6359  **/
6360 static void ixgbe_service_task(struct work_struct *work)
6361 {
6362 	struct ixgbe_adapter *adapter = container_of(work,
6363 						     struct ixgbe_adapter,
6364 						     service_task);
6365 	ixgbe_reset_subtask(adapter);
6366 	ixgbe_sfp_detection_subtask(adapter);
6367 	ixgbe_sfp_link_config_subtask(adapter);
6368 	ixgbe_check_overtemp_subtask(adapter);
6369 	ixgbe_watchdog_subtask(adapter);
6370 	ixgbe_fdir_reinit_subtask(adapter);
6371 	ixgbe_check_hang_subtask(adapter);
6372 
6373 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
6374 		ixgbe_ptp_overflow_check(adapter);
6375 		ixgbe_ptp_rx_hang(adapter);
6376 	}
6377 
6378 	ixgbe_service_event_complete(adapter);
6379 }
6380 
6381 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
6382 		     struct ixgbe_tx_buffer *first,
6383 		     u8 *hdr_len)
6384 {
6385 	struct sk_buff *skb = first->skb;
6386 	u32 vlan_macip_lens, type_tucmd;
6387 	u32 mss_l4len_idx, l4len;
6388 
6389 	if (skb->ip_summed != CHECKSUM_PARTIAL)
6390 		return 0;
6391 
6392 	if (!skb_is_gso(skb))
6393 		return 0;
6394 
6395 	if (skb_header_cloned(skb)) {
6396 		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
6397 		if (err)
6398 			return err;
6399 	}
6400 
6401 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
6402 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
6403 
6404 	if (first->protocol == __constant_htons(ETH_P_IP)) {
6405 		struct iphdr *iph = ip_hdr(skb);
6406 		iph->tot_len = 0;
6407 		iph->check = 0;
6408 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6409 							 iph->daddr, 0,
6410 							 IPPROTO_TCP,
6411 							 0);
6412 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6413 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6414 				   IXGBE_TX_FLAGS_CSUM |
6415 				   IXGBE_TX_FLAGS_IPV4;
6416 	} else if (skb_is_gso_v6(skb)) {
6417 		ipv6_hdr(skb)->payload_len = 0;
6418 		tcp_hdr(skb)->check =
6419 		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
6420 				     &ipv6_hdr(skb)->daddr,
6421 				     0, IPPROTO_TCP, 0);
6422 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6423 				   IXGBE_TX_FLAGS_CSUM;
6424 	}
6425 
6426 	/* compute header lengths */
6427 	l4len = tcp_hdrlen(skb);
6428 	*hdr_len = skb_transport_offset(skb) + l4len;
6429 
6430 	/* update gso size and bytecount with header size */
6431 	first->gso_segs = skb_shinfo(skb)->gso_segs;
6432 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
6433 
6434 	/* mss_l4len_idx: use 0 as index for TSO */
6435 	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
6436 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
6437 
6438 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
6439 	vlan_macip_lens = skb_network_header_len(skb);
6440 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6441 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6442 
6443 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
6444 			  mss_l4len_idx);
6445 
6446 	return 1;
6447 }
6448 
6449 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
6450 			  struct ixgbe_tx_buffer *first)
6451 {
6452 	struct sk_buff *skb = first->skb;
6453 	u32 vlan_macip_lens = 0;
6454 	u32 mss_l4len_idx = 0;
6455 	u32 type_tucmd = 0;
6456 
6457 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
6458 		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
6459 		    !(first->tx_flags & IXGBE_TX_FLAGS_CC))
6460 			return;
6461 	} else {
6462 		u8 l4_hdr = 0;
6463 		switch (first->protocol) {
6464 		case __constant_htons(ETH_P_IP):
6465 			vlan_macip_lens |= skb_network_header_len(skb);
6466 			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6467 			l4_hdr = ip_hdr(skb)->protocol;
6468 			break;
6469 		case __constant_htons(ETH_P_IPV6):
6470 			vlan_macip_lens |= skb_network_header_len(skb);
6471 			l4_hdr = ipv6_hdr(skb)->nexthdr;
6472 			break;
6473 		default:
6474 			if (unlikely(net_ratelimit())) {
6475 				dev_warn(tx_ring->dev,
6476 				 "partial checksum but proto=%x!\n",
6477 				 first->protocol);
6478 			}
6479 			break;
6480 		}
6481 
6482 		switch (l4_hdr) {
6483 		case IPPROTO_TCP:
6484 			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
6485 			mss_l4len_idx = tcp_hdrlen(skb) <<
6486 					IXGBE_ADVTXD_L4LEN_SHIFT;
6487 			break;
6488 		case IPPROTO_SCTP:
6489 			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
6490 			mss_l4len_idx = sizeof(struct sctphdr) <<
6491 					IXGBE_ADVTXD_L4LEN_SHIFT;
6492 			break;
6493 		case IPPROTO_UDP:
6494 			mss_l4len_idx = sizeof(struct udphdr) <<
6495 					IXGBE_ADVTXD_L4LEN_SHIFT;
6496 			break;
6497 		default:
6498 			if (unlikely(net_ratelimit())) {
6499 				dev_warn(tx_ring->dev,
6500 				 "partial checksum but l4 proto=%x!\n",
6501 				 l4_hdr);
6502 			}
6503 			break;
6504 		}
6505 
6506 		/* update TX checksum flag */
6507 		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
6508 	}
6509 
6510 	/* vlan_macip_lens: MACLEN, VLAN tag */
6511 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6512 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6513 
6514 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
6515 			  type_tucmd, mss_l4len_idx);
6516 }
6517 
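/*
 * Translate a single flag bit from _input into the matching hardware bit
 * _result without branching, by scaling with the ratio between the two bits.
 */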
6518 #define IXGBE_SET_FLAG(_input, _flag, _result) \
6519 	((_flag <= _result) ? \
6520 	 ((u32)(_input & _flag) * (_result / _flag)) : \
6521 	 ((u32)(_input & _flag) / (_flag / _result)))
6522 
6523 static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
6524 {
6525 	/* set type for advanced descriptor with frame checksum insertion */
6526 	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
6527 		       IXGBE_ADVTXD_DCMD_DEXT |
6528 		       IXGBE_ADVTXD_DCMD_IFCS;
6529 
6530 	/* set HW vlan bit if vlan is present */
6531 	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
6532 				   IXGBE_ADVTXD_DCMD_VLE);
6533 
6534 	/* set segmentation enable bits for TSO/FSO */
6535 	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
6536 				   IXGBE_ADVTXD_DCMD_TSE);
6537 
6538 	/* set timestamp bit if present */
6539 	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
6540 				   IXGBE_ADVTXD_MAC_TSTAMP);
6541 
6542 	/* clear the frame checksum insertion bit if the skb requests no FCS */
6543 	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
6544 
6545 	return cmd_type;
6546 }
6547 
6548 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
6549 				   u32 tx_flags, unsigned int paylen)
6550 {
6551 	u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
6552 
6553 	/* enable L4 checksum for TSO and TX checksum offload */
6554 	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
6555 					IXGBE_TX_FLAGS_CSUM,
6556 					IXGBE_ADVTXD_POPTS_TXSM);
6557 
6558 	/* enable IPv4 checksum for TSO */
6559 	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
6560 					IXGBE_TX_FLAGS_IPV4,
6561 					IXGBE_ADVTXD_POPTS_IXSM);
6562 
6563 	/*
6564 	 * Check Context must be set if Tx switch is enabled, which it
6565 	 * always is when virtual functions are running
6566 	 */
6567 	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
6568 					IXGBE_TX_FLAGS_CC,
6569 					IXGBE_ADVTXD_CC);
6570 
6571 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6572 }
6573 
6574 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
6575 		       IXGBE_TXD_CMD_RS)
6576 
6577 static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
6578 			 struct ixgbe_tx_buffer *first,
6579 			 const u8 hdr_len)
6580 {
6581 	struct sk_buff *skb = first->skb;
6582 	struct ixgbe_tx_buffer *tx_buffer;
6583 	union ixgbe_adv_tx_desc *tx_desc;
6584 	struct skb_frag_struct *frag;
6585 	dma_addr_t dma;
6586 	unsigned int data_len, size;
6587 	u32 tx_flags = first->tx_flags;
6588 	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
6589 	u16 i = tx_ring->next_to_use;
6590 
6591 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
6592 
6593 	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
6594 
6595 	size = skb_headlen(skb);
6596 	data_len = skb->data_len;
6597 
6598 #ifdef IXGBE_FCOE
6599 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6600 		if (data_len < sizeof(struct fcoe_crc_eof)) {
6601 			size -= sizeof(struct fcoe_crc_eof) - data_len;
6602 			data_len = 0;
6603 		} else {
6604 			data_len -= sizeof(struct fcoe_crc_eof);
6605 		}
6606 	}
6607 
6608 #endif
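	/* map the linear portion of the skb; page fragments are mapped in the loop below */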
6609 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6610 
6611 	tx_buffer = first;
6612 
6613 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
6614 		if (dma_mapping_error(tx_ring->dev, dma))
6615 			goto dma_error;
6616 
6617 		/* record length, and DMA address */
6618 		dma_unmap_len_set(tx_buffer, len, size);
6619 		dma_unmap_addr_set(tx_buffer, dma, dma);
6620 
6621 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
6622 
6623 		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
6624 			tx_desc->read.cmd_type_len =
6625 				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
6626 
6627 			i++;
6628 			tx_desc++;
6629 			if (i == tx_ring->count) {
6630 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6631 				i = 0;
6632 			}
6633 			tx_desc->read.olinfo_status = 0;
6634 
6635 			dma += IXGBE_MAX_DATA_PER_TXD;
6636 			size -= IXGBE_MAX_DATA_PER_TXD;
6637 
6638 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
6639 		}
6640 
6641 		if (likely(!data_len))
6642 			break;
6643 
6644 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6645 
6646 		i++;
6647 		tx_desc++;
6648 		if (i == tx_ring->count) {
6649 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6650 			i = 0;
6651 		}
6652 		tx_desc->read.olinfo_status = 0;
6653 
6654 #ifdef IXGBE_FCOE
6655 		size = min_t(unsigned int, data_len, skb_frag_size(frag));
6656 #else
6657 		size = skb_frag_size(frag);
6658 #endif
6659 		data_len -= size;
6660 
6661 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
6662 				       DMA_TO_DEVICE);
6663 
6664 		tx_buffer = &tx_ring->tx_buffer_info[i];
6665 	}
6666 
6667 	/* write last descriptor with RS and EOP bits */
6668 	cmd_type |= size | IXGBE_TXD_CMD;
6669 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6670 
6671 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6672 
6673 	/* set the timestamp */
6674 	first->time_stamp = jiffies;
6675 
6676 	/*
6677 	 * Force memory writes to complete before letting h/w know there
6678 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
6679 	 * memory model archs, such as IA-64).
6680 	 *
6681 	 * We also need this memory barrier to make certain all of the
6682 	 * status bits have been updated before next_to_watch is written.
6683 	 */
6684 	wmb();
6685 
6686 	/* set next_to_watch value indicating a packet is present */
6687 	first->next_to_watch = tx_desc;
6688 
6689 	i++;
6690 	if (i == tx_ring->count)
6691 		i = 0;
6692 
6693 	tx_ring->next_to_use = i;
6694 
6695 	/* notify HW of packet */
6696 	writel(i, tx_ring->tail);
6697 
6698 	return;
6699 dma_error:
6700 	dev_err(tx_ring->dev, "TX DMA map failed\n");
6701 
6702 	/* clear dma mappings for failed tx_buffer_info map */
6703 	for (;;) {
6704 		tx_buffer = &tx_ring->tx_buffer_info[i];
6705 		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
6706 		if (tx_buffer == first)
6707 			break;
6708 		if (i == 0)
6709 			i = tx_ring->count;
6710 		i--;
6711 	}
6712 
6713 	tx_ring->next_to_use = i;
6714 }
6715 
6716 static void ixgbe_atr(struct ixgbe_ring *ring,
6717 		      struct ixgbe_tx_buffer *first)
6718 {
6719 	struct ixgbe_q_vector *q_vector = ring->q_vector;
6720 	union ixgbe_atr_hash_dword input = { .dword = 0 };
6721 	union ixgbe_atr_hash_dword common = { .dword = 0 };
6722 	union {
6723 		unsigned char *network;
6724 		struct iphdr *ipv4;
6725 		struct ipv6hdr *ipv6;
6726 	} hdr;
6727 	struct tcphdr *th;
6728 	__be16 vlan_id;
6729 
6730 	/* if ring doesn't have an interrupt vector, cannot perform ATR */
6731 	if (!q_vector)
6732 		return;
6733 
6734 	/* do nothing if sampling is disabled */
6735 	if (!ring->atr_sample_rate)
6736 		return;
6737 
6738 	ring->atr_count++;
6739 
6740 	/* snag network header to get L4 type and address */
6741 	hdr.network = skb_network_header(first->skb);
6742 
6743 	/* Currently only IPv4/IPv6 with TCP is supported */
6744 	if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
6745 	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
6746 	    (first->protocol != __constant_htons(ETH_P_IP) ||
6747 	     hdr.ipv4->protocol != IPPROTO_TCP))
6748 		return;
6749 
6750 	th = tcp_hdr(first->skb);
6751 
6752 	/* skip this packet since it is invalid or the socket is closing */
6753 	if (!th || th->fin)
6754 		return;
6755 
6756 	/* sample on all syn packets or once every atr sample count */
6757 	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
6758 		return;
6759 
6760 	/* reset sample count */
6761 	ring->atr_count = 0;
6762 
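	/* the VLAN tag, if present, is carried in the upper bits of tx_flags */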
6763 	vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
6764 
6765 	/*
6766 	 * src and dst are inverted; think of how the receiver sees them
6767 	 *
6768 	 * The input is broken into two sections, a non-compressed section
6769 	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
6770 	 * is XORed together and stored in the compressed dword.
6771 	 */
6772 	input.formatted.vlan_id = vlan_id;
6773 
6774 	/*
6775 	 * since src port and flex bytes occupy the same word, XOR them together
6776 	 * and write the value to the source port portion of the compressed dword
6777 	 */
6778 	if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
6779 		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
6780 	else
6781 		common.port.src ^= th->dest ^ first->protocol;
6782 	common.port.dst ^= th->source;
6783 
6784 	if (first->protocol == __constant_htons(ETH_P_IP)) {
6785 		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6786 		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6787 	} else {
6788 		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
6789 		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
6790 			     hdr.ipv6->saddr.s6_addr32[1] ^
6791 			     hdr.ipv6->saddr.s6_addr32[2] ^
6792 			     hdr.ipv6->saddr.s6_addr32[3] ^
6793 			     hdr.ipv6->daddr.s6_addr32[0] ^
6794 			     hdr.ipv6->daddr.s6_addr32[1] ^
6795 			     hdr.ipv6->daddr.s6_addr32[2] ^
6796 			     hdr.ipv6->daddr.s6_addr32[3];
6797 	}
6798 
6799 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
6800 	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
6801 					      input, common, ring->queue_index);
6802 }
6803 
6804 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6805 {
6806 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
6807 	/* Herbert's original patch had:
6808 	 *  smp_mb__after_netif_stop_queue();
6809 	 * but since that doesn't exist yet, just open code it. */
6810 	smp_mb();
6811 
6812 	/* We need to check again in case another CPU has just
6813 	 * made room available. */
6814 	if (likely(ixgbe_desc_unused(tx_ring) < size))
6815 		return -EBUSY;
6816 
6817 	/* A reprieve! - use start_queue because it doesn't call schedule */
6818 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6819 	++tx_ring->tx_stats.restart_queue;
6820 	return 0;
6821 }
6822 
6823 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6824 {
6825 	if (likely(ixgbe_desc_unused(tx_ring) >= size))
6826 		return 0;
6827 	return __ixgbe_maybe_stop_tx(tx_ring, size);
6828 }
6829 
6830 #ifdef IXGBE_FCOE
6831 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6832 {
6833 	struct ixgbe_adapter *adapter;
6834 	struct ixgbe_ring_feature *f;
6835 	int txq;
6836 
6837 	/*
6838 	 * only execute the code below if protocol is FCoE
6839 	 * or FIP and we have FCoE enabled on the adapter
6840 	 */
6841 	switch (vlan_get_protocol(skb)) {
6842 	case __constant_htons(ETH_P_FCOE):
6843 	case __constant_htons(ETH_P_FIP):
6844 		adapter = netdev_priv(dev);
6845 
6846 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
6847 			break;
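		/* fall through */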
6848 	default:
6849 		return __netdev_pick_tx(dev, skb);
6850 	}
6851 
6852 	f = &adapter->ring_feature[RING_F_FCOE];
6853 
6854 	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
6855 					   smp_processor_id();
6856 
6857 	while (txq >= f->indices)
6858 		txq -= f->indices;
6859 
6860 	return txq + f->offset;
6861 }
6862 
6863 #endif
6864 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6865 			  struct ixgbe_adapter *adapter,
6866 			  struct ixgbe_ring *tx_ring)
6867 {
6868 	struct ixgbe_tx_buffer *first;
6869 	int tso;
6870 	u32 tx_flags = 0;
6871 	unsigned short f;
6872 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
6873 	__be16 protocol = skb->protocol;
6874 	u8 hdr_len = 0;
6875 
6876 	/*
6877 	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
6878 	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
6879 	 *       + 2 desc gap to keep tail from touching head,
6880 	 *       + 1 desc for context descriptor,
6881 	 * otherwise try next time
6882 	 */
6883 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6884 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6885 
6886 	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
6887 		tx_ring->tx_stats.tx_busy++;
6888 		return NETDEV_TX_BUSY;
6889 	}
6890 
6891 	/* record the location of the first descriptor for this packet */
6892 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6893 	first->skb = skb;
6894 	first->bytecount = skb->len;
6895 	first->gso_segs = 1;
6896 
6897 	/* if we have a HW VLAN tag being added default to the HW one */
6898 	if (vlan_tx_tag_present(skb)) {
6899 		tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
6900 		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
6901 	/* else if it is a SW VLAN check the next protocol and store the tag */
6902 	} else if (protocol == __constant_htons(ETH_P_8021Q)) {
6903 		struct vlan_hdr *vhdr, _vhdr;
6904 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
6905 		if (!vhdr)
6906 			goto out_drop;
6907 
6908 		protocol = vhdr->h_vlan_encapsulated_proto;
6909 		tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
6910 				  IXGBE_TX_FLAGS_VLAN_SHIFT;
6911 		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
6912 	}
6913 
6914 	skb_tx_timestamp(skb);
6915 
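	/* A hardware Tx timestamp request takes an extra reference on the skb
	 * via skb_get(); the PTP Tx work item drops it once the timestamp has
	 * been retrieved or the request times out.
	 */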
6916 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6917 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6918 		tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
6919 
6920 		/* schedule check for Tx timestamp */
6921 		adapter->ptp_tx_skb = skb_get(skb);
6922 		adapter->ptp_tx_start = jiffies;
6923 		schedule_work(&adapter->ptp_tx_work);
6924 	}
6925 
6926 #ifdef CONFIG_PCI_IOV
6927 	/*
6928 	 * Use the l2switch_enable flag - would be false if the DMA
6929 	 * Tx switch had been disabled.
6930 	 */
6931 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6932 		tx_flags |= IXGBE_TX_FLAGS_CC;
6933 
6934 #endif
6935 	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
6936 	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
6937 	    ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
6938 	     (skb->priority != TC_PRIO_CONTROL))) {
6939 		tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
6940 		tx_flags |= (skb->priority & 0x7) <<
6941 					IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
6942 		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
6943 			struct vlan_ethhdr *vhdr;
6944 			if (skb_header_cloned(skb) &&
6945 			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6946 				goto out_drop;
6947 			vhdr = (struct vlan_ethhdr *)skb->data;
6948 			vhdr->h_vlan_TCI = htons(tx_flags >>
6949 						 IXGBE_TX_FLAGS_VLAN_SHIFT);
6950 		} else {
6951 			tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
6952 		}
6953 	}
6954 
6955 	/* record initial flags and protocol */
6956 	first->tx_flags = tx_flags;
6957 	first->protocol = protocol;
6958 
6959 #ifdef IXGBE_FCOE
6960 	/* setup tx offload for FCoE */
6961 	if ((protocol == __constant_htons(ETH_P_FCOE)) &&
6962 	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
6963 		tso = ixgbe_fso(tx_ring, first, &hdr_len);
6964 		if (tso < 0)
6965 			goto out_drop;
6966 
6967 		goto xmit_fcoe;
6968 	}
6969 
6970 #endif /* IXGBE_FCOE */
6971 	tso = ixgbe_tso(tx_ring, first, &hdr_len);
6972 	if (tso < 0)
6973 		goto out_drop;
6974 	else if (!tso)
6975 		ixgbe_tx_csum(tx_ring, first);
6976 
6977 	/* add the ATR filter if ATR is on */
6978 	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6979 		ixgbe_atr(tx_ring, first);
6980 
6981 #ifdef IXGBE_FCOE
6982 xmit_fcoe:
6983 #endif /* IXGBE_FCOE */
6984 	ixgbe_tx_map(tx_ring, first, hdr_len);
6985 
6986 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6987 
6988 	return NETDEV_TX_OK;
6989 
6990 out_drop:
6991 	dev_kfree_skb_any(first->skb);
6992 	first->skb = NULL;
6993 
6994 	return NETDEV_TX_OK;
6995 }
6996 
6997 static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
6998 				      struct net_device *netdev,
6999 				      struct ixgbe_ring *ring)
7000 {
7001 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7002 	struct ixgbe_ring *tx_ring;
7003 
7004 	/*
7005 	 * The minimum packet size for olinfo paylen is 17 so pad the skb
7006 	 * in order to meet this minimum size requirement.
7007 	 */
7008 	if (unlikely(skb->len < 17)) {
7009 		if (skb_pad(skb, 17 - skb->len))
7010 			return NETDEV_TX_OK;
7011 		skb->len = 17;
7012 		skb_set_tail_pointer(skb, 17);
7013 	}
7014 
7015 	tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
7016 
7017 	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
7018 }
7019 
7020 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
7021 				    struct net_device *netdev)
7022 {
7023 	return __ixgbe_xmit_frame(skb, netdev, NULL);
7024 }
7025 
7026 /**
7027  * ixgbe_set_mac - Change the Ethernet Address of the NIC
7028  * @netdev: network interface device structure
7029  * @p: pointer to an address structure
7030  *
7031  * Returns 0 on success, negative on failure
7032  **/
7033 static int ixgbe_set_mac(struct net_device *netdev, void *p)
7034 {
7035 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7036 	struct ixgbe_hw *hw = &adapter->hw;
7037 	struct sockaddr *addr = p;
7038 
7039 	if (!is_valid_ether_addr(addr->sa_data))
7040 		return -EADDRNOTAVAIL;
7041 
7042 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
7043 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
7044 
7045 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
7046 
7047 	return 0;
7048 }
7049 
7050 static int
7051 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
7052 {
7053 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7054 	struct ixgbe_hw *hw = &adapter->hw;
7055 	u16 value;
7056 	int rc;
7057 
7058 	if (prtad != hw->phy.mdio.prtad)
7059 		return -EINVAL;
7060 	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
7061 	if (!rc)
7062 		rc = value;
7063 	return rc;
7064 }
7065 
7066 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
7067 			    u16 addr, u16 value)
7068 {
7069 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7070 	struct ixgbe_hw *hw = &adapter->hw;
7071 
7072 	if (prtad != hw->phy.mdio.prtad)
7073 		return -EINVAL;
7074 	return hw->phy.ops.write_reg(hw, addr, devad, value);
7075 }
7076 
7077 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
7078 {
7079 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7080 
7081 	switch (cmd) {
7082 	case SIOCSHWTSTAMP:
7083 		return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
7084 	default:
7085 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
7086 	}
7087 }
7088 
7089 /**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to netdev->dev_addrs
 * @dev: network interface device structure
7093  *
7094  * Returns non-zero on failure
7095  **/
7096 static int ixgbe_add_sanmac_netdev(struct net_device *dev)
7097 {
7098 	int err = 0;
7099 	struct ixgbe_adapter *adapter = netdev_priv(dev);
7100 	struct ixgbe_hw *hw = &adapter->hw;
7101 
7102 	if (is_valid_ether_addr(hw->mac.san_addr)) {
7103 		rtnl_lock();
7104 		err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
7105 		rtnl_unlock();
7106 
7107 		/* update SAN MAC vmdq pool selection */
7108 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
7109 	}
7110 	return err;
7111 }
7112 
7113 /**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from netdev->dev_addrs
 * @dev: network interface device structure
7117  *
7118  * Returns non-zero on failure
7119  **/
7120 static int ixgbe_del_sanmac_netdev(struct net_device *dev)
7121 {
7122 	int err = 0;
7123 	struct ixgbe_adapter *adapter = netdev_priv(dev);
7124 	struct ixgbe_mac_info *mac = &adapter->hw.mac;
7125 
7126 	if (is_valid_ether_addr(mac->san_addr)) {
7127 		rtnl_lock();
7128 		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
7129 		rtnl_unlock();
7130 	}
7131 	return err;
7132 }
7133 
7134 #ifdef CONFIG_NET_POLL_CONTROLLER
7135 /*
7136  * Polling 'interrupt' - used by things like netconsole to send skbs
7137  * without having to re-enable interrupts. It's not called while
7138  * the interrupt routine is executing.
7139  */
7140 static void ixgbe_netpoll(struct net_device *netdev)
7141 {
7142 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7143 	int i;
7144 
7145 	/* if interface is down do nothing */
7146 	if (test_bit(__IXGBE_DOWN, &adapter->state))
7147 		return;
7148 
7149 	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
7150 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
7151 		for (i = 0; i < adapter->num_q_vectors; i++)
7152 			ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
7153 	} else {
7154 		ixgbe_intr(adapter->pdev->irq, netdev);
7155 	}
7156 	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
7157 }
7158 
7159 #endif
7160 static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
7161 						   struct rtnl_link_stats64 *stats)
7162 {
7163 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7164 	int i;
7165 
7166 	rcu_read_lock();
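	/* Per-ring counters are read under a u64_stats seqcount retry loop so
	 * 64-bit values stay consistent on 32-bit systems; the RCU read lock
	 * keeps the rings from being freed during a reconfiguration.
	 */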
7167 	for (i = 0; i < adapter->num_rx_queues; i++) {
7168 		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
7169 		u64 bytes, packets;
7170 		unsigned int start;
7171 
7172 		if (ring) {
7173 			do {
7174 				start = u64_stats_fetch_begin_bh(&ring->syncp);
7175 				packets = ring->stats.packets;
7176 				bytes   = ring->stats.bytes;
7177 			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
7178 			stats->rx_packets += packets;
7179 			stats->rx_bytes   += bytes;
7180 		}
7181 	}
7182 
7183 	for (i = 0; i < adapter->num_tx_queues; i++) {
7184 		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
7185 		u64 bytes, packets;
7186 		unsigned int start;
7187 
7188 		if (ring) {
7189 			do {
7190 				start = u64_stats_fetch_begin_bh(&ring->syncp);
7191 				packets = ring->stats.packets;
7192 				bytes   = ring->stats.bytes;
7193 			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
7194 			stats->tx_packets += packets;
7195 			stats->tx_bytes   += bytes;
7196 		}
7197 	}
7198 	rcu_read_unlock();
7199 	/* following stats updated by ixgbe_watchdog_task() */
7200 	stats->multicast	= netdev->stats.multicast;
7201 	stats->rx_errors	= netdev->stats.rx_errors;
7202 	stats->rx_length_errors	= netdev->stats.rx_length_errors;
7203 	stats->rx_crc_errors	= netdev->stats.rx_crc_errors;
7204 	stats->rx_missed_errors	= netdev->stats.rx_missed_errors;
7205 	return stats;
7206 }
7207 
7208 #ifdef CONFIG_IXGBE_DCB
7209 /**
 * ixgbe_validate_rtr - verify the 802.1p to Rx packet buffer mapping is valid.
7211  * @adapter: pointer to ixgbe_adapter
7212  * @tc: number of traffic classes currently enabled
7213  *
 * Configure a valid 802.1p to Rx packet buffer mapping, i.e. confirm that
 * each 802.1p user priority maps to a packet buffer that exists.
7216  */
7217 static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
7218 {
7219 	struct ixgbe_hw *hw = &adapter->hw;
7220 	u32 reg, rsave;
7221 	int i;
7222 
	/* The 82598 has a static priority-to-TC mapping that cannot
	 * be changed, so no validation is needed.
7225 	 */
7226 	if (hw->mac.type == ixgbe_mac_82598EB)
7227 		return;
7228 
7229 	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
7230 	rsave = reg;
7231 
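	/* RTRUP2TC packs eight 3-bit user-priority-to-TC fields, one per
	 * 802.1p priority, IXGBE_RTRUP2TC_UP_SHIFT bits apart.
	 */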
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		u8 up2tc = (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)) & 0x7;

		/* If up2tc is out of bounds, default this priority to TC 0 */
		if (up2tc > tc)
			reg &= ~(0x7 << (i * IXGBE_RTRUP2TC_UP_SHIFT));
	}
7239 
7240 	if (reg != rsave)
7241 		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
7242 
7243 	return;
7244 }
7245 
7246 /**
7247  * ixgbe_set_prio_tc_map - Configure netdev prio tc map
7248  * @adapter: Pointer to adapter struct
7249  *
7250  * Populate the netdev user priority to tc map
7251  */
7252 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
7253 {
7254 	struct net_device *dev = adapter->netdev;
7255 	struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
7256 	struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
7257 	u8 prio;
7258 
7259 	for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
7260 		u8 tc = 0;
7261 
7262 		if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
7263 			tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
7264 		else if (ets)
7265 			tc = ets->prio_tc[prio];
7266 
7267 		netdev_set_prio_tc_map(dev, prio, tc);
7268 	}
7269 }
7270 
7271 #endif /* CONFIG_IXGBE_DCB */
7272 /**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
7277  */
7278 int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7279 {
7280 	struct ixgbe_adapter *adapter = netdev_priv(dev);
7281 	struct ixgbe_hw *hw = &adapter->hw;
7282 	bool pools;
7283 
7284 	/* Hardware supports up to 8 traffic classes */
7285 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
7286 	    (hw->mac.type == ixgbe_mac_82598EB &&
7287 	     tc < MAX_TRAFFIC_CLASS))
7288 		return -EINVAL;
7289 
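	/* A set bit beyond the default pool (bit 0) in fwd_bitmask means L2
	 * forwarding offload (macvlan) pools are in use.
	 */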
7290 	pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
7291 	if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
7292 		return -EBUSY;
7293 
7294 	/* Hardware has to reinitialize queues and interrupts to
7295 	 * match packet buffer alignment. Unfortunately, the
7296 	 * hardware is not flexible enough to do this dynamically.
7297 	 */
7298 	if (netif_running(dev))
7299 		ixgbe_close(dev);
7300 	ixgbe_clear_interrupt_scheme(adapter);
7301 
7302 #ifdef CONFIG_IXGBE_DCB
7303 	if (tc) {
7304 		netdev_set_num_tc(dev, tc);
7305 		ixgbe_set_prio_tc_map(adapter);
7306 
7307 		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
7308 
7309 		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
7310 			adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
7311 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
7312 		}
7313 	} else {
7314 		netdev_reset_tc(dev);
7315 
7316 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
7317 			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
7318 
7319 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
7320 
7321 		adapter->temp_dcb_cfg.pfc_mode_enable = false;
7322 		adapter->dcb_cfg.pfc_mode_enable = false;
7323 	}
7324 
7325 	ixgbe_validate_rtr(adapter, tc);
7326 
7327 #endif /* CONFIG_IXGBE_DCB */
7328 	ixgbe_init_interrupt_scheme(adapter);
7329 
7330 	if (netif_running(dev))
7331 		return ixgbe_open(dev);
7332 
7333 	return 0;
7334 }
7335 
7336 #ifdef CONFIG_PCI_IOV
7337 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
7338 {
7339 	struct net_device *netdev = adapter->netdev;
7340 
7341 	rtnl_lock();
7342 	ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
7343 	rtnl_unlock();
7344 }
7345 
7346 #endif
7347 void ixgbe_do_reset(struct net_device *netdev)
7348 {
7349 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7350 
7351 	if (netif_running(netdev))
7352 		ixgbe_reinit_locked(adapter);
7353 	else
7354 		ixgbe_reset(adapter);
7355 }
7356 
7357 static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
7358 					    netdev_features_t features)
7359 {
7360 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7361 
7362 	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
7363 	if (!(features & NETIF_F_RXCSUM))
7364 		features &= ~NETIF_F_LRO;
7365 
7366 	/* Turn off LRO if not RSC capable */
7367 	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
7368 		features &= ~NETIF_F_LRO;
7369 
7370 	return features;
7371 }
7372 
7373 static int ixgbe_set_features(struct net_device *netdev,
7374 			      netdev_features_t features)
7375 {
7376 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
7377 	netdev_features_t changed = netdev->features ^ features;
7378 	bool need_reset = false;
7379 
7380 	/* Make sure RSC matches LRO, reset if change */
7381 	if (!(features & NETIF_F_LRO)) {
7382 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
7383 			need_reset = true;
7384 		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
7385 	} else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
7386 		   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
7387 		if (adapter->rx_itr_setting == 1 ||
7388 		    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
7389 			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
7390 			need_reset = true;
7391 		} else if ((changed ^ features) & NETIF_F_LRO) {
			e_info(probe, "rx-usecs set too low, disabling RSC\n");
7394 		}
7395 	}
7396 
7397 	/*
7398 	 * Check if Flow Director n-tuple support was enabled or disabled.  If
7399 	 * the state changed, we need to reset.
7400 	 */
7401 	switch (features & NETIF_F_NTUPLE) {
7402 	case NETIF_F_NTUPLE:
7403 		/* turn off ATR, enable perfect filters and reset */
7404 		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
7405 			need_reset = true;
7406 
7407 		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
7408 		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
7409 		break;
7410 	default:
7411 		/* turn off perfect filters, enable ATR and reset */
7412 		if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7413 			need_reset = true;
7414 
7415 		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
7416 
7417 		/* We cannot enable ATR if SR-IOV is enabled */
7418 		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7419 			break;
7420 
7421 		/* We cannot enable ATR if we have 2 or more traffic classes */
7422 		if (netdev_get_num_tc(netdev) > 1)
7423 			break;
7424 
7425 		/* We cannot enable ATR if RSS is disabled */
7426 		if (adapter->ring_feature[RING_F_RSS].limit <= 1)
7427 			break;
7428 
7429 		/* A sample rate of 0 indicates ATR disabled */
7430 		if (!adapter->atr_sample_rate)
7431 			break;
7432 
7433 		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
7434 		break;
7435 	}
7436 
7437 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
7438 		ixgbe_vlan_strip_enable(adapter);
7439 	else
7440 		ixgbe_vlan_strip_disable(adapter);
7441 
7442 	if (changed & NETIF_F_RXALL)
7443 		need_reset = true;
7444 
7445 	netdev->features = features;
7446 	if (need_reset)
7447 		ixgbe_do_reset(netdev);
7448 
7449 	return 0;
7450 }
7451 
7452 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7453 			     struct net_device *dev,
7454 			     const unsigned char *addr,
7455 			     u16 flags)
7456 {
7457 	struct ixgbe_adapter *adapter = netdev_priv(dev);
7458 	int err;
7459 
7460 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7461 		return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
7462 
	/* Hardware does not support aging addresses, so if an
	 * ndm_state is given only allow permanent addresses.
	 */
7466 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
7467 		pr_info("%s: FDB only supports static addresses\n",
7468 			ixgbe_driver_name);
7469 		return -EINVAL;
7470 	}
7471 
7472 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
7473 		u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
7474 
7475 		if (netdev_uc_count(dev) < rar_uc_entries)
7476 			err = dev_uc_add_excl(dev, addr);
7477 		else
7478 			err = -ENOMEM;
7479 	} else if (is_multicast_ether_addr(addr)) {
7480 		err = dev_mc_add_excl(dev, addr);
7481 	} else {
7482 		err = -EINVAL;
7483 	}
7484 
7485 	/* Only return duplicate errors if NLM_F_EXCL is set */
7486 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
7487 		err = 0;
7488 
7489 	return err;
7490 }
7491 
7492 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7493 				    struct nlmsghdr *nlh)
7494 {
7495 	struct ixgbe_adapter *adapter = netdev_priv(dev);
7496 	struct nlattr *attr, *br_spec;
7497 	int rem;
7498 
7499 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7500 		return -EOPNOTSUPP;
7501 
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

7504 	nla_for_each_nested(attr, br_spec, rem) {
7505 		__u16 mode;
7506 		u32 reg = 0;
7507 
7508 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7509 			continue;
7510 
7511 		mode = nla_get_u16(attr);
7512 		if (mode == BRIDGE_MODE_VEPA) {
7513 			reg = 0;
7514 			adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
7515 		} else if (mode == BRIDGE_MODE_VEB) {
7516 			reg = IXGBE_PFDTXGSWC_VT_LBEN;
7517 			adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
7518 		} else
7519 			return -EINVAL;
7520 
7521 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
7522 
7523 		e_info(drv, "enabling bridge mode: %s\n",
7524 			mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
7525 	}
7526 
7527 	return 0;
7528 }
7529 
7530 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7531 				    struct net_device *dev,
7532 				    u32 filter_mask)
7533 {
7534 	struct ixgbe_adapter *adapter = netdev_priv(dev);
7535 	u16 mode;
7536 
7537 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7538 		return 0;
7539 
7540 	if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
7541 		mode = BRIDGE_MODE_VEB;
7542 	else
7543 		mode = BRIDGE_MODE_VEPA;
7544 
7545 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
7546 }
7547 
7548 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
7549 {
7550 	struct ixgbe_fwd_adapter *fwd_adapter = NULL;
7551 	struct ixgbe_adapter *adapter = netdev_priv(pdev);
7552 	unsigned int limit;
7553 	int pool, err;
7554 
7555 #ifdef CONFIG_RPS
7556 	if (vdev->num_rx_queues != vdev->num_tx_queues) {
7557 		netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
7558 			    vdev->name);
7559 		return ERR_PTR(-EINVAL);
7560 	}
7561 #endif
7562 	/* Check for hardware restriction on number of rx/tx queues */
7563 	if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
7564 	    vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
7565 		netdev_info(pdev,
7566 			    "%s: Supports RX/TX Queue counts 1,2, and 4\n",
7567 			    pdev->name);
7568 		return ERR_PTR(-EINVAL);
7569 	}
7570 
7571 	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
7572 	      adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
7573 	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
7574 		return ERR_PTR(-EBUSY);
7575 
	fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
7577 	if (!fwd_adapter)
7578 		return ERR_PTR(-ENOMEM);
7579 
7580 	pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
7581 	adapter->num_rx_pools++;
7582 	set_bit(pool, &adapter->fwd_bitmask);
7583 	limit = find_last_bit(&adapter->fwd_bitmask, 32);
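	/* limit is now the highest pool index in use; the VMDq ring feature
	 * below is sized to cover pools 0..limit.
	 */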
7584 
7585 	/* Enable VMDq flag so device will be set in VM mode */
7586 	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
7587 	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
7588 	adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
7589 
7590 	/* Force reinit of ring allocation with VMDQ enabled */
7591 	err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
7592 	if (err)
7593 		goto fwd_add_err;
7594 	fwd_adapter->pool = pool;
7595 	fwd_adapter->real_adapter = adapter;
7596 	err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
7597 	if (err)
7598 		goto fwd_add_err;
7599 	netif_tx_start_all_queues(vdev);
7600 	return fwd_adapter;
7601 fwd_add_err:
7602 	/* unwind counter and free adapter struct */
7603 	netdev_info(pdev,
7604 		    "%s: dfwd hardware acceleration failed\n", vdev->name);
7605 	clear_bit(pool, &adapter->fwd_bitmask);
7606 	adapter->num_rx_pools--;
7607 	kfree(fwd_adapter);
7608 	return ERR_PTR(err);
7609 }
7610 
7611 static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
7612 {
7613 	struct ixgbe_fwd_adapter *fwd_adapter = priv;
7614 	struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
7615 	unsigned int limit;
7616 
7617 	clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
7618 	adapter->num_rx_pools--;
7619 
7620 	limit = find_last_bit(&adapter->fwd_bitmask, 32);
7621 	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
7622 	ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
7623 	ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
7624 	netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
7625 		   fwd_adapter->pool, adapter->num_rx_pools,
7626 		   fwd_adapter->rx_base_queue,
7627 		   fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
7628 		   adapter->fwd_bitmask);
7629 	kfree(fwd_adapter);
7630 }
7631 
7632 static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
7633 				  struct net_device *dev,
7634 				  void *priv)
7635 {
7636 	struct ixgbe_fwd_adapter *fwd_adapter = priv;
7637 	unsigned int queue;
7638 	struct ixgbe_ring *tx_ring;
7639 
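	/* Translate the macvlan's relative queue index into the absolute PF
	 * Tx ring reserved for this forwarding pool.
	 */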
7640 	queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
7641 	tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
7642 
7643 	return __ixgbe_xmit_frame(skb, dev, tx_ring);
7644 }
7645 
7646 static const struct net_device_ops ixgbe_netdev_ops = {
7647 	.ndo_open		= ixgbe_open,
7648 	.ndo_stop		= ixgbe_close,
7649 	.ndo_start_xmit		= ixgbe_xmit_frame,
7650 #ifdef IXGBE_FCOE
7651 	.ndo_select_queue	= ixgbe_select_queue,
7652 #endif
7653 	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
7654 	.ndo_validate_addr	= eth_validate_addr,
7655 	.ndo_set_mac_address	= ixgbe_set_mac,
7656 	.ndo_change_mtu		= ixgbe_change_mtu,
7657 	.ndo_tx_timeout		= ixgbe_tx_timeout,
7658 	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
7659 	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
7660 	.ndo_do_ioctl		= ixgbe_ioctl,
7661 	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
7662 	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
7663 	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
7664 	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
7665 	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
7666 	.ndo_get_stats64	= ixgbe_get_stats64,
7667 #ifdef CONFIG_IXGBE_DCB
7668 	.ndo_setup_tc		= ixgbe_setup_tc,
7669 #endif
7670 #ifdef CONFIG_NET_POLL_CONTROLLER
7671 	.ndo_poll_controller	= ixgbe_netpoll,
7672 #endif
7673 #ifdef CONFIG_NET_RX_BUSY_POLL
7674 	.ndo_busy_poll		= ixgbe_low_latency_recv,
7675 #endif
7676 #ifdef IXGBE_FCOE
7677 	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
7678 	.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
7679 	.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
7680 	.ndo_fcoe_enable = ixgbe_fcoe_enable,
7681 	.ndo_fcoe_disable = ixgbe_fcoe_disable,
7682 	.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
7683 	.ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
7684 #endif /* IXGBE_FCOE */
7685 	.ndo_set_features = ixgbe_set_features,
7686 	.ndo_fix_features = ixgbe_fix_features,
7687 	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
7688 	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
7689 	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
7690 	.ndo_dfwd_add_station	= ixgbe_fwd_add,
7691 	.ndo_dfwd_del_station	= ixgbe_fwd_del,
7692 	.ndo_dfwd_start_xmit	= ixgbe_fwd_xmit,
7693 };
7694 
7695 /**
7696  * ixgbe_enumerate_functions - Get the number of ports this device has
7697  * @adapter: adapter structure
7698  *
 * This function enumerates the physical functions co-located on a single slot,
7700  * in order to determine how many ports a device has. This is most useful in
7701  * determining the required GT/s of PCIe bandwidth necessary for optimal
7702  * performance.
7703  **/
7704 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
7705 {
7706 	struct list_head *entry;
7707 	int physfns = 0;
7708 
	/* Some cards cannot use the generic count PCIe functions method,
7710 	 * because they are behind a parent switch, so we hardcode these with
7711 	 * the correct number of functions.
7712 	 */
7713 	if (ixgbe_pcie_from_parent(&adapter->hw)) {
7714 		physfns = 4;
7715 	} else {
7716 		list_for_each(entry, &adapter->pdev->bus_list) {
7717 			struct pci_dev *pdev =
7718 				list_entry(entry, struct pci_dev, bus_list);
7719 			/* don't count virtual functions */
7720 			if (!pdev->is_virtfn)
7721 				physfns++;
7722 		}
7723 	}
7724 
7725 	return physfns;
7726 }
7727 
7728 /**
7729  * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: adapter structure containing the hw specific details
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
7733  *
7734  * This function is used by probe and ethtool to determine
7735  * which devices have WoL support
7736  *
7737  **/
7738 int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
7739 			u16 subdevice_id)
7740 {
7741 	struct ixgbe_hw *hw = &adapter->hw;
7742 	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
7743 	int is_wol_supported = 0;
7744 
7745 	switch (device_id) {
7746 	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices support WoL */
7748 		switch (subdevice_id) {
7749 		case IXGBE_SUBDEV_ID_82599_560FLR:
7750 			/* only support first port */
7751 			if (hw->bus.func != 0)
7752 				break;
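			/* fall through - WoL is supported on port 0 */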
7753 		case IXGBE_SUBDEV_ID_82599_SP_560FLR:
7754 		case IXGBE_SUBDEV_ID_82599_SFP:
7755 		case IXGBE_SUBDEV_ID_82599_RNDC:
7756 		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
7757 		case IXGBE_SUBDEV_ID_82599_LOM_SFP:
7758 			is_wol_supported = 1;
7759 			break;
7760 		}
7761 		break;
7762 	case IXGBE_DEV_ID_82599EN_SFP:
7763 		/* Only this subdevice supports WOL */
7764 		switch (subdevice_id) {
7765 		case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
7766 			is_wol_supported = 1;
7767 			break;
7768 		}
7769 		break;
7770 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7771 		/* All except this subdevice support WOL */
7772 		if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7773 			is_wol_supported = 1;
7774 		break;
7775 	case IXGBE_DEV_ID_82599_KX4:
7776 		is_wol_supported = 1;
7777 		break;
7778 	case IXGBE_DEV_ID_X540T:
7779 	case IXGBE_DEV_ID_X540T1:
		/* check the EEPROM to see if WoL is enabled */
7781 		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
7782 		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
7783 		     (hw->bus.func == 0))) {
7784 			is_wol_supported = 1;
7785 		}
7786 		break;
7787 	}
7788 
7789 	return is_wol_supported;
7790 }
7791 
7792 /**
7793  * ixgbe_probe - Device Initialization Routine
7794  * @pdev: PCI device information struct
7795  * @ent: entry in ixgbe_pci_tbl
7796  *
7797  * Returns 0 on success, negative on failure
7798  *
7799  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
7800  * The OS initialization, configuring of the adapter private structure,
7801  * and a hardware reset occur.
7802  **/
7803 static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7804 {
7805 	struct net_device *netdev;
7806 	struct ixgbe_adapter *adapter = NULL;
7807 	struct ixgbe_hw *hw;
7808 	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
7809 	static int cards_found;
7810 	int i, err, pci_using_dac, expected_gts;
7811 	unsigned int indices = MAX_TX_QUEUES;
7812 	u8 part_str[IXGBE_PBANUM_LENGTH];
7813 #ifdef IXGBE_FCOE
7814 	u16 device_caps;
7815 #endif
7816 	u32 eec;
7817 
7818 	/* Catch broken hardware that put the wrong VF device ID in
7819 	 * the PCIe SR-IOV capability.
7820 	 */
7821 	if (pdev->is_virtfn) {
7822 		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
7823 		     pci_name(pdev), pdev->vendor, pdev->device);
7824 		return -EINVAL;
7825 	}
7826 
7827 	err = pci_enable_device_mem(pdev);
7828 	if (err)
7829 		return err;
7830 
7831 	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
7832 		pci_using_dac = 1;
7833 	} else {
7834 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7835 		if (err) {
7836 			dev_err(&pdev->dev,
7837 				"No usable DMA configuration, aborting\n");
7838 			goto err_dma;
7839 		}
7840 		pci_using_dac = 0;
7841 	}
7842 
7843 	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
7844 					   IORESOURCE_MEM), ixgbe_driver_name);
7845 	if (err) {
7846 		dev_err(&pdev->dev,
7847 			"pci_request_selected_regions failed 0x%x\n", err);
7848 		goto err_pci_reg;
7849 	}
7850 
7851 	pci_enable_pcie_error_reporting(pdev);
7852 
7853 	pci_set_master(pdev);
7854 	pci_save_state(pdev);
7855 
7856 	if (ii->mac == ixgbe_mac_82598EB) {
7857 #ifdef CONFIG_IXGBE_DCB
7858 		/* 8 TC w/ 4 queues per TC */
7859 		indices = 4 * MAX_TRAFFIC_CLASS;
7860 #else
7861 		indices = IXGBE_MAX_RSS_INDICES;
7862 #endif
7863 	}
7864 
7865 	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
7866 	if (!netdev) {
7867 		err = -ENOMEM;
7868 		goto err_alloc_etherdev;
7869 	}
7870 
7871 	SET_NETDEV_DEV(netdev, &pdev->dev);
7872 
7873 	adapter = netdev_priv(netdev);
7874 	pci_set_drvdata(pdev, adapter);
7875 
7876 	adapter->netdev = netdev;
7877 	adapter->pdev = pdev;
7878 	hw = &adapter->hw;
7879 	hw->back = adapter;
7880 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
7881 
7882 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
7883 			      pci_resource_len(pdev, 0));
7884 	if (!hw->hw_addr) {
7885 		err = -EIO;
7886 		goto err_ioremap;
7887 	}
7888 
7889 	netdev->netdev_ops = &ixgbe_netdev_ops;
7890 	ixgbe_set_ethtool_ops(netdev);
7891 	netdev->watchdog_timeo = 5 * HZ;
7892 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
7893 
7894 	adapter->bd_number = cards_found;
7895 
7896 	/* Setup hw api */
7897 	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
7898 	hw->mac.type  = ii->mac;
7899 
7900 	/* EEPROM */
7901 	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
7902 	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default, else use bit-bang */
7904 	if (!(eec & (1 << 8)))
7905 		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
7906 
7907 	/* PHY */
7908 	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
7909 	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
7910 	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
7911 	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
7912 	hw->phy.mdio.mmds = 0;
7913 	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7914 	hw->phy.mdio.dev = netdev;
7915 	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
7916 	hw->phy.mdio.mdio_write = ixgbe_mdio_write;
7917 
7918 	ii->get_invariants(hw);
7919 
7920 	/* setup the private structure */
7921 	err = ixgbe_sw_init(adapter);
7922 	if (err)
7923 		goto err_sw_init;
7924 
7925 	/* Cache if MNG FW is up so we don't have to read the REG later */
7926 	if (hw->mac.ops.mng_fw_enabled)
7927 		hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
7928 
	/* Make it possible for the adapter to be woken up via WoL */
7930 	switch (adapter->hw.mac.type) {
7931 	case ixgbe_mac_82599EB:
7932 	case ixgbe_mac_X540:
7933 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
7934 		break;
7935 	default:
7936 		break;
7937 	}
7938 
7939 	/*
	 * If there is a fan on this device and it has failed, log the
	 * failure.
7942 	 */
7943 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
7944 		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
7945 		if (esdp & IXGBE_ESDP_SDP1)
7946 			e_crit(probe, "Fan has stopped, replace the adapter\n");
7947 	}
7948 
7949 	if (allow_unsupported_sfp)
7950 		hw->allow_unsupported_sfp = allow_unsupported_sfp;
7951 
7952 	/* reset_hw fills in the perm_addr as well */
7953 	hw->phy.reset_if_overtemp = true;
7954 	err = hw->mac.ops.reset_hw(hw);
7955 	hw->phy.reset_if_overtemp = false;
7956 	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
7957 	    hw->mac.type == ixgbe_mac_82598EB) {
7958 		err = 0;
7959 	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
7960 		e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
7961 		e_dev_err("Reload the driver after installing a supported module.\n");
7962 		goto err_sw_init;
7963 	} else if (err) {
7964 		e_dev_err("HW Init failed: %d\n", err);
7965 		goto err_sw_init;
7966 	}
7967 
7968 #ifdef CONFIG_PCI_IOV
7969 	/* SR-IOV not supported on the 82598 */
7970 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
7971 		goto skip_sriov;
7972 	/* Mailbox */
7973 	ixgbe_init_mbx_params_pf(hw);
7974 	memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
7975 	ixgbe_enable_sriov(adapter);
7976 	pci_sriov_set_totalvfs(pdev, 63);
7977 skip_sriov:
7978 
7979 #endif
7980 	netdev->features = NETIF_F_SG |
7981 			   NETIF_F_IP_CSUM |
7982 			   NETIF_F_IPV6_CSUM |
7983 			   NETIF_F_HW_VLAN_CTAG_TX |
7984 			   NETIF_F_HW_VLAN_CTAG_RX |
7985 			   NETIF_F_HW_VLAN_CTAG_FILTER |
7986 			   NETIF_F_TSO |
7987 			   NETIF_F_TSO6 |
7988 			   NETIF_F_RXHASH |
7989 			   NETIF_F_RXCSUM;
7990 
7991 	netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
7992 
7993 	switch (adapter->hw.mac.type) {
7994 	case ixgbe_mac_82599EB:
7995 	case ixgbe_mac_X540:
7996 		netdev->features |= NETIF_F_SCTP_CSUM;
7997 		netdev->hw_features |= NETIF_F_SCTP_CSUM |
7998 				       NETIF_F_NTUPLE;
7999 		break;
8000 	default:
8001 		break;
8002 	}
8003 
8004 	netdev->hw_features |= NETIF_F_RXALL;
8005 
8006 	netdev->vlan_features |= NETIF_F_TSO;
8007 	netdev->vlan_features |= NETIF_F_TSO6;
8008 	netdev->vlan_features |= NETIF_F_IP_CSUM;
8009 	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
8010 	netdev->vlan_features |= NETIF_F_SG;
8011 
8012 	netdev->priv_flags |= IFF_UNICAST_FLT;
8013 	netdev->priv_flags |= IFF_SUPP_NOFCS;
8014 
8015 #ifdef CONFIG_IXGBE_DCB
8016 	netdev->dcbnl_ops = &dcbnl_ops;
8017 #endif
8018 
8019 #ifdef IXGBE_FCOE
8020 	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
8021 		unsigned int fcoe_l;
8022 
8023 		if (hw->mac.ops.get_device_caps) {
8024 			hw->mac.ops.get_device_caps(hw, &device_caps);
8025 			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
8026 				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
8027 		}
8028 
8029 
8030 		fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
8031 		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
8032 
8033 		netdev->features |= NETIF_F_FSO |
8034 				    NETIF_F_FCOE_CRC;
8035 
8036 		netdev->vlan_features |= NETIF_F_FSO |
8037 					 NETIF_F_FCOE_CRC |
8038 					 NETIF_F_FCOE_MTU;
8039 	}
8040 #endif /* IXGBE_FCOE */
8041 	if (pci_using_dac) {
8042 		netdev->features |= NETIF_F_HIGHDMA;
8043 		netdev->vlan_features |= NETIF_F_HIGHDMA;
8044 	}
8045 
8046 	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
8047 		netdev->hw_features |= NETIF_F_LRO;
8048 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
8049 		netdev->features |= NETIF_F_LRO;
8050 
8051 	/* make sure the EEPROM is good */
8052 	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
8053 		e_dev_err("The EEPROM Checksum Is Not Valid\n");
8054 		err = -EIO;
8055 		goto err_sw_init;
8056 	}
8057 
8058 	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
8059 
8060 	if (!is_valid_ether_addr(netdev->dev_addr)) {
8061 		e_dev_err("invalid MAC address\n");
8062 		err = -EIO;
8063 		goto err_sw_init;
8064 	}
8065 
8066 	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
8067 		    (unsigned long) adapter);
8068 
8069 	INIT_WORK(&adapter->service_task, ixgbe_service_task);
8070 	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
8071 
8072 	err = ixgbe_init_interrupt_scheme(adapter);
8073 	if (err)
8074 		goto err_sw_init;
8075 
8076 	/* WOL not supported for all devices */
8077 	adapter->wol = 0;
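	/* EEPROM word 0x2c holds the device capability bits; cache them so
	 * ixgbe_wol_supported() can check the per-port WoL capability.
	 */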
8078 	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
8079 	hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
8080 						pdev->subsystem_device);
8081 	if (hw->wol_enabled)
8082 		adapter->wol = IXGBE_WUFC_MAG;
8083 
8084 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
8085 
8086 	/* save off EEPROM version number */
8087 	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
8088 	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
8089 
8090 	/* pick up the PCI bus settings for reporting later */
8091 	hw->mac.ops.get_bus_info(hw);
8092 	if (ixgbe_pcie_from_parent(hw))
8093 		ixgbe_get_parent_bus_info(adapter);
8094 
8095 	/* calculate the expected PCIe bandwidth required for optimal
8096 	 * performance. Note that some older parts will never have enough
8097 	 * bandwidth due to being older generation PCIe parts. We clamp these
8098 	 * parts to ensure no warning is displayed if it can't be fixed.
8099 	 */
8100 	switch (hw->mac.type) {
8101 	case ixgbe_mac_82598EB:
8102 		expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
8103 		break;
8104 	default:
8105 		expected_gts = ixgbe_enumerate_functions(adapter) * 10;
8106 		break;
8107 	}
8108 	ixgbe_check_minimum_link(adapter, expected_gts);
8109 
8110 	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
8111 	if (err)
8112 		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
8113 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
8114 		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
8115 			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
8116 		           part_str);
8117 	else
8118 		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
8119 			   hw->mac.type, hw->phy.type, part_str);
8120 
8121 	e_dev_info("%pM\n", netdev->dev_addr);
8122 
8123 	/* reset the hardware with the new settings */
8124 	err = hw->mac.ops.start_hw(hw);
8125 	if (err == IXGBE_ERR_EEPROM_VERSION) {
8126 		/* We are running on a pre-production device, log a warning */
8127 		e_dev_warn("This device is a pre-production adapter/LOM. "
8128 			   "Please be aware there may be issues associated "
8129 			   "with your hardware.  If you are experiencing "
8130 			   "problems please contact your Intel or hardware "
8131 			   "representative who provided you with this "
8132 			   "hardware.\n");
8133 	}
8134 	strcpy(netdev->name, "eth%d");
8135 	err = register_netdev(netdev);
8136 	if (err)
8137 		goto err_register;
8138 
8139 	/* power down the optics for 82599 SFP+ fiber */
8140 	if (hw->mac.ops.disable_tx_laser)
8141 		hw->mac.ops.disable_tx_laser(hw);
8142 
8143 	/* carrier off reporting is important to ethtool even BEFORE open */
8144 	netif_carrier_off(netdev);
8145 
8146 #ifdef CONFIG_IXGBE_DCA
8147 	if (dca_add_requester(&pdev->dev) == 0) {
8148 		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
8149 		ixgbe_setup_dca(adapter);
8150 	}
8151 #endif
8152 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
8153 		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
8154 		for (i = 0; i < adapter->num_vfs; i++)
8155 			ixgbe_vf_configuration(pdev, (i | 0x10000000));
8156 	}
8157 
	/* The firmware requires the driver version to be set to 0xFFFFFFFF
	 * since the OS does not support this feature.
	 */
8161 	if (hw->mac.ops.set_fw_drv_ver)
8162 		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
8163 					   0xFF);
8164 
8165 	/* add san mac addr to netdev */
8166 	ixgbe_add_sanmac_netdev(netdev);
8167 
8168 	e_dev_info("%s\n", ixgbe_default_device_descr);
8169 	cards_found++;
8170 
8171 #ifdef CONFIG_IXGBE_HWMON
8172 	if (ixgbe_sysfs_init(adapter))
8173 		e_err(probe, "failed to allocate sysfs resources\n");
8174 #endif /* CONFIG_IXGBE_HWMON */
8175 
8176 	ixgbe_dbg_adapter_init(adapter);
8177 
8178 	/* Need link setup for MNG FW, else wait for IXGBE_UP */
8179 	if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
8180 		hw->mac.ops.setup_link(hw,
8181 			IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
8182 			true);
8183 
8184 	return 0;
8185 
8186 err_register:
8187 	ixgbe_release_hw_control(adapter);
8188 	ixgbe_clear_interrupt_scheme(adapter);
8189 err_sw_init:
8190 	ixgbe_disable_sriov(adapter);
8191 	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
8192 	iounmap(hw->hw_addr);
8193 err_ioremap:
8194 	free_netdev(netdev);
8195 err_alloc_etherdev:
8196 	pci_release_selected_regions(pdev,
8197 				     pci_select_bars(pdev, IORESOURCE_MEM));
8198 err_pci_reg:
8199 err_dma:
8200 	pci_disable_device(pdev);
8201 	return err;
8202 }
8203 
8204 /**
8205  * ixgbe_remove - Device Removal Routine
8206  * @pdev: PCI device information struct
8207  *
8208  * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
8210  * Hot-Plug event, or because the driver is going to be removed from
8211  * memory.
8212  **/
8213 static void ixgbe_remove(struct pci_dev *pdev)
8214 {
8215 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8216 	struct net_device *netdev = adapter->netdev;
8217 
8218 	ixgbe_dbg_adapter_exit(adapter);
8219 
8220 	set_bit(__IXGBE_DOWN, &adapter->state);
8221 	cancel_work_sync(&adapter->service_task);
8222 
8223 
8224 #ifdef CONFIG_IXGBE_DCA
8225 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
8226 		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
8227 		dca_remove_requester(&pdev->dev);
8228 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
8229 	}
8230 
8231 #endif
8232 #ifdef CONFIG_IXGBE_HWMON
8233 	ixgbe_sysfs_exit(adapter);
8234 #endif /* CONFIG_IXGBE_HWMON */
8235 
8236 	/* remove the added san mac */
8237 	ixgbe_del_sanmac_netdev(netdev);
8238 
8239 	if (netdev->reg_state == NETREG_REGISTERED)
8240 		unregister_netdev(netdev);
8241 
8242 #ifdef CONFIG_PCI_IOV
8243 	/*
8244 	 * Only disable SR-IOV on unload if the user specified the now
8245 	 * deprecated max_vfs module parameter.
8246 	 */
8247 	if (max_vfs)
8248 		ixgbe_disable_sriov(adapter);
8249 #endif
8250 	ixgbe_clear_interrupt_scheme(adapter);
8251 
8252 	ixgbe_release_hw_control(adapter);
8253 
8254 #ifdef CONFIG_DCB
8255 	kfree(adapter->ixgbe_ieee_pfc);
8256 	kfree(adapter->ixgbe_ieee_ets);
8257 
8258 #endif
8259 	iounmap(adapter->hw.hw_addr);
8260 	pci_release_selected_regions(pdev, pci_select_bars(pdev,
8261 				     IORESOURCE_MEM));
8262 
8263 	e_dev_info("complete\n");
8264 
8265 	free_netdev(netdev);
8266 
8267 	pci_disable_pcie_error_reporting(pdev);
8268 
8269 	pci_disable_device(pdev);
8270 }
8271 
8272 /**
8273  * ixgbe_io_error_detected - called when PCI error is detected
8274  * @pdev: Pointer to PCI device
8275  * @state: The current pci connection state
8276  *
8277  * This function is called after a PCI bus error affecting
8278  * this device has been detected.
8279  */
8280 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
8281 						pci_channel_state_t state)
8282 {
8283 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8284 	struct net_device *netdev = adapter->netdev;
8285 
8286 #ifdef CONFIG_PCI_IOV
8287 	struct pci_dev *bdev, *vfdev;
8288 	u32 dw0, dw1, dw2, dw3;
8289 	int vf, pos;
8290 	u16 req_id, pf_func;
8291 
8292 	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
8293 	    adapter->num_vfs == 0)
8294 		goto skip_bad_vf_detection;
8295 
8296 	bdev = pdev->bus->self;
8297 	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
8298 		bdev = bdev->bus->self;
8299 
8300 	if (!bdev)
8301 		goto skip_bad_vf_detection;
8302 
8303 	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
8304 	if (!pos)
8305 		goto skip_bad_vf_detection;
8306 
8307 	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
8308 	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
8309 	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
8310 	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
8311 
8312 	req_id = dw1 >> 16;
8313 	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
8314 	if (!(req_id & 0x0080))
8315 		goto skip_bad_vf_detection;
8316 
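	/* Compare the requester function's low bit with our own devfn so we
	 * only handle VFs that belong to this PF/port.
	 */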
8317 	pf_func = req_id & 0x01;
8318 	if ((pf_func & 1) == (pdev->devfn & 1)) {
8319 		unsigned int device_id;
8320 
8321 		vf = (req_id & 0x7F) >> 1;
8322 		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
8326 		switch (adapter->hw.mac.type) {
8327 		case ixgbe_mac_82599EB:
8328 			device_id = IXGBE_82599_VF_DEVICE_ID;
8329 			break;
8330 		case ixgbe_mac_X540:
8331 			device_id = IXGBE_X540_VF_DEVICE_ID;
8332 			break;
8333 		default:
8334 			device_id = 0;
8335 			break;
8336 		}
8337 
8338 		/* Find the pci device of the offending VF */
8339 		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
8340 		while (vfdev) {
8341 			if (vfdev->devfn == (req_id & 0xFF))
8342 				break;
8343 			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
8344 					       device_id, vfdev);
8345 		}
8346 		/*
8347 		 * There's a slim chance the VF could have been hot plugged,
8348 		 * so if it is no longer present we don't need to issue the
8349 		 * VFLR.  Just clean up the AER in that case.
8350 		 */
8351 		if (vfdev) {
8352 			e_dev_err("Issuing VFLR to VF %d\n", vf);
8353 			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
8354 			/* Free device reference count */
8355 			pci_dev_put(vfdev);
8356 		}
8357 
8358 		pci_cleanup_aer_uncorrect_error_status(pdev);
8359 	}
8360 
8361 	/*
8362 	 * Even though the error may have occurred on the other port
8363 	 * we still need to increment the vf error reference count for
8364 	 * both ports because the I/O resume function will be called
8365 	 * for both of them.
8366 	 */
8367 	adapter->vferr_refcount++;
8368 
8369 	return PCI_ERS_RESULT_RECOVERED;
8370 
8371 skip_bad_vf_detection:
8372 #endif /* CONFIG_PCI_IOV */
8373 	netif_device_detach(netdev);
8374 
8375 	if (state == pci_channel_io_perm_failure)
8376 		return PCI_ERS_RESULT_DISCONNECT;
8377 
8378 	if (netif_running(netdev))
8379 		ixgbe_down(adapter);
8380 	pci_disable_device(pdev);
8381 
8382 	/* Request a slot reset. */
8383 	return PCI_ERS_RESULT_NEED_RESET;
8384 }
8385 
8386 /**
8387  * ixgbe_io_slot_reset - called after the pci bus has been reset.
8388  * @pdev: Pointer to PCI device
8389  *
8390  * Restart the card from scratch, as if from a cold-boot.
8391  */
8392 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
8393 {
8394 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8395 	pci_ers_result_t result;
8396 	int err;
8397 
8398 	if (pci_enable_device_mem(pdev)) {
8399 		e_err(probe, "Cannot re-enable PCI device after reset.\n");
8400 		result = PCI_ERS_RESULT_DISCONNECT;
8401 	} else {
8402 		pci_set_master(pdev);
8403 		pci_restore_state(pdev);
8404 		pci_save_state(pdev);
8405 
8406 		pci_wake_from_d3(pdev, false);
8407 
8408 		ixgbe_reset(adapter);
8409 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
8410 		result = PCI_ERS_RESULT_RECOVERED;
8411 	}
8412 
8413 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
8414 	if (err) {
8415 		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
8416 			  "failed 0x%0x\n", err);
8417 		/* non-fatal, continue */
8418 	}
8419 
8420 	return result;
8421 }
8422 
8423 /**
8424  * ixgbe_io_resume - called when traffic can start flowing again.
8425  * @pdev: Pointer to PCI device
8426  *
8427  * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
8429  */
8430 static void ixgbe_io_resume(struct pci_dev *pdev)
8431 {
8432 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8433 	struct net_device *netdev = adapter->netdev;
8434 
8435 #ifdef CONFIG_PCI_IOV
8436 	if (adapter->vferr_refcount) {
8437 		e_info(drv, "Resuming after VF err\n");
8438 		adapter->vferr_refcount--;
8439 		return;
8440 	}
8441 
8442 #endif
8443 	if (netif_running(netdev))
8444 		ixgbe_up(adapter);
8445 
8446 	netif_device_attach(netdev);
8447 }
8448 
8449 static const struct pci_error_handlers ixgbe_err_handler = {
8450 	.error_detected = ixgbe_io_error_detected,
8451 	.slot_reset = ixgbe_io_slot_reset,
8452 	.resume = ixgbe_io_resume,
8453 };
8454 
8455 static struct pci_driver ixgbe_driver = {
8456 	.name     = ixgbe_driver_name,
8457 	.id_table = ixgbe_pci_tbl,
8458 	.probe    = ixgbe_probe,
8459 	.remove   = ixgbe_remove,
8460 #ifdef CONFIG_PM
8461 	.suspend  = ixgbe_suspend,
8462 	.resume   = ixgbe_resume,
8463 #endif
8464 	.shutdown = ixgbe_shutdown,
8465 	.sriov_configure = ixgbe_pci_sriov_configure,
8466 	.err_handler = &ixgbe_err_handler
8467 };
8468 
8469 /**
8470  * ixgbe_init_module - Driver Registration Routine
8471  *
8472  * ixgbe_init_module is the first routine called when the driver is
8473  * loaded. All it does is register with the PCI subsystem.
8474  **/
8475 static int __init ixgbe_init_module(void)
8476 {
8477 	int ret;
8478 	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
8479 	pr_info("%s\n", ixgbe_copyright);
8480 
8481 	ixgbe_dbg_init();
8482 
8483 	ret = pci_register_driver(&ixgbe_driver);
8484 	if (ret) {
8485 		ixgbe_dbg_exit();
8486 		return ret;
8487 	}
8488 
8489 #ifdef CONFIG_IXGBE_DCA
8490 	dca_register_notify(&dca_notifier);
8491 #endif
8492 
8493 	return 0;
8494 }
8495 
8496 module_init(ixgbe_init_module);
8497 
8498 /**
8499  * ixgbe_exit_module - Driver Exit Cleanup Routine
8500  *
8501  * ixgbe_exit_module is called just before the driver is removed
8502  * from memory.
8503  **/
8504 static void __exit ixgbe_exit_module(void)
8505 {
8506 #ifdef CONFIG_IXGBE_DCA
8507 	dca_unregister_notify(&dca_notifier);
8508 #endif
8509 	pci_unregister_driver(&ixgbe_driver);
8510 
8511 	ixgbe_dbg_exit();
8512 
8513 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
8514 }
8515 
8516 #ifdef CONFIG_IXGBE_DCA
8517 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
8518 			    void *p)
8519 {
8520 	int ret_val;
8521 
8522 	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
8523 					 __ixgbe_notify_dca);
8524 
8525 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
8526 }
8527 
8528 #endif /* CONFIG_IXGBE_DCA */
8529 
8530 module_exit(ixgbe_exit_module);
8531 
8532 /* ixgbe_main.c */
8533