1 /*******************************************************************************
2 
3   Intel(R) 82576 Virtual Function Linux driver
4   Copyright(c) 2009 - 2012 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, see <http://www.gnu.org/licenses/>.
17 
18   The full GNU General Public License is included in this distribution in
19   the file called "COPYING".
20 
21   Contact Information:
22   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 
25 *******************************************************************************/
26 
27 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28 
29 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/init.h>
32 #include <linux/pci.h>
33 #include <linux/vmalloc.h>
34 #include <linux/pagemap.h>
35 #include <linux/delay.h>
36 #include <linux/netdevice.h>
37 #include <linux/tcp.h>
38 #include <linux/ipv6.h>
39 #include <linux/slab.h>
40 #include <net/checksum.h>
41 #include <net/ip6_checksum.h>
42 #include <linux/mii.h>
43 #include <linux/ethtool.h>
44 #include <linux/if_vlan.h>
45 #include <linux/prefetch.h>
46 #include <linux/sctp.h>
47 
48 #include "igbvf.h"
49 
50 #define DRV_VERSION "2.0.2-k"
51 char igbvf_driver_name[] = "igbvf";
52 const char igbvf_driver_version[] = DRV_VERSION;
53 static const char igbvf_driver_string[] =
54 		  "Intel(R) Gigabit Virtual Function Network Driver";
55 static const char igbvf_copyright[] =
56 		  "Copyright (c) 2009 - 2012 Intel Corporation.";
57 
58 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
59 static int debug = -1;
60 module_param(debug, int, 0);
61 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
62 
63 static int igbvf_poll(struct napi_struct *napi, int budget);
64 static void igbvf_reset(struct igbvf_adapter *);
65 static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
66 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
67 
68 static struct igbvf_info igbvf_vf_info = {
69 	.mac		= e1000_vfadapt,
70 	.flags		= 0,
71 	.pba		= 10,
72 	.init_ops	= e1000_init_function_pointers_vf,
73 };
74 
75 static struct igbvf_info igbvf_i350_vf_info = {
76 	.mac		= e1000_vfadapt_i350,
77 	.flags		= 0,
78 	.pba		= 10,
79 	.init_ops	= e1000_init_function_pointers_vf,
80 };
81 
82 static const struct igbvf_info *igbvf_info_tbl[] = {
83 	[board_vf]	= &igbvf_vf_info,
84 	[board_i350_vf]	= &igbvf_i350_vf_info,
85 };
86 
87 /**
88  * igbvf_desc_unused - calculate if we have unused descriptors
89  * @rx_ring: address of receive ring structure
90  **/
91 static int igbvf_desc_unused(struct igbvf_ring *ring)
92 {
93 	if (ring->next_to_clean > ring->next_to_use)
94 		return ring->next_to_clean - ring->next_to_use - 1;
95 
96 	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
97 }
98 
99 /**
100  * igbvf_receive_skb - helper function to handle Rx indications
101  * @adapter: board private structure
102  * @status: descriptor status field as written by hardware
103  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
104  * @skb: pointer to sk_buff to be indicated to stack
105  **/
106 static void igbvf_receive_skb(struct igbvf_adapter *adapter,
107 			      struct net_device *netdev,
108 			      struct sk_buff *skb,
109 			      u32 status, u16 vlan)
110 {
111 	u16 vid;
112 
113 	if (status & E1000_RXD_STAT_VP) {
114 		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
115 		    (status & E1000_RXDEXT_STATERR_LB))
116 			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
117 		else
118 			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
119 		if (test_bit(vid, adapter->active_vlans))
120 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
121 	}
122 
123 	napi_gro_receive(&adapter->rx_ring->napi, skb);
124 }
125 
126 static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
127 					 u32 status_err, struct sk_buff *skb)
128 {
129 	skb_checksum_none_assert(skb);
130 
131 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
132 	if ((status_err & E1000_RXD_STAT_IXSM) ||
133 	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
134 		return;
135 
136 	/* TCP/UDP checksum error bit is set */
137 	if (status_err &
138 	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
139 		/* let the stack verify checksum errors */
140 		adapter->hw_csum_err++;
141 		return;
142 	}
143 
144 	/* It must be a TCP or UDP packet with a valid checksum */
145 	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
146 		skb->ip_summed = CHECKSUM_UNNECESSARY;
147 
148 	adapter->hw_csum_good++;
149 }
150 
151 /**
152  * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
153  * @rx_ring: address of ring structure to repopulate
154  * @cleaned_count: number of buffers to repopulate
155  **/
156 static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
157 				   int cleaned_count)
158 {
159 	struct igbvf_adapter *adapter = rx_ring->adapter;
160 	struct net_device *netdev = adapter->netdev;
161 	struct pci_dev *pdev = adapter->pdev;
162 	union e1000_adv_rx_desc *rx_desc;
163 	struct igbvf_buffer *buffer_info;
164 	struct sk_buff *skb;
165 	unsigned int i;
166 	int bufsz;
167 
168 	i = rx_ring->next_to_use;
169 	buffer_info = &rx_ring->buffer_info[i];
170 
171 	if (adapter->rx_ps_hdr_size)
172 		bufsz = adapter->rx_ps_hdr_size;
173 	else
174 		bufsz = adapter->rx_buffer_len;
175 
176 	while (cleaned_count--) {
177 		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
178 
179 		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
180 			if (!buffer_info->page) {
181 				buffer_info->page = alloc_page(GFP_ATOMIC);
182 				if (!buffer_info->page) {
183 					adapter->alloc_rx_buff_failed++;
184 					goto no_buffers;
185 				}
186 				buffer_info->page_offset = 0;
187 			} else {
188 				buffer_info->page_offset ^= PAGE_SIZE / 2;
189 			}
190 			buffer_info->page_dma =
191 				dma_map_page(&pdev->dev, buffer_info->page,
192 					     buffer_info->page_offset,
193 					     PAGE_SIZE / 2,
194 					     DMA_FROM_DEVICE);
195 			if (dma_mapping_error(&pdev->dev,
196 					      buffer_info->page_dma)) {
197 				__free_page(buffer_info->page);
198 				buffer_info->page = NULL;
199 				dev_err(&pdev->dev, "RX DMA map failed\n");
200 				break;
201 			}
202 		}
203 
204 		if (!buffer_info->skb) {
205 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
206 			if (!skb) {
207 				adapter->alloc_rx_buff_failed++;
208 				goto no_buffers;
209 			}
210 
211 			buffer_info->skb = skb;
212 			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
213 							  bufsz,
214 							  DMA_FROM_DEVICE);
215 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
216 				dev_kfree_skb(buffer_info->skb);
217 				buffer_info->skb = NULL;
218 				dev_err(&pdev->dev, "RX DMA map failed\n");
219 				goto no_buffers;
220 			}
221 		}
222 		/* Refresh the desc even if buffer_addrs didn't change because
223 		 * each write-back erases this info.
224 		 */
225 		if (adapter->rx_ps_hdr_size) {
226 			rx_desc->read.pkt_addr =
227 			     cpu_to_le64(buffer_info->page_dma);
228 			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
229 		} else {
230 			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
231 			rx_desc->read.hdr_addr = 0;
232 		}
233 
234 		i++;
235 		if (i == rx_ring->count)
236 			i = 0;
237 		buffer_info = &rx_ring->buffer_info[i];
238 	}
239 
240 no_buffers:
241 	if (rx_ring->next_to_use != i) {
242 		rx_ring->next_to_use = i;
243 		if (i == 0)
244 			i = (rx_ring->count - 1);
245 		else
246 			i--;
247 
248 		/* Force memory writes to complete before letting h/w
249 		 * know there are new descriptors to fetch.  (Only
250 		 * applicable for weak-ordered memory model archs,
251 		 * such as IA-64).
252 		*/
253 		wmb();
254 		writel(i, adapter->hw.hw_addr + rx_ring->tail);
255 	}
256 }
257 
258 /**
259  * igbvf_clean_rx_irq - Send received data up the network stack; legacy
260  * @adapter: board private structure
261  *
262  * the return value indicates whether actual cleaning was done, there
263  * is no guarantee that everything was cleaned
264  **/
265 static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
266 			       int *work_done, int work_to_do)
267 {
268 	struct igbvf_ring *rx_ring = adapter->rx_ring;
269 	struct net_device *netdev = adapter->netdev;
270 	struct pci_dev *pdev = adapter->pdev;
271 	union e1000_adv_rx_desc *rx_desc, *next_rxd;
272 	struct igbvf_buffer *buffer_info, *next_buffer;
273 	struct sk_buff *skb;
274 	bool cleaned = false;
275 	int cleaned_count = 0;
276 	unsigned int total_bytes = 0, total_packets = 0;
277 	unsigned int i;
278 	u32 length, hlen, staterr;
279 
280 	i = rx_ring->next_to_clean;
281 	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
282 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
283 
284 	while (staterr & E1000_RXD_STAT_DD) {
285 		if (*work_done >= work_to_do)
286 			break;
287 		(*work_done)++;
288 		rmb(); /* read descriptor and rx_buffer_info after status DD */
289 
290 		buffer_info = &rx_ring->buffer_info[i];
291 
292 		/* HW will not DMA in data larger than the given buffer, even
293 		 * if it parses the (NFS, of course) header to be larger.  In
294 		 * that case, it fills the header buffer and spills the rest
295 		 * into the page.
296 		 */
297 		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
298 		       & E1000_RXDADV_HDRBUFLEN_MASK) >>
299 		       E1000_RXDADV_HDRBUFLEN_SHIFT;
300 		if (hlen > adapter->rx_ps_hdr_size)
301 			hlen = adapter->rx_ps_hdr_size;
302 
303 		length = le16_to_cpu(rx_desc->wb.upper.length);
304 		cleaned = true;
305 		cleaned_count++;
306 
307 		skb = buffer_info->skb;
308 		prefetch(skb->data - NET_IP_ALIGN);
309 		buffer_info->skb = NULL;
310 		if (!adapter->rx_ps_hdr_size) {
311 			dma_unmap_single(&pdev->dev, buffer_info->dma,
312 					 adapter->rx_buffer_len,
313 					 DMA_FROM_DEVICE);
314 			buffer_info->dma = 0;
315 			skb_put(skb, length);
316 			goto send_up;
317 		}
318 
319 		if (!skb_shinfo(skb)->nr_frags) {
320 			dma_unmap_single(&pdev->dev, buffer_info->dma,
321 					 adapter->rx_ps_hdr_size,
322 					 DMA_FROM_DEVICE);
323 			buffer_info->dma = 0;
324 			skb_put(skb, hlen);
325 		}
326 
327 		if (length) {
328 			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
329 				       PAGE_SIZE / 2,
330 				       DMA_FROM_DEVICE);
331 			buffer_info->page_dma = 0;
332 
333 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
334 					   buffer_info->page,
335 					   buffer_info->page_offset,
336 					   length);
337 
338 			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
339 			    (page_count(buffer_info->page) != 1))
340 				buffer_info->page = NULL;
341 			else
342 				get_page(buffer_info->page);
343 
344 			skb->len += length;
345 			skb->data_len += length;
346 			skb->truesize += PAGE_SIZE / 2;
347 		}
348 send_up:
349 		i++;
350 		if (i == rx_ring->count)
351 			i = 0;
352 		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
353 		prefetch(next_rxd);
354 		next_buffer = &rx_ring->buffer_info[i];
355 
356 		if (!(staterr & E1000_RXD_STAT_EOP)) {
357 			buffer_info->skb = next_buffer->skb;
358 			buffer_info->dma = next_buffer->dma;
359 			next_buffer->skb = skb;
360 			next_buffer->dma = 0;
361 			goto next_desc;
362 		}
363 
364 		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
365 			dev_kfree_skb_irq(skb);
366 			goto next_desc;
367 		}
368 
369 		total_bytes += skb->len;
370 		total_packets++;
371 
372 		igbvf_rx_checksum_adv(adapter, staterr, skb);
373 
374 		skb->protocol = eth_type_trans(skb, netdev);
375 
376 		igbvf_receive_skb(adapter, netdev, skb, staterr,
377 				  rx_desc->wb.upper.vlan);
378 
379 next_desc:
380 		rx_desc->wb.upper.status_error = 0;
381 
382 		/* return some buffers to hardware, one at a time is too slow */
383 		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
384 			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
385 			cleaned_count = 0;
386 		}
387 
388 		/* use prefetched values */
389 		rx_desc = next_rxd;
390 		buffer_info = next_buffer;
391 
392 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
393 	}
394 
395 	rx_ring->next_to_clean = i;
396 	cleaned_count = igbvf_desc_unused(rx_ring);
397 
398 	if (cleaned_count)
399 		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
400 
401 	adapter->total_rx_packets += total_packets;
402 	adapter->total_rx_bytes += total_bytes;
403 	adapter->net_stats.rx_bytes += total_bytes;
404 	adapter->net_stats.rx_packets += total_packets;
405 	return cleaned;
406 }
407 
408 static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
409 			    struct igbvf_buffer *buffer_info)
410 {
411 	if (buffer_info->dma) {
412 		if (buffer_info->mapped_as_page)
413 			dma_unmap_page(&adapter->pdev->dev,
414 				       buffer_info->dma,
415 				       buffer_info->length,
416 				       DMA_TO_DEVICE);
417 		else
418 			dma_unmap_single(&adapter->pdev->dev,
419 					 buffer_info->dma,
420 					 buffer_info->length,
421 					 DMA_TO_DEVICE);
422 		buffer_info->dma = 0;
423 	}
424 	if (buffer_info->skb) {
425 		dev_kfree_skb_any(buffer_info->skb);
426 		buffer_info->skb = NULL;
427 	}
428 	buffer_info->time_stamp = 0;
429 }
430 
431 /**
432  * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
433  * @adapter: board private structure
434  *
435  * Return 0 on success, negative on failure
436  **/
437 int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
438 			     struct igbvf_ring *tx_ring)
439 {
440 	struct pci_dev *pdev = adapter->pdev;
441 	int size;
442 
443 	size = sizeof(struct igbvf_buffer) * tx_ring->count;
444 	tx_ring->buffer_info = vzalloc(size);
445 	if (!tx_ring->buffer_info)
446 		goto err;
447 
448 	/* round up to nearest 4K */
449 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
450 	tx_ring->size = ALIGN(tx_ring->size, 4096);
451 
452 	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
453 					   &tx_ring->dma, GFP_KERNEL);
454 	if (!tx_ring->desc)
455 		goto err;
456 
457 	tx_ring->adapter = adapter;
458 	tx_ring->next_to_use = 0;
459 	tx_ring->next_to_clean = 0;
460 
461 	return 0;
462 err:
463 	vfree(tx_ring->buffer_info);
464 	dev_err(&adapter->pdev->dev,
465 		"Unable to allocate memory for the transmit descriptor ring\n");
466 	return -ENOMEM;
467 }
468 
469 /**
470  * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
471  * @adapter: board private structure
472  *
473  * Returns 0 on success, negative on failure
474  **/
475 int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
476 			     struct igbvf_ring *rx_ring)
477 {
478 	struct pci_dev *pdev = adapter->pdev;
479 	int size, desc_len;
480 
481 	size = sizeof(struct igbvf_buffer) * rx_ring->count;
482 	rx_ring->buffer_info = vzalloc(size);
483 	if (!rx_ring->buffer_info)
484 		goto err;
485 
486 	desc_len = sizeof(union e1000_adv_rx_desc);
487 
488 	/* Round up to nearest 4K */
489 	rx_ring->size = rx_ring->count * desc_len;
490 	rx_ring->size = ALIGN(rx_ring->size, 4096);
491 
492 	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
493 					   &rx_ring->dma, GFP_KERNEL);
494 	if (!rx_ring->desc)
495 		goto err;
496 
497 	rx_ring->next_to_clean = 0;
498 	rx_ring->next_to_use = 0;
499 
500 	rx_ring->adapter = adapter;
501 
502 	return 0;
503 
504 err:
505 	vfree(rx_ring->buffer_info);
506 	rx_ring->buffer_info = NULL;
507 	dev_err(&adapter->pdev->dev,
508 		"Unable to allocate memory for the receive descriptor ring\n");
509 	return -ENOMEM;
510 }
511 
512 /**
513  * igbvf_clean_tx_ring - Free Tx Buffers
514  * @tx_ring: ring to be cleaned
515  **/
516 static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
517 {
518 	struct igbvf_adapter *adapter = tx_ring->adapter;
519 	struct igbvf_buffer *buffer_info;
520 	unsigned long size;
521 	unsigned int i;
522 
523 	if (!tx_ring->buffer_info)
524 		return;
525 
526 	/* Free all the Tx ring sk_buffs */
527 	for (i = 0; i < tx_ring->count; i++) {
528 		buffer_info = &tx_ring->buffer_info[i];
529 		igbvf_put_txbuf(adapter, buffer_info);
530 	}
531 
532 	size = sizeof(struct igbvf_buffer) * tx_ring->count;
533 	memset(tx_ring->buffer_info, 0, size);
534 
535 	/* Zero out the descriptor ring */
536 	memset(tx_ring->desc, 0, tx_ring->size);
537 
538 	tx_ring->next_to_use = 0;
539 	tx_ring->next_to_clean = 0;
540 
541 	writel(0, adapter->hw.hw_addr + tx_ring->head);
542 	writel(0, adapter->hw.hw_addr + tx_ring->tail);
543 }
544 
545 /**
546  * igbvf_free_tx_resources - Free Tx Resources per Queue
547  * @tx_ring: ring to free resources from
548  *
549  * Free all transmit software resources
550  **/
551 void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
552 {
553 	struct pci_dev *pdev = tx_ring->adapter->pdev;
554 
555 	igbvf_clean_tx_ring(tx_ring);
556 
557 	vfree(tx_ring->buffer_info);
558 	tx_ring->buffer_info = NULL;
559 
560 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
561 			  tx_ring->dma);
562 
563 	tx_ring->desc = NULL;
564 }
565 
566 /**
567  * igbvf_clean_rx_ring - Free Rx Buffers per Queue
568  * @adapter: board private structure
569  **/
570 static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
571 {
572 	struct igbvf_adapter *adapter = rx_ring->adapter;
573 	struct igbvf_buffer *buffer_info;
574 	struct pci_dev *pdev = adapter->pdev;
575 	unsigned long size;
576 	unsigned int i;
577 
578 	if (!rx_ring->buffer_info)
579 		return;
580 
581 	/* Free all the Rx ring sk_buffs */
582 	for (i = 0; i < rx_ring->count; i++) {
583 		buffer_info = &rx_ring->buffer_info[i];
584 		if (buffer_info->dma) {
585 			if (adapter->rx_ps_hdr_size) {
586 				dma_unmap_single(&pdev->dev, buffer_info->dma,
587 						 adapter->rx_ps_hdr_size,
588 						 DMA_FROM_DEVICE);
589 			} else {
590 				dma_unmap_single(&pdev->dev, buffer_info->dma,
591 						 adapter->rx_buffer_len,
592 						 DMA_FROM_DEVICE);
593 			}
594 			buffer_info->dma = 0;
595 		}
596 
597 		if (buffer_info->skb) {
598 			dev_kfree_skb(buffer_info->skb);
599 			buffer_info->skb = NULL;
600 		}
601 
602 		if (buffer_info->page) {
603 			if (buffer_info->page_dma)
604 				dma_unmap_page(&pdev->dev,
605 					       buffer_info->page_dma,
606 					       PAGE_SIZE / 2,
607 					       DMA_FROM_DEVICE);
608 			put_page(buffer_info->page);
609 			buffer_info->page = NULL;
610 			buffer_info->page_dma = 0;
611 			buffer_info->page_offset = 0;
612 		}
613 	}
614 
615 	size = sizeof(struct igbvf_buffer) * rx_ring->count;
616 	memset(rx_ring->buffer_info, 0, size);
617 
618 	/* Zero out the descriptor ring */
619 	memset(rx_ring->desc, 0, rx_ring->size);
620 
621 	rx_ring->next_to_clean = 0;
622 	rx_ring->next_to_use = 0;
623 
624 	writel(0, adapter->hw.hw_addr + rx_ring->head);
625 	writel(0, adapter->hw.hw_addr + rx_ring->tail);
626 }
627 
628 /**
629  * igbvf_free_rx_resources - Free Rx Resources
630  * @rx_ring: ring to clean the resources from
631  *
632  * Free all receive software resources
633  **/
634 
635 void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
636 {
637 	struct pci_dev *pdev = rx_ring->adapter->pdev;
638 
639 	igbvf_clean_rx_ring(rx_ring);
640 
641 	vfree(rx_ring->buffer_info);
642 	rx_ring->buffer_info = NULL;
643 
644 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
645 			  rx_ring->dma);
646 	rx_ring->desc = NULL;
647 }
648 
649 /**
650  * igbvf_update_itr - update the dynamic ITR value based on statistics
651  * @adapter: pointer to adapter
652  * @itr_setting: current adapter->itr
653  * @packets: the number of packets during this measurement interval
654  * @bytes: the number of bytes during this measurement interval
655  *
656  * Stores a new ITR value based on packets and byte counts during the last
657  * interrupt.  The advantage of per interrupt computation is faster updates
658  * and more accurate ITR for the current traffic pattern.  Constants in this
659  * function were computed based on theoretical maximum wire speed and thresholds
660  * were set based on testing data as well as attempting to minimize response
661  * time while increasing bulk throughput.
662  **/
663 static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
664 					   enum latency_range itr_setting,
665 					   int packets, int bytes)
666 {
667 	enum latency_range retval = itr_setting;
668 
669 	if (packets == 0)
670 		goto update_itr_done;
671 
672 	switch (itr_setting) {
673 	case lowest_latency:
674 		/* handle TSO and jumbo frames */
675 		if (bytes/packets > 8000)
676 			retval = bulk_latency;
677 		else if ((packets < 5) && (bytes > 512))
678 			retval = low_latency;
679 		break;
680 	case low_latency:  /* 50 usec aka 20000 ints/s */
681 		if (bytes > 10000) {
682 			/* this if handles the TSO accounting */
683 			if (bytes/packets > 8000)
684 				retval = bulk_latency;
685 			else if ((packets < 10) || ((bytes/packets) > 1200))
686 				retval = bulk_latency;
687 			else if ((packets > 35))
688 				retval = lowest_latency;
689 		} else if (bytes/packets > 2000) {
690 			retval = bulk_latency;
691 		} else if (packets <= 2 && bytes < 512) {
692 			retval = lowest_latency;
693 		}
694 		break;
695 	case bulk_latency: /* 250 usec aka 4000 ints/s */
696 		if (bytes > 25000) {
697 			if (packets > 35)
698 				retval = low_latency;
699 		} else if (bytes < 6000) {
700 			retval = low_latency;
701 		}
702 		break;
703 	default:
704 		break;
705 	}
706 
707 update_itr_done:
708 	return retval;
709 }
710 
711 static int igbvf_range_to_itr(enum latency_range current_range)
712 {
713 	int new_itr;
714 
715 	switch (current_range) {
716 	/* counts and packets in update_itr are dependent on these numbers */
717 	case lowest_latency:
718 		new_itr = IGBVF_70K_ITR;
719 		break;
720 	case low_latency:
721 		new_itr = IGBVF_20K_ITR;
722 		break;
723 	case bulk_latency:
724 		new_itr = IGBVF_4K_ITR;
725 		break;
726 	default:
727 		new_itr = IGBVF_START_ITR;
728 		break;
729 	}
730 	return new_itr;
731 }
732 
733 static void igbvf_set_itr(struct igbvf_adapter *adapter)
734 {
735 	u32 new_itr;
736 
737 	adapter->tx_ring->itr_range =
738 			igbvf_update_itr(adapter,
739 					 adapter->tx_ring->itr_val,
740 					 adapter->total_tx_packets,
741 					 adapter->total_tx_bytes);
742 
743 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
744 	if (adapter->requested_itr == 3 &&
745 	    adapter->tx_ring->itr_range == lowest_latency)
746 		adapter->tx_ring->itr_range = low_latency;
747 
748 	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
749 
750 	if (new_itr != adapter->tx_ring->itr_val) {
751 		u32 current_itr = adapter->tx_ring->itr_val;
752 		/* this attempts to bias the interrupt rate towards Bulk
753 		 * by adding intermediate steps when interrupt rate is
754 		 * increasing
755 		 */
756 		new_itr = new_itr > current_itr ?
757 			  min(current_itr + (new_itr >> 2), new_itr) :
758 			  new_itr;
759 		adapter->tx_ring->itr_val = new_itr;
760 
761 		adapter->tx_ring->set_itr = 1;
762 	}
763 
764 	adapter->rx_ring->itr_range =
765 			igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
766 					 adapter->total_rx_packets,
767 					 adapter->total_rx_bytes);
768 	if (adapter->requested_itr == 3 &&
769 	    adapter->rx_ring->itr_range == lowest_latency)
770 		adapter->rx_ring->itr_range = low_latency;
771 
772 	new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);
773 
774 	if (new_itr != adapter->rx_ring->itr_val) {
775 		u32 current_itr = adapter->rx_ring->itr_val;
776 
777 		new_itr = new_itr > current_itr ?
778 			  min(current_itr + (new_itr >> 2), new_itr) :
779 			  new_itr;
780 		adapter->rx_ring->itr_val = new_itr;
781 
782 		adapter->rx_ring->set_itr = 1;
783 	}
784 }
785 
786 /**
787  * igbvf_clean_tx_irq - Reclaim resources after transmit completes
788  * @adapter: board private structure
789  *
790  * returns true if ring is completely cleaned
791  **/
792 static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
793 {
794 	struct igbvf_adapter *adapter = tx_ring->adapter;
795 	struct net_device *netdev = adapter->netdev;
796 	struct igbvf_buffer *buffer_info;
797 	struct sk_buff *skb;
798 	union e1000_adv_tx_desc *tx_desc, *eop_desc;
799 	unsigned int total_bytes = 0, total_packets = 0;
800 	unsigned int i, count = 0;
801 	bool cleaned = false;
802 
803 	i = tx_ring->next_to_clean;
804 	buffer_info = &tx_ring->buffer_info[i];
805 	eop_desc = buffer_info->next_to_watch;
806 
807 	do {
808 		/* if next_to_watch is not set then there is no work pending */
809 		if (!eop_desc)
810 			break;
811 
812 		/* prevent any other reads prior to eop_desc */
813 		read_barrier_depends();
814 
815 		/* if DD is not set pending work has not been completed */
816 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
817 			break;
818 
819 		/* clear next_to_watch to prevent false hangs */
820 		buffer_info->next_to_watch = NULL;
821 
822 		for (cleaned = false; !cleaned; count++) {
823 			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
824 			cleaned = (tx_desc == eop_desc);
825 			skb = buffer_info->skb;
826 
827 			if (skb) {
828 				unsigned int segs, bytecount;
829 
830 				/* gso_segs is currently only valid for tcp */
831 				segs = skb_shinfo(skb)->gso_segs ?: 1;
832 				/* multiply data chunks by size of headers */
833 				bytecount = ((segs - 1) * skb_headlen(skb)) +
834 					    skb->len;
835 				total_packets += segs;
836 				total_bytes += bytecount;
837 			}
838 
839 			igbvf_put_txbuf(adapter, buffer_info);
840 			tx_desc->wb.status = 0;
841 
842 			i++;
843 			if (i == tx_ring->count)
844 				i = 0;
845 
846 			buffer_info = &tx_ring->buffer_info[i];
847 		}
848 
849 		eop_desc = buffer_info->next_to_watch;
850 	} while (count < tx_ring->count);
851 
852 	tx_ring->next_to_clean = i;
853 
854 	if (unlikely(count && netif_carrier_ok(netdev) &&
855 	    igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
856 		/* Make sure that anybody stopping the queue after this
857 		 * sees the new next_to_clean.
858 		 */
859 		smp_mb();
860 		if (netif_queue_stopped(netdev) &&
861 		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
862 			netif_wake_queue(netdev);
863 			++adapter->restart_queue;
864 		}
865 	}
866 
867 	adapter->net_stats.tx_bytes += total_bytes;
868 	adapter->net_stats.tx_packets += total_packets;
869 	return count < tx_ring->count;
870 }
871 
872 static irqreturn_t igbvf_msix_other(int irq, void *data)
873 {
874 	struct net_device *netdev = data;
875 	struct igbvf_adapter *adapter = netdev_priv(netdev);
876 	struct e1000_hw *hw = &adapter->hw;
877 
878 	adapter->int_counter1++;
879 
880 	hw->mac.get_link_status = 1;
881 	if (!test_bit(__IGBVF_DOWN, &adapter->state))
882 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
883 
884 	ew32(EIMS, adapter->eims_other);
885 
886 	return IRQ_HANDLED;
887 }
888 
889 static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
890 {
891 	struct net_device *netdev = data;
892 	struct igbvf_adapter *adapter = netdev_priv(netdev);
893 	struct e1000_hw *hw = &adapter->hw;
894 	struct igbvf_ring *tx_ring = adapter->tx_ring;
895 
896 	if (tx_ring->set_itr) {
897 		writel(tx_ring->itr_val,
898 		       adapter->hw.hw_addr + tx_ring->itr_register);
899 		adapter->tx_ring->set_itr = 0;
900 	}
901 
902 	adapter->total_tx_bytes = 0;
903 	adapter->total_tx_packets = 0;
904 
905 	/* auto mask will automatically re-enable the interrupt when we write
906 	 * EICS
907 	 */
908 	if (!igbvf_clean_tx_irq(tx_ring))
909 		/* Ring was not completely cleaned, so fire another interrupt */
910 		ew32(EICS, tx_ring->eims_value);
911 	else
912 		ew32(EIMS, tx_ring->eims_value);
913 
914 	return IRQ_HANDLED;
915 }
916 
917 static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
918 {
919 	struct net_device *netdev = data;
920 	struct igbvf_adapter *adapter = netdev_priv(netdev);
921 
922 	adapter->int_counter0++;
923 
924 	/* Write the ITR value calculated at the end of the
925 	 * previous interrupt.
926 	 */
927 	if (adapter->rx_ring->set_itr) {
928 		writel(adapter->rx_ring->itr_val,
929 		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
930 		adapter->rx_ring->set_itr = 0;
931 	}
932 
933 	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
934 		adapter->total_rx_bytes = 0;
935 		adapter->total_rx_packets = 0;
936 		__napi_schedule(&adapter->rx_ring->napi);
937 	}
938 
939 	return IRQ_HANDLED;
940 }
941 
942 #define IGBVF_NO_QUEUE -1
943 
944 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
945 				int tx_queue, int msix_vector)
946 {
947 	struct e1000_hw *hw = &adapter->hw;
948 	u32 ivar, index;
949 
950 	/* 82576 uses a table-based method for assigning vectors.
951 	 * Each queue has a single entry in the table to which we write
952 	 * a vector number along with a "valid" bit.  Sadly, the layout
953 	 * of the table is somewhat counterintuitive.
954 	 */
955 	if (rx_queue > IGBVF_NO_QUEUE) {
956 		index = (rx_queue >> 1);
957 		ivar = array_er32(IVAR0, index);
958 		if (rx_queue & 0x1) {
959 			/* vector goes into third byte of register */
960 			ivar = ivar & 0xFF00FFFF;
961 			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
962 		} else {
963 			/* vector goes into low byte of register */
964 			ivar = ivar & 0xFFFFFF00;
965 			ivar |= msix_vector | E1000_IVAR_VALID;
966 		}
967 		adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
968 		array_ew32(IVAR0, index, ivar);
969 	}
970 	if (tx_queue > IGBVF_NO_QUEUE) {
971 		index = (tx_queue >> 1);
972 		ivar = array_er32(IVAR0, index);
973 		if (tx_queue & 0x1) {
974 			/* vector goes into high byte of register */
975 			ivar = ivar & 0x00FFFFFF;
976 			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
977 		} else {
978 			/* vector goes into second byte of register */
979 			ivar = ivar & 0xFFFF00FF;
980 			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
981 		}
982 		adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
983 		array_ew32(IVAR0, index, ivar);
984 	}
985 }
986 
987 /**
988  * igbvf_configure_msix - Configure MSI-X hardware
989  * @adapter: board private structure
990  *
991  * igbvf_configure_msix sets up the hardware to properly
992  * generate MSI-X interrupts.
993  **/
994 static void igbvf_configure_msix(struct igbvf_adapter *adapter)
995 {
996 	u32 tmp;
997 	struct e1000_hw *hw = &adapter->hw;
998 	struct igbvf_ring *tx_ring = adapter->tx_ring;
999 	struct igbvf_ring *rx_ring = adapter->rx_ring;
1000 	int vector = 0;
1001 
1002 	adapter->eims_enable_mask = 0;
1003 
1004 	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
1005 	adapter->eims_enable_mask |= tx_ring->eims_value;
1006 	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
1007 	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
1008 	adapter->eims_enable_mask |= rx_ring->eims_value;
1009 	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);
1010 
1011 	/* set vector for other causes, i.e. link changes */
1012 
1013 	tmp = (vector++ | E1000_IVAR_VALID);
1014 
1015 	ew32(IVAR_MISC, tmp);
1016 
1017 	adapter->eims_enable_mask = GENMASK(vector - 1, 0);
1018 	adapter->eims_other = BIT(vector - 1);
1019 	e1e_flush();
1020 }
1021 
1022 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
1023 {
1024 	if (adapter->msix_entries) {
1025 		pci_disable_msix(adapter->pdev);
1026 		kfree(adapter->msix_entries);
1027 		adapter->msix_entries = NULL;
1028 	}
1029 }
1030 
1031 /**
1032  * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
1033  * @adapter: board private structure
1034  *
1035  * Attempt to configure interrupts using the best available
1036  * capabilities of the hardware and kernel.
1037  **/
1038 static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
1039 {
1040 	int err = -ENOMEM;
1041 	int i;
1042 
1043 	/* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
1044 	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
1045 					GFP_KERNEL);
1046 	if (adapter->msix_entries) {
1047 		for (i = 0; i < 3; i++)
1048 			adapter->msix_entries[i].entry = i;
1049 
1050 		err = pci_enable_msix_range(adapter->pdev,
1051 					    adapter->msix_entries, 3, 3);
1052 	}
1053 
1054 	if (err < 0) {
1055 		/* MSI-X failed */
1056 		dev_err(&adapter->pdev->dev,
1057 			"Failed to initialize MSI-X interrupts.\n");
1058 		igbvf_reset_interrupt_capability(adapter);
1059 	}
1060 }
1061 
1062 /**
1063  * igbvf_request_msix - Initialize MSI-X interrupts
1064  * @adapter: board private structure
1065  *
1066  * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1067  * kernel.
1068  **/
1069 static int igbvf_request_msix(struct igbvf_adapter *adapter)
1070 {
1071 	struct net_device *netdev = adapter->netdev;
1072 	int err = 0, vector = 0;
1073 
1074 	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
1075 		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1076 		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1077 	} else {
1078 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1079 		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1080 	}
1081 
1082 	err = request_irq(adapter->msix_entries[vector].vector,
1083 			  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
1084 			  netdev);
1085 	if (err)
1086 		goto out;
1087 
1088 	adapter->tx_ring->itr_register = E1000_EITR(vector);
1089 	adapter->tx_ring->itr_val = adapter->current_itr;
1090 	vector++;
1091 
1092 	err = request_irq(adapter->msix_entries[vector].vector,
1093 			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
1094 			  netdev);
1095 	if (err)
1096 		goto out;
1097 
1098 	adapter->rx_ring->itr_register = E1000_EITR(vector);
1099 	adapter->rx_ring->itr_val = adapter->current_itr;
1100 	vector++;
1101 
1102 	err = request_irq(adapter->msix_entries[vector].vector,
1103 			  igbvf_msix_other, 0, netdev->name, netdev);
1104 	if (err)
1105 		goto out;
1106 
1107 	igbvf_configure_msix(adapter);
1108 	return 0;
1109 out:
1110 	return err;
1111 }
1112 
1113 /**
1114  * igbvf_alloc_queues - Allocate memory for all rings
1115  * @adapter: board private structure to initialize
1116  **/
1117 static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
1118 {
1119 	struct net_device *netdev = adapter->netdev;
1120 
1121 	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1122 	if (!adapter->tx_ring)
1123 		return -ENOMEM;
1124 
1125 	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1126 	if (!adapter->rx_ring) {
1127 		kfree(adapter->tx_ring);
1128 		return -ENOMEM;
1129 	}
1130 
1131 	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
1132 
1133 	return 0;
1134 }
1135 
1136 /**
1137  * igbvf_request_irq - initialize interrupts
1138  * @adapter: board private structure
1139  *
1140  * Attempts to configure interrupts using the best available
1141  * capabilities of the hardware and kernel.
1142  **/
1143 static int igbvf_request_irq(struct igbvf_adapter *adapter)
1144 {
1145 	int err = -1;
1146 
1147 	/* igbvf supports msi-x only */
1148 	if (adapter->msix_entries)
1149 		err = igbvf_request_msix(adapter);
1150 
1151 	if (!err)
1152 		return err;
1153 
1154 	dev_err(&adapter->pdev->dev,
1155 		"Unable to allocate interrupt, Error: %d\n", err);
1156 
1157 	return err;
1158 }
1159 
1160 static void igbvf_free_irq(struct igbvf_adapter *adapter)
1161 {
1162 	struct net_device *netdev = adapter->netdev;
1163 	int vector;
1164 
1165 	if (adapter->msix_entries) {
1166 		for (vector = 0; vector < 3; vector++)
1167 			free_irq(adapter->msix_entries[vector].vector, netdev);
1168 	}
1169 }
1170 
1171 /**
1172  * igbvf_irq_disable - Mask off interrupt generation on the NIC
1173  * @adapter: board private structure
1174  **/
1175 static void igbvf_irq_disable(struct igbvf_adapter *adapter)
1176 {
1177 	struct e1000_hw *hw = &adapter->hw;
1178 
1179 	ew32(EIMC, ~0);
1180 
1181 	if (adapter->msix_entries)
1182 		ew32(EIAC, 0);
1183 }
1184 
1185 /**
1186  * igbvf_irq_enable - Enable default interrupt generation settings
1187  * @adapter: board private structure
1188  **/
1189 static void igbvf_irq_enable(struct igbvf_adapter *adapter)
1190 {
1191 	struct e1000_hw *hw = &adapter->hw;
1192 
1193 	ew32(EIAC, adapter->eims_enable_mask);
1194 	ew32(EIAM, adapter->eims_enable_mask);
1195 	ew32(EIMS, adapter->eims_enable_mask);
1196 }
1197 
1198 /**
1199  * igbvf_poll - NAPI Rx polling callback
1200  * @napi: struct associated with this polling callback
1201  * @budget: amount of packets driver is allowed to process this poll
1202  **/
1203 static int igbvf_poll(struct napi_struct *napi, int budget)
1204 {
1205 	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
1206 	struct igbvf_adapter *adapter = rx_ring->adapter;
1207 	struct e1000_hw *hw = &adapter->hw;
1208 	int work_done = 0;
1209 
1210 	igbvf_clean_rx_irq(adapter, &work_done, budget);
1211 
1212 	/* If not enough Rx work done, exit the polling mode */
1213 	if (work_done < budget) {
1214 		napi_complete_done(napi, work_done);
1215 
1216 		if (adapter->requested_itr & 3)
1217 			igbvf_set_itr(adapter);
1218 
1219 		if (!test_bit(__IGBVF_DOWN, &adapter->state))
1220 			ew32(EIMS, adapter->rx_ring->eims_value);
1221 	}
1222 
1223 	return work_done;
1224 }
1225 
1226 /**
1227  * igbvf_set_rlpml - set receive large packet maximum length
1228  * @adapter: board private structure
1229  *
1230  * Configure the maximum size of packets that will be received
1231  */
1232 static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
1233 {
1234 	int max_frame_size;
1235 	struct e1000_hw *hw = &adapter->hw;
1236 
1237 	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
1238 	e1000_rlpml_set_vf(hw, max_frame_size);
1239 }
1240 
1241 static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
1242 				 __be16 proto, u16 vid)
1243 {
1244 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1245 	struct e1000_hw *hw = &adapter->hw;
1246 
1247 	if (hw->mac.ops.set_vfta(hw, vid, true)) {
1248 		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
1249 		return -EINVAL;
1250 	}
1251 	set_bit(vid, adapter->active_vlans);
1252 	return 0;
1253 }
1254 
1255 static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
1256 				  __be16 proto, u16 vid)
1257 {
1258 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1259 	struct e1000_hw *hw = &adapter->hw;
1260 
1261 	if (hw->mac.ops.set_vfta(hw, vid, false)) {
1262 		dev_err(&adapter->pdev->dev,
1263 			"Failed to remove vlan id %d\n", vid);
1264 		return -EINVAL;
1265 	}
1266 	clear_bit(vid, adapter->active_vlans);
1267 	return 0;
1268 }
1269 
1270 static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1271 {
1272 	u16 vid;
1273 
1274 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1275 		igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
1276 }
1277 
1278 /**
1279  * igbvf_configure_tx - Configure Transmit Unit after Reset
1280  * @adapter: board private structure
1281  *
1282  * Configure the Tx unit of the MAC after a reset.
1283  **/
1284 static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1285 {
1286 	struct e1000_hw *hw = &adapter->hw;
1287 	struct igbvf_ring *tx_ring = adapter->tx_ring;
1288 	u64 tdba;
1289 	u32 txdctl, dca_txctrl;
1290 
1291 	/* disable transmits */
1292 	txdctl = er32(TXDCTL(0));
1293 	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1294 	e1e_flush();
1295 	msleep(10);
1296 
1297 	/* Setup the HW Tx Head and Tail descriptor pointers */
1298 	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
1299 	tdba = tx_ring->dma;
1300 	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
1301 	ew32(TDBAH(0), (tdba >> 32));
1302 	ew32(TDH(0), 0);
1303 	ew32(TDT(0), 0);
1304 	tx_ring->head = E1000_TDH(0);
1305 	tx_ring->tail = E1000_TDT(0);
1306 
1307 	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
1308 	 * MUST be delivered in order or it will completely screw up
1309 	 * our bookkeeping.
1310 	 */
1311 	dca_txctrl = er32(DCA_TXCTRL(0));
1312 	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1313 	ew32(DCA_TXCTRL(0), dca_txctrl);
1314 
1315 	/* enable transmits */
1316 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1317 	ew32(TXDCTL(0), txdctl);
1318 
1319 	/* Setup Transmit Descriptor Settings for eop descriptor */
1320 	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
1321 
1322 	/* enable Report Status bit */
1323 	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
1324 }
1325 
1326 /**
1327  * igbvf_setup_srrctl - configure the receive control registers
1328  * @adapter: Board private structure
1329  **/
1330 static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
1331 {
1332 	struct e1000_hw *hw = &adapter->hw;
1333 	u32 srrctl = 0;
1334 
1335 	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
1336 		    E1000_SRRCTL_BSIZEHDR_MASK |
1337 		    E1000_SRRCTL_BSIZEPKT_MASK);
1338 
1339 	/* Enable queue drop to avoid head of line blocking */
1340 	srrctl |= E1000_SRRCTL_DROP_EN;
1341 
1342 	/* Setup buffer sizes */
1343 	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
1344 		  E1000_SRRCTL_BSIZEPKT_SHIFT;
1345 
1346 	if (adapter->rx_buffer_len < 2048) {
1347 		adapter->rx_ps_hdr_size = 0;
1348 		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1349 	} else {
1350 		adapter->rx_ps_hdr_size = 128;
1351 		srrctl |= adapter->rx_ps_hdr_size <<
1352 			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1353 		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1354 	}
1355 
1356 	ew32(SRRCTL(0), srrctl);
1357 }
1358 
1359 /**
1360  * igbvf_configure_rx - Configure Receive Unit after Reset
1361  * @adapter: board private structure
1362  *
1363  * Configure the Rx unit of the MAC after a reset.
1364  **/
1365 static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1366 {
1367 	struct e1000_hw *hw = &adapter->hw;
1368 	struct igbvf_ring *rx_ring = adapter->rx_ring;
1369 	u64 rdba;
1370 	u32 rxdctl;
1371 
1372 	/* disable receives */
1373 	rxdctl = er32(RXDCTL(0));
1374 	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1375 	e1e_flush();
1376 	msleep(10);
1377 
1378 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1379 	 * the Base and Length of the Rx Descriptor Ring
1380 	 */
1381 	rdba = rx_ring->dma;
1382 	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
1383 	ew32(RDBAH(0), (rdba >> 32));
1384 	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1385 	rx_ring->head = E1000_RDH(0);
1386 	rx_ring->tail = E1000_RDT(0);
1387 	ew32(RDH(0), 0);
1388 	ew32(RDT(0), 0);
1389 
1390 	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1391 	rxdctl &= 0xFFF00000;
1392 	rxdctl |= IGBVF_RX_PTHRESH;
1393 	rxdctl |= IGBVF_RX_HTHRESH << 8;
1394 	rxdctl |= IGBVF_RX_WTHRESH << 16;
1395 
1396 	igbvf_set_rlpml(adapter);
1397 
1398 	/* enable receives */
1399 	ew32(RXDCTL(0), rxdctl);
1400 }
1401 
1402 /**
1403  * igbvf_set_multi - Multicast and Promiscuous mode set
1404  * @netdev: network interface device structure
1405  *
1406  * The set_multi entry point is called whenever the multicast address
1407  * list or the network interface flags are updated.  This routine is
1408  * responsible for configuring the hardware for proper multicast,
1409  * promiscuous mode, and all-multi behavior.
1410  **/
1411 static void igbvf_set_multi(struct net_device *netdev)
1412 {
1413 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1414 	struct e1000_hw *hw = &adapter->hw;
1415 	struct netdev_hw_addr *ha;
1416 	u8  *mta_list = NULL;
1417 	int i;
1418 
1419 	if (!netdev_mc_empty(netdev)) {
1420 		mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
1421 					 GFP_ATOMIC);
1422 		if (!mta_list)
1423 			return;
1424 	}
1425 
1426 	/* prepare a packed array of only addresses. */
1427 	i = 0;
1428 	netdev_for_each_mc_addr(ha, netdev)
1429 		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1430 
1431 	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1432 	kfree(mta_list);
1433 }
1434 
1435 /**
1436  * igbvf_configure - configure the hardware for Rx and Tx
1437  * @adapter: private board structure
1438  **/
1439 static void igbvf_configure(struct igbvf_adapter *adapter)
1440 {
1441 	igbvf_set_multi(adapter->netdev);
1442 
1443 	igbvf_restore_vlan(adapter);
1444 
1445 	igbvf_configure_tx(adapter);
1446 	igbvf_setup_srrctl(adapter);
1447 	igbvf_configure_rx(adapter);
1448 	igbvf_alloc_rx_buffers(adapter->rx_ring,
1449 			       igbvf_desc_unused(adapter->rx_ring));
1450 }
1451 
1452 /* igbvf_reset - bring the hardware into a known good state
1453  * @adapter: private board structure
1454  *
1455  * This function boots the hardware and enables some settings that
1456  * require a configuration cycle of the hardware - those cannot be
1457  * set/changed during runtime. After reset the device needs to be
1458  * properly configured for Rx, Tx etc.
1459  */
1460 static void igbvf_reset(struct igbvf_adapter *adapter)
1461 {
1462 	struct e1000_mac_info *mac = &adapter->hw.mac;
1463 	struct net_device *netdev = adapter->netdev;
1464 	struct e1000_hw *hw = &adapter->hw;
1465 
1466 	/* Allow time for pending master requests to run */
1467 	if (mac->ops.reset_hw(hw))
1468 		dev_err(&adapter->pdev->dev, "PF still resetting\n");
1469 
1470 	mac->ops.init_hw(hw);
1471 
1472 	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1473 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1474 		       netdev->addr_len);
1475 		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1476 		       netdev->addr_len);
1477 	}
1478 
1479 	adapter->last_reset = jiffies;
1480 }
1481 
1482 int igbvf_up(struct igbvf_adapter *adapter)
1483 {
1484 	struct e1000_hw *hw = &adapter->hw;
1485 
1486 	/* hardware has been reset, we need to reload some things */
1487 	igbvf_configure(adapter);
1488 
1489 	clear_bit(__IGBVF_DOWN, &adapter->state);
1490 
1491 	napi_enable(&adapter->rx_ring->napi);
1492 	if (adapter->msix_entries)
1493 		igbvf_configure_msix(adapter);
1494 
1495 	/* Clear any pending interrupts. */
1496 	er32(EICR);
1497 	igbvf_irq_enable(adapter);
1498 
1499 	/* start the watchdog */
1500 	hw->mac.get_link_status = 1;
1501 	mod_timer(&adapter->watchdog_timer, jiffies + 1);
1502 
1503 	return 0;
1504 }
1505 
1506 void igbvf_down(struct igbvf_adapter *adapter)
1507 {
1508 	struct net_device *netdev = adapter->netdev;
1509 	struct e1000_hw *hw = &adapter->hw;
1510 	u32 rxdctl, txdctl;
1511 
1512 	/* signal that we're down so the interrupt handler does not
1513 	 * reschedule our watchdog timer
1514 	 */
1515 	set_bit(__IGBVF_DOWN, &adapter->state);
1516 
1517 	/* disable receives in the hardware */
1518 	rxdctl = er32(RXDCTL(0));
1519 	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1520 
1521 	netif_carrier_off(netdev);
1522 	netif_stop_queue(netdev);
1523 
1524 	/* disable transmits in the hardware */
1525 	txdctl = er32(TXDCTL(0));
1526 	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1527 
1528 	/* flush both disables and wait for them to finish */
1529 	e1e_flush();
1530 	msleep(10);
1531 
1532 	napi_disable(&adapter->rx_ring->napi);
1533 
1534 	igbvf_irq_disable(adapter);
1535 
1536 	del_timer_sync(&adapter->watchdog_timer);
1537 
1538 	/* record the stats before reset*/
1539 	igbvf_update_stats(adapter);
1540 
1541 	adapter->link_speed = 0;
1542 	adapter->link_duplex = 0;
1543 
1544 	igbvf_reset(adapter);
1545 	igbvf_clean_tx_ring(adapter->tx_ring);
1546 	igbvf_clean_rx_ring(adapter->rx_ring);
1547 }
1548 
1549 void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1550 {
1551 	might_sleep();
1552 	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
1553 		usleep_range(1000, 2000);
1554 	igbvf_down(adapter);
1555 	igbvf_up(adapter);
1556 	clear_bit(__IGBVF_RESETTING, &adapter->state);
1557 }
1558 
1559 /**
1560  * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
1561  * @adapter: board private structure to initialize
1562  *
1563  * igbvf_sw_init initializes the Adapter private data structure.
1564  * Fields are initialized based on PCI device information and
1565  * OS network device settings (MTU size).
1566  **/
1567 static int igbvf_sw_init(struct igbvf_adapter *adapter)
1568 {
1569 	struct net_device *netdev = adapter->netdev;
1570 	s32 rc;
1571 
1572 	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
1573 	adapter->rx_ps_hdr_size = 0;
1574 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1575 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1576 
1577 	adapter->tx_int_delay = 8;
1578 	adapter->tx_abs_int_delay = 32;
1579 	adapter->rx_int_delay = 0;
1580 	adapter->rx_abs_int_delay = 8;
1581 	adapter->requested_itr = 3;
1582 	adapter->current_itr = IGBVF_START_ITR;
1583 
1584 	/* Set various function pointers */
1585 	adapter->ei->init_ops(&adapter->hw);
1586 
1587 	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
1588 	if (rc)
1589 		return rc;
1590 
1591 	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
1592 	if (rc)
1593 		return rc;
1594 
1595 	igbvf_set_interrupt_capability(adapter);
1596 
1597 	if (igbvf_alloc_queues(adapter))
1598 		return -ENOMEM;
1599 
1600 	spin_lock_init(&adapter->tx_queue_lock);
1601 
1602 	/* Explicitly disable IRQ since the NIC can be in any state. */
1603 	igbvf_irq_disable(adapter);
1604 
1605 	spin_lock_init(&adapter->stats_lock);
1606 
1607 	set_bit(__IGBVF_DOWN, &adapter->state);
1608 	return 0;
1609 }
1610 
1611 static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
1612 {
1613 	struct e1000_hw *hw = &adapter->hw;
1614 
1615 	adapter->stats.last_gprc = er32(VFGPRC);
1616 	adapter->stats.last_gorc = er32(VFGORC);
1617 	adapter->stats.last_gptc = er32(VFGPTC);
1618 	adapter->stats.last_gotc = er32(VFGOTC);
1619 	adapter->stats.last_mprc = er32(VFMPRC);
1620 	adapter->stats.last_gotlbc = er32(VFGOTLBC);
1621 	adapter->stats.last_gptlbc = er32(VFGPTLBC);
1622 	adapter->stats.last_gorlbc = er32(VFGORLBC);
1623 	adapter->stats.last_gprlbc = er32(VFGPRLBC);
1624 
1625 	adapter->stats.base_gprc = er32(VFGPRC);
1626 	adapter->stats.base_gorc = er32(VFGORC);
1627 	adapter->stats.base_gptc = er32(VFGPTC);
1628 	adapter->stats.base_gotc = er32(VFGOTC);
1629 	adapter->stats.base_mprc = er32(VFMPRC);
1630 	adapter->stats.base_gotlbc = er32(VFGOTLBC);
1631 	adapter->stats.base_gptlbc = er32(VFGPTLBC);
1632 	adapter->stats.base_gorlbc = er32(VFGORLBC);
1633 	adapter->stats.base_gprlbc = er32(VFGPRLBC);
1634 }
1635 
1636 /**
1637  * igbvf_open - Called when a network interface is made active
1638  * @netdev: network interface device structure
1639  *
1640  * Returns 0 on success, negative value on failure
1641  *
1642  * The open entry point is called when a network interface is made
1643  * active by the system (IFF_UP).  At this point all resources needed
1644  * for transmit and receive operations are allocated, the interrupt
1645  * handler is registered with the OS, the watchdog timer is started,
1646  * and the stack is notified that the interface is ready.
1647  **/
1648 static int igbvf_open(struct net_device *netdev)
1649 {
1650 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1651 	struct e1000_hw *hw = &adapter->hw;
1652 	int err;
1653 
1654 	/* disallow open during test */
1655 	if (test_bit(__IGBVF_TESTING, &adapter->state))
1656 		return -EBUSY;
1657 
1658 	/* allocate transmit descriptors */
1659 	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
1660 	if (err)
1661 		goto err_setup_tx;
1662 
1663 	/* allocate receive descriptors */
1664 	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
1665 	if (err)
1666 		goto err_setup_rx;
1667 
1668 	/* before we allocate an interrupt, we must be ready to handle it.
1669 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1670 	 * as soon as we call pci_request_irq, so we have to setup our
1671 	 * clean_rx handler before we do so.
1672 	 */
1673 	igbvf_configure(adapter);
1674 
1675 	err = igbvf_request_irq(adapter);
1676 	if (err)
1677 		goto err_req_irq;
1678 
1679 	/* From here on the code is the same as igbvf_up() */
1680 	clear_bit(__IGBVF_DOWN, &adapter->state);
1681 
1682 	napi_enable(&adapter->rx_ring->napi);
1683 
1684 	/* clear any pending interrupts */
1685 	er32(EICR);
1686 
1687 	igbvf_irq_enable(adapter);
1688 
1689 	/* start the watchdog */
1690 	hw->mac.get_link_status = 1;
1691 	mod_timer(&adapter->watchdog_timer, jiffies + 1);
1692 
1693 	return 0;
1694 
1695 err_req_irq:
1696 	igbvf_free_rx_resources(adapter->rx_ring);
1697 err_setup_rx:
1698 	igbvf_free_tx_resources(adapter->tx_ring);
1699 err_setup_tx:
1700 	igbvf_reset(adapter);
1701 
1702 	return err;
1703 }
1704 
1705 /**
1706  * igbvf_close - Disables a network interface
1707  * @netdev: network interface device structure
1708  *
1709  * Returns 0, this is not allowed to fail
1710  *
1711  * The close entry point is called when an interface is de-activated
1712  * by the OS.  The hardware is still under the drivers control, but
1713  * needs to be disabled.  A global MAC reset is issued to stop the
1714  * hardware, and all transmit and receive resources are freed.
1715  **/
1716 static int igbvf_close(struct net_device *netdev)
1717 {
1718 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1719 
1720 	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
1721 	igbvf_down(adapter);
1722 
1723 	igbvf_free_irq(adapter);
1724 
1725 	igbvf_free_tx_resources(adapter->tx_ring);
1726 	igbvf_free_rx_resources(adapter->rx_ring);
1727 
1728 	return 0;
1729 }
1730 
1731 /**
1732  * igbvf_set_mac - Change the Ethernet Address of the NIC
1733  * @netdev: network interface device structure
1734  * @p: pointer to an address structure
1735  *
1736  * Returns 0 on success, negative on failure
1737  **/
1738 static int igbvf_set_mac(struct net_device *netdev, void *p)
1739 {
1740 	struct igbvf_adapter *adapter = netdev_priv(netdev);
1741 	struct e1000_hw *hw = &adapter->hw;
1742 	struct sockaddr *addr = p;
1743 
1744 	if (!is_valid_ether_addr(addr->sa_data))
1745 		return -EADDRNOTAVAIL;
1746 
1747 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1748 
1749 	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
1750 
1751 	if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
1752 		return -EADDRNOTAVAIL;
1753 
1754 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1755 
1756 	return 0;
1757 }
1758 
1759 #define UPDATE_VF_COUNTER(reg, name) \
1760 { \
1761 	u32 current_counter = er32(reg); \
1762 	if (current_counter < adapter->stats.last_##name) \
1763 		adapter->stats.name += 0x100000000LL; \
1764 	adapter->stats.last_##name = current_counter; \
1765 	adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1766 	adapter->stats.name |= current_counter; \
1767 }
1768 
1769 /**
1770  * igbvf_update_stats - Update the board statistics counters
1771  * @adapter: board private structure
1772 **/
1773 void igbvf_update_stats(struct igbvf_adapter *adapter)
1774 {
1775 	struct e1000_hw *hw = &adapter->hw;
1776 	struct pci_dev *pdev = adapter->pdev;
1777 
1778 	/* Prevent stats update while adapter is being reset, link is down
1779 	 * or if the pci connection is down.
1780 	 */
1781 	if (adapter->link_speed == 0)
1782 		return;
1783 
1784 	if (test_bit(__IGBVF_RESETTING, &adapter->state))
1785 		return;
1786 
1787 	if (pci_channel_offline(pdev))
1788 		return;
1789 
1790 	UPDATE_VF_COUNTER(VFGPRC, gprc);
1791 	UPDATE_VF_COUNTER(VFGORC, gorc);
1792 	UPDATE_VF_COUNTER(VFGPTC, gptc);
1793 	UPDATE_VF_COUNTER(VFGOTC, gotc);
1794 	UPDATE_VF_COUNTER(VFMPRC, mprc);
1795 	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
1796 	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
1797 	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
1798 	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
1799 
1800 	/* Fill out the OS statistics structure */
1801 	adapter->net_stats.multicast = adapter->stats.mprc;
1802 }
1803 
1804 static void igbvf_print_link_info(struct igbvf_adapter *adapter)
1805 {
1806 	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
1807 		 adapter->link_speed,
1808 		 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
1809 }
1810 
1811 static bool igbvf_has_link(struct igbvf_adapter *adapter)
1812 {
1813 	struct e1000_hw *hw = &adapter->hw;
1814 	s32 ret_val = E1000_SUCCESS;
1815 	bool link_active;
1816 
1817 	/* If interface is down, stay link down */
1818 	if (test_bit(__IGBVF_DOWN, &adapter->state))
1819 		return false;
1820 
1821 	ret_val = hw->mac.ops.check_for_link(hw);
1822 	link_active = !hw->mac.get_link_status;
1823 
1824 	/* if check for link returns error we will need to reset */
1825 	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
1826 		schedule_work(&adapter->reset_task);
1827 
1828 	return link_active;
1829 }
1830 
1831 /**
1832  * igbvf_watchdog - Timer Call-back
1833  * @data: pointer to adapter cast into an unsigned long
1834  **/
1835 static void igbvf_watchdog(unsigned long data)
1836 {
1837 	struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;
1838 
1839 	/* Do the rest outside of interrupt context */
1840 	schedule_work(&adapter->watchdog_task);
1841 }
1842 
1843 static void igbvf_watchdog_task(struct work_struct *work)
1844 {
1845 	struct igbvf_adapter *adapter = container_of(work,
1846 						     struct igbvf_adapter,
1847 						     watchdog_task);
1848 	struct net_device *netdev = adapter->netdev;
1849 	struct e1000_mac_info *mac = &adapter->hw.mac;
1850 	struct igbvf_ring *tx_ring = adapter->tx_ring;
1851 	struct e1000_hw *hw = &adapter->hw;
1852 	u32 link;
1853 	int tx_pending = 0;
1854 
1855 	link = igbvf_has_link(adapter);
1856 
1857 	if (link) {
1858 		if (!netif_carrier_ok(netdev)) {
1859 			mac->ops.get_link_up_info(&adapter->hw,
1860 						  &adapter->link_speed,
1861 						  &adapter->link_duplex);
1862 			igbvf_print_link_info(adapter);
1863 
1864 			netif_carrier_on(netdev);
1865 			netif_wake_queue(netdev);
1866 		}
1867 	} else {
1868 		if (netif_carrier_ok(netdev)) {
1869 			adapter->link_speed = 0;
1870 			adapter->link_duplex = 0;
1871 			dev_info(&adapter->pdev->dev, "Link is Down\n");
1872 			netif_carrier_off(netdev);
1873 			netif_stop_queue(netdev);
1874 		}
1875 	}
1876 
1877 	if (netif_carrier_ok(netdev)) {
1878 		igbvf_update_stats(adapter);
1879 	} else {
1880 		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
1881 			      tx_ring->count);
1882 		if (tx_pending) {
1883 			/* We've lost link, so the controller stops DMA,
1884 			 * but we've got queued Tx work that's never going
1885 			 * to get done, so reset controller to flush Tx.
1886 			 * (Do the reset outside of interrupt context).
1887 			 */
1888 			adapter->tx_timeout_count++;
1889 			schedule_work(&adapter->reset_task);
1890 		}
1891 	}
1892 
1893 	/* Cause software interrupt to ensure Rx ring is cleaned */
1894 	ew32(EICS, adapter->rx_ring->eims_value);
1895 
1896 	/* Reset the timer */
1897 	if (!test_bit(__IGBVF_DOWN, &adapter->state))
1898 		mod_timer(&adapter->watchdog_timer,
1899 			  round_jiffies(jiffies + (2 * HZ)));
1900 }
1901 
1902 #define IGBVF_TX_FLAGS_CSUM		0x00000001
1903 #define IGBVF_TX_FLAGS_VLAN		0x00000002
1904 #define IGBVF_TX_FLAGS_TSO		0x00000004
1905 #define IGBVF_TX_FLAGS_IPV4		0x00000008
1906 #define IGBVF_TX_FLAGS_VLAN_MASK	0xffff0000
1907 #define IGBVF_TX_FLAGS_VLAN_SHIFT	16
1908 
1909 static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
1910 			      u32 type_tucmd, u32 mss_l4len_idx)
1911 {
1912 	struct e1000_adv_tx_context_desc *context_desc;
1913 	struct igbvf_buffer *buffer_info;
1914 	u16 i = tx_ring->next_to_use;
1915 
1916 	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1917 	buffer_info = &tx_ring->buffer_info[i];
1918 
1919 	i++;
1920 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1921 
1922 	/* set bits to identify this as an advanced context descriptor */
1923 	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
1924 
1925 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
1926 	context_desc->seqnum_seed	= 0;
1927 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
1928 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
1929 
1930 	buffer_info->time_stamp = jiffies;
1931 	buffer_info->dma = 0;
1932 }
1933 
1934 static int igbvf_tso(struct igbvf_ring *tx_ring,
1935 		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
1936 {
1937 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1938 	union {
1939 		struct iphdr *v4;
1940 		struct ipv6hdr *v6;
1941 		unsigned char *hdr;
1942 	} ip;
1943 	union {
1944 		struct tcphdr *tcp;
1945 		unsigned char *hdr;
1946 	} l4;
1947 	u32 paylen, l4_offset;
1948 	int err;
1949 
1950 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1951 		return 0;
1952 
1953 	if (!skb_is_gso(skb))
1954 		return 0;
1955 
1956 	err = skb_cow_head(skb, 0);
1957 	if (err < 0)
1958 		return err;
1959 
1960 	ip.hdr = skb_network_header(skb);
1961 	l4.hdr = skb_checksum_start(skb);
1962 
1963 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1964 	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
1965 
1966 	/* initialize outer IP header fields */
1967 	if (ip.v4->version == 4) {
1968 		/* IP header will have to cancel out any data that
1969 		 * is not a part of the outer IP header
1970 		 */
1971 		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
1972 						  csum_unfold(l4.tcp->check)));
1973 		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
1974 
1975 		ip.v4->tot_len = 0;
1976 	} else {
1977 		ip.v6->payload_len = 0;
1978 	}
1979 
1980 	/* determine offset of inner transport header */
1981 	l4_offset = l4.hdr - skb->data;
1982 
1983 	/* compute length of segmentation header */
1984 	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
1985 
1986 	/* remove payload length from inner checksum */
1987 	paylen = skb->len - l4_offset;
1988 	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
1989 
1990 	/* MSS L4LEN IDX */
1991 	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
1992 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
1993 
1994 	/* VLAN MACLEN IPLEN */
1995 	vlan_macip_lens = l4.hdr - ip.hdr;
1996 	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
1997 	vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
1998 
1999 	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
2000 
2001 	return 1;
2002 }
2003 
2004 static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
2005 {
2006 	unsigned int offset = 0;
2007 
2008 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
2009 
2010 	return offset == skb_checksum_start_offset(skb);
2011 }
2012 
2013 static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
2014 			  u32 tx_flags, __be16 protocol)
2015 {
2016 	u32 vlan_macip_lens = 0;
2017 	u32 type_tucmd = 0;
2018 
2019 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
2020 csum_failed:
2021 		if (!(tx_flags & IGBVF_TX_FLAGS_VLAN))
2022 			return false;
2023 		goto no_csum;
2024 	}
2025 
2026 	switch (skb->csum_offset) {
2027 	case offsetof(struct tcphdr, check):
2028 		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
2029 		/* fall through */
2030 	case offsetof(struct udphdr, check):
2031 		break;
2032 	case offsetof(struct sctphdr, checksum):
2033 		/* validate that this is actually an SCTP request */
2034 		if (((protocol == htons(ETH_P_IP)) &&
2035 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
2036 		    ((protocol == htons(ETH_P_IPV6)) &&
2037 		     igbvf_ipv6_csum_is_sctp(skb))) {
2038 			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
2039 			break;
2040 		}
2041 	default:
2042 		skb_checksum_help(skb);
2043 		goto csum_failed;
2044 	}
2045 
2046 	vlan_macip_lens = skb_checksum_start_offset(skb) -
2047 			  skb_network_offset(skb);
2048 no_csum:
2049 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
2050 	vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
2051 
2052 	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
2053 	return true;
2054 }
2055 
2056 static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
2057 {
2058 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2059 
2060 	/* there is enough descriptors then we don't need to worry  */
2061 	if (igbvf_desc_unused(adapter->tx_ring) >= size)
2062 		return 0;
2063 
2064 	netif_stop_queue(netdev);
2065 
2066 	/* Herbert's original patch had:
2067 	 *  smp_mb__after_netif_stop_queue();
2068 	 * but since that doesn't exist yet, just open code it.
2069 	 */
2070 	smp_mb();
2071 
2072 	/* We need to check again just in case room has been made available */
2073 	if (igbvf_desc_unused(adapter->tx_ring) < size)
2074 		return -EBUSY;
2075 
2076 	netif_wake_queue(netdev);
2077 
2078 	++adapter->restart_queue;
2079 	return 0;
2080 }
2081 
2082 #define IGBVF_MAX_TXD_PWR	16
2083 #define IGBVF_MAX_DATA_PER_TXD	(1u << IGBVF_MAX_TXD_PWR)
2084 
2085 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2086 				   struct igbvf_ring *tx_ring,
2087 				   struct sk_buff *skb)
2088 {
2089 	struct igbvf_buffer *buffer_info;
2090 	struct pci_dev *pdev = adapter->pdev;
2091 	unsigned int len = skb_headlen(skb);
2092 	unsigned int count = 0, i;
2093 	unsigned int f;
2094 
2095 	i = tx_ring->next_to_use;
2096 
2097 	buffer_info = &tx_ring->buffer_info[i];
2098 	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2099 	buffer_info->length = len;
2100 	/* set time_stamp *before* dma to help avoid a possible race */
2101 	buffer_info->time_stamp = jiffies;
2102 	buffer_info->mapped_as_page = false;
2103 	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
2104 					  DMA_TO_DEVICE);
2105 	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2106 		goto dma_error;
2107 
2108 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2109 		const struct skb_frag_struct *frag;
2110 
2111 		count++;
2112 		i++;
2113 		if (i == tx_ring->count)
2114 			i = 0;
2115 
2116 		frag = &skb_shinfo(skb)->frags[f];
2117 		len = skb_frag_size(frag);
2118 
2119 		buffer_info = &tx_ring->buffer_info[i];
2120 		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2121 		buffer_info->length = len;
2122 		buffer_info->time_stamp = jiffies;
2123 		buffer_info->mapped_as_page = true;
2124 		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
2125 						    DMA_TO_DEVICE);
2126 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2127 			goto dma_error;
2128 	}
2129 
2130 	tx_ring->buffer_info[i].skb = skb;
2131 
2132 	return ++count;
2133 
2134 dma_error:
2135 	dev_err(&pdev->dev, "TX DMA map failed\n");
2136 
2137 	/* clear timestamp and dma mappings for failed buffer_info mapping */
2138 	buffer_info->dma = 0;
2139 	buffer_info->time_stamp = 0;
2140 	buffer_info->length = 0;
2141 	buffer_info->mapped_as_page = false;
2142 	if (count)
2143 		count--;
2144 
2145 	/* clear timestamp and dma mappings for remaining portion of packet */
2146 	while (count--) {
2147 		if (i == 0)
2148 			i += tx_ring->count;
2149 		i--;
2150 		buffer_info = &tx_ring->buffer_info[i];
2151 		igbvf_put_txbuf(adapter, buffer_info);
2152 	}
2153 
2154 	return 0;
2155 }
2156 
2157 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2158 				      struct igbvf_ring *tx_ring,
2159 				      int tx_flags, int count,
2160 				      unsigned int first, u32 paylen,
2161 				      u8 hdr_len)
2162 {
2163 	union e1000_adv_tx_desc *tx_desc = NULL;
2164 	struct igbvf_buffer *buffer_info;
2165 	u32 olinfo_status = 0, cmd_type_len;
2166 	unsigned int i;
2167 
2168 	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2169 			E1000_ADVTXD_DCMD_DEXT);
2170 
2171 	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2172 		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2173 
2174 	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
2175 		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2176 
2177 		/* insert tcp checksum */
2178 		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2179 
2180 		/* insert ip checksum */
2181 		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
2182 			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2183 
2184 	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
2185 		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2186 	}
2187 
2188 	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2189 
2190 	i = tx_ring->next_to_use;
2191 	while (count--) {
2192 		buffer_info = &tx_ring->buffer_info[i];
2193 		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
2194 		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2195 		tx_desc->read.cmd_type_len =
2196 			 cpu_to_le32(cmd_type_len | buffer_info->length);
2197 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2198 		i++;
2199 		if (i == tx_ring->count)
2200 			i = 0;
2201 	}
2202 
2203 	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2204 	/* Force memory writes to complete before letting h/w
2205 	 * know there are new descriptors to fetch.  (Only
2206 	 * applicable for weak-ordered memory model archs,
2207 	 * such as IA-64).
2208 	 */
2209 	wmb();
2210 
2211 	tx_ring->buffer_info[first].next_to_watch = tx_desc;
2212 	tx_ring->next_to_use = i;
2213 	writel(i, adapter->hw.hw_addr + tx_ring->tail);
2214 	/* we need this if more than one processor can write to our tail
2215 	 * at a time, it synchronizes IO on IA64/Altix systems
2216 	 */
2217 	mmiowb();
2218 }
2219 
2220 static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2221 					     struct net_device *netdev,
2222 					     struct igbvf_ring *tx_ring)
2223 {
2224 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2225 	unsigned int first, tx_flags = 0;
2226 	u8 hdr_len = 0;
2227 	int count = 0;
2228 	int tso = 0;
2229 	__be16 protocol = vlan_get_protocol(skb);
2230 
2231 	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2232 		dev_kfree_skb_any(skb);
2233 		return NETDEV_TX_OK;
2234 	}
2235 
2236 	if (skb->len <= 0) {
2237 		dev_kfree_skb_any(skb);
2238 		return NETDEV_TX_OK;
2239 	}
2240 
2241 	/* need: count + 4 desc gap to keep tail from touching
2242 	 *       + 2 desc gap to keep tail from touching head,
2243 	 *       + 1 desc for skb->data,
2244 	 *       + 1 desc for context descriptor,
2245 	 * head, otherwise try next time
2246 	 */
2247 	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
2248 		/* this is a hard error */
2249 		return NETDEV_TX_BUSY;
2250 	}
2251 
2252 	if (skb_vlan_tag_present(skb)) {
2253 		tx_flags |= IGBVF_TX_FLAGS_VLAN;
2254 		tx_flags |= (skb_vlan_tag_get(skb) <<
2255 			     IGBVF_TX_FLAGS_VLAN_SHIFT);
2256 	}
2257 
2258 	if (protocol == htons(ETH_P_IP))
2259 		tx_flags |= IGBVF_TX_FLAGS_IPV4;
2260 
2261 	first = tx_ring->next_to_use;
2262 
2263 	tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
2264 	if (unlikely(tso < 0)) {
2265 		dev_kfree_skb_any(skb);
2266 		return NETDEV_TX_OK;
2267 	}
2268 
2269 	if (tso)
2270 		tx_flags |= IGBVF_TX_FLAGS_TSO;
2271 	else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) &&
2272 		 (skb->ip_summed == CHECKSUM_PARTIAL))
2273 		tx_flags |= IGBVF_TX_FLAGS_CSUM;
2274 
2275 	/* count reflects descriptors mapped, if 0 then mapping error
2276 	 * has occurred and we need to rewind the descriptor queue
2277 	 */
2278 	count = igbvf_tx_map_adv(adapter, tx_ring, skb);
2279 
2280 	if (count) {
2281 		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2282 				   first, skb->len, hdr_len);
2283 		/* Make sure there is space in the ring for the next send. */
2284 		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2285 	} else {
2286 		dev_kfree_skb_any(skb);
2287 		tx_ring->buffer_info[first].time_stamp = 0;
2288 		tx_ring->next_to_use = first;
2289 	}
2290 
2291 	return NETDEV_TX_OK;
2292 }
2293 
2294 static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
2295 				    struct net_device *netdev)
2296 {
2297 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2298 	struct igbvf_ring *tx_ring;
2299 
2300 	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2301 		dev_kfree_skb_any(skb);
2302 		return NETDEV_TX_OK;
2303 	}
2304 
2305 	tx_ring = &adapter->tx_ring[0];
2306 
2307 	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
2308 }
2309 
2310 /**
2311  * igbvf_tx_timeout - Respond to a Tx Hang
2312  * @netdev: network interface device structure
2313  **/
2314 static void igbvf_tx_timeout(struct net_device *netdev)
2315 {
2316 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2317 
2318 	/* Do the reset outside of interrupt context */
2319 	adapter->tx_timeout_count++;
2320 	schedule_work(&adapter->reset_task);
2321 }
2322 
2323 static void igbvf_reset_task(struct work_struct *work)
2324 {
2325 	struct igbvf_adapter *adapter;
2326 
2327 	adapter = container_of(work, struct igbvf_adapter, reset_task);
2328 
2329 	igbvf_reinit_locked(adapter);
2330 }
2331 
2332 /**
2333  * igbvf_get_stats - Get System Network Statistics
2334  * @netdev: network interface device structure
2335  *
2336  * Returns the address of the device statistics structure.
2337  * The statistics are actually updated from the timer callback.
2338  **/
2339 static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
2340 {
2341 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2342 
2343 	/* only return the current stats */
2344 	return &adapter->net_stats;
2345 }
2346 
2347 /**
2348  * igbvf_change_mtu - Change the Maximum Transfer Unit
2349  * @netdev: network interface device structure
2350  * @new_mtu: new value for maximum frame size
2351  *
2352  * Returns 0 on success, negative on failure
2353  **/
2354 static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2355 {
2356 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2357 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2358 
2359 	if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
2360 	    max_frame > MAX_JUMBO_FRAME_SIZE)
2361 		return -EINVAL;
2362 
2363 #define MAX_STD_JUMBO_FRAME_SIZE 9234
2364 	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2365 		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
2366 		return -EINVAL;
2367 	}
2368 
2369 	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2370 		usleep_range(1000, 2000);
2371 	/* igbvf_down has a dependency on max_frame_size */
2372 	adapter->max_frame_size = max_frame;
2373 	if (netif_running(netdev))
2374 		igbvf_down(adapter);
2375 
2376 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2377 	 * means we reserve 2 more, this pushes us to allocate from the next
2378 	 * larger slab size.
2379 	 * i.e. RXBUFFER_2048 --> size-4096 slab
2380 	 * However with the new *_jumbo_rx* routines, jumbo receives will use
2381 	 * fragmented skbs
2382 	 */
2383 
2384 	if (max_frame <= 1024)
2385 		adapter->rx_buffer_len = 1024;
2386 	else if (max_frame <= 2048)
2387 		adapter->rx_buffer_len = 2048;
2388 	else
2389 #if (PAGE_SIZE / 2) > 16384
2390 		adapter->rx_buffer_len = 16384;
2391 #else
2392 		adapter->rx_buffer_len = PAGE_SIZE / 2;
2393 #endif
2394 
2395 	/* adjust allocation if LPE protects us, and we aren't using SBP */
2396 	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2397 	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
2398 		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
2399 					 ETH_FCS_LEN;
2400 
2401 	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2402 		 netdev->mtu, new_mtu);
2403 	netdev->mtu = new_mtu;
2404 
2405 	if (netif_running(netdev))
2406 		igbvf_up(adapter);
2407 	else
2408 		igbvf_reset(adapter);
2409 
2410 	clear_bit(__IGBVF_RESETTING, &adapter->state);
2411 
2412 	return 0;
2413 }
2414 
2415 static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2416 {
2417 	switch (cmd) {
2418 	default:
2419 		return -EOPNOTSUPP;
2420 	}
2421 }
2422 
2423 static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
2424 {
2425 	struct net_device *netdev = pci_get_drvdata(pdev);
2426 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2427 #ifdef CONFIG_PM
2428 	int retval = 0;
2429 #endif
2430 
2431 	netif_device_detach(netdev);
2432 
2433 	if (netif_running(netdev)) {
2434 		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
2435 		igbvf_down(adapter);
2436 		igbvf_free_irq(adapter);
2437 	}
2438 
2439 #ifdef CONFIG_PM
2440 	retval = pci_save_state(pdev);
2441 	if (retval)
2442 		return retval;
2443 #endif
2444 
2445 	pci_disable_device(pdev);
2446 
2447 	return 0;
2448 }
2449 
2450 #ifdef CONFIG_PM
2451 static int igbvf_resume(struct pci_dev *pdev)
2452 {
2453 	struct net_device *netdev = pci_get_drvdata(pdev);
2454 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2455 	u32 err;
2456 
2457 	pci_restore_state(pdev);
2458 	err = pci_enable_device_mem(pdev);
2459 	if (err) {
2460 		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2461 		return err;
2462 	}
2463 
2464 	pci_set_master(pdev);
2465 
2466 	if (netif_running(netdev)) {
2467 		err = igbvf_request_irq(adapter);
2468 		if (err)
2469 			return err;
2470 	}
2471 
2472 	igbvf_reset(adapter);
2473 
2474 	if (netif_running(netdev))
2475 		igbvf_up(adapter);
2476 
2477 	netif_device_attach(netdev);
2478 
2479 	return 0;
2480 }
2481 #endif
2482 
2483 static void igbvf_shutdown(struct pci_dev *pdev)
2484 {
2485 	igbvf_suspend(pdev, PMSG_SUSPEND);
2486 }
2487 
2488 #ifdef CONFIG_NET_POLL_CONTROLLER
2489 /* Polling 'interrupt' - used by things like netconsole to send skbs
2490  * without having to re-enable interrupts. It's not called while
2491  * the interrupt routine is executing.
2492  */
2493 static void igbvf_netpoll(struct net_device *netdev)
2494 {
2495 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2496 
2497 	disable_irq(adapter->pdev->irq);
2498 
2499 	igbvf_clean_tx_irq(adapter->tx_ring);
2500 
2501 	enable_irq(adapter->pdev->irq);
2502 }
2503 #endif
2504 
2505 /**
2506  * igbvf_io_error_detected - called when PCI error is detected
2507  * @pdev: Pointer to PCI device
2508  * @state: The current pci connection state
2509  *
2510  * This function is called after a PCI bus error affecting
2511  * this device has been detected.
2512  */
2513 static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
2514 						pci_channel_state_t state)
2515 {
2516 	struct net_device *netdev = pci_get_drvdata(pdev);
2517 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2518 
2519 	netif_device_detach(netdev);
2520 
2521 	if (state == pci_channel_io_perm_failure)
2522 		return PCI_ERS_RESULT_DISCONNECT;
2523 
2524 	if (netif_running(netdev))
2525 		igbvf_down(adapter);
2526 	pci_disable_device(pdev);
2527 
2528 	/* Request a slot slot reset. */
2529 	return PCI_ERS_RESULT_NEED_RESET;
2530 }
2531 
2532 /**
2533  * igbvf_io_slot_reset - called after the pci bus has been reset.
2534  * @pdev: Pointer to PCI device
2535  *
2536  * Restart the card from scratch, as if from a cold-boot. Implementation
2537  * resembles the first-half of the igbvf_resume routine.
2538  */
2539 static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
2540 {
2541 	struct net_device *netdev = pci_get_drvdata(pdev);
2542 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2543 
2544 	if (pci_enable_device_mem(pdev)) {
2545 		dev_err(&pdev->dev,
2546 			"Cannot re-enable PCI device after reset.\n");
2547 		return PCI_ERS_RESULT_DISCONNECT;
2548 	}
2549 	pci_set_master(pdev);
2550 
2551 	igbvf_reset(adapter);
2552 
2553 	return PCI_ERS_RESULT_RECOVERED;
2554 }
2555 
2556 /**
2557  * igbvf_io_resume - called when traffic can start flowing again.
2558  * @pdev: Pointer to PCI device
2559  *
2560  * This callback is called when the error recovery driver tells us that
2561  * its OK to resume normal operation. Implementation resembles the
2562  * second-half of the igbvf_resume routine.
2563  */
2564 static void igbvf_io_resume(struct pci_dev *pdev)
2565 {
2566 	struct net_device *netdev = pci_get_drvdata(pdev);
2567 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2568 
2569 	if (netif_running(netdev)) {
2570 		if (igbvf_up(adapter)) {
2571 			dev_err(&pdev->dev,
2572 				"can't bring device back up after reset\n");
2573 			return;
2574 		}
2575 	}
2576 
2577 	netif_device_attach(netdev);
2578 }
2579 
2580 static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2581 {
2582 	struct e1000_hw *hw = &adapter->hw;
2583 	struct net_device *netdev = adapter->netdev;
2584 	struct pci_dev *pdev = adapter->pdev;
2585 
2586 	if (hw->mac.type == e1000_vfadapt_i350)
2587 		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
2588 	else
2589 		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2590 	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
2591 }
2592 
2593 static int igbvf_set_features(struct net_device *netdev,
2594 			      netdev_features_t features)
2595 {
2596 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2597 
2598 	if (features & NETIF_F_RXCSUM)
2599 		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
2600 	else
2601 		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
2602 
2603 	return 0;
2604 }
2605 
2606 #define IGBVF_MAX_MAC_HDR_LEN		127
2607 #define IGBVF_MAX_NETWORK_HDR_LEN	511
2608 
2609 static netdev_features_t
2610 igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
2611 		     netdev_features_t features)
2612 {
2613 	unsigned int network_hdr_len, mac_hdr_len;
2614 
2615 	/* Make certain the headers can be described by a context descriptor */
2616 	mac_hdr_len = skb_network_header(skb) - skb->data;
2617 	if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
2618 		return features & ~(NETIF_F_HW_CSUM |
2619 				    NETIF_F_SCTP_CRC |
2620 				    NETIF_F_HW_VLAN_CTAG_TX |
2621 				    NETIF_F_TSO |
2622 				    NETIF_F_TSO6);
2623 
2624 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2625 	if (unlikely(network_hdr_len >  IGBVF_MAX_NETWORK_HDR_LEN))
2626 		return features & ~(NETIF_F_HW_CSUM |
2627 				    NETIF_F_SCTP_CRC |
2628 				    NETIF_F_TSO |
2629 				    NETIF_F_TSO6);
2630 
2631 	/* We can only support IPV4 TSO in tunnels if we can mangle the
2632 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2633 	 */
2634 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2635 		features &= ~NETIF_F_TSO;
2636 
2637 	return features;
2638 }
2639 
2640 static const struct net_device_ops igbvf_netdev_ops = {
2641 	.ndo_open		= igbvf_open,
2642 	.ndo_stop		= igbvf_close,
2643 	.ndo_start_xmit		= igbvf_xmit_frame,
2644 	.ndo_get_stats		= igbvf_get_stats,
2645 	.ndo_set_rx_mode	= igbvf_set_multi,
2646 	.ndo_set_mac_address	= igbvf_set_mac,
2647 	.ndo_change_mtu		= igbvf_change_mtu,
2648 	.ndo_do_ioctl		= igbvf_ioctl,
2649 	.ndo_tx_timeout		= igbvf_tx_timeout,
2650 	.ndo_vlan_rx_add_vid	= igbvf_vlan_rx_add_vid,
2651 	.ndo_vlan_rx_kill_vid	= igbvf_vlan_rx_kill_vid,
2652 #ifdef CONFIG_NET_POLL_CONTROLLER
2653 	.ndo_poll_controller	= igbvf_netpoll,
2654 #endif
2655 	.ndo_set_features	= igbvf_set_features,
2656 	.ndo_features_check	= igbvf_features_check,
2657 };
2658 
2659 /**
2660  * igbvf_probe - Device Initialization Routine
2661  * @pdev: PCI device information struct
2662  * @ent: entry in igbvf_pci_tbl
2663  *
2664  * Returns 0 on success, negative on failure
2665  *
2666  * igbvf_probe initializes an adapter identified by a pci_dev structure.
2667  * The OS initialization, configuring of the adapter private structure,
2668  * and a hardware reset occur.
2669  **/
2670 static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2671 {
2672 	struct net_device *netdev;
2673 	struct igbvf_adapter *adapter;
2674 	struct e1000_hw *hw;
2675 	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
2676 
2677 	static int cards_found;
2678 	int err, pci_using_dac;
2679 
2680 	err = pci_enable_device_mem(pdev);
2681 	if (err)
2682 		return err;
2683 
2684 	pci_using_dac = 0;
2685 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2686 	if (!err) {
2687 		pci_using_dac = 1;
2688 	} else {
2689 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2690 		if (err) {
2691 			dev_err(&pdev->dev,
2692 				"No usable DMA configuration, aborting\n");
2693 			goto err_dma;
2694 		}
2695 	}
2696 
2697 	err = pci_request_regions(pdev, igbvf_driver_name);
2698 	if (err)
2699 		goto err_pci_reg;
2700 
2701 	pci_set_master(pdev);
2702 
2703 	err = -ENOMEM;
2704 	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
2705 	if (!netdev)
2706 		goto err_alloc_etherdev;
2707 
2708 	SET_NETDEV_DEV(netdev, &pdev->dev);
2709 
2710 	pci_set_drvdata(pdev, netdev);
2711 	adapter = netdev_priv(netdev);
2712 	hw = &adapter->hw;
2713 	adapter->netdev = netdev;
2714 	adapter->pdev = pdev;
2715 	adapter->ei = ei;
2716 	adapter->pba = ei->pba;
2717 	adapter->flags = ei->flags;
2718 	adapter->hw.back = adapter;
2719 	adapter->hw.mac.type = ei->mac;
2720 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2721 
2722 	/* PCI config space info */
2723 
2724 	hw->vendor_id = pdev->vendor;
2725 	hw->device_id = pdev->device;
2726 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
2727 	hw->subsystem_device_id = pdev->subsystem_device;
2728 	hw->revision_id = pdev->revision;
2729 
2730 	err = -EIO;
2731 	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
2732 				      pci_resource_len(pdev, 0));
2733 
2734 	if (!adapter->hw.hw_addr)
2735 		goto err_ioremap;
2736 
2737 	if (ei->get_variants) {
2738 		err = ei->get_variants(adapter);
2739 		if (err)
2740 			goto err_get_variants;
2741 	}
2742 
2743 	/* setup adapter struct */
2744 	err = igbvf_sw_init(adapter);
2745 	if (err)
2746 		goto err_sw_init;
2747 
2748 	/* construct the net_device struct */
2749 	netdev->netdev_ops = &igbvf_netdev_ops;
2750 
2751 	igbvf_set_ethtool_ops(netdev);
2752 	netdev->watchdog_timeo = 5 * HZ;
2753 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2754 
2755 	adapter->bd_number = cards_found++;
2756 
2757 	netdev->hw_features = NETIF_F_SG |
2758 			      NETIF_F_TSO |
2759 			      NETIF_F_TSO6 |
2760 			      NETIF_F_RXCSUM |
2761 			      NETIF_F_HW_CSUM |
2762 			      NETIF_F_SCTP_CRC;
2763 
2764 #define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
2765 				    NETIF_F_GSO_GRE_CSUM | \
2766 				    NETIF_F_GSO_IPXIP4 | \
2767 				    NETIF_F_GSO_IPXIP6 | \
2768 				    NETIF_F_GSO_UDP_TUNNEL | \
2769 				    NETIF_F_GSO_UDP_TUNNEL_CSUM)
2770 
2771 	netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
2772 	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
2773 			       IGBVF_GSO_PARTIAL_FEATURES;
2774 
2775 	netdev->features = netdev->hw_features;
2776 
2777 	if (pci_using_dac)
2778 		netdev->features |= NETIF_F_HIGHDMA;
2779 
2780 	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
2781 	netdev->mpls_features |= NETIF_F_HW_CSUM;
2782 	netdev->hw_enc_features |= netdev->vlan_features;
2783 
2784 	/* set this bit last since it cannot be part of vlan_features */
2785 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
2786 			    NETIF_F_HW_VLAN_CTAG_RX |
2787 			    NETIF_F_HW_VLAN_CTAG_TX;
2788 
2789 	/*reset the controller to put the device in a known good state */
2790 	err = hw->mac.ops.reset_hw(hw);
2791 	if (err) {
2792 		dev_info(&pdev->dev,
2793 			 "PF still in reset state. Is the PF interface up?\n");
2794 	} else {
2795 		err = hw->mac.ops.read_mac_addr(hw);
2796 		if (err)
2797 			dev_info(&pdev->dev, "Error reading MAC address.\n");
2798 		else if (is_zero_ether_addr(adapter->hw.mac.addr))
2799 			dev_info(&pdev->dev,
2800 				 "MAC address not assigned by administrator.\n");
2801 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2802 		       netdev->addr_len);
2803 	}
2804 
2805 	if (!is_valid_ether_addr(netdev->dev_addr)) {
2806 		dev_info(&pdev->dev, "Assigning random MAC address.\n");
2807 		eth_hw_addr_random(netdev);
2808 		memcpy(adapter->hw.mac.addr, netdev->dev_addr,
2809 		       netdev->addr_len);
2810 	}
2811 
2812 	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2813 		    (unsigned long)adapter);
2814 
2815 	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
2816 	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
2817 
2818 	/* ring size defaults */
2819 	adapter->rx_ring->count = 1024;
2820 	adapter->tx_ring->count = 1024;
2821 
2822 	/* reset the hardware with the new settings */
2823 	igbvf_reset(adapter);
2824 
2825 	/* set hardware-specific flags */
2826 	if (adapter->hw.mac.type == e1000_vfadapt_i350)
2827 		adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;
2828 
2829 	strcpy(netdev->name, "eth%d");
2830 	err = register_netdev(netdev);
2831 	if (err)
2832 		goto err_hw_init;
2833 
2834 	/* tell the stack to leave us alone until igbvf_open() is called */
2835 	netif_carrier_off(netdev);
2836 	netif_stop_queue(netdev);
2837 
2838 	igbvf_print_device_info(adapter);
2839 
2840 	igbvf_initialize_last_counter_stats(adapter);
2841 
2842 	return 0;
2843 
2844 err_hw_init:
2845 	kfree(adapter->tx_ring);
2846 	kfree(adapter->rx_ring);
2847 err_sw_init:
2848 	igbvf_reset_interrupt_capability(adapter);
2849 err_get_variants:
2850 	iounmap(adapter->hw.hw_addr);
2851 err_ioremap:
2852 	free_netdev(netdev);
2853 err_alloc_etherdev:
2854 	pci_release_regions(pdev);
2855 err_pci_reg:
2856 err_dma:
2857 	pci_disable_device(pdev);
2858 	return err;
2859 }
2860 
2861 /**
2862  * igbvf_remove - Device Removal Routine
2863  * @pdev: PCI device information struct
2864  *
2865  * igbvf_remove is called by the PCI subsystem to alert the driver
2866  * that it should release a PCI device.  The could be caused by a
2867  * Hot-Plug event, or because the driver is going to be removed from
2868  * memory.
2869  **/
2870 static void igbvf_remove(struct pci_dev *pdev)
2871 {
2872 	struct net_device *netdev = pci_get_drvdata(pdev);
2873 	struct igbvf_adapter *adapter = netdev_priv(netdev);
2874 	struct e1000_hw *hw = &adapter->hw;
2875 
2876 	/* The watchdog timer may be rescheduled, so explicitly
2877 	 * disable it from being rescheduled.
2878 	 */
2879 	set_bit(__IGBVF_DOWN, &adapter->state);
2880 	del_timer_sync(&adapter->watchdog_timer);
2881 
2882 	cancel_work_sync(&adapter->reset_task);
2883 	cancel_work_sync(&adapter->watchdog_task);
2884 
2885 	unregister_netdev(netdev);
2886 
2887 	igbvf_reset_interrupt_capability(adapter);
2888 
2889 	/* it is important to delete the NAPI struct prior to freeing the
2890 	 * Rx ring so that you do not end up with null pointer refs
2891 	 */
2892 	netif_napi_del(&adapter->rx_ring->napi);
2893 	kfree(adapter->tx_ring);
2894 	kfree(adapter->rx_ring);
2895 
2896 	iounmap(hw->hw_addr);
2897 	if (hw->flash_address)
2898 		iounmap(hw->flash_address);
2899 	pci_release_regions(pdev);
2900 
2901 	free_netdev(netdev);
2902 
2903 	pci_disable_device(pdev);
2904 }
2905 
2906 /* PCI Error Recovery (ERS) */
2907 static const struct pci_error_handlers igbvf_err_handler = {
2908 	.error_detected = igbvf_io_error_detected,
2909 	.slot_reset = igbvf_io_slot_reset,
2910 	.resume = igbvf_io_resume,
2911 };
2912 
2913 static const struct pci_device_id igbvf_pci_tbl[] = {
2914 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2915 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
2916 	{ } /* terminate list */
2917 };
2918 MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
2919 
2920 /* PCI Device API Driver */
2921 static struct pci_driver igbvf_driver = {
2922 	.name		= igbvf_driver_name,
2923 	.id_table	= igbvf_pci_tbl,
2924 	.probe		= igbvf_probe,
2925 	.remove		= igbvf_remove,
2926 #ifdef CONFIG_PM
2927 	/* Power Management Hooks */
2928 	.suspend	= igbvf_suspend,
2929 	.resume		= igbvf_resume,
2930 #endif
2931 	.shutdown	= igbvf_shutdown,
2932 	.err_handler	= &igbvf_err_handler
2933 };
2934 
2935 /**
2936  * igbvf_init_module - Driver Registration Routine
2937  *
2938  * igbvf_init_module is the first routine called when the driver is
2939  * loaded. All it does is register with the PCI subsystem.
2940  **/
2941 static int __init igbvf_init_module(void)
2942 {
2943 	int ret;
2944 
2945 	pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
2946 	pr_info("%s\n", igbvf_copyright);
2947 
2948 	ret = pci_register_driver(&igbvf_driver);
2949 
2950 	return ret;
2951 }
2952 module_init(igbvf_init_module);
2953 
2954 /**
2955  * igbvf_exit_module - Driver Exit Cleanup Routine
2956  *
2957  * igbvf_exit_module is called just before the driver is removed
2958  * from memory.
2959  **/
2960 static void __exit igbvf_exit_module(void)
2961 {
2962 	pci_unregister_driver(&igbvf_driver);
2963 }
2964 module_exit(igbvf_exit_module);
2965 
2966 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
2967 MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
2968 MODULE_LICENSE("GPL");
2969 MODULE_VERSION(DRV_VERSION);
2970 
2971 /* netdev.c */
2972