/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

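/*
 * Multiqueue support is enabled by default. (Assumption, based on how
 * the rest of the driver consults this flag: a value of 0 restricts the
 * device to a single tx/rx queue pair.)
 */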
static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}

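/*
 * Illustrative usage (a sketch, not the literal call sites): an
 * interrupt handler typically masks its vector with
 * vmxnet3_disable_intr() before scheduling NAPI, and the poll routine
 * re-enables it with vmxnet3_enable_intr() once polling completes.
 * Writing 0 to an IMR entry unmasks the vector; writing 1 masks it.
 */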

/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
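/*
 * Note: in the three helpers above, tq - adapter->tx_queue is the
 * queue's index within the adapter's tx_queue array, which is also its
 * netdev subqueue number.
 */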

/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written
 * in little endian. When the CPU is big endian, the following routines
 * are used to read from and write to the shared ABI correctly.
 * The general technique is: double-word bitfields are declared in the
 * opposite order for big endian architectures. Before the driver reads
 * them, the complete double word is translated with le32_to_cpu();
 * similarly, after the driver writes into bitfields, cpu_to_le32() is
 * used to translate the double words back into the required format.
 * To avoid touching bits in a shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
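/*
 * Example (illustrative only): on a big endian CPU a shared dword that
 * holds bitfields is read as
 *	u32 dw = le32_to_cpu(((const __le32 *)desc)[n]);
 * and, once all of its bitfields have been updated, written back with
 * cpu_to_le32(), so each shared double word is converted exactly once.
 */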
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;

	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;

	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;

	temp &= mask;
	temp >>= pos;
	return temp;
}
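/*
 * Example: the VMXNET3_TXDESC_GET_GEN() macro below boils down to
 *	get_bitfield32(dword, VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
 * applied to the dword that holds the Tx descriptor's generation bit.
 */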

#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */
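/*
 * Example (illustrative; names are placeholders): the rx path reads a
 * completion descriptor as
 *	vmxnet3_getRxComp(rcd, &base[idx].rcd, &tmp);
 * so on big endian CPUs *rcd is a byte-swapped copy, while on little
 * endian it is simply a pointer to the descriptor in the ring.
 */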

static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}

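/*
 * Unmap and free the skb whose EOP (end-of-packet) descriptor sits at
 * eop_idx, releasing every tx ring entry from next2comp up to and
 * including eop_idx. Returns the number of ring entries released.
 */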
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp without holding tx_lock. Since we only
		 * ever mark more, never fewer, tx ring entries available,
		 * the worst case is that the tx routine incorrectly
		 * re-queues a pkt due to insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
			struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
				  sizeof(struct Vmxnet3_TxDataDesc),
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	if (tq->buf_info) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
				  tq->buf_info, tq->buf_info_pa);
		tq->buf_info = NULL;
	}
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	size_t sz;

	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate data ring\n");
		goto err;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
					   &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 *    Starting from ring->next2fill, allocate rx buffers for the given
 *    ring of the rx queue and update the rx descriptors. Stop after
 *    @num_to_alloc buffers are allocated or an allocation fails.
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}

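/*
 * Set up the tx descriptors for the given skb: an SOP descriptor for
 * the copied headers (when ctx->copy_size != 0), then one descriptor
 * per VMXNET3_MAX_TX_BUF_SIZE chunk of the remaining linear data and of
 * each page fragment. The matching buf_info entries record the DMA
 * mappings so vmxnet3_unmap_pkt() can undo them at completion time.
 */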
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				"txd[%u]: 0x%llx %u %u\n",
				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 *    Parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  an error occurred during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					ctx->l4_hdr_size = sizeof(struct udphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
					     skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}

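/*
 * Prime the TCP checksum field for TSO: the device expects it to hold
 * the pseudo-header checksum, computed below with a zero length; the
 * device then completes the checksum for each generated segment.
 */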
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}

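/*
 * Conservative estimate of how many tx descriptors the skb will need:
 * enough for the linear part plus one (covering a possible SOP/header
 * descriptor), and enough for each page fragment.
 */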
898a4d7e485SEric Dumazet static int txd_estimate(const struct sk_buff *skb)
899a4d7e485SEric Dumazet {
900a4d7e485SEric Dumazet 	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
901a4d7e485SEric Dumazet 	int i;
902a4d7e485SEric Dumazet 
903a4d7e485SEric Dumazet 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
904a4d7e485SEric Dumazet 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
905a4d7e485SEric Dumazet 
906a4d7e485SEric Dumazet 		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
907a4d7e485SEric Dumazet 	}
908a4d7e485SEric Dumazet 	return count;
909a4d7e485SEric Dumazet }
910d1a890faSShreyas Bhatewara 
911d1a890faSShreyas Bhatewara /*
912d1a890faSShreyas Bhatewara  * Transmits a pkt thru a given tq
913d1a890faSShreyas Bhatewara  * Returns:
914d1a890faSShreyas Bhatewara  *    NETDEV_TX_OK:      descriptors are setup successfully
91525985edcSLucas De Marchi  *    NETDEV_TX_OK:      error occurred, the pkt is dropped
916d1a890faSShreyas Bhatewara  *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
917d1a890faSShreyas Bhatewara  *
918d1a890faSShreyas Bhatewara  * Side-effects:
919d1a890faSShreyas Bhatewara  *    1. tx ring may be changed
920d1a890faSShreyas Bhatewara  *    2. tq stats may be updated accordingly
921d1a890faSShreyas Bhatewara  *    3. shared->txNumDeferred may be updated
922d1a890faSShreyas Bhatewara  */
923d1a890faSShreyas Bhatewara 
924d1a890faSShreyas Bhatewara static int
925d1a890faSShreyas Bhatewara vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
926d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter *adapter, struct net_device *netdev)
927d1a890faSShreyas Bhatewara {
928d1a890faSShreyas Bhatewara 	int ret;
929d1a890faSShreyas Bhatewara 	u32 count;
930d1a890faSShreyas Bhatewara 	unsigned long flags;
931d1a890faSShreyas Bhatewara 	struct vmxnet3_tx_ctx ctx;
932d1a890faSShreyas Bhatewara 	union Vmxnet3_GenericDesc *gdesc;
933115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
934115924b6SShreyas Bhatewara 	/* Use temporary descriptor to avoid touching bits multiple times */
935115924b6SShreyas Bhatewara 	union Vmxnet3_GenericDesc tempTxDesc;
936115924b6SShreyas Bhatewara #endif
937d1a890faSShreyas Bhatewara 
938a4d7e485SEric Dumazet 	count = txd_estimate(skb);
939d1a890faSShreyas Bhatewara 
94072e85c45SJesse Gross 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
941d1a890faSShreyas Bhatewara 
942d1a890faSShreyas Bhatewara 	ctx.mss = skb_shinfo(skb)->gso_size;
943d1a890faSShreyas Bhatewara 	if (ctx.mss) {
944d1a890faSShreyas Bhatewara 		if (skb_header_cloned(skb)) {
945d1a890faSShreyas Bhatewara 			if (unlikely(pskb_expand_head(skb, 0, 0,
946d1a890faSShreyas Bhatewara 						      GFP_ATOMIC) != 0)) {
947d1a890faSShreyas Bhatewara 				tq->stats.drop_tso++;
948d1a890faSShreyas Bhatewara 				goto drop_pkt;
949d1a890faSShreyas Bhatewara 			}
950d1a890faSShreyas Bhatewara 			tq->stats.copy_skb_header++;
951d1a890faSShreyas Bhatewara 		}
952d1a890faSShreyas Bhatewara 		vmxnet3_prepare_tso(skb, &ctx);
953d1a890faSShreyas Bhatewara 	} else {
954d1a890faSShreyas Bhatewara 		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
955d1a890faSShreyas Bhatewara 
956d1a890faSShreyas Bhatewara 			/* non-tso pkts must not use more than
957d1a890faSShreyas Bhatewara 			 * VMXNET3_MAX_TXD_PER_PKT entries
958d1a890faSShreyas Bhatewara 			 */
959d1a890faSShreyas Bhatewara 			if (skb_linearize(skb) != 0) {
960d1a890faSShreyas Bhatewara 				tq->stats.drop_too_many_frags++;
961d1a890faSShreyas Bhatewara 				goto drop_pkt;
962d1a890faSShreyas Bhatewara 			}
963d1a890faSShreyas Bhatewara 			tq->stats.linearized++;
964d1a890faSShreyas Bhatewara 
965d1a890faSShreyas Bhatewara 			/* recalculate the # of descriptors to use */
966d1a890faSShreyas Bhatewara 			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
967d1a890faSShreyas Bhatewara 		}
968d1a890faSShreyas Bhatewara 	}
969d1a890faSShreyas Bhatewara 
97009c5088eSShreyas Bhatewara 	spin_lock_irqsave(&tq->tx_lock, flags);
97109c5088eSShreyas Bhatewara 
97209c5088eSShreyas Bhatewara 	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
97309c5088eSShreyas Bhatewara 		tq->stats.tx_ring_full++;
974fdcd79b9SStephen Hemminger 		netdev_dbg(adapter->netdev,
97509c5088eSShreyas Bhatewara 			"tx queue stopped on %s, next2comp %u"
97609c5088eSShreyas Bhatewara 			" next2fill %u\n", adapter->netdev->name,
97709c5088eSShreyas Bhatewara 			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
97809c5088eSShreyas Bhatewara 
97909c5088eSShreyas Bhatewara 		vmxnet3_tq_stop(tq, adapter);
98009c5088eSShreyas Bhatewara 		spin_unlock_irqrestore(&tq->tx_lock, flags);
98109c5088eSShreyas Bhatewara 		return NETDEV_TX_BUSY;
98209c5088eSShreyas Bhatewara 	}
98309c5088eSShreyas Bhatewara 
98409c5088eSShreyas Bhatewara 
985d1a890faSShreyas Bhatewara 	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
986d1a890faSShreyas Bhatewara 	if (ret >= 0) {
987d1a890faSShreyas Bhatewara 		BUG_ON(ret <= 0 && ctx.copy_size != 0);
988d1a890faSShreyas Bhatewara 		/* hdrs parsed, check against other limits */
989d1a890faSShreyas Bhatewara 		if (ctx.mss) {
990d1a890faSShreyas Bhatewara 			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
991d1a890faSShreyas Bhatewara 				     VMXNET3_MAX_TX_BUF_SIZE)) {
992d1a890faSShreyas Bhatewara 				goto hdr_too_big;
993d1a890faSShreyas Bhatewara 			}
994d1a890faSShreyas Bhatewara 		} else {
995d1a890faSShreyas Bhatewara 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
996d1a890faSShreyas Bhatewara 				if (unlikely(ctx.eth_ip_hdr_size +
997d1a890faSShreyas Bhatewara 					     skb->csum_offset >
998d1a890faSShreyas Bhatewara 					     VMXNET3_MAX_CSUM_OFFSET)) {
999d1a890faSShreyas Bhatewara 					goto hdr_too_big;
1000d1a890faSShreyas Bhatewara 				}
1001d1a890faSShreyas Bhatewara 			}
1002d1a890faSShreyas Bhatewara 		}
1003d1a890faSShreyas Bhatewara 	} else {
1004d1a890faSShreyas Bhatewara 		tq->stats.drop_hdr_inspect_err++;
1005f955e141SDan Carpenter 		goto unlock_drop_pkt;
1006d1a890faSShreyas Bhatewara 	}
1007d1a890faSShreyas Bhatewara 
1008d1a890faSShreyas Bhatewara 	/* fill tx descs related to addr & len */
1009d1a890faSShreyas Bhatewara 	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
1010d1a890faSShreyas Bhatewara 
1011d1a890faSShreyas Bhatewara 	/* setup the EOP desc */
1012115924b6SShreyas Bhatewara 	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1013d1a890faSShreyas Bhatewara 
1014d1a890faSShreyas Bhatewara 	/* setup the SOP desc */
1015115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1016115924b6SShreyas Bhatewara 	gdesc = &tempTxDesc;
1017115924b6SShreyas Bhatewara 	gdesc->dword[2] = ctx.sop_txd->dword[2];
1018115924b6SShreyas Bhatewara 	gdesc->dword[3] = ctx.sop_txd->dword[3];
1019115924b6SShreyas Bhatewara #else
1020d1a890faSShreyas Bhatewara 	gdesc = ctx.sop_txd;
1021115924b6SShreyas Bhatewara #endif
1022d1a890faSShreyas Bhatewara 	if (ctx.mss) {
1023d1a890faSShreyas Bhatewara 		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1024d1a890faSShreyas Bhatewara 		gdesc->txd.om = VMXNET3_OM_TSO;
1025d1a890faSShreyas Bhatewara 		gdesc->txd.msscof = ctx.mss;
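		/*
		 * Credit txNumDeferred with DIV_ROUND_UP(skb->len - hlen,
		 * mss), i.e. the number of segments the device will emit for
		 * this TSO packet; e.g. a 9000-byte skb with 60 bytes of
		 * headers and an MSS of 1460 adds ceil(8940 / 1460) = 7.
		 */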
1026115924b6SShreyas Bhatewara 		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
1027115924b6SShreyas Bhatewara 			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
1028d1a890faSShreyas Bhatewara 	} else {
1029d1a890faSShreyas Bhatewara 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1030d1a890faSShreyas Bhatewara 			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
1031d1a890faSShreyas Bhatewara 			gdesc->txd.om = VMXNET3_OM_CSUM;
1032d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
1033d1a890faSShreyas Bhatewara 					    skb->csum_offset;
1034d1a890faSShreyas Bhatewara 		} else {
1035d1a890faSShreyas Bhatewara 			gdesc->txd.om = 0;
1036d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = 0;
1037d1a890faSShreyas Bhatewara 		}
1038115924b6SShreyas Bhatewara 		le32_add_cpu(&tq->shared->txNumDeferred, 1);
1039d1a890faSShreyas Bhatewara 	}
1040d1a890faSShreyas Bhatewara 
1041d1a890faSShreyas Bhatewara 	if (vlan_tx_tag_present(skb)) {
1042d1a890faSShreyas Bhatewara 		gdesc->txd.ti = 1;
1043d1a890faSShreyas Bhatewara 		gdesc->txd.tci = vlan_tx_tag_get(skb);
1044d1a890faSShreyas Bhatewara 	}
1045d1a890faSShreyas Bhatewara 
1046115924b6SShreyas Bhatewara 	/* finally flips the GEN bit of the SOP desc. */
1047115924b6SShreyas Bhatewara 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1048115924b6SShreyas Bhatewara 						  VMXNET3_TXD_GEN);
1049115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1050115924b6SShreyas Bhatewara 	/* Finished updating the bitfields of the Tx descriptor, so write them
1051115924b6SShreyas Bhatewara 	 * back to their original place.
1052115924b6SShreyas Bhatewara 	 */
1053115924b6SShreyas Bhatewara 	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1054115924b6SShreyas Bhatewara 			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1055115924b6SShreyas Bhatewara 	gdesc = ctx.sop_txd;
1056115924b6SShreyas Bhatewara #endif
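
	/*
	 * Ordering note: every other field of the SOP descriptor was written
	 * before the GEN bit above was flipped, because flipping GEN is what
	 * hands ownership of the descriptor chain over to the device.
	 */
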
1057fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev,
1058f6965582SRandy Dunlap 		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1059c2fd03a0SJoe Perches 		(u32)(ctx.sop_txd -
1060115924b6SShreyas Bhatewara 		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1061115924b6SShreyas Bhatewara 		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1062d1a890faSShreyas Bhatewara 
1063d1a890faSShreyas Bhatewara 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1064d1a890faSShreyas Bhatewara 
1065115924b6SShreyas Bhatewara 	if (le32_to_cpu(tq->shared->txNumDeferred) >=
1066115924b6SShreyas Bhatewara 					le32_to_cpu(tq->shared->txThreshold)) {
1067d1a890faSShreyas Bhatewara 		tq->shared->txNumDeferred = 0;
106809c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
106909c5088eSShreyas Bhatewara 				       VMXNET3_REG_TXPROD + tq->qid * 8,
1070d1a890faSShreyas Bhatewara 				       tq->tx_ring.next2fill);
1071d1a890faSShreyas Bhatewara 	}
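
	/*
	 * The TXPROD doorbell is only rung once txNumDeferred crosses
	 * txThreshold, batching several transmits per register write; on a
	 * paravirtual NIC each such write may trap to the hypervisor, so
	 * this coalescing matters for small-packet workloads.
	 */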
1072d1a890faSShreyas Bhatewara 
1073d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1074d1a890faSShreyas Bhatewara 
1075d1a890faSShreyas Bhatewara hdr_too_big:
1076d1a890faSShreyas Bhatewara 	tq->stats.drop_oversized_hdr++;
1077f955e141SDan Carpenter unlock_drop_pkt:
1078f955e141SDan Carpenter 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1079d1a890faSShreyas Bhatewara drop_pkt:
1080d1a890faSShreyas Bhatewara 	tq->stats.drop_total++;
1081b1b71817SEric W. Biederman 	dev_kfree_skb_any(skb);
1082d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1083d1a890faSShreyas Bhatewara }
1084d1a890faSShreyas Bhatewara 
1085d1a890faSShreyas Bhatewara 
1086d1a890faSShreyas Bhatewara static netdev_tx_t
1087d1a890faSShreyas Bhatewara vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1088d1a890faSShreyas Bhatewara {
1089d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1090d1a890faSShreyas Bhatewara 
109109c5088eSShreyas Bhatewara 	BUG_ON(skb->queue_mapping >= adapter->num_tx_queues);
109209c5088eSShreyas Bhatewara 	return vmxnet3_tq_xmit(skb,
109309c5088eSShreyas Bhatewara 			       &adapter->tx_queue[skb->queue_mapping],
109409c5088eSShreyas Bhatewara 			       adapter, netdev);
1095d1a890faSShreyas Bhatewara }
1096d1a890faSShreyas Bhatewara 
1097d1a890faSShreyas Bhatewara 
1098d1a890faSShreyas Bhatewara static void
1099d1a890faSShreyas Bhatewara vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1100d1a890faSShreyas Bhatewara 		struct sk_buff *skb,
1101d1a890faSShreyas Bhatewara 		union Vmxnet3_GenericDesc *gdesc)
1102d1a890faSShreyas Bhatewara {
1103a0d2730cSMichał Mirosław 	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1104d1a890faSShreyas Bhatewara 		/* typical case: TCP/UDP over IP and both csums are correct */
1105115924b6SShreyas Bhatewara 		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
1106d1a890faSShreyas Bhatewara 							VMXNET3_RCD_CSUM_OK) {
1107d1a890faSShreyas Bhatewara 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1108d1a890faSShreyas Bhatewara 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1109d1a890faSShreyas Bhatewara 			BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
1110d1a890faSShreyas Bhatewara 			BUG_ON(gdesc->rcd.frg);
1111d1a890faSShreyas Bhatewara 		} else {
1112d1a890faSShreyas Bhatewara 			if (gdesc->rcd.csum) {
1113d1a890faSShreyas Bhatewara 				skb->csum = htons(gdesc->rcd.csum);
1114d1a890faSShreyas Bhatewara 				skb->ip_summed = CHECKSUM_PARTIAL;
1115d1a890faSShreyas Bhatewara 			} else {
1116bc8acf2cSEric Dumazet 				skb_checksum_none_assert(skb);
1117d1a890faSShreyas Bhatewara 			}
1118d1a890faSShreyas Bhatewara 		}
1119d1a890faSShreyas Bhatewara 	} else {
1120bc8acf2cSEric Dumazet 		skb_checksum_none_assert(skb);
1121d1a890faSShreyas Bhatewara 	}
1122d1a890faSShreyas Bhatewara }
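
/*
 * To summarize the cases above: when the device has validated both the IP
 * and TCP/UDP checksums (VMXNET3_RCD_CSUM_OK) the skb is marked
 * CHECKSUM_UNNECESSARY; when it only supplies a checksum value, that value
 * is passed through for the stack to complete; otherwise the skb is left
 * unverified and the stack checksums it in software.
 */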
1123d1a890faSShreyas Bhatewara 
1124d1a890faSShreyas Bhatewara 
1125d1a890faSShreyas Bhatewara static void
1126d1a890faSShreyas Bhatewara vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1127d1a890faSShreyas Bhatewara 		 struct vmxnet3_rx_ctx *ctx,  struct vmxnet3_adapter *adapter)
1128d1a890faSShreyas Bhatewara {
1129d1a890faSShreyas Bhatewara 	rq->stats.drop_err++;
1130d1a890faSShreyas Bhatewara 	if (!rcd->fcs)
1131d1a890faSShreyas Bhatewara 		rq->stats.drop_fcs++;
1132d1a890faSShreyas Bhatewara 
1133d1a890faSShreyas Bhatewara 	rq->stats.drop_total++;
1134d1a890faSShreyas Bhatewara 
1135d1a890faSShreyas Bhatewara 	/*
1136d1a890faSShreyas Bhatewara 	 * We do not unmap and chain the rx buffer to the skb.
1137d1a890faSShreyas Bhatewara 	 * We basically pretend this buffer is not used and will be recycled
1138d1a890faSShreyas Bhatewara 	 * by vmxnet3_rq_alloc_rx_buf()
1139d1a890faSShreyas Bhatewara 	 */
1140d1a890faSShreyas Bhatewara 
1141d1a890faSShreyas Bhatewara 	/*
1142d1a890faSShreyas Bhatewara 	 * ctx->skb may be NULL if this is the first and the only one
1143d1a890faSShreyas Bhatewara 	 * desc for the pkt
1144d1a890faSShreyas Bhatewara 	 */
1145d1a890faSShreyas Bhatewara 	if (ctx->skb)
1146d1a890faSShreyas Bhatewara 		dev_kfree_skb_irq(ctx->skb);
1147d1a890faSShreyas Bhatewara 
1148d1a890faSShreyas Bhatewara 	ctx->skb = NULL;
1149d1a890faSShreyas Bhatewara }
1150d1a890faSShreyas Bhatewara 
1151d1a890faSShreyas Bhatewara 
1152d1a890faSShreyas Bhatewara static int
1153d1a890faSShreyas Bhatewara vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1154d1a890faSShreyas Bhatewara 		       struct vmxnet3_adapter *adapter, int quota)
1155d1a890faSShreyas Bhatewara {
1156215faf9cSJoe Perches 	static const u32 rxprod_reg[2] = {
1157215faf9cSJoe Perches 		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1158215faf9cSJoe Perches 	};
1159d1a890faSShreyas Bhatewara 	u32 num_rxd = 0;
11605318d809SShreyas Bhatewara 	bool skip_page_frags = false;
1161d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxCompDesc *rcd;
1162d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1163115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1164115924b6SShreyas Bhatewara 	struct Vmxnet3_RxDesc rxCmdDesc;
1165115924b6SShreyas Bhatewara 	struct Vmxnet3_RxCompDesc rxComp;
1166115924b6SShreyas Bhatewara #endif
1167115924b6SShreyas Bhatewara 	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1168115924b6SShreyas Bhatewara 			  &rxComp);
1169d1a890faSShreyas Bhatewara 	while (rcd->gen == rq->comp_ring.gen) {
1170d1a890faSShreyas Bhatewara 		struct vmxnet3_rx_buf_info *rbi;
11715318d809SShreyas Bhatewara 		struct sk_buff *skb, *new_skb = NULL;
11725318d809SShreyas Bhatewara 		struct page *new_page = NULL;
1173d1a890faSShreyas Bhatewara 		int num_to_alloc;
1174d1a890faSShreyas Bhatewara 		struct Vmxnet3_RxDesc *rxd;
1175d1a890faSShreyas Bhatewara 		u32 idx, ring_idx;
11765318d809SShreyas Bhatewara 		struct vmxnet3_cmd_ring	*ring = NULL;
1177d1a890faSShreyas Bhatewara 		if (num_rxd >= quota) {
1178d1a890faSShreyas Bhatewara 			/* we may stop even before we see the EOP desc of
1179d1a890faSShreyas Bhatewara 			 * the current pkt
1180d1a890faSShreyas Bhatewara 			 */
1181d1a890faSShreyas Bhatewara 			break;
1182d1a890faSShreyas Bhatewara 		}
1183d1a890faSShreyas Bhatewara 		num_rxd++;
118409c5088eSShreyas Bhatewara 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1185d1a890faSShreyas Bhatewara 		idx = rcd->rxdIdx;
118609c5088eSShreyas Bhatewara 		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
11875318d809SShreyas Bhatewara 		ring = rq->rx_ring + ring_idx;
1188115924b6SShreyas Bhatewara 		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1189115924b6SShreyas Bhatewara 				  &rxCmdDesc);
1190d1a890faSShreyas Bhatewara 		rbi = rq->buf_info[ring_idx] + idx;
1191d1a890faSShreyas Bhatewara 
1192115924b6SShreyas Bhatewara 		BUG_ON(rxd->addr != rbi->dma_addr ||
1193115924b6SShreyas Bhatewara 		       rxd->len != rbi->len);
1194d1a890faSShreyas Bhatewara 
1195d1a890faSShreyas Bhatewara 		if (unlikely(rcd->eop && rcd->err)) {
1196d1a890faSShreyas Bhatewara 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
1197d1a890faSShreyas Bhatewara 			goto rcd_done;
1198d1a890faSShreyas Bhatewara 		}
1199d1a890faSShreyas Bhatewara 
1200d1a890faSShreyas Bhatewara 		if (rcd->sop) { /* first buf of the pkt */
1201d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1202d1a890faSShreyas Bhatewara 			       rcd->rqID != rq->qid);
1203d1a890faSShreyas Bhatewara 
1204d1a890faSShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1205d1a890faSShreyas Bhatewara 			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1206d1a890faSShreyas Bhatewara 
1207d1a890faSShreyas Bhatewara 			if (unlikely(rcd->len == 0)) {
1208d1a890faSShreyas Bhatewara 				/* Pretend the rx buffer is skipped. */
1209d1a890faSShreyas Bhatewara 				BUG_ON(!(rcd->sop && rcd->eop));
1210fdcd79b9SStephen Hemminger 				netdev_dbg(adapter->netdev,
1211f6965582SRandy Dunlap 					"rxRing[%u][%u] 0 length\n",
1212d1a890faSShreyas Bhatewara 					ring_idx, idx);
1213d1a890faSShreyas Bhatewara 				goto rcd_done;
1214d1a890faSShreyas Bhatewara 			}
1215d1a890faSShreyas Bhatewara 
12165318d809SShreyas Bhatewara 			skip_page_frags = false;
1217d1a890faSShreyas Bhatewara 			ctx->skb = rbi->skb;
12180d735f13SStephen Hemminger 			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
12190d735f13SStephen Hemminger 							    rbi->len);
12205318d809SShreyas Bhatewara 			if (new_skb == NULL) {
12215318d809SShreyas Bhatewara 				/* Skb allocation failed, do not hand over this
12225318d809SShreyas Bhatewara 				 * skb to the stack. Reuse it and drop the pkt.
12235318d809SShreyas Bhatewara 				 */
12245318d809SShreyas Bhatewara 				rq->stats.rx_buf_alloc_failure++;
12255318d809SShreyas Bhatewara 				ctx->skb = NULL;
12265318d809SShreyas Bhatewara 				rq->stats.drop_total++;
12275318d809SShreyas Bhatewara 				skip_page_frags = true;
12285318d809SShreyas Bhatewara 				goto rcd_done;
12295318d809SShreyas Bhatewara 			}
1230d1a890faSShreyas Bhatewara 
1231b0eb57cbSAndy King 			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
1232b0eb57cbSAndy King 					 rbi->len,
1233d1a890faSShreyas Bhatewara 					 PCI_DMA_FROMDEVICE);
1234d1a890faSShreyas Bhatewara 
12357db11f75SStephen Hemminger #ifdef VMXNET3_RSS
12367db11f75SStephen Hemminger 			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
12377db11f75SStephen Hemminger 			    (adapter->netdev->features & NETIF_F_RXHASH))
12382c15a154SMichal Schmidt 				skb_set_hash(ctx->skb,
12392c15a154SMichal Schmidt 					     le32_to_cpu(rcd->rssHash),
12400b680703STom Herbert 					     PKT_HASH_TYPE_L3);
12417db11f75SStephen Hemminger #endif
1242d1a890faSShreyas Bhatewara 			skb_put(ctx->skb, rcd->len);
12435318d809SShreyas Bhatewara 
12445318d809SShreyas Bhatewara 			/* Immediate refill */
12455318d809SShreyas Bhatewara 			rbi->skb = new_skb;
1246b0eb57cbSAndy King 			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
12475318d809SShreyas Bhatewara 						       rbi->skb->data, rbi->len,
12485318d809SShreyas Bhatewara 						       PCI_DMA_FROMDEVICE);
12495318d809SShreyas Bhatewara 			rxd->addr = cpu_to_le64(rbi->dma_addr);
12505318d809SShreyas Bhatewara 			rxd->len = rbi->len;
12515318d809SShreyas Bhatewara 
1252d1a890faSShreyas Bhatewara 		} else {
12535318d809SShreyas Bhatewara 			BUG_ON(ctx->skb == NULL && !skip_page_frags);
12545318d809SShreyas Bhatewara 
1255d1a890faSShreyas Bhatewara 			/* non SOP buffer must be type 1 in most cases */
12565318d809SShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1257d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1258d1a890faSShreyas Bhatewara 
12595318d809SShreyas Bhatewara 			/* If an sop buffer was dropped, skip all
12605318d809SShreyas Bhatewara 			 * following non-sop fragments. They will be reused.
12615318d809SShreyas Bhatewara 			 */
12625318d809SShreyas Bhatewara 			if (skip_page_frags)
12635318d809SShreyas Bhatewara 				goto rcd_done;
12645318d809SShreyas Bhatewara 
12655318d809SShreyas Bhatewara 			new_page = alloc_page(GFP_ATOMIC);
12665318d809SShreyas Bhatewara 			if (unlikely(new_page == NULL)) {
12675318d809SShreyas Bhatewara 				/* Replacement page frag could not be allocated.
12685318d809SShreyas Bhatewara 				 * Reuse this page. Drop the pkt and free the
12695318d809SShreyas Bhatewara 				 * skb which contained this page as a frag. Skip
12705318d809SShreyas Bhatewara 				 * processing all the following non-sop frags.
12715318d809SShreyas Bhatewara 				 */
12725318d809SShreyas Bhatewara 				rq->stats.rx_buf_alloc_failure++;
12735318d809SShreyas Bhatewara 				dev_kfree_skb(ctx->skb);
12745318d809SShreyas Bhatewara 				ctx->skb = NULL;
12755318d809SShreyas Bhatewara 				skip_page_frags = true;
12765318d809SShreyas Bhatewara 				goto rcd_done;
12775318d809SShreyas Bhatewara 			}
12785318d809SShreyas Bhatewara 
1279d1a890faSShreyas Bhatewara 			if (rcd->len) {
1280b0eb57cbSAndy King 				dma_unmap_page(&adapter->pdev->dev,
1281d1a890faSShreyas Bhatewara 					       rbi->dma_addr, rbi->len,
1282d1a890faSShreyas Bhatewara 					       PCI_DMA_FROMDEVICE);
1283d1a890faSShreyas Bhatewara 
1284d1a890faSShreyas Bhatewara 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
1285d1a890faSShreyas Bhatewara 			}
12865318d809SShreyas Bhatewara 
12875318d809SShreyas Bhatewara 			/* Immediate refill */
12885318d809SShreyas Bhatewara 			rbi->page = new_page;
1289b0eb57cbSAndy King 			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
1290b0eb57cbSAndy King 						     rbi->page,
12915318d809SShreyas Bhatewara 						     0, PAGE_SIZE,
12925318d809SShreyas Bhatewara 						     PCI_DMA_FROMDEVICE);
12935318d809SShreyas Bhatewara 			rxd->addr = cpu_to_le64(rbi->dma_addr);
12945318d809SShreyas Bhatewara 			rxd->len = rbi->len;
1295d1a890faSShreyas Bhatewara 		}
12965318d809SShreyas Bhatewara 
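		/*
		 * Note that both branches above refill the ring slot before
		 * the received buffer is handed to the stack: the old
		 * skb/page is unmapped and replaced immediately, so a rx
		 * descriptor never points at memory the stack may still own.
		 * On allocation failure the old buffer is recycled and the
		 * packet dropped instead.
		 */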
1297d1a890faSShreyas Bhatewara 
1298d1a890faSShreyas Bhatewara 		skb = ctx->skb;
1299d1a890faSShreyas Bhatewara 		if (rcd->eop) {
1300d1a890faSShreyas Bhatewara 			skb->len += skb->data_len;
1301d1a890faSShreyas Bhatewara 
1302d1a890faSShreyas Bhatewara 			vmxnet3_rx_csum(adapter, skb,
1303d1a890faSShreyas Bhatewara 					(union Vmxnet3_GenericDesc *)rcd);
1304d1a890faSShreyas Bhatewara 			skb->protocol = eth_type_trans(skb, adapter->netdev);
1305d1a890faSShreyas Bhatewara 
130672e85c45SJesse Gross 			if (unlikely(rcd->ts))
130786a9bad3SPatrick McHardy 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
130872e85c45SJesse Gross 
1309213ade8cSJesse Gross 			if (adapter->netdev->features & NETIF_F_LRO)
1310d1a890faSShreyas Bhatewara 				netif_receive_skb(skb);
1311213ade8cSJesse Gross 			else
1312213ade8cSJesse Gross 				napi_gro_receive(&rq->napi, skb);
1313d1a890faSShreyas Bhatewara 
1314d1a890faSShreyas Bhatewara 			ctx->skb = NULL;
1315d1a890faSShreyas Bhatewara 		}
1316d1a890faSShreyas Bhatewara 
1317d1a890faSShreyas Bhatewara rcd_done:
13185318d809SShreyas Bhatewara 		/* device may have skipped some rx descs */
13195318d809SShreyas Bhatewara 		ring->next2comp = idx;
13205318d809SShreyas Bhatewara 		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
13215318d809SShreyas Bhatewara 		ring = rq->rx_ring + ring_idx;
13225318d809SShreyas Bhatewara 		while (num_to_alloc) {
13235318d809SShreyas Bhatewara 			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
13245318d809SShreyas Bhatewara 					  &rxCmdDesc);
13255318d809SShreyas Bhatewara 			BUG_ON(!rxd->addr);
1326d1a890faSShreyas Bhatewara 
13275318d809SShreyas Bhatewara 			/* Recv desc is ready to be used by the device */
13285318d809SShreyas Bhatewara 			rxd->gen = ring->gen;
13295318d809SShreyas Bhatewara 			vmxnet3_cmd_ring_adv_next2fill(ring);
13305318d809SShreyas Bhatewara 			num_to_alloc--;
13315318d809SShreyas Bhatewara 		}
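
		/*
		 * rxd->addr and rxd->len were already refreshed at refill
		 * time; the loop above only flips each descriptor's GEN bit
		 * to return it to the device.  Setting next2comp to the
		 * completed idx first also reclaims any descriptors the
		 * device skipped.
		 */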
1332d1a890faSShreyas Bhatewara 
1333d1a890faSShreyas Bhatewara 		/* if needed, update the register */
1334d1a890faSShreyas Bhatewara 		if (unlikely(rq->shared->updateRxProd)) {
1335d1a890faSShreyas Bhatewara 			VMXNET3_WRITE_BAR0_REG(adapter,
1336d1a890faSShreyas Bhatewara 					       rxprod_reg[ring_idx] + rq->qid * 8,
13375318d809SShreyas Bhatewara 					       ring->next2fill);
1338d1a890faSShreyas Bhatewara 		}
1339d1a890faSShreyas Bhatewara 
1340d1a890faSShreyas Bhatewara 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1341115924b6SShreyas Bhatewara 		vmxnet3_getRxComp(rcd,
1342115924b6SShreyas Bhatewara 				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1343d1a890faSShreyas Bhatewara 	}
1344d1a890faSShreyas Bhatewara 
1345d1a890faSShreyas Bhatewara 	return num_rxd;
1346d1a890faSShreyas Bhatewara }
1347d1a890faSShreyas Bhatewara 
1348d1a890faSShreyas Bhatewara 
1349d1a890faSShreyas Bhatewara static void
1350d1a890faSShreyas Bhatewara vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1351d1a890faSShreyas Bhatewara 		   struct vmxnet3_adapter *adapter)
1352d1a890faSShreyas Bhatewara {
1353d1a890faSShreyas Bhatewara 	u32 i, ring_idx;
1354d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxDesc *rxd;
1355d1a890faSShreyas Bhatewara 
1356d1a890faSShreyas Bhatewara 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1357d1a890faSShreyas Bhatewara 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1358115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1359115924b6SShreyas Bhatewara 			struct Vmxnet3_RxDesc rxDesc;
1360115924b6SShreyas Bhatewara #endif
1361115924b6SShreyas Bhatewara 			vmxnet3_getRxDesc(rxd,
1362115924b6SShreyas Bhatewara 				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1363d1a890faSShreyas Bhatewara 
1364d1a890faSShreyas Bhatewara 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1365d1a890faSShreyas Bhatewara 					rq->buf_info[ring_idx][i].skb) {
1366b0eb57cbSAndy King 				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1367d1a890faSShreyas Bhatewara 						 rxd->len, PCI_DMA_FROMDEVICE);
1368d1a890faSShreyas Bhatewara 				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1369d1a890faSShreyas Bhatewara 				rq->buf_info[ring_idx][i].skb = NULL;
1370d1a890faSShreyas Bhatewara 			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1371d1a890faSShreyas Bhatewara 					rq->buf_info[ring_idx][i].page) {
1372b0eb57cbSAndy King 				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1373d1a890faSShreyas Bhatewara 					       rxd->len, PCI_DMA_FROMDEVICE);
1374d1a890faSShreyas Bhatewara 				put_page(rq->buf_info[ring_idx][i].page);
1375d1a890faSShreyas Bhatewara 				rq->buf_info[ring_idx][i].page = NULL;
1376d1a890faSShreyas Bhatewara 			}
1377d1a890faSShreyas Bhatewara 		}
1378d1a890faSShreyas Bhatewara 
1379d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1380d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].next2fill =
1381d1a890faSShreyas Bhatewara 					rq->rx_ring[ring_idx].next2comp = 0;
1382d1a890faSShreyas Bhatewara 	}
1383d1a890faSShreyas Bhatewara 
1384d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1385d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
1386d1a890faSShreyas Bhatewara }
1387d1a890faSShreyas Bhatewara 
1388d1a890faSShreyas Bhatewara 
138909c5088eSShreyas Bhatewara static void
139009c5088eSShreyas Bhatewara vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
139109c5088eSShreyas Bhatewara {
139209c5088eSShreyas Bhatewara 	int i;
139309c5088eSShreyas Bhatewara 
139409c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
139509c5088eSShreyas Bhatewara 		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
139609c5088eSShreyas Bhatewara }
139709c5088eSShreyas Bhatewara 
139809c5088eSShreyas Bhatewara 
1399280b74f7Sstephen hemminger static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1400d1a890faSShreyas Bhatewara 			       struct vmxnet3_adapter *adapter)
1401d1a890faSShreyas Bhatewara {
1402d1a890faSShreyas Bhatewara 	int i;
1403d1a890faSShreyas Bhatewara 	int j;
1404d1a890faSShreyas Bhatewara 
1405d1a890faSShreyas Bhatewara 	/* all rx buffers must have already been freed */
1406d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1407d1a890faSShreyas Bhatewara 		if (rq->buf_info[i]) {
1408d1a890faSShreyas Bhatewara 			for (j = 0; j < rq->rx_ring[i].size; j++)
1409d1a890faSShreyas Bhatewara 				BUG_ON(rq->buf_info[i][j].page != NULL);
1410d1a890faSShreyas Bhatewara 		}
1411d1a890faSShreyas Bhatewara 	}
1412d1a890faSShreyas Bhatewara 
1414d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1415d1a890faSShreyas Bhatewara 		if (rq->rx_ring[i].base) {
1416b0eb57cbSAndy King 			dma_free_coherent(&adapter->pdev->dev,
1417b0eb57cbSAndy King 					  rq->rx_ring[i].size
1418d1a890faSShreyas Bhatewara 					  * sizeof(struct Vmxnet3_RxDesc),
1419d1a890faSShreyas Bhatewara 					  rq->rx_ring[i].base,
1420d1a890faSShreyas Bhatewara 					  rq->rx_ring[i].basePA);
1421d1a890faSShreyas Bhatewara 			rq->rx_ring[i].base = NULL;
1422d1a890faSShreyas Bhatewara 		}
1423d1a890faSShreyas Bhatewara 		rq->buf_info[i] = NULL;
1424d1a890faSShreyas Bhatewara 	}
1425d1a890faSShreyas Bhatewara 
1426d1a890faSShreyas Bhatewara 	if (rq->comp_ring.base) {
1427b0eb57cbSAndy King 		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1428b0eb57cbSAndy King 				  * sizeof(struct Vmxnet3_RxCompDesc),
1429d1a890faSShreyas Bhatewara 				  rq->comp_ring.base, rq->comp_ring.basePA);
1430d1a890faSShreyas Bhatewara 		rq->comp_ring.base = NULL;
1431d1a890faSShreyas Bhatewara 	}
1432b0eb57cbSAndy King 
1433b0eb57cbSAndy King 	if (rq->buf_info[0]) {
1434b0eb57cbSAndy King 		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
1435b0eb57cbSAndy King 			(rq->rx_ring[0].size + rq->rx_ring[1].size);
1436b0eb57cbSAndy King 		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1437b0eb57cbSAndy King 				  rq->buf_info_pa);
1438b0eb57cbSAndy King 	}
1439d1a890faSShreyas Bhatewara }
1440d1a890faSShreyas Bhatewara 
1441d1a890faSShreyas Bhatewara 
1442d1a890faSShreyas Bhatewara static int
1443d1a890faSShreyas Bhatewara vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1444d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter  *adapter)
1445d1a890faSShreyas Bhatewara {
1446d1a890faSShreyas Bhatewara 	int i;
1447d1a890faSShreyas Bhatewara 
1448d1a890faSShreyas Bhatewara 	/* initialize buf_info */
1449d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[0].size; i++) {
1451d1a890faSShreyas Bhatewara 		/* 1st buf for a pkt is skbuff */
1452d1a890faSShreyas Bhatewara 		if (i % adapter->rx_buf_per_pkt == 0) {
1453d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1454d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = adapter->skb_buf_size;
1455d1a890faSShreyas Bhatewara 		} else { /* subsequent bufs for a pkt is frag */
1456d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1457d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = PAGE_SIZE;
1458d1a890faSShreyas Bhatewara 		}
1459d1a890faSShreyas Bhatewara 	}
1460d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[1].size; i++) {
1461d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1462d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].len = PAGE_SIZE;
1463d1a890faSShreyas Bhatewara 	}
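
	/*
	 * Ring 0 thus holds one VMXNET3_RX_BUF_SKB head buffer followed by
	 * (rx_buf_per_pkt - 1) page buffers per packet slot, while ring 1
	 * holds page buffers only, used when a packet needs more buffers
	 * than a ring-0 slot provides (e.g. with LRO).
	 */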
1464d1a890faSShreyas Bhatewara 
1465d1a890faSShreyas Bhatewara 	/* reset internal state and allocate buffers for both rings */
1466d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1467d1a890faSShreyas Bhatewara 		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1468d1a890faSShreyas Bhatewara 
1469d1a890faSShreyas Bhatewara 		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1470d1a890faSShreyas Bhatewara 		       sizeof(struct Vmxnet3_RxDesc));
1471d1a890faSShreyas Bhatewara 		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1472d1a890faSShreyas Bhatewara 	}
1473d1a890faSShreyas Bhatewara 	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1474d1a890faSShreyas Bhatewara 				    adapter) == 0) {
1475d1a890faSShreyas Bhatewara 		/* at least has 1 rx buffer for the 1st ring */
1476d1a890faSShreyas Bhatewara 		return -ENOMEM;
1477d1a890faSShreyas Bhatewara 	}
1478d1a890faSShreyas Bhatewara 	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1479d1a890faSShreyas Bhatewara 
1480d1a890faSShreyas Bhatewara 	/* reset the comp ring */
1481d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
1482d1a890faSShreyas Bhatewara 	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1483d1a890faSShreyas Bhatewara 	       sizeof(struct Vmxnet3_RxCompDesc));
1484d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1485d1a890faSShreyas Bhatewara 
1486d1a890faSShreyas Bhatewara 	/* reset rxctx */
1487d1a890faSShreyas Bhatewara 	rq->rx_ctx.skb = NULL;
1488d1a890faSShreyas Bhatewara 
1489d1a890faSShreyas Bhatewara 	/* stats are not reset */
1490d1a890faSShreyas Bhatewara 	return 0;
1491d1a890faSShreyas Bhatewara }
1492d1a890faSShreyas Bhatewara 
1493d1a890faSShreyas Bhatewara 
1494d1a890faSShreyas Bhatewara static int
149509c5088eSShreyas Bhatewara vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
149609c5088eSShreyas Bhatewara {
149709c5088eSShreyas Bhatewara 	int i, err = 0;
149809c5088eSShreyas Bhatewara 
149909c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
150009c5088eSShreyas Bhatewara 		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
150109c5088eSShreyas Bhatewara 		if (unlikely(err)) {
150209c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev,
150309c5088eSShreyas Bhatewara 				"%s: failed to initialize rx queue %i\n",
150409c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
150509c5088eSShreyas Bhatewara 			break;
150609c5088eSShreyas Bhatewara 		}
150709c5088eSShreyas Bhatewara 	}
150809c5088eSShreyas Bhatewara 	return err;
151009c5088eSShreyas Bhatewara }
151109c5088eSShreyas Bhatewara 
151209c5088eSShreyas Bhatewara 
151309c5088eSShreyas Bhatewara static int
1514d1a890faSShreyas Bhatewara vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1515d1a890faSShreyas Bhatewara {
1516d1a890faSShreyas Bhatewara 	int i;
1517d1a890faSShreyas Bhatewara 	size_t sz;
1518d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_buf_info *bi;
1519d1a890faSShreyas Bhatewara 
1520d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1522d1a890faSShreyas Bhatewara 		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1523b0eb57cbSAndy King 		rq->rx_ring[i].base = dma_alloc_coherent(
1524b0eb57cbSAndy King 						&adapter->pdev->dev, sz,
1525b0eb57cbSAndy King 						&rq->rx_ring[i].basePA,
1526b0eb57cbSAndy King 						GFP_KERNEL);
1527d1a890faSShreyas Bhatewara 		if (!rq->rx_ring[i].base) {
1528204a6e65SStephen Hemminger 			netdev_err(adapter->netdev,
1529204a6e65SStephen Hemminger 				   "failed to allocate rx ring %d\n", i);
1530d1a890faSShreyas Bhatewara 			goto err;
1531d1a890faSShreyas Bhatewara 		}
1532d1a890faSShreyas Bhatewara 	}
1533d1a890faSShreyas Bhatewara 
1534d1a890faSShreyas Bhatewara 	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1535b0eb57cbSAndy King 	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1536b0eb57cbSAndy King 						&rq->comp_ring.basePA,
1537b0eb57cbSAndy King 						GFP_KERNEL);
1538d1a890faSShreyas Bhatewara 	if (!rq->comp_ring.base) {
1539204a6e65SStephen Hemminger 		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1540d1a890faSShreyas Bhatewara 		goto err;
1541d1a890faSShreyas Bhatewara 	}
1542d1a890faSShreyas Bhatewara 
1543d1a890faSShreyas Bhatewara 	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1544d1a890faSShreyas Bhatewara 						   rq->rx_ring[1].size);
1545b0eb57cbSAndy King 	bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1546b0eb57cbSAndy King 				 GFP_KERNEL);
1547e404decbSJoe Perches 	if (!bi)
1548d1a890faSShreyas Bhatewara 		goto err;
1549e404decbSJoe Perches 
1550d1a890faSShreyas Bhatewara 	rq->buf_info[0] = bi;
1551d1a890faSShreyas Bhatewara 	rq->buf_info[1] = bi + rq->rx_ring[0].size;
1552d1a890faSShreyas Bhatewara 
1553d1a890faSShreyas Bhatewara 	return 0;
1554d1a890faSShreyas Bhatewara 
1555d1a890faSShreyas Bhatewara err:
1556d1a890faSShreyas Bhatewara 	vmxnet3_rq_destroy(rq, adapter);
1557d1a890faSShreyas Bhatewara 	return -ENOMEM;
1558d1a890faSShreyas Bhatewara }
1559d1a890faSShreyas Bhatewara 
1560d1a890faSShreyas Bhatewara 
1561d1a890faSShreyas Bhatewara static int
156209c5088eSShreyas Bhatewara vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
156309c5088eSShreyas Bhatewara {
156409c5088eSShreyas Bhatewara 	int i, err = 0;
156509c5088eSShreyas Bhatewara 
156609c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
156709c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
156809c5088eSShreyas Bhatewara 		if (unlikely(err)) {
156909c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev,
157009c5088eSShreyas Bhatewara 				"%s: failed to create rx queue %i\n",
157109c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
157209c5088eSShreyas Bhatewara 			goto err_out;
157309c5088eSShreyas Bhatewara 		}
157409c5088eSShreyas Bhatewara 	}
157509c5088eSShreyas Bhatewara 	return err;
157609c5088eSShreyas Bhatewara err_out:
157709c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
157809c5088eSShreyas Bhatewara 	return err;
158009c5088eSShreyas Bhatewara }
158109c5088eSShreyas Bhatewara 
158209c5088eSShreyas Bhatewara /* Multiple queue aware polling function for tx and rx */
158309c5088eSShreyas Bhatewara 
158409c5088eSShreyas Bhatewara static int
1585d1a890faSShreyas Bhatewara vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1586d1a890faSShreyas Bhatewara {
158709c5088eSShreyas Bhatewara 	int rcd_done = 0, i;
1588d1a890faSShreyas Bhatewara 	if (unlikely(adapter->shared->ecr))
1589d1a890faSShreyas Bhatewara 		vmxnet3_process_events(adapter);
159009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
159109c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1592d1a890faSShreyas Bhatewara 
159309c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
159409c5088eSShreyas Bhatewara 		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
159509c5088eSShreyas Bhatewara 						   adapter, budget);
159609c5088eSShreyas Bhatewara 	return rcd_done;
1597d1a890faSShreyas Bhatewara }
1598d1a890faSShreyas Bhatewara 
1599d1a890faSShreyas Bhatewara 
1600d1a890faSShreyas Bhatewara static int
1601d1a890faSShreyas Bhatewara vmxnet3_poll(struct napi_struct *napi, int budget)
1602d1a890faSShreyas Bhatewara {
160309c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
160409c5088eSShreyas Bhatewara 					  struct vmxnet3_rx_queue, napi);
1605d1a890faSShreyas Bhatewara 	int rxd_done;
1606d1a890faSShreyas Bhatewara 
160709c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1608d1a890faSShreyas Bhatewara 
1609d1a890faSShreyas Bhatewara 	if (rxd_done < budget) {
1610d1a890faSShreyas Bhatewara 		napi_complete(napi);
161109c5088eSShreyas Bhatewara 		vmxnet3_enable_all_intrs(rx_queue->adapter);
1612d1a890faSShreyas Bhatewara 	}
1613d1a890faSShreyas Bhatewara 	return rxd_done;
1614d1a890faSShreyas Bhatewara }
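
/*
 * Standard NAPI contract: a poll that consumes less than its budget
 * re-arms interrupts via napi_complete(); a poll that uses the full budget
 * returns without re-enabling them, keeping the device in polling mode
 * until the backlog drains.
 */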
1615d1a890faSShreyas Bhatewara 
161609c5088eSShreyas Bhatewara /*
161709c5088eSShreyas Bhatewara  * NAPI polling function for MSI-X mode with multiple Rx queues
161809c5088eSShreyas Bhatewara  * Returns the number of NAPI credits consumed (i.e. rx descriptors processed)
161909c5088eSShreyas Bhatewara  */
162009c5088eSShreyas Bhatewara 
162109c5088eSShreyas Bhatewara static int
162209c5088eSShreyas Bhatewara vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
162309c5088eSShreyas Bhatewara {
162409c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = container_of(napi,
162509c5088eSShreyas Bhatewara 						struct vmxnet3_rx_queue, napi);
162609c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
162709c5088eSShreyas Bhatewara 	int rxd_done;
162809c5088eSShreyas Bhatewara 
162909c5088eSShreyas Bhatewara 	/* When sharing interrupt with corresponding tx queue, process
163009c5088eSShreyas Bhatewara 	 * tx completions in that queue as well
163109c5088eSShreyas Bhatewara 	 */
163209c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
163309c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue *tq =
163409c5088eSShreyas Bhatewara 				&adapter->tx_queue[rq - adapter->rx_queue];
163509c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
163609c5088eSShreyas Bhatewara 	}
163709c5088eSShreyas Bhatewara 
163809c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
163909c5088eSShreyas Bhatewara 
164009c5088eSShreyas Bhatewara 	if (rxd_done < budget) {
164109c5088eSShreyas Bhatewara 		napi_complete(napi);
164209c5088eSShreyas Bhatewara 		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
164309c5088eSShreyas Bhatewara 	}
164409c5088eSShreyas Bhatewara 	return rxd_done;
164509c5088eSShreyas Bhatewara }
164609c5088eSShreyas Bhatewara 
164709c5088eSShreyas Bhatewara 
164809c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
164909c5088eSShreyas Bhatewara 
165009c5088eSShreyas Bhatewara /*
165109c5088eSShreyas Bhatewara  * Handle completion interrupts on tx queues
165209c5088eSShreyas Bhatewara  * Returns whether or not the intr is handled
165309c5088eSShreyas Bhatewara  */
165409c5088eSShreyas Bhatewara 
165509c5088eSShreyas Bhatewara static irqreturn_t
165609c5088eSShreyas Bhatewara vmxnet3_msix_tx(int irq, void *data)
165709c5088eSShreyas Bhatewara {
165809c5088eSShreyas Bhatewara 	struct vmxnet3_tx_queue *tq = data;
165909c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = tq->adapter;
166009c5088eSShreyas Bhatewara 
166109c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
166209c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
166309c5088eSShreyas Bhatewara 
166409c5088eSShreyas Bhatewara 	/* Handle the case where only one irq is allocated for all tx queues */
166509c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
166609c5088eSShreyas Bhatewara 		int i;
166709c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
166809c5088eSShreyas Bhatewara 			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
166909c5088eSShreyas Bhatewara 			vmxnet3_tq_tx_complete(txq, adapter);
167009c5088eSShreyas Bhatewara 		}
167109c5088eSShreyas Bhatewara 	} else {
167209c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
167309c5088eSShreyas Bhatewara 	}
167409c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
167509c5088eSShreyas Bhatewara 
167609c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
167709c5088eSShreyas Bhatewara }
167809c5088eSShreyas Bhatewara 
167909c5088eSShreyas Bhatewara 
168009c5088eSShreyas Bhatewara /*
168109c5088eSShreyas Bhatewara  * Handle completion interrupts on rx queues. Returns whether or not the
168209c5088eSShreyas Bhatewara  * intr is handled
168309c5088eSShreyas Bhatewara  */
168409c5088eSShreyas Bhatewara 
168509c5088eSShreyas Bhatewara static irqreturn_t
168609c5088eSShreyas Bhatewara vmxnet3_msix_rx(int irq, void *data)
168709c5088eSShreyas Bhatewara {
168809c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = data;
168909c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
169009c5088eSShreyas Bhatewara 
169109c5088eSShreyas Bhatewara 	/* disable intr if needed */
169209c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
169309c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
169409c5088eSShreyas Bhatewara 	napi_schedule(&rq->napi);
169509c5088eSShreyas Bhatewara 
169609c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
169709c5088eSShreyas Bhatewara }
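
/*
 * The rx handler does no work in hard-irq context beyond masking its
 * vector and scheduling NAPI; the vector is re-enabled from the poll
 * routine once the queue drains below budget.
 */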
169809c5088eSShreyas Bhatewara 
169909c5088eSShreyas Bhatewara /*
170009c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
170109c5088eSShreyas Bhatewara  *
170209c5088eSShreyas Bhatewara  * vmxnet3_msix_event --
170309c5088eSShreyas Bhatewara  *
170409c5088eSShreyas Bhatewara  *    vmxnet3 msix event intr handler
170509c5088eSShreyas Bhatewara  *
170609c5088eSShreyas Bhatewara  * Result:
170709c5088eSShreyas Bhatewara  *    whether or not the intr is handled
170809c5088eSShreyas Bhatewara  *
170909c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
171009c5088eSShreyas Bhatewara  */
171109c5088eSShreyas Bhatewara 
171209c5088eSShreyas Bhatewara static irqreturn_t
171309c5088eSShreyas Bhatewara vmxnet3_msix_event(int irq, void *data)
171409c5088eSShreyas Bhatewara {
171509c5088eSShreyas Bhatewara 	struct net_device *dev = data;
171609c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
171709c5088eSShreyas Bhatewara 
171809c5088eSShreyas Bhatewara 	/* disable intr if needed */
171909c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
172009c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
172109c5088eSShreyas Bhatewara 
172209c5088eSShreyas Bhatewara 	if (adapter->shared->ecr)
172309c5088eSShreyas Bhatewara 		vmxnet3_process_events(adapter);
172409c5088eSShreyas Bhatewara 
172509c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
172609c5088eSShreyas Bhatewara 
172709c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
172809c5088eSShreyas Bhatewara }
172909c5088eSShreyas Bhatewara 
173009c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI  */
173109c5088eSShreyas Bhatewara 
1732d1a890faSShreyas Bhatewara 
1733d1a890faSShreyas Bhatewara /* Interrupt handler for vmxnet3  */
1734d1a890faSShreyas Bhatewara static irqreturn_t
1735d1a890faSShreyas Bhatewara vmxnet3_intr(int irq, void *dev_id)
1736d1a890faSShreyas Bhatewara {
1737d1a890faSShreyas Bhatewara 	struct net_device *dev = dev_id;
1738d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
1739d1a890faSShreyas Bhatewara 
174009c5088eSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_INTX) {
1741d1a890faSShreyas Bhatewara 		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1742d1a890faSShreyas Bhatewara 		if (unlikely(icr == 0))
1743d1a890faSShreyas Bhatewara 			/* not ours */
1744d1a890faSShreyas Bhatewara 			return IRQ_NONE;
1745d1a890faSShreyas Bhatewara 	}
1746d1a890faSShreyas Bhatewara 
1748d1a890faSShreyas Bhatewara 	/* disable intr if needed */
1749d1a890faSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
175009c5088eSShreyas Bhatewara 		vmxnet3_disable_all_intrs(adapter);
1751d1a890faSShreyas Bhatewara 
175209c5088eSShreyas Bhatewara 	napi_schedule(&adapter->rx_queue[0].napi);
1753d1a890faSShreyas Bhatewara 
1754d1a890faSShreyas Bhatewara 	return IRQ_HANDLED;
1755d1a890faSShreyas Bhatewara }
1756d1a890faSShreyas Bhatewara 
1757d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
1758d1a890faSShreyas Bhatewara 
1759d1a890faSShreyas Bhatewara /* netpoll callback. */
1760d1a890faSShreyas Bhatewara static void
1761d1a890faSShreyas Bhatewara vmxnet3_netpoll(struct net_device *netdev)
1762d1a890faSShreyas Bhatewara {
1763d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1764d1a890faSShreyas Bhatewara 
1765d25f06eaSNeil Horman 	switch (adapter->intr.type) {
17660a8d8c44SArnd Bergmann #ifdef CONFIG_PCI_MSI
17670a8d8c44SArnd Bergmann 	case VMXNET3_IT_MSIX: {
17680a8d8c44SArnd Bergmann 		int i;
1769d25f06eaSNeil Horman 		for (i = 0; i < adapter->num_rx_queues; i++)
1770d25f06eaSNeil Horman 			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
1771d25f06eaSNeil Horman 		break;
17720a8d8c44SArnd Bergmann 	}
17730a8d8c44SArnd Bergmann #endif
1774d25f06eaSNeil Horman 	case VMXNET3_IT_MSI:
1775d25f06eaSNeil Horman 	default:
1776d25f06eaSNeil Horman 		vmxnet3_intr(0, adapter->netdev);
1777d25f06eaSNeil Horman 		break;
1778d25f06eaSNeil Horman 	}
177909c5088eSShreyas Bhatewara 
1780d1a890faSShreyas Bhatewara }
178109c5088eSShreyas Bhatewara #endif	/* CONFIG_NET_POLL_CONTROLLER */
1782d1a890faSShreyas Bhatewara 
1783d1a890faSShreyas Bhatewara static int
1784d1a890faSShreyas Bhatewara vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1785d1a890faSShreyas Bhatewara {
178609c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
178709c5088eSShreyas Bhatewara 	int err = 0, i;
178809c5088eSShreyas Bhatewara 	int vector = 0;
1789d1a890faSShreyas Bhatewara 
17908f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
1791d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
179209c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
179309c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
179409c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
179509c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
179609c5088eSShreyas Bhatewara 				err = request_irq(
179709c5088eSShreyas Bhatewara 					      intr->msix_entries[vector].vector,
179809c5088eSShreyas Bhatewara 					      vmxnet3_msix_tx, 0,
179909c5088eSShreyas Bhatewara 					      adapter->tx_queue[i].name,
180009c5088eSShreyas Bhatewara 					      &adapter->tx_queue[i]);
180109c5088eSShreyas Bhatewara 			} else {
180209c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
180309c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
180409c5088eSShreyas Bhatewara 			}
180509c5088eSShreyas Bhatewara 			if (err) {
180609c5088eSShreyas Bhatewara 				dev_err(&adapter->netdev->dev,
180709c5088eSShreyas Bhatewara 					"Failed to request irq for MSIX, %s, error %d\n",
180909c5088eSShreyas Bhatewara 					adapter->tx_queue[i].name, err);
181009c5088eSShreyas Bhatewara 				return err;
181109c5088eSShreyas Bhatewara 			}
181209c5088eSShreyas Bhatewara 
181309c5088eSShreyas Bhatewara 			/* Handle the case where only 1 MSI-X vector was
181409c5088eSShreyas Bhatewara 			 * allocated for all tx queues */
181509c5088eSShreyas Bhatewara 			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
181609c5088eSShreyas Bhatewara 				for (; i < adapter->num_tx_queues; i++)
181709c5088eSShreyas Bhatewara 					adapter->tx_queue[i].comp_ring.intr_idx
181809c5088eSShreyas Bhatewara 								= vector;
181909c5088eSShreyas Bhatewara 				vector++;
182009c5088eSShreyas Bhatewara 				break;
182109c5088eSShreyas Bhatewara 			} else {
182209c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx
182309c5088eSShreyas Bhatewara 								= vector++;
182409c5088eSShreyas Bhatewara 			}
182509c5088eSShreyas Bhatewara 		}
182609c5088eSShreyas Bhatewara 		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
182709c5088eSShreyas Bhatewara 			vector = 0;
182809c5088eSShreyas Bhatewara 
182909c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
183009c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
183109c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
183209c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
183309c5088eSShreyas Bhatewara 			else
183409c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
183509c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
183609c5088eSShreyas Bhatewara 			err = request_irq(intr->msix_entries[vector].vector,
183709c5088eSShreyas Bhatewara 					  vmxnet3_msix_rx, 0,
183809c5088eSShreyas Bhatewara 					  adapter->rx_queue[i].name,
183909c5088eSShreyas Bhatewara 					  &(adapter->rx_queue[i]));
184009c5088eSShreyas Bhatewara 			if (err) {
1841204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
1842204a6e65SStephen Hemminger 					   "Failed to request irq for MSIX, %s, error %d\n",
184409c5088eSShreyas Bhatewara 					   adapter->rx_queue[i].name, err);
184509c5088eSShreyas Bhatewara 				return err;
184609c5088eSShreyas Bhatewara 			}
184709c5088eSShreyas Bhatewara 
184809c5088eSShreyas Bhatewara 			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
184909c5088eSShreyas Bhatewara 		}
185009c5088eSShreyas Bhatewara 
185109c5088eSShreyas Bhatewara 		sprintf(intr->event_msi_vector_name, "%s-event-%d",
185209c5088eSShreyas Bhatewara 			adapter->netdev->name, vector);
185309c5088eSShreyas Bhatewara 		err = request_irq(intr->msix_entries[vector].vector,
185409c5088eSShreyas Bhatewara 				  vmxnet3_msix_event, 0,
185509c5088eSShreyas Bhatewara 				  intr->event_msi_vector_name, adapter->netdev);
185609c5088eSShreyas Bhatewara 		intr->event_intr_idx = vector;
185709c5088eSShreyas Bhatewara 
185809c5088eSShreyas Bhatewara 	} else if (intr->type == VMXNET3_IT_MSI) {
185909c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
1860d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1861d1a890faSShreyas Bhatewara 				  adapter->netdev->name, adapter->netdev);
186209c5088eSShreyas Bhatewara 	} else {
1863115924b6SShreyas Bhatewara #endif
186409c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
1865d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1866d1a890faSShreyas Bhatewara 				  IRQF_SHARED, adapter->netdev->name,
1867d1a890faSShreyas Bhatewara 				  adapter->netdev);
186809c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
186909c5088eSShreyas Bhatewara 	}
187009c5088eSShreyas Bhatewara #endif
187109c5088eSShreyas Bhatewara 	intr->num_intrs = vector + 1;
187209c5088eSShreyas Bhatewara 	if (err) {
1873204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
1874204a6e65SStephen Hemminger 			   "Failed to request irq (intr type:%d), error %d\n",
1875204a6e65SStephen Hemminger 			   intr->type, err);
187609c5088eSShreyas Bhatewara 	} else {
187709c5088eSShreyas Bhatewara 		/* Number of rx queues will not change after this */
187809c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
187909c5088eSShreyas Bhatewara 			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
188009c5088eSShreyas Bhatewara 			rq->qid = i;
188109c5088eSShreyas Bhatewara 			rq->qid2 = i + adapter->num_rx_queues;
1882d1a890faSShreyas Bhatewara 		}
1883d1a890faSShreyas Bhatewara 
1886d1a890faSShreyas Bhatewara 		/* init our intr settings */
188709c5088eSShreyas Bhatewara 		for (i = 0; i < intr->num_intrs; i++)
188809c5088eSShreyas Bhatewara 			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
188909c5088eSShreyas Bhatewara 		if (adapter->intr.type != VMXNET3_IT_MSIX) {
1890d1a890faSShreyas Bhatewara 			adapter->intr.event_intr_idx = 0;
189109c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++)
189209c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx = 0;
189309c5088eSShreyas Bhatewara 			adapter->rx_queue[0].comp_ring.intr_idx = 0;
189409c5088eSShreyas Bhatewara 		}
1895d1a890faSShreyas Bhatewara 
1896204a6e65SStephen Hemminger 		netdev_info(adapter->netdev,
1897204a6e65SStephen Hemminger 			    "intr type %u, mode %u, %u vectors allocated\n",
1898204a6e65SStephen Hemminger 			    intr->type, intr->mask_mode, intr->num_intrs);
1899d1a890faSShreyas Bhatewara 	}
1900d1a890faSShreyas Bhatewara 
1901d1a890faSShreyas Bhatewara 	return err;
1902d1a890faSShreyas Bhatewara }
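
/*
 * Resulting MSI-X layout when nothing is shared: one vector per tx queue,
 * then one per rx queue, then a final vector for device events, hence
 * intr->num_intrs = vector + 1.  With TXSHARE all tx queues map onto a
 * single vector; with BUDDYSHARE each tx queue piggybacks on the vector of
 * the rx queue with the same index.
 */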
1903d1a890faSShreyas Bhatewara 
1904d1a890faSShreyas Bhatewara 
1905d1a890faSShreyas Bhatewara static void
1906d1a890faSShreyas Bhatewara vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1907d1a890faSShreyas Bhatewara {
190809c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
190909c5088eSShreyas Bhatewara 	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
1910d1a890faSShreyas Bhatewara 
191109c5088eSShreyas Bhatewara 	switch (intr->type) {
19128f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
1913d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSIX:
1914d1a890faSShreyas Bhatewara 	{
191509c5088eSShreyas Bhatewara 		int i, vector = 0;
1916d1a890faSShreyas Bhatewara 
191709c5088eSShreyas Bhatewara 		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
191809c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++) {
191909c5088eSShreyas Bhatewara 				free_irq(intr->msix_entries[vector++].vector,
192009c5088eSShreyas Bhatewara 					 &(adapter->tx_queue[i]));
192109c5088eSShreyas Bhatewara 				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
192209c5088eSShreyas Bhatewara 					break;
192309c5088eSShreyas Bhatewara 			}
192409c5088eSShreyas Bhatewara 		}
192509c5088eSShreyas Bhatewara 
192609c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
192709c5088eSShreyas Bhatewara 			free_irq(intr->msix_entries[vector++].vector,
192809c5088eSShreyas Bhatewara 				 &(adapter->rx_queue[i]));
192909c5088eSShreyas Bhatewara 		}
193009c5088eSShreyas Bhatewara 
193109c5088eSShreyas Bhatewara 		free_irq(intr->msix_entries[vector].vector,
1932d1a890faSShreyas Bhatewara 			 adapter->netdev);
193309c5088eSShreyas Bhatewara 		BUG_ON(vector >= intr->num_intrs);
1934d1a890faSShreyas Bhatewara 		break;
1935d1a890faSShreyas Bhatewara 	}
19368f7e524cSRandy Dunlap #endif
1937d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSI:
1938d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
1939d1a890faSShreyas Bhatewara 		break;
1940d1a890faSShreyas Bhatewara 	case VMXNET3_IT_INTX:
1941d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
1942d1a890faSShreyas Bhatewara 		break;
1943d1a890faSShreyas Bhatewara 	default:
1944c068e777SSasha Levin 		BUG();
1945d1a890faSShreyas Bhatewara 	}
1946d1a890faSShreyas Bhatewara }
1947d1a890faSShreyas Bhatewara 
1948d1a890faSShreyas Bhatewara 
1949d1a890faSShreyas Bhatewara static void
1950d1a890faSShreyas Bhatewara vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1951d1a890faSShreyas Bhatewara {
1952d1a890faSShreyas Bhatewara 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
195372e85c45SJesse Gross 	u16 vid;
1954d1a890faSShreyas Bhatewara 
195572e85c45SJesse Gross 	/* allow untagged pkts */
1956d1a890faSShreyas Bhatewara 	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
195772e85c45SJesse Gross 
195872e85c45SJesse Gross 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
195972e85c45SJesse Gross 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1960d1a890faSShreyas Bhatewara }
1961d1a890faSShreyas Bhatewara 
1962d1a890faSShreyas Bhatewara 
19638e586137SJiri Pirko static int
196480d5c368SPatrick McHardy vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1965d1a890faSShreyas Bhatewara {
1966d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1967f6957f88SJesse Gross 
1968f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
1969d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
197083d0feffSShreyas Bhatewara 		unsigned long flags;
1971d1a890faSShreyas Bhatewara 
1972d1a890faSShreyas Bhatewara 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
197383d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
1974d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1975d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
197683d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1977f6957f88SJesse Gross 	}
197872e85c45SJesse Gross 
197972e85c45SJesse Gross 	set_bit(vid, adapter->active_vlans);
19808e586137SJiri Pirko 
19818e586137SJiri Pirko 	return 0;
1982d1a890faSShreyas Bhatewara }
1983d1a890faSShreyas Bhatewara 
1984d1a890faSShreyas Bhatewara 
19858e586137SJiri Pirko static int
198680d5c368SPatrick McHardy vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
1987d1a890faSShreyas Bhatewara {
1988d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1989f6957f88SJesse Gross 
1990f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
1991d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
199283d0feffSShreyas Bhatewara 		unsigned long flags;
1993d1a890faSShreyas Bhatewara 
1994d1a890faSShreyas Bhatewara 		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
199583d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
1996d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1997d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
199883d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1999f6957f88SJesse Gross 	}
200072e85c45SJesse Gross 
200172e85c45SJesse Gross 	clear_bit(vid, adapter->active_vlans);
20028e586137SJiri Pirko 
20038e586137SJiri Pirko 	return 0;
2004d1a890faSShreyas Bhatewara }
2005d1a890faSShreyas Bhatewara 
2006d1a890faSShreyas Bhatewara 
2007d1a890faSShreyas Bhatewara static u8 *
2008d1a890faSShreyas Bhatewara vmxnet3_copy_mc(struct net_device *netdev)
2009d1a890faSShreyas Bhatewara {
2010d1a890faSShreyas Bhatewara 	u8 *buf = NULL;
20114cd24eafSJiri Pirko 	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2012d1a890faSShreyas Bhatewara 
2013d1a890faSShreyas Bhatewara 	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2014d1a890faSShreyas Bhatewara 	if (sz <= 0xffff) {
2015d1a890faSShreyas Bhatewara 		/* We may be called with BH disabled */
2016d1a890faSShreyas Bhatewara 		buf = kmalloc(sz, GFP_ATOMIC);
2017d1a890faSShreyas Bhatewara 		if (buf) {
201822bedad3SJiri Pirko 			struct netdev_hw_addr *ha;
2019567ec874SJiri Pirko 			int i = 0;
2020d1a890faSShreyas Bhatewara 
202122bedad3SJiri Pirko 			netdev_for_each_mc_addr(ha, netdev)
202222bedad3SJiri Pirko 				memcpy(buf + i++ * ETH_ALEN, ha->addr,
2023d1a890faSShreyas Bhatewara 				       ETH_ALEN);
2024d1a890faSShreyas Bhatewara 		}
2025d1a890faSShreyas Bhatewara 	}
2026d1a890faSShreyas Bhatewara 	return buf;
2027d1a890faSShreyas Bhatewara }
2028d1a890faSShreyas Bhatewara 
2029d1a890faSShreyas Bhatewara 
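/*
 * Program the device rx filter: translate the netdev flags (promiscuous,
 * broadcast, allmulti) into VMXNET3_RXM_* mode bits, DMA-map the
 * multicast table built by vmxnet3_copy_mc(), and issue the
 * UPDATE_RX_MODE / UPDATE_VLAN_FILTERS / UPDATE_MAC_FILTERS commands
 * under cmd_lock.
 */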
2030d1a890faSShreyas Bhatewara static void
2031d1a890faSShreyas Bhatewara vmxnet3_set_mc(struct net_device *netdev)
2032d1a890faSShreyas Bhatewara {
2033d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
203483d0feffSShreyas Bhatewara 	unsigned long flags;
2035d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxFilterConf *rxConf =
2036d1a890faSShreyas Bhatewara 					&adapter->shared->devRead.rxFilterConf;
2037d1a890faSShreyas Bhatewara 	u8 *new_table = NULL;
2038b0eb57cbSAndy King 	dma_addr_t new_table_pa = 0;
2039d1a890faSShreyas Bhatewara 	u32 new_mode = VMXNET3_RXM_UCAST;
2040d1a890faSShreyas Bhatewara 
204172e85c45SJesse Gross 	if (netdev->flags & IFF_PROMISC) {
204272e85c45SJesse Gross 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
204372e85c45SJesse Gross 		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
204472e85c45SJesse Gross 
2045d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_PROMISC;
204672e85c45SJesse Gross 	} else {
204772e85c45SJesse Gross 		vmxnet3_restore_vlan(adapter);
204872e85c45SJesse Gross 	}
2049d1a890faSShreyas Bhatewara 
2050d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_BROADCAST)
2051d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_BCAST;
2052d1a890faSShreyas Bhatewara 
2053d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_ALLMULTI)
2054d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_ALL_MULTI;
2055d1a890faSShreyas Bhatewara 	else
20564cd24eafSJiri Pirko 		if (!netdev_mc_empty(netdev)) {
2057d1a890faSShreyas Bhatewara 			new_table = vmxnet3_copy_mc(netdev);
2058d1a890faSShreyas Bhatewara 			if (new_table) {
2059115924b6SShreyas Bhatewara 				rxConf->mfTableLen = cpu_to_le16(
20604cd24eafSJiri Pirko 					netdev_mc_count(netdev) * ETH_ALEN);
2061b0eb57cbSAndy King 				new_table_pa = dma_map_single(
2062b0eb57cbSAndy King 							&adapter->pdev->dev,
2063b0eb57cbSAndy King 							new_table,
2064b0eb57cbSAndy King 							rxConf->mfTableLen,
2065b0eb57cbSAndy King 							PCI_DMA_TODEVICE);
20664ad9a64fSAndy King 			}
20674ad9a64fSAndy King 
20684ad9a64fSAndy King 			if (new_table_pa) {
20694ad9a64fSAndy King 				new_mode |= VMXNET3_RXM_MCAST;
2070b0eb57cbSAndy King 				rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2071d1a890faSShreyas Bhatewara 			} else {
20724ad9a64fSAndy King 				netdev_info(netdev,
20734ad9a64fSAndy King 					    "failed to copy mcast list, setting ALL_MULTI\n");
2074d1a890faSShreyas Bhatewara 				new_mode |= VMXNET3_RXM_ALL_MULTI;
2075d1a890faSShreyas Bhatewara 			}
2076d1a890faSShreyas Bhatewara 		}
2077d1a890faSShreyas Bhatewara 
2078d1a890faSShreyas Bhatewara 	if (!(new_mode & VMXNET3_RXM_MCAST)) {
2079d1a890faSShreyas Bhatewara 		rxConf->mfTableLen = 0;
2080d1a890faSShreyas Bhatewara 		rxConf->mfTablePA = 0;
2081d1a890faSShreyas Bhatewara 	}
2082d1a890faSShreyas Bhatewara 
208383d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2084d1a890faSShreyas Bhatewara 	if (new_mode != rxConf->rxMode) {
2085115924b6SShreyas Bhatewara 		rxConf->rxMode = cpu_to_le32(new_mode);
2086d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2087d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_RX_MODE);
208872e85c45SJesse Gross 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
208972e85c45SJesse Gross 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2090d1a890faSShreyas Bhatewara 	}
2091d1a890faSShreyas Bhatewara 
2092d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2093d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
209483d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2095d1a890faSShreyas Bhatewara 
20964ad9a64fSAndy King 	if (new_table_pa)
2097b0eb57cbSAndy King 		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2098b0eb57cbSAndy King 				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2099d1a890faSShreyas Bhatewara 	kfree(new_table);
2100d1a890faSShreyas Bhatewara }
2101d1a890faSShreyas Bhatewara 
210209c5088eSShreyas Bhatewara void
210309c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
210409c5088eSShreyas Bhatewara {
210509c5088eSShreyas Bhatewara 	int i;
210609c5088eSShreyas Bhatewara 
210709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
210809c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
210909c5088eSShreyas Bhatewara }
211009c5088eSShreyas Bhatewara 
2111d1a890faSShreyas Bhatewara 
2112d1a890faSShreyas Bhatewara /*
2113d1a890faSShreyas Bhatewara  *   Set up driver_shared based on settings in adapter.  This is the
2113d1a890faSShreyas Bhatewara  *   driver<->device handshake area; vmxnet3_activate_dev() hands its
2113d1a890faSShreyas Bhatewara  *   address to the device through the DSAL/DSAH registers.
2114d1a890faSShreyas Bhatewara  */
2115d1a890faSShreyas Bhatewara 
2116d1a890faSShreyas Bhatewara static void
2117d1a890faSShreyas Bhatewara vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2118d1a890faSShreyas Bhatewara {
2119d1a890faSShreyas Bhatewara 	struct Vmxnet3_DriverShared *shared = adapter->shared;
2120d1a890faSShreyas Bhatewara 	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2121d1a890faSShreyas Bhatewara 	struct Vmxnet3_TxQueueConf *tqc;
2122d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxQueueConf *rqc;
2123d1a890faSShreyas Bhatewara 	int i;
2124d1a890faSShreyas Bhatewara 
2125d1a890faSShreyas Bhatewara 	memset(shared, 0, sizeof(*shared));
2126d1a890faSShreyas Bhatewara 
2127d1a890faSShreyas Bhatewara 	/* driver settings */
2128115924b6SShreyas Bhatewara 	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2129115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.version = cpu_to_le32(
2130115924b6SShreyas Bhatewara 						VMXNET3_DRIVER_VERSION_NUM);
2131d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2132d1a890faSShreyas Bhatewara 				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2133d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2134115924b6SShreyas Bhatewara 	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2135115924b6SShreyas Bhatewara 				*((u32 *)&devRead->misc.driverInfo.gos));
2136115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2137115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2138d1a890faSShreyas Bhatewara 
2139b0eb57cbSAndy King 	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2140115924b6SShreyas Bhatewara 	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2141d1a890faSShreyas Bhatewara 
2142d1a890faSShreyas Bhatewara 	/* set up feature flags */
2143a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_RXCSUM)
21443843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2145d1a890faSShreyas Bhatewara 
2146a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_LRO) {
21473843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_LRO;
2148115924b6SShreyas Bhatewara 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2149d1a890faSShreyas Bhatewara 	}
2150f646968fSPatrick McHardy 	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
21513843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2152d1a890faSShreyas Bhatewara 
2153115924b6SShreyas Bhatewara 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2154115924b6SShreyas Bhatewara 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2155115924b6SShreyas Bhatewara 	devRead->misc.queueDescLen = cpu_to_le32(
215609c5088eSShreyas Bhatewara 		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
215709c5088eSShreyas Bhatewara 		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2158d1a890faSShreyas Bhatewara 
2159d1a890faSShreyas Bhatewara 	/* tx queue settings */
216009c5088eSShreyas Bhatewara 	devRead->misc.numTxQueues =  adapter->num_tx_queues;
216109c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
216209c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
216309c5088eSShreyas Bhatewara 		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
216409c5088eSShreyas Bhatewara 		tqc = &adapter->tqd_start[i].conf;
216509c5088eSShreyas Bhatewara 		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
216609c5088eSShreyas Bhatewara 		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
216709c5088eSShreyas Bhatewara 		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2168b0eb57cbSAndy King 		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
216909c5088eSShreyas Bhatewara 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
217009c5088eSShreyas Bhatewara 		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
217109c5088eSShreyas Bhatewara 		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
217209c5088eSShreyas Bhatewara 		tqc->ddLen          = cpu_to_le32(
217309c5088eSShreyas Bhatewara 					sizeof(struct vmxnet3_tx_buf_info) *
2174115924b6SShreyas Bhatewara 					tqc->txRingSize);
217509c5088eSShreyas Bhatewara 		tqc->intrIdx        = tq->comp_ring.intr_idx;
217609c5088eSShreyas Bhatewara 	}
2177d1a890faSShreyas Bhatewara 
2178d1a890faSShreyas Bhatewara 	/* rx queue settings */
217909c5088eSShreyas Bhatewara 	devRead->misc.numRxQueues = adapter->num_rx_queues;
218009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
218109c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
218209c5088eSShreyas Bhatewara 		rqc = &adapter->rqd_start[i].conf;
218309c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
218409c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
218509c5088eSShreyas Bhatewara 		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
2186b0eb57cbSAndy King 		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
218709c5088eSShreyas Bhatewara 		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
218809c5088eSShreyas Bhatewara 		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
218909c5088eSShreyas Bhatewara 		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
219009c5088eSShreyas Bhatewara 		rqc->ddLen           = cpu_to_le32(
219109c5088eSShreyas Bhatewara 					sizeof(struct vmxnet3_rx_buf_info) *
219209c5088eSShreyas Bhatewara 					(rqc->rxRingSize[0] +
219309c5088eSShreyas Bhatewara 					 rqc->rxRingSize[1]));
219409c5088eSShreyas Bhatewara 		rqc->intrIdx         = rq->comp_ring.intr_idx;
219509c5088eSShreyas Bhatewara 	}
219609c5088eSShreyas Bhatewara 
219709c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
219809c5088eSShreyas Bhatewara 	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
219909c5088eSShreyas Bhatewara 
220009c5088eSShreyas Bhatewara 	if (adapter->rss) {
220109c5088eSShreyas Bhatewara 		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
220266d35910SStephen Hemminger 		static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
220366d35910SStephen Hemminger 			0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
220466d35910SStephen Hemminger 			0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
220566d35910SStephen Hemminger 			0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
220666d35910SStephen Hemminger 			0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
220766d35910SStephen Hemminger 			0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
220866d35910SStephen Hemminger 		};
220966d35910SStephen Hemminger 
221009c5088eSShreyas Bhatewara 		devRead->misc.uptFeatures |= UPT1_F_RSS;
221109c5088eSShreyas Bhatewara 		devRead->misc.numRxQueues = adapter->num_rx_queues;
221209c5088eSShreyas Bhatewara 		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
221309c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV4 |
221409c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
221509c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV6;
221609c5088eSShreyas Bhatewara 		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
221709c5088eSShreyas Bhatewara 		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
221809c5088eSShreyas Bhatewara 		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
221966d35910SStephen Hemminger 		memcpy(rssConf->hashKey, rss_key, sizeof(rss_key));
222066d35910SStephen Hemminger 
222109c5088eSShreyas Bhatewara 		for (i = 0; i < rssConf->indTableSize; i++)
2222278bc429SBen Hutchings 			rssConf->indTable[i] = ethtool_rxfh_indir_default(
2223278bc429SBen Hutchings 				i, adapter->num_rx_queues);
222409c5088eSShreyas Bhatewara 
222509c5088eSShreyas Bhatewara 		devRead->rssConfDesc.confVer = 1;
2226b0eb57cbSAndy King 		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2227b0eb57cbSAndy King 		devRead->rssConfDesc.confPA =
2228b0eb57cbSAndy King 			cpu_to_le64(adapter->rss_conf_pa);
222909c5088eSShreyas Bhatewara 	}
223009c5088eSShreyas Bhatewara 
223109c5088eSShreyas Bhatewara #endif /* VMXNET3_RSS */
2232d1a890faSShreyas Bhatewara 
2233d1a890faSShreyas Bhatewara 	/* intr settings */
2234d1a890faSShreyas Bhatewara 	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2235d1a890faSShreyas Bhatewara 				     VMXNET3_IMM_AUTO;
2236d1a890faSShreyas Bhatewara 	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2237d1a890faSShreyas Bhatewara 	for (i = 0; i < adapter->intr.num_intrs; i++)
2238d1a890faSShreyas Bhatewara 		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2239d1a890faSShreyas Bhatewara 
2240d1a890faSShreyas Bhatewara 	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
22416929fe8aSRonghua Zang 	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2242d1a890faSShreyas Bhatewara 
2243d1a890faSShreyas Bhatewara 	/* rx filter settings */
2244d1a890faSShreyas Bhatewara 	devRead->rxFilterConf.rxMode = 0;
2245d1a890faSShreyas Bhatewara 	vmxnet3_restore_vlan(adapter);
2246f9f25026SShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2247f9f25026SShreyas Bhatewara 
2248d1a890faSShreyas Bhatewara 	/* the rest are already zeroed */
2249d1a890faSShreyas Bhatewara }
2250d1a890faSShreyas Bhatewara 
2251d1a890faSShreyas Bhatewara 
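/*
 * Bring the device up: initialize the tx/rx rings, request irqs,
 * populate driver_shared and hand its address to the device through
 * DSAL/DSAH, issue VMXNET3_CMD_ACTIVATE_DEV, prime the rx producer
 * registers, apply the rx filter, and finally enable napi and
 * interrupts.  On failure the steps already taken are unwound and a
 * negative errno is returned.
 */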
2252d1a890faSShreyas Bhatewara int
2253d1a890faSShreyas Bhatewara vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2254d1a890faSShreyas Bhatewara {
225509c5088eSShreyas Bhatewara 	int err, i;
2256d1a890faSShreyas Bhatewara 	u32 ret;
225783d0feffSShreyas Bhatewara 	unsigned long flags;
2258d1a890faSShreyas Bhatewara 
2259fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
226009c5088eSShreyas Bhatewara 		" ring sizes %u %u %u\n", adapter->netdev->name,
226109c5088eSShreyas Bhatewara 		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
226209c5088eSShreyas Bhatewara 		adapter->tx_queue[0].tx_ring.size,
226309c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[0].size,
226409c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[1].size);
2265d1a890faSShreyas Bhatewara 
226609c5088eSShreyas Bhatewara 	vmxnet3_tq_init_all(adapter);
226709c5088eSShreyas Bhatewara 	err = vmxnet3_rq_init_all(adapter);
2268d1a890faSShreyas Bhatewara 	if (err) {
2269204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2270204a6e65SStephen Hemminger 			   "Failed to init rx queue error %d\n", err);
2271d1a890faSShreyas Bhatewara 		goto rq_err;
2272d1a890faSShreyas Bhatewara 	}
2273d1a890faSShreyas Bhatewara 
2274d1a890faSShreyas Bhatewara 	err = vmxnet3_request_irqs(adapter);
2275d1a890faSShreyas Bhatewara 	if (err) {
2276204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2277204a6e65SStephen Hemminger 			   "Failed to set up irqs, error %d\n", err);
2278d1a890faSShreyas Bhatewara 		goto irq_err;
2279d1a890faSShreyas Bhatewara 	}
2280d1a890faSShreyas Bhatewara 
2281d1a890faSShreyas Bhatewara 	vmxnet3_setup_driver_shared(adapter);
2282d1a890faSShreyas Bhatewara 
2283115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2284115924b6SShreyas Bhatewara 			       adapter->shared_pa));
2285115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2286115924b6SShreyas Bhatewara 			       adapter->shared_pa));
228783d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2288d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2289d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_ACTIVATE_DEV);
2290d1a890faSShreyas Bhatewara 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
229183d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2292d1a890faSShreyas Bhatewara 
2293d1a890faSShreyas Bhatewara 	if (ret != 0) {
2294204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2295204a6e65SStephen Hemminger 			   "Failed to activate dev: error %u\n", ret);
2296d1a890faSShreyas Bhatewara 		err = -EINVAL;
2297d1a890faSShreyas Bhatewara 		goto activate_err;
2298d1a890faSShreyas Bhatewara 	}
229909c5088eSShreyas Bhatewara 
230009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
230109c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
230209c5088eSShreyas Bhatewara 				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
230309c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[0].next2fill);
230409c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
230509c5088eSShreyas Bhatewara 				(i * VMXNET3_REG_ALIGN)),
230609c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[1].next2fill);
230709c5088eSShreyas Bhatewara 	}
2308d1a890faSShreyas Bhatewara 
2309d1a890faSShreyas Bhatewara 	/* Apply the rx filter settings last. */
2310d1a890faSShreyas Bhatewara 	vmxnet3_set_mc(adapter->netdev);
2311d1a890faSShreyas Bhatewara 
2312d1a890faSShreyas Bhatewara 	/*
2313d1a890faSShreyas Bhatewara 	 * Check link state when first activating device. It will start the
2314d1a890faSShreyas Bhatewara 	 * tx queue if the link is up.
2315d1a890faSShreyas Bhatewara 	 */
23164a1745fcSShreyas Bhatewara 	vmxnet3_check_link(adapter, true);
231709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
231809c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
2319d1a890faSShreyas Bhatewara 	vmxnet3_enable_all_intrs(adapter);
2320d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2321d1a890faSShreyas Bhatewara 	return 0;
2322d1a890faSShreyas Bhatewara 
2323d1a890faSShreyas Bhatewara activate_err:
2324d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2325d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2326d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
2327d1a890faSShreyas Bhatewara irq_err:
2328d1a890faSShreyas Bhatewara rq_err:
2329d1a890faSShreyas Bhatewara 	/* free up buffers we allocated */
233009c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
2331d1a890faSShreyas Bhatewara 	return err;
2332d1a890faSShreyas Bhatewara }
2333d1a890faSShreyas Bhatewara 
2334d1a890faSShreyas Bhatewara 
2335d1a890faSShreyas Bhatewara void
2336d1a890faSShreyas Bhatewara vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2337d1a890faSShreyas Bhatewara {
233883d0feffSShreyas Bhatewara 	unsigned long flags;

233983d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2340d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
234183d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2342d1a890faSShreyas Bhatewara }
2343d1a890faSShreyas Bhatewara 
2344d1a890faSShreyas Bhatewara 
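/*
 * Counterpart of vmxnet3_activate_dev(): tell the device to quiesce,
 * then disable interrupts and napi, and clean up the rings and irqs.
 * The QUIESCED state bit makes this safe to call more than once.
 */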
2345d1a890faSShreyas Bhatewara int
2346d1a890faSShreyas Bhatewara vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2347d1a890faSShreyas Bhatewara {
234809c5088eSShreyas Bhatewara 	int i;
234983d0feffSShreyas Bhatewara 	unsigned long flags;

2350d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2351d1a890faSShreyas Bhatewara 		return 0;
2352d1a890faSShreyas Bhatewara 
235483d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2355d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2356d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_QUIESCE_DEV);
235783d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2358d1a890faSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
2359d1a890faSShreyas Bhatewara 
236009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
236109c5088eSShreyas Bhatewara 		napi_disable(&adapter->rx_queue[i].napi);
2362d1a890faSShreyas Bhatewara 	netif_tx_disable(adapter->netdev);
2363d1a890faSShreyas Bhatewara 	adapter->link_speed = 0;
2364d1a890faSShreyas Bhatewara 	netif_carrier_off(adapter->netdev);
2365d1a890faSShreyas Bhatewara 
236609c5088eSShreyas Bhatewara 	vmxnet3_tq_cleanup_all(adapter);
236709c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
2368d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
2369d1a890faSShreyas Bhatewara 	return 0;
2370d1a890faSShreyas Bhatewara }
2371d1a890faSShreyas Bhatewara 
2372d1a890faSShreyas Bhatewara 
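/*
 * The station address is programmed through two registers: MACL takes
 * the first four bytes of the MAC and MACH the remaining two.
 */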
2373d1a890faSShreyas Bhatewara static void
2374d1a890faSShreyas Bhatewara vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2375d1a890faSShreyas Bhatewara {
2376d1a890faSShreyas Bhatewara 	u32 tmp;
2377d1a890faSShreyas Bhatewara 
2378d1a890faSShreyas Bhatewara 	tmp = *(u32 *)mac;
2379d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2380d1a890faSShreyas Bhatewara 
2381d1a890faSShreyas Bhatewara 	tmp = (mac[5] << 8) | mac[4];
2382d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2383d1a890faSShreyas Bhatewara }
2384d1a890faSShreyas Bhatewara 
2385d1a890faSShreyas Bhatewara 
2386d1a890faSShreyas Bhatewara static int
2387d1a890faSShreyas Bhatewara vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2388d1a890faSShreyas Bhatewara {
2389d1a890faSShreyas Bhatewara 	struct sockaddr *addr = p;
2390d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2391d1a890faSShreyas Bhatewara 
2392d1a890faSShreyas Bhatewara 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2393d1a890faSShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, addr->sa_data);
2394d1a890faSShreyas Bhatewara 
2395d1a890faSShreyas Bhatewara 	return 0;
2396d1a890faSShreyas Bhatewara }
2397d1a890faSShreyas Bhatewara 
2398d1a890faSShreyas Bhatewara 
2399d1a890faSShreyas Bhatewara /* ==================== initialization and cleanup routines ============ */
2400d1a890faSShreyas Bhatewara 
2401d1a890faSShreyas Bhatewara static int
2402d1a890faSShreyas Bhatewara vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2403d1a890faSShreyas Bhatewara {
2404d1a890faSShreyas Bhatewara 	int err;
2405d1a890faSShreyas Bhatewara 	unsigned long mmio_start, mmio_len;
2406d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = adapter->pdev;
2407d1a890faSShreyas Bhatewara 
2408d1a890faSShreyas Bhatewara 	err = pci_enable_device(pdev);
2409d1a890faSShreyas Bhatewara 	if (err) {
2410204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2411d1a890faSShreyas Bhatewara 		return err;
2412d1a890faSShreyas Bhatewara 	}
2413d1a890faSShreyas Bhatewara 
2414d1a890faSShreyas Bhatewara 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2415d1a890faSShreyas Bhatewara 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2416204a6e65SStephen Hemminger 			dev_err(&pdev->dev,
2417204a6e65SStephen Hemminger 				"pci_set_consistent_dma_mask failed\n");
2418d1a890faSShreyas Bhatewara 			err = -EIO;
2419d1a890faSShreyas Bhatewara 			goto err_set_mask;
2420d1a890faSShreyas Bhatewara 		}
2421d1a890faSShreyas Bhatewara 		*dma64 = true;
2422d1a890faSShreyas Bhatewara 	} else {
2423d1a890faSShreyas Bhatewara 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2424204a6e65SStephen Hemminger 			dev_err(&pdev->dev,
2425204a6e65SStephen Hemminger 				"pci_set_dma_mask failed\n");
2426d1a890faSShreyas Bhatewara 			err = -EIO;
2427d1a890faSShreyas Bhatewara 			goto err_set_mask;
2428d1a890faSShreyas Bhatewara 		}
2429d1a890faSShreyas Bhatewara 		*dma64 = false;
2430d1a890faSShreyas Bhatewara 	}
2431d1a890faSShreyas Bhatewara 
2432d1a890faSShreyas Bhatewara 	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2433d1a890faSShreyas Bhatewara 					   vmxnet3_driver_name);
2434d1a890faSShreyas Bhatewara 	if (err) {
2435204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
2436204a6e65SStephen Hemminger 			"Failed to request region for adapter: error %d\n", err);
2437d1a890faSShreyas Bhatewara 		goto err_set_mask;
2438d1a890faSShreyas Bhatewara 	}
2439d1a890faSShreyas Bhatewara 
2440d1a890faSShreyas Bhatewara 	pci_set_master(pdev);
2441d1a890faSShreyas Bhatewara 
2442d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 0);
2443d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 0);
2444d1a890faSShreyas Bhatewara 	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2445d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr0) {
2446204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar0\n");
2447d1a890faSShreyas Bhatewara 		err = -EIO;
2448d1a890faSShreyas Bhatewara 		goto err_ioremap;
2449d1a890faSShreyas Bhatewara 	}
2450d1a890faSShreyas Bhatewara 
2451d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 1);
2452d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 1);
2453d1a890faSShreyas Bhatewara 	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2454d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr1) {
2455204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar1\n");
2456d1a890faSShreyas Bhatewara 		err = -EIO;
2457d1a890faSShreyas Bhatewara 		goto err_bar1;
2458d1a890faSShreyas Bhatewara 	}
2459d1a890faSShreyas Bhatewara 	return 0;
2460d1a890faSShreyas Bhatewara 
2461d1a890faSShreyas Bhatewara err_bar1:
2462d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
2463d1a890faSShreyas Bhatewara err_ioremap:
2464d1a890faSShreyas Bhatewara 	pci_release_selected_regions(pdev, (1 << 2) - 1);
2465d1a890faSShreyas Bhatewara err_set_mask:
2466d1a890faSShreyas Bhatewara 	pci_disable_device(pdev);
2467d1a890faSShreyas Bhatewara 	return err;
2468d1a890faSShreyas Bhatewara }
2469d1a890faSShreyas Bhatewara 
2470d1a890faSShreyas Bhatewara 
2471d1a890faSShreyas Bhatewara static void
2472d1a890faSShreyas Bhatewara vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2473d1a890faSShreyas Bhatewara {
2474d1a890faSShreyas Bhatewara 	BUG_ON(!adapter->pdev);
2475d1a890faSShreyas Bhatewara 
2476d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
2477d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr1);
2478d1a890faSShreyas Bhatewara 	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2479d1a890faSShreyas Bhatewara 	pci_disable_device(adapter->pdev);
2480d1a890faSShreyas Bhatewara }
2481d1a890faSShreyas Bhatewara 
2482d1a890faSShreyas Bhatewara 
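/*
 * Derive the rx buffer geometry from the MTU: frames that fit in one
 * skb buffer need a single buffer per packet, while larger MTUs spill
 * the remainder into page-sized buffers, increasing rx_buf_per_pkt.
 * Ring 0 is then rounded to a multiple of rx_buf_per_pkt *
 * VMXNET3_RING_SIZE_ALIGN and the same geometry is applied to every
 * rx queue.
 */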
2483d1a890faSShreyas Bhatewara static void
2484d1a890faSShreyas Bhatewara vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2485d1a890faSShreyas Bhatewara {
248609c5088eSShreyas Bhatewara 	size_t sz, i, ring0_size, ring1_size, comp_size;
248709c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[0];
248809c5088eSShreyas Bhatewara 
2490d1a890faSShreyas Bhatewara 	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2491d1a890faSShreyas Bhatewara 				    VMXNET3_MAX_ETH_HDR_SIZE) {
2492d1a890faSShreyas Bhatewara 		adapter->skb_buf_size = adapter->netdev->mtu +
2493d1a890faSShreyas Bhatewara 					VMXNET3_MAX_ETH_HDR_SIZE;
2494d1a890faSShreyas Bhatewara 		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2495d1a890faSShreyas Bhatewara 			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2496d1a890faSShreyas Bhatewara 
2497d1a890faSShreyas Bhatewara 		adapter->rx_buf_per_pkt = 1;
2498d1a890faSShreyas Bhatewara 	} else {
2499d1a890faSShreyas Bhatewara 		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2500d1a890faSShreyas Bhatewara 		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2501d1a890faSShreyas Bhatewara 					    VMXNET3_MAX_ETH_HDR_SIZE;
2502d1a890faSShreyas Bhatewara 		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2503d1a890faSShreyas Bhatewara 	}
2504d1a890faSShreyas Bhatewara 
2505d1a890faSShreyas Bhatewara 	/*
2506d1a890faSShreyas Bhatewara 	 * for simplicity, force the ring0 size to be a multiple of
2507d1a890faSShreyas Bhatewara 	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2508d1a890faSShreyas Bhatewara 	 */
2509d1a890faSShreyas Bhatewara 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
251009c5088eSShreyas Bhatewara 	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
251109c5088eSShreyas Bhatewara 	ring0_size = (ring0_size + sz - 1) / sz * sz;
2512a53255d3SShreyas Bhatewara 	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
251309c5088eSShreyas Bhatewara 			   sz * sz);
251409c5088eSShreyas Bhatewara 	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
251509c5088eSShreyas Bhatewara 	comp_size = ring0_size + ring1_size;
251609c5088eSShreyas Bhatewara 
251709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
251809c5088eSShreyas Bhatewara 		rq = &adapter->rx_queue[i];
251909c5088eSShreyas Bhatewara 		rq->rx_ring[0].size = ring0_size;
252009c5088eSShreyas Bhatewara 		rq->rx_ring[1].size = ring1_size;
252109c5088eSShreyas Bhatewara 		rq->comp_ring.size = comp_size;
252209c5088eSShreyas Bhatewara 	}
2523d1a890faSShreyas Bhatewara }
2524d1a890faSShreyas Bhatewara 
2525d1a890faSShreyas Bhatewara 
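/*
 * Allocate and size all tx and rx queues.  A tx queue creation failure
 * is fatal; rx queue creation degrades gracefully in that, if at least
 * one rx queue could be created, num_rx_queues is simply reduced to the
 * number that succeeded.
 */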
2526d1a890faSShreyas Bhatewara int
2527d1a890faSShreyas Bhatewara vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2528d1a890faSShreyas Bhatewara 		      u32 rx_ring_size, u32 rx_ring2_size)
2529d1a890faSShreyas Bhatewara {
253009c5088eSShreyas Bhatewara 	int err = 0, i;
2531d1a890faSShreyas Bhatewara 
253209c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
253309c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
253409c5088eSShreyas Bhatewara 		tq->tx_ring.size   = tx_ring_size;
253509c5088eSShreyas Bhatewara 		tq->data_ring.size = tx_ring_size;
253609c5088eSShreyas Bhatewara 		tq->comp_ring.size = tx_ring_size;
253709c5088eSShreyas Bhatewara 		tq->shared = &adapter->tqd_start[i].ctrl;
253809c5088eSShreyas Bhatewara 		tq->stopped = true;
253909c5088eSShreyas Bhatewara 		tq->adapter = adapter;
254009c5088eSShreyas Bhatewara 		tq->qid = i;
254109c5088eSShreyas Bhatewara 		err = vmxnet3_tq_create(tq, adapter);
254209c5088eSShreyas Bhatewara 		/*
254309c5088eSShreyas Bhatewara 		 * Too late to change num_tx_queues. We cannot fall back to
254409c5088eSShreyas Bhatewara 		 * fewer queues than we asked for.
254509c5088eSShreyas Bhatewara 		 */
2546d1a890faSShreyas Bhatewara 		if (err)
254709c5088eSShreyas Bhatewara 			goto queue_err;
254809c5088eSShreyas Bhatewara 	}
2549d1a890faSShreyas Bhatewara 
255009c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
255109c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2552d1a890faSShreyas Bhatewara 	vmxnet3_adjust_rx_ring_size(adapter);
255309c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
255409c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
255509c5088eSShreyas Bhatewara 		/* qid and qid2 for rx queues will be assigned later, when the
255609c5088eSShreyas Bhatewara 		 * number of rx queues is finalized after allocating intrs */
255709c5088eSShreyas Bhatewara 		rq->shared = &adapter->rqd_start[i].ctrl;
255809c5088eSShreyas Bhatewara 		rq->adapter = adapter;
255909c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(rq, adapter);
256009c5088eSShreyas Bhatewara 		if (err) {
256109c5088eSShreyas Bhatewara 			if (i == 0) {
2562204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
2563204a6e65SStephen Hemminger 					   "Could not allocate any rx queues. "
2564204a6e65SStephen Hemminger 					   "Aborting.\n");
256509c5088eSShreyas Bhatewara 				goto queue_err;
256609c5088eSShreyas Bhatewara 			} else {
2567204a6e65SStephen Hemminger 				netdev_info(adapter->netdev,
2568204a6e65SStephen Hemminger 					    "Number of rx queues changed "
256909c5088eSShreyas Bhatewara 					    "to %d.\n", i);
257009c5088eSShreyas Bhatewara 				adapter->num_rx_queues = i;
257109c5088eSShreyas Bhatewara 				err = 0;
257209c5088eSShreyas Bhatewara 				break;
257309c5088eSShreyas Bhatewara 			}
257409c5088eSShreyas Bhatewara 		}
257509c5088eSShreyas Bhatewara 	}
257609c5088eSShreyas Bhatewara 	return err;
257709c5088eSShreyas Bhatewara queue_err:
257809c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2579d1a890faSShreyas Bhatewara 	return err;
2580d1a890faSShreyas Bhatewara }
2581d1a890faSShreyas Bhatewara 
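/*
 * .ndo_open: create the queues with the adapter's configured ring sizes
 * and activate the device.
 */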
2582d1a890faSShreyas Bhatewara static int
2583d1a890faSShreyas Bhatewara vmxnet3_open(struct net_device *netdev)
2584d1a890faSShreyas Bhatewara {
2585d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
258609c5088eSShreyas Bhatewara 	int err, i;
2587d1a890faSShreyas Bhatewara 
2588d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
2589d1a890faSShreyas Bhatewara 
259009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
259109c5088eSShreyas Bhatewara 		spin_lock_init(&adapter->tx_queue[i].tx_lock);
2592d1a890faSShreyas Bhatewara 
2593f00e2b0aSNeil Horman 	err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
2594f00e2b0aSNeil Horman 				    adapter->rx_ring_size,
2595d1a890faSShreyas Bhatewara 				    VMXNET3_DEF_RX_RING_SIZE);
2596d1a890faSShreyas Bhatewara 	if (err)
2597d1a890faSShreyas Bhatewara 		goto queue_err;
2598d1a890faSShreyas Bhatewara 
2599d1a890faSShreyas Bhatewara 	err = vmxnet3_activate_dev(adapter);
2600d1a890faSShreyas Bhatewara 	if (err)
2601d1a890faSShreyas Bhatewara 		goto activate_err;
2602d1a890faSShreyas Bhatewara 
2603d1a890faSShreyas Bhatewara 	return 0;
2604d1a890faSShreyas Bhatewara 
2605d1a890faSShreyas Bhatewara activate_err:
260609c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
260709c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2608d1a890faSShreyas Bhatewara queue_err:
2609d1a890faSShreyas Bhatewara 	return err;
2610d1a890faSShreyas Bhatewara }
2611d1a890faSShreyas Bhatewara 
2612d1a890faSShreyas Bhatewara 
2613d1a890faSShreyas Bhatewara static int
2614d1a890faSShreyas Bhatewara vmxnet3_close(struct net_device *netdev)
2615d1a890faSShreyas Bhatewara {
2616d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2617d1a890faSShreyas Bhatewara 
2618d1a890faSShreyas Bhatewara 	/*
2619d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device; wait for
2620d1a890faSShreyas Bhatewara 	 * it to complete.
2621d1a890faSShreyas Bhatewara 	 */
2622d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2623d1a890faSShreyas Bhatewara 		msleep(1);
2624d1a890faSShreyas Bhatewara 
2625d1a890faSShreyas Bhatewara 	vmxnet3_quiesce_dev(adapter);
2626d1a890faSShreyas Bhatewara 
262709c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
262809c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2629d1a890faSShreyas Bhatewara 
2630d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2631d1a890faSShreyas Bhatewara 
2633d1a890faSShreyas Bhatewara 	return 0;
2634d1a890faSShreyas Bhatewara }
2635d1a890faSShreyas Bhatewara 
2636d1a890faSShreyas Bhatewara 
2637d1a890faSShreyas Bhatewara void
2638d1a890faSShreyas Bhatewara vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2639d1a890faSShreyas Bhatewara {
264009c5088eSShreyas Bhatewara 	int i;
264109c5088eSShreyas Bhatewara 
2642d1a890faSShreyas Bhatewara 	/*
2643d1a890faSShreyas Bhatewara 	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2644d1a890faSShreyas Bhatewara 	 * vmxnet3_close() will deadlock.
2645d1a890faSShreyas Bhatewara 	 */
2646d1a890faSShreyas Bhatewara 	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2647d1a890faSShreyas Bhatewara 
2648d1a890faSShreyas Bhatewara 	/* we need to enable NAPI, otherwise dev_close will deadlock */
264909c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
265009c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
2651d1a890faSShreyas Bhatewara 	dev_close(adapter->netdev);
2652d1a890faSShreyas Bhatewara }
2653d1a890faSShreyas Bhatewara 
2654d1a890faSShreyas Bhatewara 
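/*
 * .ndo_change_mtu: the rx queues must be destroyed and re-created
 * because the rx buffer sizes are derived from the MTU.  When the
 * interface is running, the device is quiesced and reset around the
 * change; a failure to come back up force-closes the device.
 */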
2655d1a890faSShreyas Bhatewara static int
2656d1a890faSShreyas Bhatewara vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2657d1a890faSShreyas Bhatewara {
2658d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2659d1a890faSShreyas Bhatewara 	int err = 0;
2660d1a890faSShreyas Bhatewara 
2661d1a890faSShreyas Bhatewara 	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2662d1a890faSShreyas Bhatewara 		return -EINVAL;
2663d1a890faSShreyas Bhatewara 
2664d1a890faSShreyas Bhatewara 	netdev->mtu = new_mtu;
2665d1a890faSShreyas Bhatewara 
2666d1a890faSShreyas Bhatewara 	/*
2667d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device; wait for
2668d1a890faSShreyas Bhatewara 	 * it to complete.
2669d1a890faSShreyas Bhatewara 	 */
2670d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2671d1a890faSShreyas Bhatewara 		msleep(1);
2672d1a890faSShreyas Bhatewara 
2673d1a890faSShreyas Bhatewara 	if (netif_running(netdev)) {
2674d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
2675d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
2676d1a890faSShreyas Bhatewara 
2677d1a890faSShreyas Bhatewara 		/* we need to re-create the rx queue based on the new mtu */
267809c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy_all(adapter);
2679d1a890faSShreyas Bhatewara 		vmxnet3_adjust_rx_ring_size(adapter);
268009c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create_all(adapter);
2681d1a890faSShreyas Bhatewara 		if (err) {
2682204a6e65SStephen Hemminger 			netdev_err(netdev,
2683204a6e65SStephen Hemminger 				   "failed to re-create rx queues, "
2684204a6e65SStephen Hemminger 				   "error %d. Closing it.\n", err);
2685d1a890faSShreyas Bhatewara 			goto out;
2686d1a890faSShreyas Bhatewara 		}
2687d1a890faSShreyas Bhatewara 
2688d1a890faSShreyas Bhatewara 		err = vmxnet3_activate_dev(adapter);
2689d1a890faSShreyas Bhatewara 		if (err) {
2690204a6e65SStephen Hemminger 			netdev_err(netdev,
2691204a6e65SStephen Hemminger 				   "failed to re-activate, error %d. "
2692204a6e65SStephen Hemminger 				   "Closing it\n", err);
2693d1a890faSShreyas Bhatewara 			goto out;
2694d1a890faSShreyas Bhatewara 		}
2695d1a890faSShreyas Bhatewara 	}
2696d1a890faSShreyas Bhatewara 
2697d1a890faSShreyas Bhatewara out:
2698d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2699d1a890faSShreyas Bhatewara 	if (err)
2700d1a890faSShreyas Bhatewara 		vmxnet3_force_close(adapter);
2701d1a890faSShreyas Bhatewara 
2702d1a890faSShreyas Bhatewara 	return err;
2703d1a890faSShreyas Bhatewara }
2704d1a890faSShreyas Bhatewara 
2705d1a890faSShreyas Bhatewara 
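/*
 * Advertise offloads.  hw_features holds the user-toggleable set;
 * vlan_features excludes the VLAN tag offloads themselves, and VLAN
 * filtering is enabled unconditionally in netdev->features.
 */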
2706d1a890faSShreyas Bhatewara static void
2707d1a890faSShreyas Bhatewara vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2708d1a890faSShreyas Bhatewara {
2709d1a890faSShreyas Bhatewara 	struct net_device *netdev = adapter->netdev;
2710d1a890faSShreyas Bhatewara 
2711a0d2730cSMichał Mirosław 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2712f646968fSPatrick McHardy 		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2713f646968fSPatrick McHardy 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
271472e85c45SJesse Gross 		NETIF_F_LRO;
2715a0d2730cSMichał Mirosław 	if (dma64)
2716ebbf9295SShreyas Bhatewara 		netdev->hw_features |= NETIF_F_HIGHDMA;
271772e85c45SJesse Gross 	netdev->vlan_features = netdev->hw_features &
2718f646968fSPatrick McHardy 				~(NETIF_F_HW_VLAN_CTAG_TX |
2719f646968fSPatrick McHardy 				  NETIF_F_HW_VLAN_CTAG_RX);
2720f646968fSPatrick McHardy 	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
2721d1a890faSShreyas Bhatewara }
2722d1a890faSShreyas Bhatewara 
2723d1a890faSShreyas Bhatewara 
2724d1a890faSShreyas Bhatewara static void
2725d1a890faSShreyas Bhatewara vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2726d1a890faSShreyas Bhatewara {
2727d1a890faSShreyas Bhatewara 	u32 tmp;
2728d1a890faSShreyas Bhatewara 
2729d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2730d1a890faSShreyas Bhatewara 	*(u32 *)mac = tmp;
2731d1a890faSShreyas Bhatewara 
2732d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2733d1a890faSShreyas Bhatewara 	mac[4] = tmp & 0xff;
2734d1a890faSShreyas Bhatewara 	mac[5] = (tmp >> 8) & 0xff;
2735d1a890faSShreyas Bhatewara }
2736d1a890faSShreyas Bhatewara 
273709c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
273809c5088eSShreyas Bhatewara 
273909c5088eSShreyas Bhatewara /*
274009c5088eSShreyas Bhatewara  * Enable MSI-X vectors.
274125985edcSLucas De Marchi  * Returns:
2742b60b869dSAlexander Gordeev  *	the number of vectors enabled on success, which is at least
2743b60b869dSAlexander Gordeev  *	VMXNET3_LINUX_MIN_MSIX_VECT, or
2744b60b869dSAlexander Gordeev  *	a negative error code if even the minimum number of vectors could
274509c5088eSShreyas Bhatewara  *	not be enabled.
274609c5088eSShreyas Bhatewara  */
274709c5088eSShreyas Bhatewara 
274809c5088eSShreyas Bhatewara static int
2749b60b869dSAlexander Gordeev vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
275009c5088eSShreyas Bhatewara {
2751c0a1be38SAlexander Gordeev 	int ret = pci_enable_msix_range(adapter->pdev,
2752c0a1be38SAlexander Gordeev 					adapter->intr.msix_entries, nvec, nvec);
2753c0a1be38SAlexander Gordeev 
2754c0a1be38SAlexander Gordeev 	if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
27554bad25faSStephen Hemminger 		dev_err(&adapter->netdev->dev,
2756b60b869dSAlexander Gordeev 			"Failed to enable %d MSI-X, trying %d\n",
2757b60b869dSAlexander Gordeev 			nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
275809c5088eSShreyas Bhatewara 
2759c0a1be38SAlexander Gordeev 		ret = pci_enable_msix_range(adapter->pdev,
2760c0a1be38SAlexander Gordeev 					    adapter->intr.msix_entries,
2761c0a1be38SAlexander Gordeev 					    VMXNET3_LINUX_MIN_MSIX_VECT,
2762c0a1be38SAlexander Gordeev 					    VMXNET3_LINUX_MIN_MSIX_VECT);
2763c0a1be38SAlexander Gordeev 	}
2764c0a1be38SAlexander Gordeev 
2765c0a1be38SAlexander Gordeev 	if (ret < 0) {
2766c0a1be38SAlexander Gordeev 		dev_err(&adapter->netdev->dev,
2767c0a1be38SAlexander Gordeev 			"Failed to enable MSI-X, error: %d\n", ret);
2768c0a1be38SAlexander Gordeev 	}
2769c0a1be38SAlexander Gordeev 
2770c0a1be38SAlexander Gordeev 	return ret;
277109c5088eSShreyas Bhatewara }
277209c5088eSShreyas Bhatewara 
277309c5088eSShreyas Bhatewara 
277409c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
2775d1a890faSShreyas Bhatewara 
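/*
 * Ask the device for its interrupt configuration, then try MSI-X first
 * (one vector per queue plus one for link events), falling back to MSI
 * and finally INTx.  Whenever per-queue vectors cannot be had, the
 * number of rx queues is reduced to 1.
 */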
2776d1a890faSShreyas Bhatewara static void
2777d1a890faSShreyas Bhatewara vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2778d1a890faSShreyas Bhatewara {
2779d1a890faSShreyas Bhatewara 	u32 cfg;
2780e328d410SRoland Dreier 	unsigned long flags;
2781d1a890faSShreyas Bhatewara 
2782d1a890faSShreyas Bhatewara 	/* intr settings */
2783e328d410SRoland Dreier 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2784d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2785d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_GET_CONF_INTR);
2786d1a890faSShreyas Bhatewara 	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2787e328d410SRoland Dreier 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2788d1a890faSShreyas Bhatewara 	adapter->intr.type = cfg & 0x3;
2789d1a890faSShreyas Bhatewara 	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2790d1a890faSShreyas Bhatewara 
2791d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_AUTO)
27920bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSIX;
2794d1a890faSShreyas Bhatewara 
27958f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
27960bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
2797b60b869dSAlexander Gordeev 		int i, nvec;
27980bdc0d70SShreyas Bhatewara 
2799b60b869dSAlexander Gordeev 		nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
2800b60b869dSAlexander Gordeev 			1 : adapter->num_tx_queues;
2801b60b869dSAlexander Gordeev 		nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
2802b60b869dSAlexander Gordeev 			0 : adapter->num_rx_queues;
2803b60b869dSAlexander Gordeev 		nvec += 1;	/* for link event */
2804b60b869dSAlexander Gordeev 		nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
2805b60b869dSAlexander Gordeev 		       nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
280609c5088eSShreyas Bhatewara 
2807b60b869dSAlexander Gordeev 		for (i = 0; i < nvec; i++)
2808b60b869dSAlexander Gordeev 			adapter->intr.msix_entries[i].entry = i;
280909c5088eSShreyas Bhatewara 
2810b60b869dSAlexander Gordeev 		nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
2811b60b869dSAlexander Gordeev 		if (nvec < 0)
2812b60b869dSAlexander Gordeev 			goto msix_err;
281309c5088eSShreyas Bhatewara 
281409c5088eSShreyas Bhatewara 		/* If we cannot allocate one MSIx vector per queue
281509c5088eSShreyas Bhatewara 		 * then limit the number of rx queues to 1
281609c5088eSShreyas Bhatewara 		 */
2817b60b869dSAlexander Gordeev 		if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
281809c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
28197e96fbf2SShreyas Bhatewara 			    || adapter->num_rx_queues != 1) {
282009c5088eSShreyas Bhatewara 				adapter->share_intr = VMXNET3_INTR_TXSHARE;
2821204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
2822204a6e65SStephen Hemminger 					   "Number of rx queues: 1\n");
282309c5088eSShreyas Bhatewara 				adapter->num_rx_queues = 1;
282409c5088eSShreyas Bhatewara 			}
2825d1a890faSShreyas Bhatewara 		}
2826b60b869dSAlexander Gordeev 
2827b60b869dSAlexander Gordeev 		adapter->intr.num_intrs = nvec;
282809c5088eSShreyas Bhatewara 		return;
282909c5088eSShreyas Bhatewara 
2830b60b869dSAlexander Gordeev msix_err:
283109c5088eSShreyas Bhatewara 		/* If we cannot allocate MSIx vectors use only one rx queue */
28324bad25faSStephen Hemminger 		dev_info(&adapter->pdev->dev,
28334bad25faSStephen Hemminger 			 "Failed to enable MSI-X, error %d. "
2834b60b869dSAlexander Gordeev 			 "Limiting #rx queues to 1, trying MSI.\n", nvec);
283509c5088eSShreyas Bhatewara 
28360bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSI;
28370bdc0d70SShreyas Bhatewara 	}
2838d1a890faSShreyas Bhatewara 
28390bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSI) {
2840b60b869dSAlexander Gordeev 		if (!pci_enable_msi(adapter->pdev)) {
284109c5088eSShreyas Bhatewara 			adapter->num_rx_queues = 1;
2842d1a890faSShreyas Bhatewara 			adapter->intr.num_intrs = 1;
2843d1a890faSShreyas Bhatewara 			return;
2844d1a890faSShreyas Bhatewara 		}
2845d1a890faSShreyas Bhatewara 	}
28460bdc0d70SShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
2847d1a890faSShreyas Bhatewara 
284809c5088eSShreyas Bhatewara 	adapter->num_rx_queues = 1;
2849204a6e65SStephen Hemminger 	dev_info(&adapter->netdev->dev,
2850204a6e65SStephen Hemminger 		 "Using INTx interrupt, #Rx queues: 1.\n");
2851d1a890faSShreyas Bhatewara 	adapter->intr.type = VMXNET3_IT_INTX;
2852d1a890faSShreyas Bhatewara 
2853d1a890faSShreyas Bhatewara 	/* INT-X related setting */
2854d1a890faSShreyas Bhatewara 	adapter->intr.num_intrs = 1;
2855d1a890faSShreyas Bhatewara }
2856d1a890faSShreyas Bhatewara 
2857d1a890faSShreyas Bhatewara 
2858d1a890faSShreyas Bhatewara static void
2859d1a890faSShreyas Bhatewara vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2860d1a890faSShreyas Bhatewara {
2861d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX)
2862d1a890faSShreyas Bhatewara 		pci_disable_msix(adapter->pdev);
2863d1a890faSShreyas Bhatewara 	else if (adapter->intr.type == VMXNET3_IT_MSI)
2864d1a890faSShreyas Bhatewara 		pci_disable_msi(adapter->pdev);
2865d1a890faSShreyas Bhatewara 	else
2866d1a890faSShreyas Bhatewara 		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2867d1a890faSShreyas Bhatewara }
2868d1a890faSShreyas Bhatewara 
2869d1a890faSShreyas Bhatewara 
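/*
 * .ndo_tx_timeout: count the hang, schedule the deferred reset work,
 * and wake the queue so pending skbs are retried after the reset.
 */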
2870d1a890faSShreyas Bhatewara static void
2871d1a890faSShreyas Bhatewara vmxnet3_tx_timeout(struct net_device *netdev)
2872d1a890faSShreyas Bhatewara {
2873d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

2874d1a890faSShreyas Bhatewara 	adapter->tx_timeout_count++;
2875d1a890faSShreyas Bhatewara 
2876204a6e65SStephen Hemminger 	netdev_err(adapter->netdev, "tx hang\n");
2877d1a890faSShreyas Bhatewara 	schedule_work(&adapter->work);
287809c5088eSShreyas Bhatewara 	netif_wake_queue(adapter->netdev);
2879d1a890faSShreyas Bhatewara }
2880d1a890faSShreyas Bhatewara 
2881d1a890faSShreyas Bhatewara 
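/*
 * Deferred reset handler (scheduled from the tx timeout path).  It
 * serializes with other resets via VMXNET3_STATE_BIT_RESETTING and
 * holds the rtnl lock so the device cannot be closed underneath it.
 */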
2882d1a890faSShreyas Bhatewara static void
2883d1a890faSShreyas Bhatewara vmxnet3_reset_work(struct work_struct *data)
2884d1a890faSShreyas Bhatewara {
2885d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
2886d1a890faSShreyas Bhatewara 
2887d1a890faSShreyas Bhatewara 	adapter = container_of(data, struct vmxnet3_adapter, work);
2888d1a890faSShreyas Bhatewara 
2889d1a890faSShreyas Bhatewara 	/* if another thread is resetting the device, no need to proceed */
2890d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2891d1a890faSShreyas Bhatewara 		return;
2892d1a890faSShreyas Bhatewara 
2893d1a890faSShreyas Bhatewara 	/* if the device is closed, we must leave it alone */
2894d9a5f210SShreyas Bhatewara 	rtnl_lock();
2895d1a890faSShreyas Bhatewara 	if (netif_running(adapter->netdev)) {
2896204a6e65SStephen Hemminger 		netdev_notice(adapter->netdev, "resetting\n");
2897d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
2898d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
2899d1a890faSShreyas Bhatewara 		vmxnet3_activate_dev(adapter);
2900d1a890faSShreyas Bhatewara 	} else {
2901204a6e65SStephen Hemminger 		netdev_info(adapter->netdev, "already closed\n");
2902d1a890faSShreyas Bhatewara 	}
2903d9a5f210SShreyas Bhatewara 	rtnl_unlock();
2904d1a890faSShreyas Bhatewara 
2905d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2906d1a890faSShreyas Bhatewara }
2907d1a890faSShreyas Bhatewara 
2908d1a890faSShreyas Bhatewara 
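/*
 * PCI probe: size the tx/rx queue counts from the number of online CPUs
 * (rounded down to a power of two, and forced to 1 without MSI), set up
 * the DMA-coherent shared areas, map the BARs, and negotiate the device
 * and UPT API revisions.
 */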
29093a4751a3SBill Pemberton static int
2910d1a890faSShreyas Bhatewara vmxnet3_probe_device(struct pci_dev *pdev,
2911d1a890faSShreyas Bhatewara 		     const struct pci_device_id *id)
2912d1a890faSShreyas Bhatewara {
2913d1a890faSShreyas Bhatewara 	static const struct net_device_ops vmxnet3_netdev_ops = {
2914d1a890faSShreyas Bhatewara 		.ndo_open = vmxnet3_open,
2915d1a890faSShreyas Bhatewara 		.ndo_stop = vmxnet3_close,
2916d1a890faSShreyas Bhatewara 		.ndo_start_xmit = vmxnet3_xmit_frame,
2917d1a890faSShreyas Bhatewara 		.ndo_set_mac_address = vmxnet3_set_mac_addr,
2918d1a890faSShreyas Bhatewara 		.ndo_change_mtu = vmxnet3_change_mtu,
2919a0d2730cSMichał Mirosław 		.ndo_set_features = vmxnet3_set_features,
292095305f6cSstephen hemminger 		.ndo_get_stats64 = vmxnet3_get_stats64,
2921d1a890faSShreyas Bhatewara 		.ndo_tx_timeout = vmxnet3_tx_timeout,
2922afc4b13dSJiri Pirko 		.ndo_set_rx_mode = vmxnet3_set_mc,
2923d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2924d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2925d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
2926d1a890faSShreyas Bhatewara 		.ndo_poll_controller = vmxnet3_netpoll,
2927d1a890faSShreyas Bhatewara #endif
2928d1a890faSShreyas Bhatewara 	};
2929d1a890faSShreyas Bhatewara 	int err;
2930d1a890faSShreyas Bhatewara 	bool dma64 = false; /* stupid gcc */
2931d1a890faSShreyas Bhatewara 	u32 ver;
2932d1a890faSShreyas Bhatewara 	struct net_device *netdev;
2933d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
2934d1a890faSShreyas Bhatewara 	u8 mac[ETH_ALEN];
293509c5088eSShreyas Bhatewara 	int size;
293609c5088eSShreyas Bhatewara 	int num_tx_queues;
293709c5088eSShreyas Bhatewara 	int num_rx_queues;
2938d1a890faSShreyas Bhatewara 
2939e154b639SShreyas Bhatewara 	if (!pci_msi_enabled())
2940e154b639SShreyas Bhatewara 		enable_mq = 0;
2941e154b639SShreyas Bhatewara 
294209c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
294309c5088eSShreyas Bhatewara 	if (enable_mq)
294409c5088eSShreyas Bhatewara 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
294509c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
294609c5088eSShreyas Bhatewara 	else
294709c5088eSShreyas Bhatewara #endif
294809c5088eSShreyas Bhatewara 		num_rx_queues = 1;
2949eebb02b1SShreyas Bhatewara 	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
295009c5088eSShreyas Bhatewara 
295109c5088eSShreyas Bhatewara 	if (enable_mq)
295209c5088eSShreyas Bhatewara 		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
295309c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
295409c5088eSShreyas Bhatewara 	else
295509c5088eSShreyas Bhatewara 		num_tx_queues = 1;
295609c5088eSShreyas Bhatewara 
2957eebb02b1SShreyas Bhatewara 	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
295809c5088eSShreyas Bhatewara 	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
295909c5088eSShreyas Bhatewara 				   max(num_tx_queues, num_rx_queues));
2960204a6e65SStephen Hemminger 	dev_info(&pdev->dev,
2961204a6e65SStephen Hemminger 		 "# of Tx queues : %d, # of Rx queues : %d\n",
296209c5088eSShreyas Bhatewara 		 num_tx_queues, num_rx_queues);
296309c5088eSShreyas Bhatewara 
296441de8d4cSJoe Perches 	if (!netdev)
2965d1a890faSShreyas Bhatewara 		return -ENOMEM;
2966d1a890faSShreyas Bhatewara 
2967d1a890faSShreyas Bhatewara 	pci_set_drvdata(pdev, netdev);
2968d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
2969d1a890faSShreyas Bhatewara 	adapter->netdev = netdev;
2970d1a890faSShreyas Bhatewara 	adapter->pdev = pdev;
2971d1a890faSShreyas Bhatewara 
2972f00e2b0aSNeil Horman 	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
2973f00e2b0aSNeil Horman 	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
2974f00e2b0aSNeil Horman 
297583d0feffSShreyas Bhatewara 	spin_lock_init(&adapter->cmd_lock);
2976b0eb57cbSAndy King 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
2977b0eb57cbSAndy King 					     sizeof(struct vmxnet3_adapter),
2978b0eb57cbSAndy King 					     PCI_DMA_TODEVICE);
2979b0eb57cbSAndy King 	adapter->shared = dma_alloc_coherent(
2980b0eb57cbSAndy King 				&adapter->pdev->dev,
2981d1a890faSShreyas Bhatewara 				sizeof(struct Vmxnet3_DriverShared),
2982b0eb57cbSAndy King 				&adapter->shared_pa, GFP_KERNEL);
2983d1a890faSShreyas Bhatewara 	if (!adapter->shared) {
2984204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to allocate memory\n");
2985d1a890faSShreyas Bhatewara 		err = -ENOMEM;
2986d1a890faSShreyas Bhatewara 		goto err_alloc_shared;
2987d1a890faSShreyas Bhatewara 	}
2988d1a890faSShreyas Bhatewara 
298909c5088eSShreyas Bhatewara 	adapter->num_rx_queues = num_rx_queues;
299009c5088eSShreyas Bhatewara 	adapter->num_tx_queues = num_tx_queues;
2991e4fabf2bSBhavesh Davda 	adapter->rx_buf_per_pkt = 1;
299209c5088eSShreyas Bhatewara 
299309c5088eSShreyas Bhatewara 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
299409c5088eSShreyas Bhatewara 	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2995b0eb57cbSAndy King 	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
2996b0eb57cbSAndy King 						&adapter->queue_desc_pa,
2997b0eb57cbSAndy King 						GFP_KERNEL);
2998d1a890faSShreyas Bhatewara 
2999d1a890faSShreyas Bhatewara 	if (!adapter->tqd_start) {
3000204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to allocate memory\n");
3001d1a890faSShreyas Bhatewara 		err = -ENOMEM;
3002d1a890faSShreyas Bhatewara 		goto err_alloc_queue_desc;
3003d1a890faSShreyas Bhatewara 	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							    adapter->num_tx_queues);

	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
					      sizeof(struct Vmxnet3_PMConf),
					      &adapter->pm_conf_pa,
					      GFP_KERNEL);
	if (!adapter->pm_conf) {
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
					       sizeof(struct UPT1_RSSConf),
					       &adapter->rss_conf_pa,
					       GFP_KERNEL);
	if (!adapter->rss_conf) {
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

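	/* Version handshake: read the revisions the device supports and
	 * acknowledge version 1 for both the device and UPT interfaces.
	 */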
	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible h/w version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible upt version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

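	/* Tx/rx queue pairs can share ("buddy") an interrupt vector only
	 * when the two queue counts match.
	 */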
	if (adapter->num_tx_queues == adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
	else
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
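	/* RSS needs multiple rx queues, each with its own MSI-X vector. */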
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		netdev->hw_features |= NETIF_F_RXHASH;
		netdev->features |= NETIF_F_RXHASH;
		dev_dbg(&pdev->dev, "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);

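	/* With MSI-X every rx queue polls on its own NAPI context;
	 * otherwise a single NAPI instance services the whole device.
	 */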
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	netif_carrier_off(netdev);
	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register adapter\n");
		goto err_register;
	}

	vmxnet3_check_link(adapter, false);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss:
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm:
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
err_alloc_queue_desc:
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
err_alloc_shared:
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
	free_netdev(netdev);
	return err;
}


static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

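	/* Recompute the rx queue count exactly as probe did, so the queue
	 * descriptor area below is freed with the size it was allocated
	 * with.
	 */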
#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
	free_netdev(netdev);
}


#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header */
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +		/* 2 Ethernet addresses */
			2 * sizeof(u32);	/* 2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The unicast IPv4 address in the 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits: each mask bit selects
		 * one pattern byte, so flag only the fields set above,
		 * i.e. h_proto (pattern bytes 12-13), ar_op (bytes 20-21)
		 * and the target IP (bytes 38-41).
		 */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

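	/* Point the shared area at the new PM config and tell the device
	 * to pick it up.
	 */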
	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen =
		cpu_to_le32(sizeof(*pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(adapter->pm_conf_pa);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

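	/* Arm wake-up (if any WoL mode was requested) and drop the device
	 * into the chosen sleep state.
	 */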
	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}


static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen =
		cpu_to_le32(sizeof(*pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(adapter->pm_conf_pa);

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

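	/* Push the now-empty PM config so the device stops matching the
	 * wake-up filters programmed at suspend time.
	 */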
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};


static int __init
vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);