/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read from and write to the device ABI.
 * The general technique used here is: double-word bitfields are defined in
 * the opposite order for big endian architectures. Then, before reading them
 * in the driver, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is used
 * to translate the double words into the required format.
 * To avoid touching bits in the shared structures more than once, temporary
 * descriptors are used. These are passed as srcDesc to the following
 * functions.
 */
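/*
 * For example, on a big-endian CPU the gen bit of a Tx completion descriptor
 * is not read through the bitfield directly; VMXNET3_TCD_GET_GEN() below
 * converts the 32-bit word containing it with le32_to_cpu() (via
 * get_bitfield32()) and then masks out the bit.
 */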
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}



#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}

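/*
 * Unmap and free the skb whose EOP descriptor is at eop_idx, unmapping every
 * tx buffer of that packet from next2comp up to and including eop_idx.
 * Returns the number of tx ring entries reclaimed.
 */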
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}

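/*
 * Reclaim tx ring entries for all completion descriptors currently pending in
 * the completion ring, and wake the queue if it was stopped and enough
 * descriptors are now available. Returns the number of packets completed.
 */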
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
			struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		/* Prevent any &gdesc->tcd field from being (speculatively)
		 * read before (&gdesc->tcd)->gen is read.
		 */
		dma_rmb();

		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}

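/*
 * Unmap and free any tx buffers still outstanding and reset the tx ring and
 * completion ring to their initial state; queue stats are left untouched.
 */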
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->data_ring.size * tq->txdata_desc_size,
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	if (tq->buf_info) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
				  tq->buf_info, tq->buf_info_pa);
		tq->buf_info = NULL;
	}
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0,
	       tq->data_ring.size * tq->txdata_desc_size);

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}

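/*
 * Allocate the DMA-coherent tx ring, tx data ring, completion ring and
 * buf_info array for a tx queue. Returns 0 on success, -ENOMEM on failure.
 */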
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	size_t sz;

	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * tq->txdata_desc_size,
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
		goto err;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
					  &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					dev_kfree_skb_any(rbi->skb);
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len  != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					put_page(rbi->page);
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}

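/* Append the rx buffer's page to the skb as a fragment of rcd->len bytes. */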
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	skb_frag_off_set(frag, 0);
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}

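/*
 * Fill tx descriptors for the packet: one SOP descriptor pointing at the
 * copied headers in the tx data ring (if any), then one descriptor per
 * VMXNET3_MAX_TX_BUF_SIZE chunk of the remaining linear data and of each
 * page fragment. Returns 0 on success or -EFAULT on a DMA mapping error.
 */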
6735738a09dSAlexey Khoroshilov static int
674d1a890faSShreyas Bhatewara vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
675d1a890faSShreyas Bhatewara 		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
676d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter *adapter)
677d1a890faSShreyas Bhatewara {
678d1a890faSShreyas Bhatewara 	u32 dw2, len;
679d1a890faSShreyas Bhatewara 	unsigned long buf_offset;
680d1a890faSShreyas Bhatewara 	int i;
681d1a890faSShreyas Bhatewara 	union Vmxnet3_GenericDesc *gdesc;
682d1a890faSShreyas Bhatewara 	struct vmxnet3_tx_buf_info *tbi = NULL;
683d1a890faSShreyas Bhatewara 
684d1a890faSShreyas Bhatewara 	BUG_ON(ctx->copy_size > skb_headlen(skb));
685d1a890faSShreyas Bhatewara 
686d1a890faSShreyas Bhatewara 	/* use the previous gen bit for the SOP desc */
687d1a890faSShreyas Bhatewara 	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
688d1a890faSShreyas Bhatewara 
689d1a890faSShreyas Bhatewara 	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
690d1a890faSShreyas Bhatewara 	gdesc = ctx->sop_txd; /* both loops below can be skipped */
691d1a890faSShreyas Bhatewara 
692d1a890faSShreyas Bhatewara 	/* no need to map the buffer if headers are copied */
693d1a890faSShreyas Bhatewara 	if (ctx->copy_size) {
694115924b6SShreyas Bhatewara 		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
695d1a890faSShreyas Bhatewara 					tq->tx_ring.next2fill *
6963c8b3efcSShrikrishna Khare 					tq->txdata_desc_size);
697115924b6SShreyas Bhatewara 		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
698d1a890faSShreyas Bhatewara 		ctx->sop_txd->dword[3] = 0;
699d1a890faSShreyas Bhatewara 
700d1a890faSShreyas Bhatewara 		tbi = tq->buf_info + tq->tx_ring.next2fill;
701d1a890faSShreyas Bhatewara 		tbi->map_type = VMXNET3_MAP_NONE;
702d1a890faSShreyas Bhatewara 
703fdcd79b9SStephen Hemminger 		netdev_dbg(adapter->netdev,
704f6965582SRandy Dunlap 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
705115924b6SShreyas Bhatewara 			tq->tx_ring.next2fill,
706115924b6SShreyas Bhatewara 			le64_to_cpu(ctx->sop_txd->txd.addr),
707d1a890faSShreyas Bhatewara 			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
708d1a890faSShreyas Bhatewara 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
709d1a890faSShreyas Bhatewara 
710d1a890faSShreyas Bhatewara 		/* use the right gen for non-SOP desc */
711d1a890faSShreyas Bhatewara 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
712d1a890faSShreyas Bhatewara 	}
713d1a890faSShreyas Bhatewara 
714d1a890faSShreyas Bhatewara 	/* linear part can use multiple tx desc if it's big */
715d1a890faSShreyas Bhatewara 	len = skb_headlen(skb) - ctx->copy_size;
716d1a890faSShreyas Bhatewara 	buf_offset = ctx->copy_size;
717d1a890faSShreyas Bhatewara 	while (len) {
718d1a890faSShreyas Bhatewara 		u32 buf_size;
719d1a890faSShreyas Bhatewara 
7201f4b1612SBhavesh Davda 		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
7211f4b1612SBhavesh Davda 			buf_size = len;
7221f4b1612SBhavesh Davda 			dw2 |= len;
7231f4b1612SBhavesh Davda 		} else {
7241f4b1612SBhavesh Davda 			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
7251f4b1612SBhavesh Davda 			/* spec says that for TxDesc.len, 0 == 2^14 */
7261f4b1612SBhavesh Davda 		}
727d1a890faSShreyas Bhatewara 
728d1a890faSShreyas Bhatewara 		tbi = tq->buf_info + tq->tx_ring.next2fill;
729d1a890faSShreyas Bhatewara 		tbi->map_type = VMXNET3_MAP_SINGLE;
730b0eb57cbSAndy King 		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
731d1a890faSShreyas Bhatewara 				skb->data + buf_offset, buf_size,
732d1a890faSShreyas Bhatewara 				PCI_DMA_TODEVICE);
7335738a09dSAlexey Khoroshilov 		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
7345738a09dSAlexey Khoroshilov 			return -EFAULT;
735d1a890faSShreyas Bhatewara 
7361f4b1612SBhavesh Davda 		tbi->len = buf_size;
737d1a890faSShreyas Bhatewara 
738d1a890faSShreyas Bhatewara 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
739d1a890faSShreyas Bhatewara 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
740d1a890faSShreyas Bhatewara 
741115924b6SShreyas Bhatewara 		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
7421f4b1612SBhavesh Davda 		gdesc->dword[2] = cpu_to_le32(dw2);
743d1a890faSShreyas Bhatewara 		gdesc->dword[3] = 0;
744d1a890faSShreyas Bhatewara 
745fdcd79b9SStephen Hemminger 		netdev_dbg(adapter->netdev,
746f6965582SRandy Dunlap 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
747115924b6SShreyas Bhatewara 			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
748115924b6SShreyas Bhatewara 			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
749d1a890faSShreyas Bhatewara 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
750d1a890faSShreyas Bhatewara 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
751d1a890faSShreyas Bhatewara 
752d1a890faSShreyas Bhatewara 		len -= buf_size;
753d1a890faSShreyas Bhatewara 		buf_offset += buf_size;
754d1a890faSShreyas Bhatewara 	}
755d1a890faSShreyas Bhatewara 
756d1a890faSShreyas Bhatewara 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
757d7840976SMatthew Wilcox (Oracle) 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
758a4d7e485SEric Dumazet 		u32 buf_size;
759d1a890faSShreyas Bhatewara 
760a4d7e485SEric Dumazet 		buf_offset = 0;
761a4d7e485SEric Dumazet 		len = skb_frag_size(frag);
762a4d7e485SEric Dumazet 		while (len) {
763d1a890faSShreyas Bhatewara 			tbi = tq->buf_info + tq->tx_ring.next2fill;
764a4d7e485SEric Dumazet 			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
765a4d7e485SEric Dumazet 				buf_size = len;
766a4d7e485SEric Dumazet 				dw2 |= len;
767a4d7e485SEric Dumazet 			} else {
768a4d7e485SEric Dumazet 				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
769a4d7e485SEric Dumazet 				/* spec says that for TxDesc.len, 0 == 2^14 */
770a4d7e485SEric Dumazet 			}
771d1a890faSShreyas Bhatewara 			tbi->map_type = VMXNET3_MAP_PAGE;
7720e0634d2SIan Campbell 			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
773a4d7e485SEric Dumazet 							 buf_offset, buf_size,
7745d6bcdfeSIan Campbell 							 DMA_TO_DEVICE);
7755738a09dSAlexey Khoroshilov 			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
7765738a09dSAlexey Khoroshilov 				return -EFAULT;
777d1a890faSShreyas Bhatewara 
778a4d7e485SEric Dumazet 			tbi->len = buf_size;
779d1a890faSShreyas Bhatewara 
780d1a890faSShreyas Bhatewara 			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
781d1a890faSShreyas Bhatewara 			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
782d1a890faSShreyas Bhatewara 
783115924b6SShreyas Bhatewara 			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
784a4d7e485SEric Dumazet 			gdesc->dword[2] = cpu_to_le32(dw2);
785d1a890faSShreyas Bhatewara 			gdesc->dword[3] = 0;
786d1a890faSShreyas Bhatewara 
787fdcd79b9SStephen Hemminger 			netdev_dbg(adapter->netdev,
7888b429468SHans Wennborg 				"txd[%u]: 0x%llx %u %u\n",
789115924b6SShreyas Bhatewara 				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
790115924b6SShreyas Bhatewara 				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
791d1a890faSShreyas Bhatewara 			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
792d1a890faSShreyas Bhatewara 			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
793a4d7e485SEric Dumazet 
794a4d7e485SEric Dumazet 			len -= buf_size;
795a4d7e485SEric Dumazet 			buf_offset += buf_size;
796a4d7e485SEric Dumazet 		}
797d1a890faSShreyas Bhatewara 	}
798d1a890faSShreyas Bhatewara 
799d1a890faSShreyas Bhatewara 	ctx->eop_txd = gdesc;
800d1a890faSShreyas Bhatewara 
801d1a890faSShreyas Bhatewara 	/* set the last buf_info for the pkt */
802d1a890faSShreyas Bhatewara 	tbi->skb = skb;
803d1a890faSShreyas Bhatewara 	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
8045738a09dSAlexey Khoroshilov 
8055738a09dSAlexey Khoroshilov 	return 0;
806d1a890faSShreyas Bhatewara }
807d1a890faSShreyas Bhatewara 
808d1a890faSShreyas Bhatewara 
80909c5088eSShreyas Bhatewara /* Init all tx queues */
81009c5088eSShreyas Bhatewara static void
81109c5088eSShreyas Bhatewara vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
81209c5088eSShreyas Bhatewara {
81309c5088eSShreyas Bhatewara 	int i;
81409c5088eSShreyas Bhatewara 
81509c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
81609c5088eSShreyas Bhatewara 		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
81709c5088eSShreyas Bhatewara }
81809c5088eSShreyas Bhatewara 
81909c5088eSShreyas Bhatewara 
820d1a890faSShreyas Bhatewara /*
821cec05562SNeil Horman  *    parse relevant protocol headers:
822d1a890faSShreyas Bhatewara  *      For a tso pkt, relevant headers are L2/3/4 including options
823d1a890faSShreyas Bhatewara  *      For a pkt requesting csum offloading, they are L2/3 and may include L4
824d1a890faSShreyas Bhatewara  *      if it's a TCP/UDP pkt
825d1a890faSShreyas Bhatewara  *
826d1a890faSShreyas Bhatewara  * Returns:
827d1a890faSShreyas Bhatewara  *    -1:  error happens during parsing
828d1a890faSShreyas Bhatewara  *     0:  protocol headers parsed, but too big to be copied
829d1a890faSShreyas Bhatewara  *     1:  protocol headers parsed and copied
830d1a890faSShreyas Bhatewara  *
831d1a890faSShreyas Bhatewara  * Other effects:
832d1a890faSShreyas Bhatewara  *    1. related *ctx fields are updated.
833d1a890faSShreyas Bhatewara  *    2. ctx->copy_size is # of bytes copied
834cec05562SNeil Horman  *    3. the portion to be copied is guaranteed to be in the linear part
835d1a890faSShreyas Bhatewara  *
836d1a890faSShreyas Bhatewara  */
837d1a890faSShreyas Bhatewara static int
838cec05562SNeil Horman vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
839d1a890faSShreyas Bhatewara 		  struct vmxnet3_tx_ctx *ctx,
840d1a890faSShreyas Bhatewara 		  struct vmxnet3_adapter *adapter)
841d1a890faSShreyas Bhatewara {
842759c9359SShrikrishna Khare 	u8 protocol = 0;
843d1a890faSShreyas Bhatewara 
8440d0b1672SMichał Mirosław 	if (ctx->mss) {	/* TSO */
845d1a890faSShreyas Bhatewara 		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
8468bca5d1eSEric Dumazet 		ctx->l4_hdr_size = tcp_hdrlen(skb);
847d1a890faSShreyas Bhatewara 		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
848d1a890faSShreyas Bhatewara 	} else {
849d1a890faSShreyas Bhatewara 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
8500d0b1672SMichał Mirosław 			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
851d1a890faSShreyas Bhatewara 
852d1a890faSShreyas Bhatewara 			if (ctx->ipv4) {
8538bca5d1eSEric Dumazet 				const struct iphdr *iph = ip_hdr(skb);
8548bca5d1eSEric Dumazet 
855759c9359SShrikrishna Khare 				protocol = iph->protocol;
856759c9359SShrikrishna Khare 			} else if (ctx->ipv6) {
857759c9359SShrikrishna Khare 				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
858759c9359SShrikrishna Khare 
859759c9359SShrikrishna Khare 				protocol = ipv6h->nexthdr;
860d1a890faSShreyas Bhatewara 			}
861759c9359SShrikrishna Khare 
862759c9359SShrikrishna Khare 			switch (protocol) {
863759c9359SShrikrishna Khare 			case IPPROTO_TCP:
864759c9359SShrikrishna Khare 				ctx->l4_hdr_size = tcp_hdrlen(skb);
865759c9359SShrikrishna Khare 				break;
866759c9359SShrikrishna Khare 			case IPPROTO_UDP:
867759c9359SShrikrishna Khare 				ctx->l4_hdr_size = sizeof(struct udphdr);
868759c9359SShrikrishna Khare 				break;
869759c9359SShrikrishna Khare 			default:
870759c9359SShrikrishna Khare 				ctx->l4_hdr_size = 0;
871759c9359SShrikrishna Khare 				break;
872759c9359SShrikrishna Khare 			}
873759c9359SShrikrishna Khare 
874b203262dSNeil Horman 			ctx->copy_size = min(ctx->eth_ip_hdr_size +
875b203262dSNeil Horman 					 ctx->l4_hdr_size, skb->len);
876d1a890faSShreyas Bhatewara 		} else {
877d1a890faSShreyas Bhatewara 			ctx->eth_ip_hdr_size = 0;
878d1a890faSShreyas Bhatewara 			ctx->l4_hdr_size = 0;
879d1a890faSShreyas Bhatewara 			/* copy as much as allowed */
8803c8b3efcSShrikrishna Khare 			ctx->copy_size = min_t(unsigned int,
8813c8b3efcSShrikrishna Khare 					       tq->txdata_desc_size,
8823c8b3efcSShrikrishna Khare 					       skb_headlen(skb));
883d1a890faSShreyas Bhatewara 		}
884d1a890faSShreyas Bhatewara 
885c41fcce9SShreyas Bhatewara 		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
886c41fcce9SShreyas Bhatewara 			ctx->copy_size = skb->len;
887c41fcce9SShreyas Bhatewara 
888d1a890faSShreyas Bhatewara 		/* make sure headers are accessible directly */
889d1a890faSShreyas Bhatewara 		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
890d1a890faSShreyas Bhatewara 			goto err;
891d1a890faSShreyas Bhatewara 	}
892d1a890faSShreyas Bhatewara 
8933c8b3efcSShrikrishna Khare 	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
894d1a890faSShreyas Bhatewara 		tq->stats.oversized_hdr++;
895d1a890faSShreyas Bhatewara 		ctx->copy_size = 0;
896d1a890faSShreyas Bhatewara 		return 0;
897d1a890faSShreyas Bhatewara 	}
898d1a890faSShreyas Bhatewara 
899cec05562SNeil Horman 	return 1;
900cec05562SNeil Horman err:
901cec05562SNeil Horman 	return -1;
902cec05562SNeil Horman }
903cec05562SNeil Horman 
904cec05562SNeil Horman /*
905cec05562SNeil Horman  *    copy relevant protocol headers to the transmit ring:
906cec05562SNeil Horman  *      For a tso pkt, relevant headers are L2/3/4 including options
907cec05562SNeil Horman  *      For a pkt requesting csum offloading, they are L2/3 and may include L4
908cec05562SNeil Horman  *      if it's a TCP/UDP pkt
909cec05562SNeil Horman  *
910cec05562SNeil Horman  *
911cec05562SNeil Horman  *    Note that this requires that vmxnet3_parse_hdr be called first to set the
912cec05562SNeil Horman  *      appropriate bits in ctx first
913cec05562SNeil Horman  */
914cec05562SNeil Horman static void
915cec05562SNeil Horman vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
916cec05562SNeil Horman 		 struct vmxnet3_tx_ctx *ctx,
917cec05562SNeil Horman 		 struct vmxnet3_adapter *adapter)
918cec05562SNeil Horman {
919cec05562SNeil Horman 	struct Vmxnet3_TxDataDesc *tdd;
920cec05562SNeil Horman 
921ff2e7d5dSShrikrishna Khare 	tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
922ff2e7d5dSShrikrishna Khare 					    tq->tx_ring.next2fill *
923ff2e7d5dSShrikrishna Khare 					    tq->txdata_desc_size);
924d1a890faSShreyas Bhatewara 
925d1a890faSShreyas Bhatewara 	memcpy(tdd->data, skb->data, ctx->copy_size);
926fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev,
927f6965582SRandy Dunlap 		"copy %u bytes to dataRing[%u]\n",
928d1a890faSShreyas Bhatewara 		ctx->copy_size, tq->tx_ring.next2fill);
929d1a890faSShreyas Bhatewara }
930d1a890faSShreyas Bhatewara 
931d1a890faSShreyas Bhatewara 
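/*
 * Prepare a TSO skb: clear the IPv4 header checksum and seed the TCP checksum
 * with the pseudo-header checksum so the device can complete it.
 */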
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else if (ctx->ipv6) {
		tcp_v6_gso_csum_prep(skb);
	}
}

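/*
 * Estimate the number of tx descriptors needed for the skb: enough for the
 * linear part and each page fragment, plus one for the header descriptor that
 * points into the tx data ring.
 */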
static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}

/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	int num_pkts;
	int tx_num_deferred;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				tq->stats.drop_oversized_hdr++;
				goto drop_pkt;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					tq->stats.drop_oversized_hdr++;
					goto drop_pkt;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);

	/* fill tx descs related to addr & len */
	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
		goto unlock_drop_pkt;

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
1077115924b6SShreyas Bhatewara 	gdesc->dword[3] = ctx.sop_txd->dword[3];
1078115924b6SShreyas Bhatewara #else
1079d1a890faSShreyas Bhatewara 	gdesc = ctx.sop_txd;
1080115924b6SShreyas Bhatewara #endif
10817a4c003dSRonak Doshi 	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1082d1a890faSShreyas Bhatewara 	if (ctx.mss) {
1083d1a890faSShreyas Bhatewara 		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1084d1a890faSShreyas Bhatewara 		gdesc->txd.om = VMXNET3_OM_TSO;
1085d1a890faSShreyas Bhatewara 		gdesc->txd.msscof = ctx.mss;
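		/* # of segments the device will cut this TSO pkt into:
		 * ceil(payload / mss), where the payload excludes the hlen
		 * bytes of headers replicated in every segment.
		 */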
10867a4c003dSRonak Doshi 		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1087d1a890faSShreyas Bhatewara 	} else {
1088d1a890faSShreyas Bhatewara 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1089d1a890faSShreyas Bhatewara 			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
1090d1a890faSShreyas Bhatewara 			gdesc->txd.om = VMXNET3_OM_CSUM;
1091d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
1092d1a890faSShreyas Bhatewara 					    skb->csum_offset;
1093d1a890faSShreyas Bhatewara 		} else {
1094d1a890faSShreyas Bhatewara 			gdesc->txd.om = 0;
1095d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = 0;
1096d1a890faSShreyas Bhatewara 		}
10977a4c003dSRonak Doshi 		num_pkts = 1;
1098d1a890faSShreyas Bhatewara 	}
10997a4c003dSRonak Doshi 	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
11007a4c003dSRonak Doshi 	tx_num_deferred += num_pkts;
1101d1a890faSShreyas Bhatewara 
1102df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
1103d1a890faSShreyas Bhatewara 		gdesc->txd.ti = 1;
1104df8a39deSJiri Pirko 		gdesc->txd.tci = skb_vlan_tag_get(skb);
1105d1a890faSShreyas Bhatewara 	}
1106d1a890faSShreyas Bhatewara 
1107f3002c13Shpreg@vmware.com 	/* Ensure that the write to (&gdesc->txd)->gen will be observed after
1108f3002c13Shpreg@vmware.com 	 * all other writes to &gdesc->txd.
1109f3002c13Shpreg@vmware.com 	 */
1110f3002c13Shpreg@vmware.com 	dma_wmb();
1111f3002c13Shpreg@vmware.com 
1112115924b6SShreyas Bhatewara 	/* finally flips the GEN bit of the SOP desc. */
1113115924b6SShreyas Bhatewara 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1114115924b6SShreyas Bhatewara 						  VMXNET3_TXD_GEN);
1115115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1116115924b6SShreyas Bhatewara 	/* Finished updating in bitfields of Tx Desc, so write them in original
1117115924b6SShreyas Bhatewara 	 * place.
1118115924b6SShreyas Bhatewara 	 */
1119115924b6SShreyas Bhatewara 	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1120115924b6SShreyas Bhatewara 			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1121115924b6SShreyas Bhatewara 	gdesc = ctx.sop_txd;
1122115924b6SShreyas Bhatewara #endif
1123fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev,
1124f6965582SRandy Dunlap 		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1125c2fd03a0SJoe Perches 		(u32)(ctx.sop_txd -
1126115924b6SShreyas Bhatewara 		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1127115924b6SShreyas Bhatewara 		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1128d1a890faSShreyas Bhatewara 
1129d1a890faSShreyas Bhatewara 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1130d1a890faSShreyas Bhatewara 
11317a4c003dSRonak Doshi 	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1132d1a890faSShreyas Bhatewara 		tq->shared->txNumDeferred = 0;
113309c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
113409c5088eSShreyas Bhatewara 				       VMXNET3_REG_TXPROD + tq->qid * 8,
1135d1a890faSShreyas Bhatewara 				       tq->tx_ring.next2fill);
1136d1a890faSShreyas Bhatewara 	}
1137d1a890faSShreyas Bhatewara 
1138d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1139d1a890faSShreyas Bhatewara 
1140f955e141SDan Carpenter unlock_drop_pkt:
1141f955e141SDan Carpenter 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1142d1a890faSShreyas Bhatewara drop_pkt:
1143d1a890faSShreyas Bhatewara 	tq->stats.drop_total++;
1144b1b71817SEric W. Biederman 	dev_kfree_skb_any(skb);
1145d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1146d1a890faSShreyas Bhatewara }
1147d1a890faSShreyas Bhatewara 
1148d1a890faSShreyas Bhatewara 
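/* ndo_start_xmit handler: selects the tx queue from skb->queue_mapping and
 * hands the skb to vmxnet3_tq_xmit().
 */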
1149d1a890faSShreyas Bhatewara static netdev_tx_t
1150d1a890faSShreyas Bhatewara vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1151d1a890faSShreyas Bhatewara {
1152d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1153d1a890faSShreyas Bhatewara 
115409c5088eSShreyas Bhatewara 	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
115509c5088eSShreyas Bhatewara 	return vmxnet3_tq_xmit(skb,
115609c5088eSShreyas Bhatewara 			       &adapter->tx_queue[skb->queue_mapping],
115709c5088eSShreyas Bhatewara 			       adapter, netdev);
1158d1a890faSShreyas Bhatewara }
1159d1a890faSShreyas Bhatewara 
1160d1a890faSShreyas Bhatewara 
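/* Translate the checksum bits of an rx completion descriptor into
 * skb->ip_summed. Only takes effect when RX checksum offload
 * (NETIF_F_RXCSUM) is enabled and the descriptor's cnc bit is clear.
 */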
1161d1a890faSShreyas Bhatewara static void
1162d1a890faSShreyas Bhatewara vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1163d1a890faSShreyas Bhatewara 		struct sk_buff *skb,
1164d1a890faSShreyas Bhatewara 		union Vmxnet3_GenericDesc *gdesc)
1165d1a890faSShreyas Bhatewara {
1166a0d2730cSMichał Mirosław 	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1167f0d43780SShrikrishna Khare 		if (gdesc->rcd.v4 &&
1168f0d43780SShrikrishna Khare 		    (le32_to_cpu(gdesc->dword[3]) &
1169f0d43780SShrikrishna Khare 		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1170d1a890faSShreyas Bhatewara 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1171d1a890faSShreyas Bhatewara 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1172f0d43780SShrikrishna Khare 			BUG_ON(gdesc->rcd.frg);
1173f0d43780SShrikrishna Khare 		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1174f0d43780SShrikrishna Khare 					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
1175f0d43780SShrikrishna Khare 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1176f0d43780SShrikrishna Khare 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1177d1a890faSShreyas Bhatewara 			BUG_ON(gdesc->rcd.frg);
1178d1a890faSShreyas Bhatewara 		} else {
1179d1a890faSShreyas Bhatewara 			if (gdesc->rcd.csum) {
1180d1a890faSShreyas Bhatewara 				skb->csum = htons(gdesc->rcd.csum);
1181d1a890faSShreyas Bhatewara 				skb->ip_summed = CHECKSUM_PARTIAL;
1182d1a890faSShreyas Bhatewara 			} else {
1183bc8acf2cSEric Dumazet 				skb_checksum_none_assert(skb);
1184d1a890faSShreyas Bhatewara 			}
1185d1a890faSShreyas Bhatewara 		}
1186d1a890faSShreyas Bhatewara 	} else {
1187bc8acf2cSEric Dumazet 		skb_checksum_none_assert(skb);
1188d1a890faSShreyas Bhatewara 	}
1189d1a890faSShreyas Bhatewara }
1190d1a890faSShreyas Bhatewara 
1191d1a890faSShreyas Bhatewara 
1192d1a890faSShreyas Bhatewara static void
1193d1a890faSShreyas Bhatewara vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1194d1a890faSShreyas Bhatewara 		 struct vmxnet3_rx_ctx *ctx,  struct vmxnet3_adapter *adapter)
1195d1a890faSShreyas Bhatewara {
1196d1a890faSShreyas Bhatewara 	rq->stats.drop_err++;
1197d1a890faSShreyas Bhatewara 	if (!rcd->fcs)
1198d1a890faSShreyas Bhatewara 		rq->stats.drop_fcs++;
1199d1a890faSShreyas Bhatewara 
1200d1a890faSShreyas Bhatewara 	rq->stats.drop_total++;
1201d1a890faSShreyas Bhatewara 
1202d1a890faSShreyas Bhatewara 	/*
1203d1a890faSShreyas Bhatewara 	 * We do not unmap and chain the rx buffer to the skb.
1204d1a890faSShreyas Bhatewara 	 * We basically pretend this buffer is not used and will be recycled
1205d1a890faSShreyas Bhatewara 	 * by vmxnet3_rq_alloc_rx_buf()
1206d1a890faSShreyas Bhatewara 	 */
1207d1a890faSShreyas Bhatewara 
1208d1a890faSShreyas Bhatewara 	/*
1209d1a890faSShreyas Bhatewara 	 * ctx->skb may be NULL if this is the first and the only one
1210d1a890faSShreyas Bhatewara 	 * desc for the pkt
1211d1a890faSShreyas Bhatewara 	 */
1212d1a890faSShreyas Bhatewara 	if (ctx->skb)
1213d1a890faSShreyas Bhatewara 		dev_kfree_skb_irq(ctx->skb);
1214d1a890faSShreyas Bhatewara 
1215d1a890faSShreyas Bhatewara 	ctx->skb = NULL;
1216d1a890faSShreyas Bhatewara }
1217d1a890faSShreyas Bhatewara 
1218d1a890faSShreyas Bhatewara 
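/* Estimate the total header length (L2 + L3 + TCP) of an LRO'd pkt from its
 * linear data so that gso_size can be derived. Returns 0 if the headers
 * cannot be parsed from the linear area or the pkt is not IP.
 */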
121945dac1d6SShreyas Bhatewara static u32
122045dac1d6SShreyas Bhatewara vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
122145dac1d6SShreyas Bhatewara 		    union Vmxnet3_GenericDesc *gdesc)
122245dac1d6SShreyas Bhatewara {
122345dac1d6SShreyas Bhatewara 	u32 hlen, maplen;
122445dac1d6SShreyas Bhatewara 	union {
122545dac1d6SShreyas Bhatewara 		void *ptr;
122645dac1d6SShreyas Bhatewara 		struct ethhdr *eth;
122765ec0bd1SRonak Doshi 		struct vlan_ethhdr *veth;
122845dac1d6SShreyas Bhatewara 		struct iphdr *ipv4;
122945dac1d6SShreyas Bhatewara 		struct ipv6hdr *ipv6;
123045dac1d6SShreyas Bhatewara 		struct tcphdr *tcp;
123145dac1d6SShreyas Bhatewara 	} hdr;
123245dac1d6SShreyas Bhatewara 	BUG_ON(gdesc->rcd.tcp == 0);
123345dac1d6SShreyas Bhatewara 
123445dac1d6SShreyas Bhatewara 	maplen = skb_headlen(skb);
123545dac1d6SShreyas Bhatewara 	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
123645dac1d6SShreyas Bhatewara 		return 0;
123745dac1d6SShreyas Bhatewara 
123865ec0bd1SRonak Doshi 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
123965ec0bd1SRonak Doshi 	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
124065ec0bd1SRonak Doshi 		hlen = sizeof(struct vlan_ethhdr);
124165ec0bd1SRonak Doshi 	else
124265ec0bd1SRonak Doshi 		hlen = sizeof(struct ethhdr);
124365ec0bd1SRonak Doshi 
124445dac1d6SShreyas Bhatewara 	hdr.eth = eth_hdr(skb);
124545dac1d6SShreyas Bhatewara 	if (gdesc->rcd.v4) {
124665ec0bd1SRonak Doshi 		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
124765ec0bd1SRonak Doshi 		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
124865ec0bd1SRonak Doshi 		hdr.ptr += hlen;
124945dac1d6SShreyas Bhatewara 		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
125045dac1d6SShreyas Bhatewara 		hlen = hdr.ipv4->ihl << 2;
125145dac1d6SShreyas Bhatewara 		hdr.ptr += hdr.ipv4->ihl << 2;
125245dac1d6SShreyas Bhatewara 	} else if (gdesc->rcd.v6) {
125365ec0bd1SRonak Doshi 		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
125465ec0bd1SRonak Doshi 		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
125565ec0bd1SRonak Doshi 		hdr.ptr += hlen;
125645dac1d6SShreyas Bhatewara 		/* Use an estimated value, since we also need to handle
125745dac1d6SShreyas Bhatewara 		 * the TSO case.
125845dac1d6SShreyas Bhatewara 		 */
125945dac1d6SShreyas Bhatewara 		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
126045dac1d6SShreyas Bhatewara 			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
126145dac1d6SShreyas Bhatewara 		hlen = sizeof(struct ipv6hdr);
126245dac1d6SShreyas Bhatewara 		hdr.ptr += sizeof(struct ipv6hdr);
126345dac1d6SShreyas Bhatewara 	} else {
126445dac1d6SShreyas Bhatewara 		/* Non-IP pkt, don't estimate header length */
126545dac1d6SShreyas Bhatewara 		return 0;
126645dac1d6SShreyas Bhatewara 	}
126745dac1d6SShreyas Bhatewara 
126845dac1d6SShreyas Bhatewara 	if (hlen + sizeof(struct tcphdr) > maplen)
126945dac1d6SShreyas Bhatewara 		return 0;
127045dac1d6SShreyas Bhatewara 
127145dac1d6SShreyas Bhatewara 	return (hlen + (hdr.tcp->doff << 2));
127245dac1d6SShreyas Bhatewara }
127345dac1d6SShreyas Bhatewara 
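/* Process up to @quota rx completion descriptors: chain the rx buffers into
 * skbs, refill the rx rings in place, restore LRO/gso metadata and hand
 * completed pkts to the stack. Returns the # of pkts processed.
 */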
1274d1a890faSShreyas Bhatewara static int
1275d1a890faSShreyas Bhatewara vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1276d1a890faSShreyas Bhatewara 		       struct vmxnet3_adapter *adapter, int quota)
1277d1a890faSShreyas Bhatewara {
1278215faf9cSJoe Perches 	static const u32 rxprod_reg[2] = {
1279215faf9cSJoe Perches 		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1280215faf9cSJoe Perches 	};
12810769636cSNeil Horman 	u32 num_pkts = 0;
12825318d809SShreyas Bhatewara 	bool skip_page_frags = false;
1283d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxCompDesc *rcd;
1284d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
128545dac1d6SShreyas Bhatewara 	u16 segCnt = 0, mss = 0;
1286115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1287115924b6SShreyas Bhatewara 	struct Vmxnet3_RxDesc rxCmdDesc;
1288115924b6SShreyas Bhatewara 	struct Vmxnet3_RxCompDesc rxComp;
1289115924b6SShreyas Bhatewara #endif
1290115924b6SShreyas Bhatewara 	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1291115924b6SShreyas Bhatewara 			  &rxComp);
1292d1a890faSShreyas Bhatewara 	while (rcd->gen == rq->comp_ring.gen) {
1293d1a890faSShreyas Bhatewara 		struct vmxnet3_rx_buf_info *rbi;
12945318d809SShreyas Bhatewara 		struct sk_buff *skb, *new_skb = NULL;
12955318d809SShreyas Bhatewara 		struct page *new_page = NULL;
12965738a09dSAlexey Khoroshilov 		dma_addr_t new_dma_addr;
1297d1a890faSShreyas Bhatewara 		int num_to_alloc;
1298d1a890faSShreyas Bhatewara 		struct Vmxnet3_RxDesc *rxd;
1299d1a890faSShreyas Bhatewara 		u32 idx, ring_idx;
13005318d809SShreyas Bhatewara 		struct vmxnet3_cmd_ring	*ring = NULL;
13010769636cSNeil Horman 		if (num_pkts >= quota) {
1302d1a890faSShreyas Bhatewara 			/* we may stop even before we see the EOP desc of
1303d1a890faSShreyas Bhatewara 			 * the current pkt
1304d1a890faSShreyas Bhatewara 			 */
1305d1a890faSShreyas Bhatewara 			break;
1306d1a890faSShreyas Bhatewara 		}
1307f3002c13Shpreg@vmware.com 
1308f3002c13Shpreg@vmware.com 		/* Prevent any rcd field from being (speculatively) read before
1309f3002c13Shpreg@vmware.com 		 * rcd->gen is read.
1310f3002c13Shpreg@vmware.com 		 */
1311f3002c13Shpreg@vmware.com 		dma_rmb();
1312f3002c13Shpreg@vmware.com 
131350a5ce3eSShrikrishna Khare 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
131450a5ce3eSShrikrishna Khare 		       rcd->rqID != rq->dataRingQid);
1315d1a890faSShreyas Bhatewara 		idx = rcd->rxdIdx;
131650a5ce3eSShrikrishna Khare 		ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
13175318d809SShreyas Bhatewara 		ring = rq->rx_ring + ring_idx;
1318115924b6SShreyas Bhatewara 		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1319115924b6SShreyas Bhatewara 				  &rxCmdDesc);
1320d1a890faSShreyas Bhatewara 		rbi = rq->buf_info[ring_idx] + idx;
1321d1a890faSShreyas Bhatewara 
1322115924b6SShreyas Bhatewara 		BUG_ON(rxd->addr != rbi->dma_addr ||
1323115924b6SShreyas Bhatewara 		       rxd->len != rbi->len);
1324d1a890faSShreyas Bhatewara 
1325d1a890faSShreyas Bhatewara 		if (unlikely(rcd->eop && rcd->err)) {
1326d1a890faSShreyas Bhatewara 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
1327d1a890faSShreyas Bhatewara 			goto rcd_done;
1328d1a890faSShreyas Bhatewara 		}
1329d1a890faSShreyas Bhatewara 
1330d1a890faSShreyas Bhatewara 		if (rcd->sop) { /* first buf of the pkt */
133150a5ce3eSShrikrishna Khare 			bool rxDataRingUsed;
133250a5ce3eSShrikrishna Khare 			u16 len;
133350a5ce3eSShrikrishna Khare 
1334d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
133550a5ce3eSShrikrishna Khare 			       (rcd->rqID != rq->qid &&
133650a5ce3eSShrikrishna Khare 				rcd->rqID != rq->dataRingQid));
1337d1a890faSShreyas Bhatewara 
1338d1a890faSShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1339d1a890faSShreyas Bhatewara 			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1340d1a890faSShreyas Bhatewara 
1341d1a890faSShreyas Bhatewara 			if (unlikely(rcd->len == 0)) {
1342d1a890faSShreyas Bhatewara 				/* Pretend the rx buffer is skipped. */
1343d1a890faSShreyas Bhatewara 				BUG_ON(!(rcd->sop && rcd->eop));
1344fdcd79b9SStephen Hemminger 				netdev_dbg(adapter->netdev,
1345f6965582SRandy Dunlap 					"rxRing[%u][%u] 0 length\n",
1346d1a890faSShreyas Bhatewara 					ring_idx, idx);
1347d1a890faSShreyas Bhatewara 				goto rcd_done;
1348d1a890faSShreyas Bhatewara 			}
1349d1a890faSShreyas Bhatewara 
13505318d809SShreyas Bhatewara 			skip_page_frags = false;
1351d1a890faSShreyas Bhatewara 			ctx->skb = rbi->skb;
135250a5ce3eSShrikrishna Khare 
135350a5ce3eSShrikrishna Khare 			rxDataRingUsed =
135450a5ce3eSShrikrishna Khare 				VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
135550a5ce3eSShrikrishna Khare 			len = rxDataRingUsed ? rcd->len : rbi->len;
13560d735f13SStephen Hemminger 			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
135750a5ce3eSShrikrishna Khare 							    len);
13585318d809SShreyas Bhatewara 			if (new_skb == NULL) {
13595318d809SShreyas Bhatewara 				/* Skb allocation failed, do not hand this skb
13605318d809SShreyas Bhatewara 				 * over to the stack. Reuse it. Drop the existing pkt.
13615318d809SShreyas Bhatewara 				 */
13625318d809SShreyas Bhatewara 				rq->stats.rx_buf_alloc_failure++;
13635318d809SShreyas Bhatewara 				ctx->skb = NULL;
13645318d809SShreyas Bhatewara 				rq->stats.drop_total++;
13655318d809SShreyas Bhatewara 				skip_page_frags = true;
13665318d809SShreyas Bhatewara 				goto rcd_done;
13675318d809SShreyas Bhatewara 			}
136850a5ce3eSShrikrishna Khare 
136950a5ce3eSShrikrishna Khare 			if (rxDataRingUsed) {
137050a5ce3eSShrikrishna Khare 				size_t sz;
137150a5ce3eSShrikrishna Khare 
137250a5ce3eSShrikrishna Khare 				BUG_ON(rcd->len > rq->data_ring.desc_size);
137350a5ce3eSShrikrishna Khare 
137450a5ce3eSShrikrishna Khare 				ctx->skb = new_skb;
137550a5ce3eSShrikrishna Khare 				sz = rcd->rxdIdx * rq->data_ring.desc_size;
137650a5ce3eSShrikrishna Khare 				memcpy(new_skb->data,
137750a5ce3eSShrikrishna Khare 				       &rq->data_ring.base[sz], rcd->len);
137850a5ce3eSShrikrishna Khare 			} else {
137950a5ce3eSShrikrishna Khare 				ctx->skb = rbi->skb;
138050a5ce3eSShrikrishna Khare 
138150a5ce3eSShrikrishna Khare 				new_dma_addr =
138250a5ce3eSShrikrishna Khare 					dma_map_single(&adapter->pdev->dev,
13835738a09dSAlexey Khoroshilov 						       new_skb->data, rbi->len,
13845738a09dSAlexey Khoroshilov 						       PCI_DMA_FROMDEVICE);
13855738a09dSAlexey Khoroshilov 				if (dma_mapping_error(&adapter->pdev->dev,
13865738a09dSAlexey Khoroshilov 						      new_dma_addr)) {
13875738a09dSAlexey Khoroshilov 					dev_kfree_skb(new_skb);
138850a5ce3eSShrikrishna Khare 					/* Skb allocation failed, do not hand
138950a5ce3eSShrikrishna Khare 					 * this skb over to the stack. Reuse
139050a5ce3eSShrikrishna Khare 					 * it. Drop the existing pkt.
13915738a09dSAlexey Khoroshilov 					 */
13925738a09dSAlexey Khoroshilov 					rq->stats.rx_buf_alloc_failure++;
13935738a09dSAlexey Khoroshilov 					ctx->skb = NULL;
13945738a09dSAlexey Khoroshilov 					rq->stats.drop_total++;
13955738a09dSAlexey Khoroshilov 					skip_page_frags = true;
13965738a09dSAlexey Khoroshilov 					goto rcd_done;
13975738a09dSAlexey Khoroshilov 				}
1398d1a890faSShreyas Bhatewara 
139950a5ce3eSShrikrishna Khare 				dma_unmap_single(&adapter->pdev->dev,
140050a5ce3eSShrikrishna Khare 						 rbi->dma_addr,
1401b0eb57cbSAndy King 						 rbi->len,
1402d1a890faSShreyas Bhatewara 						 PCI_DMA_FROMDEVICE);
1403d1a890faSShreyas Bhatewara 
140450a5ce3eSShrikrishna Khare 				/* Immediate refill */
140550a5ce3eSShrikrishna Khare 				rbi->skb = new_skb;
140650a5ce3eSShrikrishna Khare 				rbi->dma_addr = new_dma_addr;
140750a5ce3eSShrikrishna Khare 				rxd->addr = cpu_to_le64(rbi->dma_addr);
140850a5ce3eSShrikrishna Khare 				rxd->len = rbi->len;
140950a5ce3eSShrikrishna Khare 			}
141050a5ce3eSShrikrishna Khare 
14117db11f75SStephen Hemminger #ifdef VMXNET3_RSS
14127db11f75SStephen Hemminger 			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
14137db11f75SStephen Hemminger 			    (adapter->netdev->features & NETIF_F_RXHASH))
14142c15a154SMichal Schmidt 				skb_set_hash(ctx->skb,
14152c15a154SMichal Schmidt 					     le32_to_cpu(rcd->rssHash),
14160b680703STom Herbert 					     PKT_HASH_TYPE_L3);
14177db11f75SStephen Hemminger #endif
1418d1a890faSShreyas Bhatewara 			skb_put(ctx->skb, rcd->len);
14195318d809SShreyas Bhatewara 
1420190af10fSShrikrishna Khare 			if (VMXNET3_VERSION_GE_2(adapter) &&
142145dac1d6SShreyas Bhatewara 			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
142245dac1d6SShreyas Bhatewara 				struct Vmxnet3_RxCompDescExt *rcdlro;
142345dac1d6SShreyas Bhatewara 				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
14245318d809SShreyas Bhatewara 
142545dac1d6SShreyas Bhatewara 				segCnt = rcdlro->segCnt;
142650219538SShrikrishna Khare 				WARN_ON_ONCE(segCnt == 0);
142745dac1d6SShreyas Bhatewara 				mss = rcdlro->mss;
142845dac1d6SShreyas Bhatewara 				if (unlikely(segCnt <= 1))
142945dac1d6SShreyas Bhatewara 					segCnt = 0;
143045dac1d6SShreyas Bhatewara 			} else {
143145dac1d6SShreyas Bhatewara 				segCnt = 0;
143245dac1d6SShreyas Bhatewara 			}
1433d1a890faSShreyas Bhatewara 		} else {
14345318d809SShreyas Bhatewara 			BUG_ON(ctx->skb == NULL && !skip_page_frags);
14355318d809SShreyas Bhatewara 
1436d1a890faSShreyas Bhatewara 			/* non-SOP buffer must be type 1 in most cases */
14375318d809SShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1438d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1439d1a890faSShreyas Bhatewara 
14405318d809SShreyas Bhatewara 			/* If an sop buffer was dropped, skip all
14415318d809SShreyas Bhatewara 			 * following non-sop fragments. They will be reused.
14425318d809SShreyas Bhatewara 			 */
14435318d809SShreyas Bhatewara 			if (skip_page_frags)
14445318d809SShreyas Bhatewara 				goto rcd_done;
14455318d809SShreyas Bhatewara 
1446c41fcce9SShreyas Bhatewara 			if (rcd->len) {
14475318d809SShreyas Bhatewara 				new_page = alloc_page(GFP_ATOMIC);
14485318d809SShreyas Bhatewara 				/* Replacement page frag could not be allocated.
14495318d809SShreyas Bhatewara 				 * Reuse this page. Drop the pkt and free the
14505318d809SShreyas Bhatewara 				 * skb which contained this page as a frag. Skip
14515318d809SShreyas Bhatewara 				 * processing all the following non-sop frags.
14525318d809SShreyas Bhatewara 				 */
1453c41fcce9SShreyas Bhatewara 				if (unlikely(!new_page)) {
14545318d809SShreyas Bhatewara 					rq->stats.rx_buf_alloc_failure++;
14555318d809SShreyas Bhatewara 					dev_kfree_skb(ctx->skb);
14565318d809SShreyas Bhatewara 					ctx->skb = NULL;
14575318d809SShreyas Bhatewara 					skip_page_frags = true;
14585318d809SShreyas Bhatewara 					goto rcd_done;
14595318d809SShreyas Bhatewara 				}
146058caf637SShrikrishna Khare 				new_dma_addr = dma_map_page(&adapter->pdev->dev,
146158caf637SShrikrishna Khare 							    new_page,
14625738a09dSAlexey Khoroshilov 							    0, PAGE_SIZE,
14635738a09dSAlexey Khoroshilov 							    PCI_DMA_FROMDEVICE);
14645738a09dSAlexey Khoroshilov 				if (dma_mapping_error(&adapter->pdev->dev,
14655738a09dSAlexey Khoroshilov 						      new_dma_addr)) {
14665738a09dSAlexey Khoroshilov 					put_page(new_page);
14675738a09dSAlexey Khoroshilov 					rq->stats.rx_buf_alloc_failure++;
14685738a09dSAlexey Khoroshilov 					dev_kfree_skb(ctx->skb);
14695738a09dSAlexey Khoroshilov 					ctx->skb = NULL;
14705738a09dSAlexey Khoroshilov 					skip_page_frags = true;
14715738a09dSAlexey Khoroshilov 					goto rcd_done;
14725738a09dSAlexey Khoroshilov 				}
14735318d809SShreyas Bhatewara 
1474b0eb57cbSAndy King 				dma_unmap_page(&adapter->pdev->dev,
1475d1a890faSShreyas Bhatewara 					       rbi->dma_addr, rbi->len,
1476d1a890faSShreyas Bhatewara 					       PCI_DMA_FROMDEVICE);
1477d1a890faSShreyas Bhatewara 
1478d1a890faSShreyas Bhatewara 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
14795318d809SShreyas Bhatewara 
14805318d809SShreyas Bhatewara 				/* Immediate refill */
14815318d809SShreyas Bhatewara 				rbi->page = new_page;
14825738a09dSAlexey Khoroshilov 				rbi->dma_addr = new_dma_addr;
14835318d809SShreyas Bhatewara 				rxd->addr = cpu_to_le64(rbi->dma_addr);
14845318d809SShreyas Bhatewara 				rxd->len = rbi->len;
1485d1a890faSShreyas Bhatewara 			}
1486c41fcce9SShreyas Bhatewara 		}
14875318d809SShreyas Bhatewara 
1488d1a890faSShreyas Bhatewara 
1489d1a890faSShreyas Bhatewara 		skb = ctx->skb;
1490d1a890faSShreyas Bhatewara 		if (rcd->eop) {
149145dac1d6SShreyas Bhatewara 			u32 mtu = adapter->netdev->mtu;
1492d1a890faSShreyas Bhatewara 			skb->len += skb->data_len;
1493d1a890faSShreyas Bhatewara 
1494d1a890faSShreyas Bhatewara 			vmxnet3_rx_csum(adapter, skb,
1495d1a890faSShreyas Bhatewara 					(union Vmxnet3_GenericDesc *)rcd);
1496d1a890faSShreyas Bhatewara 			skb->protocol = eth_type_trans(skb, adapter->netdev);
1497034f4057SRonak Doshi 			if (!rcd->tcp ||
1498034f4057SRonak Doshi 			    !(adapter->netdev->features & NETIF_F_LRO))
149945dac1d6SShreyas Bhatewara 				goto not_lro;
1500d1a890faSShreyas Bhatewara 
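			/* LRO'd pkt: reconstruct gso_size/gso_segs so the
			 * stack can resegment it later if needed.
			 */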
150145dac1d6SShreyas Bhatewara 			if (segCnt != 0 && mss != 0) {
150245dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_type = rcd->v4 ?
150345dac1d6SShreyas Bhatewara 					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
150445dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_size = mss;
150545dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_segs = segCnt;
150645dac1d6SShreyas Bhatewara 			} else if (segCnt != 0 || skb->len > mtu) {
150745dac1d6SShreyas Bhatewara 				u32 hlen;
150845dac1d6SShreyas Bhatewara 
150945dac1d6SShreyas Bhatewara 				hlen = vmxnet3_get_hdr_len(adapter, skb,
151045dac1d6SShreyas Bhatewara 					(union Vmxnet3_GenericDesc *)rcd);
151145dac1d6SShreyas Bhatewara 				if (hlen == 0)
151245dac1d6SShreyas Bhatewara 					goto not_lro;
151345dac1d6SShreyas Bhatewara 
151445dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_type =
151545dac1d6SShreyas Bhatewara 					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
151645dac1d6SShreyas Bhatewara 				if (segCnt != 0) {
151745dac1d6SShreyas Bhatewara 					skb_shinfo(skb)->gso_segs = segCnt;
151845dac1d6SShreyas Bhatewara 					skb_shinfo(skb)->gso_size =
151945dac1d6SShreyas Bhatewara 						DIV_ROUND_UP(skb->len -
152045dac1d6SShreyas Bhatewara 							hlen, segCnt);
152145dac1d6SShreyas Bhatewara 				} else {
152245dac1d6SShreyas Bhatewara 					skb_shinfo(skb)->gso_size = mtu - hlen;
152345dac1d6SShreyas Bhatewara 				}
152445dac1d6SShreyas Bhatewara 			}
152545dac1d6SShreyas Bhatewara not_lro:
152672e85c45SJesse Gross 			if (unlikely(rcd->ts))
152786a9bad3SPatrick McHardy 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
152872e85c45SJesse Gross 
1529213ade8cSJesse Gross 			if (adapter->netdev->features & NETIF_F_LRO)
1530d1a890faSShreyas Bhatewara 				netif_receive_skb(skb);
1531213ade8cSJesse Gross 			else
1532213ade8cSJesse Gross 				napi_gro_receive(&rq->napi, skb);
1533d1a890faSShreyas Bhatewara 
1534d1a890faSShreyas Bhatewara 			ctx->skb = NULL;
15350769636cSNeil Horman 			num_pkts++;
1536d1a890faSShreyas Bhatewara 		}
1537d1a890faSShreyas Bhatewara 
1538d1a890faSShreyas Bhatewara rcd_done:
15395318d809SShreyas Bhatewara 		/* device may have skipped some rx descs */
15405318d809SShreyas Bhatewara 		ring->next2comp = idx;
15415318d809SShreyas Bhatewara 		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
15425318d809SShreyas Bhatewara 		ring = rq->rx_ring + ring_idx;
1543f3002c13Shpreg@vmware.com 
1544f3002c13Shpreg@vmware.com 		/* Ensure that the writes to rxd->gen bits will be observed
1545f3002c13Shpreg@vmware.com 		 * after all other writes to rxd objects.
1546f3002c13Shpreg@vmware.com 		 */
1547f3002c13Shpreg@vmware.com 		dma_wmb();
1548f3002c13Shpreg@vmware.com 
15495318d809SShreyas Bhatewara 		while (num_to_alloc) {
15505318d809SShreyas Bhatewara 			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
15515318d809SShreyas Bhatewara 					  &rxCmdDesc);
15525318d809SShreyas Bhatewara 			BUG_ON(!rxd->addr);
1553d1a890faSShreyas Bhatewara 
15545318d809SShreyas Bhatewara 			/* Recv desc is ready to be used by the device */
15555318d809SShreyas Bhatewara 			rxd->gen = ring->gen;
15565318d809SShreyas Bhatewara 			vmxnet3_cmd_ring_adv_next2fill(ring);
15575318d809SShreyas Bhatewara 			num_to_alloc--;
15585318d809SShreyas Bhatewara 		}
1559d1a890faSShreyas Bhatewara 
1560d1a890faSShreyas Bhatewara 		/* if needed, update the register */
1561d1a890faSShreyas Bhatewara 		if (unlikely(rq->shared->updateRxProd)) {
1562d1a890faSShreyas Bhatewara 			VMXNET3_WRITE_BAR0_REG(adapter,
1563d1a890faSShreyas Bhatewara 					       rxprod_reg[ring_idx] + rq->qid * 8,
15645318d809SShreyas Bhatewara 					       ring->next2fill);
1565d1a890faSShreyas Bhatewara 		}
1566d1a890faSShreyas Bhatewara 
1567d1a890faSShreyas Bhatewara 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1568115924b6SShreyas Bhatewara 		vmxnet3_getRxComp(rcd,
1569115924b6SShreyas Bhatewara 				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1570d1a890faSShreyas Bhatewara 	}
1571d1a890faSShreyas Bhatewara 
15720769636cSNeil Horman 	return num_pkts;
1573d1a890faSShreyas Bhatewara }
1574d1a890faSShreyas Bhatewara 
1575d1a890faSShreyas Bhatewara 
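/* Unmap and free any rx buffers still attached to the rings and reset the
 * rings' generation bits and fill/completion indices.
 */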
1576d1a890faSShreyas Bhatewara static void
1577d1a890faSShreyas Bhatewara vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1578d1a890faSShreyas Bhatewara 		   struct vmxnet3_adapter *adapter)
1579d1a890faSShreyas Bhatewara {
1580d1a890faSShreyas Bhatewara 	u32 i, ring_idx;
1581d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxDesc *rxd;
1582d1a890faSShreyas Bhatewara 
1583d1a890faSShreyas Bhatewara 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1584d1a890faSShreyas Bhatewara 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1585115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1586115924b6SShreyas Bhatewara 			struct Vmxnet3_RxDesc rxDesc;
1587115924b6SShreyas Bhatewara #endif
1588115924b6SShreyas Bhatewara 			vmxnet3_getRxDesc(rxd,
1589115924b6SShreyas Bhatewara 				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1590d1a890faSShreyas Bhatewara 
1591d1a890faSShreyas Bhatewara 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1592d1a890faSShreyas Bhatewara 					rq->buf_info[ring_idx][i].skb) {
1593b0eb57cbSAndy King 				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1594d1a890faSShreyas Bhatewara 						 rxd->len, PCI_DMA_FROMDEVICE);
1595d1a890faSShreyas Bhatewara 				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1596d1a890faSShreyas Bhatewara 				rq->buf_info[ring_idx][i].skb = NULL;
1597d1a890faSShreyas Bhatewara 			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1598d1a890faSShreyas Bhatewara 					rq->buf_info[ring_idx][i].page) {
1599b0eb57cbSAndy King 				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1600d1a890faSShreyas Bhatewara 					       rxd->len, PCI_DMA_FROMDEVICE);
1601d1a890faSShreyas Bhatewara 				put_page(rq->buf_info[ring_idx][i].page);
1602d1a890faSShreyas Bhatewara 				rq->buf_info[ring_idx][i].page = NULL;
1603d1a890faSShreyas Bhatewara 			}
1604d1a890faSShreyas Bhatewara 		}
1605d1a890faSShreyas Bhatewara 
1606d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1607d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].next2fill =
1608d1a890faSShreyas Bhatewara 					rq->rx_ring[ring_idx].next2comp = 0;
1609d1a890faSShreyas Bhatewara 	}
1610d1a890faSShreyas Bhatewara 
1611d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1612d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
1613d1a890faSShreyas Bhatewara }
1614d1a890faSShreyas Bhatewara 
1615d1a890faSShreyas Bhatewara 
161609c5088eSShreyas Bhatewara static void
161709c5088eSShreyas Bhatewara vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
161809c5088eSShreyas Bhatewara {
161909c5088eSShreyas Bhatewara 	int i;
162009c5088eSShreyas Bhatewara 
162109c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
162209c5088eSShreyas Bhatewara 		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
162309c5088eSShreyas Bhatewara }
162409c5088eSShreyas Bhatewara 
162509c5088eSShreyas Bhatewara 
1626280b74f7Sstephen hemminger static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1627d1a890faSShreyas Bhatewara 			       struct vmxnet3_adapter *adapter)
1628d1a890faSShreyas Bhatewara {
1629d1a890faSShreyas Bhatewara 	int i;
1630d1a890faSShreyas Bhatewara 	int j;
1631d1a890faSShreyas Bhatewara 
1632d1a890faSShreyas Bhatewara 	/* all rx buffers must have already been freed */
1633d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1634d1a890faSShreyas Bhatewara 		if (rq->buf_info[i]) {
1635d1a890faSShreyas Bhatewara 			for (j = 0; j < rq->rx_ring[i].size; j++)
1636d1a890faSShreyas Bhatewara 				BUG_ON(rq->buf_info[i][j].page != NULL);
1637d1a890faSShreyas Bhatewara 		}
1638d1a890faSShreyas Bhatewara 	}
1639d1a890faSShreyas Bhatewara 
1640d1a890faSShreyas Bhatewara 
1641d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1642d1a890faSShreyas Bhatewara 		if (rq->rx_ring[i].base) {
1643b0eb57cbSAndy King 			dma_free_coherent(&adapter->pdev->dev,
1644b0eb57cbSAndy King 					  rq->rx_ring[i].size
1645d1a890faSShreyas Bhatewara 					  * sizeof(struct Vmxnet3_RxDesc),
1646d1a890faSShreyas Bhatewara 					  rq->rx_ring[i].base,
1647d1a890faSShreyas Bhatewara 					  rq->rx_ring[i].basePA);
1648d1a890faSShreyas Bhatewara 			rq->rx_ring[i].base = NULL;
1649d1a890faSShreyas Bhatewara 		}
1650d1a890faSShreyas Bhatewara 	}
1651d1a890faSShreyas Bhatewara 
165250a5ce3eSShrikrishna Khare 	if (rq->data_ring.base) {
165350a5ce3eSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
165450a5ce3eSShrikrishna Khare 				  rq->rx_ring[0].size * rq->data_ring.desc_size,
165550a5ce3eSShrikrishna Khare 				  rq->data_ring.base, rq->data_ring.basePA);
165650a5ce3eSShrikrishna Khare 		rq->data_ring.base = NULL;
165750a5ce3eSShrikrishna Khare 	}
165850a5ce3eSShrikrishna Khare 
1659d1a890faSShreyas Bhatewara 	if (rq->comp_ring.base) {
1660b0eb57cbSAndy King 		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1661b0eb57cbSAndy King 				  * sizeof(struct Vmxnet3_RxCompDesc),
1662d1a890faSShreyas Bhatewara 				  rq->comp_ring.base, rq->comp_ring.basePA);
1663d1a890faSShreyas Bhatewara 		rq->comp_ring.base = NULL;
1664d1a890faSShreyas Bhatewara 	}
1665b0eb57cbSAndy King 
1666b0eb57cbSAndy King 	if (rq->buf_info[0]) {
1667b0eb57cbSAndy King 		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
1668b0eb57cbSAndy King 			(rq->rx_ring[0].size + rq->rx_ring[1].size);
1669b0eb57cbSAndy King 		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1670b0eb57cbSAndy King 				  rq->buf_info_pa);
1671848b1598SNeil Horman 		rq->buf_info[0] = rq->buf_info[1] = NULL;
1672b0eb57cbSAndy King 	}
1673d1a890faSShreyas Bhatewara }
1674d1a890faSShreyas Bhatewara 
1675bb40aca7SWei Yongjun static void
167650a5ce3eSShrikrishna Khare vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
167750a5ce3eSShrikrishna Khare {
167850a5ce3eSShrikrishna Khare 	int i;
167950a5ce3eSShrikrishna Khare 
168050a5ce3eSShrikrishna Khare 	for (i = 0; i < adapter->num_rx_queues; i++) {
168150a5ce3eSShrikrishna Khare 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
168250a5ce3eSShrikrishna Khare 
168350a5ce3eSShrikrishna Khare 		if (rq->data_ring.base) {
168450a5ce3eSShrikrishna Khare 			dma_free_coherent(&adapter->pdev->dev,
168550a5ce3eSShrikrishna Khare 					  (rq->rx_ring[0].size *
168650a5ce3eSShrikrishna Khare 					  rq->data_ring.desc_size),
168750a5ce3eSShrikrishna Khare 					  rq->data_ring.base,
168850a5ce3eSShrikrishna Khare 					  rq->data_ring.basePA);
168950a5ce3eSShrikrishna Khare 			rq->data_ring.base = NULL;
169050a5ce3eSShrikrishna Khare 			rq->data_ring.desc_size = 0;
169150a5ce3eSShrikrishna Khare 		}
169250a5ce3eSShrikrishna Khare 	}
169350a5ce3eSShrikrishna Khare }
1694d1a890faSShreyas Bhatewara 
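/* Set up buf_info for both rx rings (ring 0 uses an skb buffer for the first
 * slot of each pkt and page buffers for the rest, ring 1 uses pages only),
 * reset ring state and pre-allocate rx buffers.
 */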
1695d1a890faSShreyas Bhatewara static int
1696d1a890faSShreyas Bhatewara vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1697d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter  *adapter)
1698d1a890faSShreyas Bhatewara {
1699d1a890faSShreyas Bhatewara 	int i;
1700d1a890faSShreyas Bhatewara 
1701d1a890faSShreyas Bhatewara 	/* initialize buf_info */
1702d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[0].size; i++) {
1703d1a890faSShreyas Bhatewara 
1704d1a890faSShreyas Bhatewara 		/* 1st buf for a pkt is skbuff */
1705d1a890faSShreyas Bhatewara 		if (i % adapter->rx_buf_per_pkt == 0) {
1706d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1707d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = adapter->skb_buf_size;
1708d1a890faSShreyas Bhatewara 		} else { /* subsequent bufs for a pkt is frag */
1709d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1710d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = PAGE_SIZE;
1711d1a890faSShreyas Bhatewara 		}
1712d1a890faSShreyas Bhatewara 	}
1713d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[1].size; i++) {
1714d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1715d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].len = PAGE_SIZE;
1716d1a890faSShreyas Bhatewara 	}
1717d1a890faSShreyas Bhatewara 
1718d1a890faSShreyas Bhatewara 	/* reset internal state and allocate buffers for both rings */
1719d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1720d1a890faSShreyas Bhatewara 		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1721d1a890faSShreyas Bhatewara 
1722d1a890faSShreyas Bhatewara 		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1723d1a890faSShreyas Bhatewara 		       sizeof(struct Vmxnet3_RxDesc));
1724d1a890faSShreyas Bhatewara 		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1725d1a890faSShreyas Bhatewara 	}
1726d1a890faSShreyas Bhatewara 	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1727d1a890faSShreyas Bhatewara 				    adapter) == 0) {
1728d1a890faSShreyas Bhatewara 		/* need at least 1 rx buffer in the 1st ring */
1729d1a890faSShreyas Bhatewara 		return -ENOMEM;
1730d1a890faSShreyas Bhatewara 	}
1731d1a890faSShreyas Bhatewara 	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1732d1a890faSShreyas Bhatewara 
1733d1a890faSShreyas Bhatewara 	/* reset the comp ring */
1734d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
1735d1a890faSShreyas Bhatewara 	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1736d1a890faSShreyas Bhatewara 	       sizeof(struct Vmxnet3_RxCompDesc));
1737d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1738d1a890faSShreyas Bhatewara 
1739d1a890faSShreyas Bhatewara 	/* reset rxctx */
1740d1a890faSShreyas Bhatewara 	rq->rx_ctx.skb = NULL;
1741d1a890faSShreyas Bhatewara 
1742d1a890faSShreyas Bhatewara 	/* stats are not reset */
1743d1a890faSShreyas Bhatewara 	return 0;
1744d1a890faSShreyas Bhatewara }
1745d1a890faSShreyas Bhatewara 
1746d1a890faSShreyas Bhatewara 
1747d1a890faSShreyas Bhatewara static int
174809c5088eSShreyas Bhatewara vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
174909c5088eSShreyas Bhatewara {
175009c5088eSShreyas Bhatewara 	int i, err = 0;
175109c5088eSShreyas Bhatewara 
175209c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
175309c5088eSShreyas Bhatewara 		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
175409c5088eSShreyas Bhatewara 		if (unlikely(err)) {
175509c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev, "%s: failed to "
175609c5088eSShreyas Bhatewara 				"initialize rx queue%i\n",
175709c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
175809c5088eSShreyas Bhatewara 			break;
175909c5088eSShreyas Bhatewara 		}
176009c5088eSShreyas Bhatewara 	}
176109c5088eSShreyas Bhatewara 	return err;
176209c5088eSShreyas Bhatewara 
176309c5088eSShreyas Bhatewara }
176409c5088eSShreyas Bhatewara 
176509c5088eSShreyas Bhatewara 
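/* Allocate the DMA-coherent memory backing an rx queue: both descriptor
 * rings, the optional rx data ring, the completion ring and the buf_info
 * array.
 */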
176609c5088eSShreyas Bhatewara static int
1767d1a890faSShreyas Bhatewara vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1768d1a890faSShreyas Bhatewara {
1769d1a890faSShreyas Bhatewara 	int i;
1770d1a890faSShreyas Bhatewara 	size_t sz;
1771d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_buf_info *bi;
1772d1a890faSShreyas Bhatewara 
1773d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1774d1a890faSShreyas Bhatewara 
1775d1a890faSShreyas Bhatewara 		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1776b0eb57cbSAndy King 		rq->rx_ring[i].base = dma_alloc_coherent(
1777b0eb57cbSAndy King 						&adapter->pdev->dev, sz,
1778b0eb57cbSAndy King 						&rq->rx_ring[i].basePA,
1779b0eb57cbSAndy King 						GFP_KERNEL);
1780d1a890faSShreyas Bhatewara 		if (!rq->rx_ring[i].base) {
1781204a6e65SStephen Hemminger 			netdev_err(adapter->netdev,
1782204a6e65SStephen Hemminger 				   "failed to allocate rx ring %d\n", i);
1783d1a890faSShreyas Bhatewara 			goto err;
1784d1a890faSShreyas Bhatewara 		}
1785d1a890faSShreyas Bhatewara 	}
1786d1a890faSShreyas Bhatewara 
178750a5ce3eSShrikrishna Khare 	if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
178850a5ce3eSShrikrishna Khare 		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
178950a5ce3eSShrikrishna Khare 		rq->data_ring.base =
179050a5ce3eSShrikrishna Khare 			dma_alloc_coherent(&adapter->pdev->dev, sz,
179150a5ce3eSShrikrishna Khare 					   &rq->data_ring.basePA,
179250a5ce3eSShrikrishna Khare 					   GFP_KERNEL);
179350a5ce3eSShrikrishna Khare 		if (!rq->data_ring.base) {
179450a5ce3eSShrikrishna Khare 			netdev_err(adapter->netdev,
179550a5ce3eSShrikrishna Khare 				   "rx data ring will be disabled\n");
179650a5ce3eSShrikrishna Khare 			adapter->rxdataring_enabled = false;
179750a5ce3eSShrikrishna Khare 		}
179850a5ce3eSShrikrishna Khare 	} else {
179950a5ce3eSShrikrishna Khare 		rq->data_ring.base = NULL;
180050a5ce3eSShrikrishna Khare 		rq->data_ring.desc_size = 0;
180150a5ce3eSShrikrishna Khare 	}
180250a5ce3eSShrikrishna Khare 
1803d1a890faSShreyas Bhatewara 	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1804b0eb57cbSAndy King 	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1805b0eb57cbSAndy King 						&rq->comp_ring.basePA,
1806b0eb57cbSAndy King 						GFP_KERNEL);
1807d1a890faSShreyas Bhatewara 	if (!rq->comp_ring.base) {
1808204a6e65SStephen Hemminger 		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1809d1a890faSShreyas Bhatewara 		goto err;
1810d1a890faSShreyas Bhatewara 	}
1811d1a890faSShreyas Bhatewara 
1812d1a890faSShreyas Bhatewara 	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1813d1a890faSShreyas Bhatewara 						   rq->rx_ring[1].size);
1814750afb08SLuis Chamberlain 	bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1815b0eb57cbSAndy King 				GFP_KERNEL);
1816e404decbSJoe Perches 	if (!bi)
1817d1a890faSShreyas Bhatewara 		goto err;
1818e404decbSJoe Perches 
1819d1a890faSShreyas Bhatewara 	rq->buf_info[0] = bi;
1820d1a890faSShreyas Bhatewara 	rq->buf_info[1] = bi + rq->rx_ring[0].size;
1821d1a890faSShreyas Bhatewara 
1822d1a890faSShreyas Bhatewara 	return 0;
1823d1a890faSShreyas Bhatewara 
1824d1a890faSShreyas Bhatewara err:
1825d1a890faSShreyas Bhatewara 	vmxnet3_rq_destroy(rq, adapter);
1826d1a890faSShreyas Bhatewara 	return -ENOMEM;
1827d1a890faSShreyas Bhatewara }
1828d1a890faSShreyas Bhatewara 
1829d1a890faSShreyas Bhatewara 
1830d1a890faSShreyas Bhatewara static int
183109c5088eSShreyas Bhatewara vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
183209c5088eSShreyas Bhatewara {
183309c5088eSShreyas Bhatewara 	int i, err = 0;
183409c5088eSShreyas Bhatewara 
183550a5ce3eSShrikrishna Khare 	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
183650a5ce3eSShrikrishna Khare 
183709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
183809c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
183909c5088eSShreyas Bhatewara 		if (unlikely(err)) {
184009c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev,
184109c5088eSShreyas Bhatewara 				"%s: failed to create rx queue%i\n",
184209c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
184309c5088eSShreyas Bhatewara 			goto err_out;
184409c5088eSShreyas Bhatewara 		}
184509c5088eSShreyas Bhatewara 	}
184650a5ce3eSShrikrishna Khare 
184750a5ce3eSShrikrishna Khare 	if (!adapter->rxdataring_enabled)
184850a5ce3eSShrikrishna Khare 		vmxnet3_rq_destroy_all_rxdataring(adapter);
184950a5ce3eSShrikrishna Khare 
185009c5088eSShreyas Bhatewara 	return err;
185109c5088eSShreyas Bhatewara err_out:
185209c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
185309c5088eSShreyas Bhatewara 	return err;
185409c5088eSShreyas Bhatewara 
185509c5088eSShreyas Bhatewara }
185609c5088eSShreyas Bhatewara 
185709c5088eSShreyas Bhatewara /* Multiple queue aware polling function for tx and rx */
185809c5088eSShreyas Bhatewara 
185909c5088eSShreyas Bhatewara static int
1860d1a890faSShreyas Bhatewara vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1861d1a890faSShreyas Bhatewara {
186209c5088eSShreyas Bhatewara 	int rcd_done = 0, i;
1863d1a890faSShreyas Bhatewara 	if (unlikely(adapter->shared->ecr))
1864d1a890faSShreyas Bhatewara 		vmxnet3_process_events(adapter);
186509c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
186609c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1867d1a890faSShreyas Bhatewara 
186809c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
186909c5088eSShreyas Bhatewara 		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
187009c5088eSShreyas Bhatewara 						   adapter, budget);
187109c5088eSShreyas Bhatewara 	return rcd_done;
1872d1a890faSShreyas Bhatewara }
1873d1a890faSShreyas Bhatewara 
1874d1a890faSShreyas Bhatewara 
1875d1a890faSShreyas Bhatewara static int
1876d1a890faSShreyas Bhatewara vmxnet3_poll(struct napi_struct *napi, int budget)
1877d1a890faSShreyas Bhatewara {
187809c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
187909c5088eSShreyas Bhatewara 					  struct vmxnet3_rx_queue, napi);
1880d1a890faSShreyas Bhatewara 	int rxd_done;
1881d1a890faSShreyas Bhatewara 
188209c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1883d1a890faSShreyas Bhatewara 
1884d1a890faSShreyas Bhatewara 	if (rxd_done < budget) {
18856ad20165SEric Dumazet 		napi_complete_done(napi, rxd_done);
188609c5088eSShreyas Bhatewara 		vmxnet3_enable_all_intrs(rx_queue->adapter);
1887d1a890faSShreyas Bhatewara 	}
1888d1a890faSShreyas Bhatewara 	return rxd_done;
1889d1a890faSShreyas Bhatewara }
1890d1a890faSShreyas Bhatewara 
189109c5088eSShreyas Bhatewara /*
189209c5088eSShreyas Bhatewara  * NAPI polling function for MSI-X mode with multiple Rx queues
189309c5088eSShreyas Bhatewara  * Returns the # of NAPI credits consumed (# of rx descriptors processed)
189409c5088eSShreyas Bhatewara  */
189509c5088eSShreyas Bhatewara 
189609c5088eSShreyas Bhatewara static int
189709c5088eSShreyas Bhatewara vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
189809c5088eSShreyas Bhatewara {
189909c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = container_of(napi,
190009c5088eSShreyas Bhatewara 						struct vmxnet3_rx_queue, napi);
190109c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
190209c5088eSShreyas Bhatewara 	int rxd_done;
190309c5088eSShreyas Bhatewara 
190409c5088eSShreyas Bhatewara 	/* When sharing interrupt with corresponding tx queue, process
190509c5088eSShreyas Bhatewara 	 * tx completions in that queue as well
190609c5088eSShreyas Bhatewara 	 */
190709c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
190809c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue *tq =
190909c5088eSShreyas Bhatewara 				&adapter->tx_queue[rq - adapter->rx_queue];
191009c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
191109c5088eSShreyas Bhatewara 	}
191209c5088eSShreyas Bhatewara 
191309c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
191409c5088eSShreyas Bhatewara 
191509c5088eSShreyas Bhatewara 	if (rxd_done < budget) {
19166ad20165SEric Dumazet 		napi_complete_done(napi, rxd_done);
191709c5088eSShreyas Bhatewara 		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
191809c5088eSShreyas Bhatewara 	}
191909c5088eSShreyas Bhatewara 	return rxd_done;
192009c5088eSShreyas Bhatewara }
192109c5088eSShreyas Bhatewara 
192209c5088eSShreyas Bhatewara 
192309c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
192409c5088eSShreyas Bhatewara 
192509c5088eSShreyas Bhatewara /*
192609c5088eSShreyas Bhatewara  * Handle completion interrupts on tx queues
192709c5088eSShreyas Bhatewara  * Returns whether or not the intr is handled
192809c5088eSShreyas Bhatewara  */
192909c5088eSShreyas Bhatewara 
193009c5088eSShreyas Bhatewara static irqreturn_t
193109c5088eSShreyas Bhatewara vmxnet3_msix_tx(int irq, void *data)
193209c5088eSShreyas Bhatewara {
193309c5088eSShreyas Bhatewara 	struct vmxnet3_tx_queue *tq = data;
193409c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = tq->adapter;
193509c5088eSShreyas Bhatewara 
193609c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
193709c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
193809c5088eSShreyas Bhatewara 
193909c5088eSShreyas Bhatewara 	/* Handle the case where only one irq is allocated for all tx queues */
194009c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
194109c5088eSShreyas Bhatewara 		int i;
194209c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
194309c5088eSShreyas Bhatewara 			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
194409c5088eSShreyas Bhatewara 			vmxnet3_tq_tx_complete(txq, adapter);
194509c5088eSShreyas Bhatewara 		}
194609c5088eSShreyas Bhatewara 	} else {
194709c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
194809c5088eSShreyas Bhatewara 	}
194909c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
195009c5088eSShreyas Bhatewara 
195109c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
195209c5088eSShreyas Bhatewara }
195309c5088eSShreyas Bhatewara 
195409c5088eSShreyas Bhatewara 
195509c5088eSShreyas Bhatewara /*
195609c5088eSShreyas Bhatewara  * Handle completion interrupts on rx queues. Returns whether or not the
195709c5088eSShreyas Bhatewara  * intr is handled
195809c5088eSShreyas Bhatewara  */
195909c5088eSShreyas Bhatewara 
196009c5088eSShreyas Bhatewara static irqreturn_t
196109c5088eSShreyas Bhatewara vmxnet3_msix_rx(int irq, void *data)
196209c5088eSShreyas Bhatewara {
196309c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = data;
196409c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
196509c5088eSShreyas Bhatewara 
196609c5088eSShreyas Bhatewara 	/* disable intr if needed */
196709c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
196809c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
196909c5088eSShreyas Bhatewara 	napi_schedule(&rq->napi);
197009c5088eSShreyas Bhatewara 
197109c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
197209c5088eSShreyas Bhatewara }
197309c5088eSShreyas Bhatewara 
197409c5088eSShreyas Bhatewara /*
197509c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
197609c5088eSShreyas Bhatewara  *
197709c5088eSShreyas Bhatewara  * vmxnet3_msix_event --
197809c5088eSShreyas Bhatewara  *
197909c5088eSShreyas Bhatewara  *    vmxnet3 msix event intr handler
198009c5088eSShreyas Bhatewara  *
198109c5088eSShreyas Bhatewara  * Result:
198209c5088eSShreyas Bhatewara  *    whether or not the intr is handled
198309c5088eSShreyas Bhatewara  *
198409c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
198509c5088eSShreyas Bhatewara  */
198609c5088eSShreyas Bhatewara 
198709c5088eSShreyas Bhatewara static irqreturn_t
198809c5088eSShreyas Bhatewara vmxnet3_msix_event(int irq, void *data)
198909c5088eSShreyas Bhatewara {
199009c5088eSShreyas Bhatewara 	struct net_device *dev = data;
199109c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
199209c5088eSShreyas Bhatewara 
199309c5088eSShreyas Bhatewara 	/* disable intr if needed */
199409c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
199509c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
199609c5088eSShreyas Bhatewara 
199709c5088eSShreyas Bhatewara 	if (adapter->shared->ecr)
199809c5088eSShreyas Bhatewara 		vmxnet3_process_events(adapter);
199909c5088eSShreyas Bhatewara 
200009c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
200109c5088eSShreyas Bhatewara 
200209c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
200309c5088eSShreyas Bhatewara }
200409c5088eSShreyas Bhatewara 
200509c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI  */
200609c5088eSShreyas Bhatewara 
2007d1a890faSShreyas Bhatewara 
2008d1a890faSShreyas Bhatewara /* Interrupt handler for vmxnet3  */
2009d1a890faSShreyas Bhatewara static irqreturn_t
2010d1a890faSShreyas Bhatewara vmxnet3_intr(int irq, void *dev_id)
2011d1a890faSShreyas Bhatewara {
2012d1a890faSShreyas Bhatewara 	struct net_device *dev = dev_id;
2013d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
2014d1a890faSShreyas Bhatewara 
201509c5088eSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_INTX) {
2016d1a890faSShreyas Bhatewara 		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2017d1a890faSShreyas Bhatewara 		if (unlikely(icr == 0))
2018d1a890faSShreyas Bhatewara 			/* not ours */
2019d1a890faSShreyas Bhatewara 			return IRQ_NONE;
2020d1a890faSShreyas Bhatewara 	}
2021d1a890faSShreyas Bhatewara 
2022d1a890faSShreyas Bhatewara 
2023d1a890faSShreyas Bhatewara 	/* disable intr if needed */
2024d1a890faSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
202509c5088eSShreyas Bhatewara 		vmxnet3_disable_all_intrs(adapter);
2026d1a890faSShreyas Bhatewara 
202709c5088eSShreyas Bhatewara 	napi_schedule(&adapter->rx_queue[0].napi);
2028d1a890faSShreyas Bhatewara 
2029d1a890faSShreyas Bhatewara 	return IRQ_HANDLED;
2030d1a890faSShreyas Bhatewara }
2031d1a890faSShreyas Bhatewara 
2032d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
2033d1a890faSShreyas Bhatewara 
2034d1a890faSShreyas Bhatewara /* netpoll callback. */
2035d1a890faSShreyas Bhatewara static void
2036d1a890faSShreyas Bhatewara vmxnet3_netpoll(struct net_device *netdev)
2037d1a890faSShreyas Bhatewara {
2038d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2039d1a890faSShreyas Bhatewara 
2040d25f06eaSNeil Horman 	switch (adapter->intr.type) {
20410a8d8c44SArnd Bergmann #ifdef CONFIG_PCI_MSI
20420a8d8c44SArnd Bergmann 	case VMXNET3_IT_MSIX: {
20430a8d8c44SArnd Bergmann 		int i;
2044d25f06eaSNeil Horman 		for (i = 0; i < adapter->num_rx_queues; i++)
2045d25f06eaSNeil Horman 			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2046d25f06eaSNeil Horman 		break;
20470a8d8c44SArnd Bergmann 	}
20480a8d8c44SArnd Bergmann #endif
2049d25f06eaSNeil Horman 	case VMXNET3_IT_MSI:
2050d25f06eaSNeil Horman 	default:
2051d25f06eaSNeil Horman 		vmxnet3_intr(0, adapter->netdev);
2052d25f06eaSNeil Horman 		break;
2053d25f06eaSNeil Horman 	}
205409c5088eSShreyas Bhatewara 
2055d1a890faSShreyas Bhatewara }
205609c5088eSShreyas Bhatewara #endif	/* CONFIG_NET_POLL_CONTROLLER */
2057d1a890faSShreyas Bhatewara 
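/*
 * Request one irq per interrupt mode: per-queue MSI-X vectors (optionally
 * shared between tx and rx, or across all tx queues), a single MSI vector,
 * or a shared legacy INTx line. On success, per-queue completion-ring
 * interrupt indices and adaptive moderation levels are initialized.
 */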
2058d1a890faSShreyas Bhatewara static int
2059d1a890faSShreyas Bhatewara vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2060d1a890faSShreyas Bhatewara {
206109c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
206209c5088eSShreyas Bhatewara 	int err = 0, i;
206309c5088eSShreyas Bhatewara 	int vector = 0;
2064d1a890faSShreyas Bhatewara 
20658f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
2066d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
206709c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
206809c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
206909c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
207009c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
207109c5088eSShreyas Bhatewara 				err = request_irq(
207209c5088eSShreyas Bhatewara 					      intr->msix_entries[vector].vector,
207309c5088eSShreyas Bhatewara 					      vmxnet3_msix_tx, 0,
207409c5088eSShreyas Bhatewara 					      adapter->tx_queue[i].name,
207509c5088eSShreyas Bhatewara 					      &adapter->tx_queue[i]);
207609c5088eSShreyas Bhatewara 			} else {
207709c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
207809c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
207909c5088eSShreyas Bhatewara 			}
208009c5088eSShreyas Bhatewara 			if (err) {
208109c5088eSShreyas Bhatewara 				dev_err(&adapter->netdev->dev,
208209c5088eSShreyas Bhatewara 					"Failed to request irq for MSIX, %s, "
208309c5088eSShreyas Bhatewara 					"error %d\n",
208409c5088eSShreyas Bhatewara 					adapter->tx_queue[i].name, err);
208509c5088eSShreyas Bhatewara 				return err;
208609c5088eSShreyas Bhatewara 			}
208709c5088eSShreyas Bhatewara 
208809c5088eSShreyas Bhatewara 			/* Handle the case where only 1 MSI-X vector was
208909c5088eSShreyas Bhatewara 			 * allocated for all tx queues */
209009c5088eSShreyas Bhatewara 			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
209109c5088eSShreyas Bhatewara 				for (; i < adapter->num_tx_queues; i++)
209209c5088eSShreyas Bhatewara 					adapter->tx_queue[i].comp_ring.intr_idx
209309c5088eSShreyas Bhatewara 								= vector;
209409c5088eSShreyas Bhatewara 				vector++;
209509c5088eSShreyas Bhatewara 				break;
209609c5088eSShreyas Bhatewara 			} else {
209709c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx
209809c5088eSShreyas Bhatewara 								= vector++;
209909c5088eSShreyas Bhatewara 			}
210009c5088eSShreyas Bhatewara 		}
210109c5088eSShreyas Bhatewara 		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
210209c5088eSShreyas Bhatewara 			vector = 0;
210309c5088eSShreyas Bhatewara 
210409c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
210509c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
210609c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
210709c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
210809c5088eSShreyas Bhatewara 			else
210909c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
211009c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
211109c5088eSShreyas Bhatewara 			err = request_irq(intr->msix_entries[vector].vector,
211209c5088eSShreyas Bhatewara 					  vmxnet3_msix_rx, 0,
211309c5088eSShreyas Bhatewara 					  adapter->rx_queue[i].name,
211409c5088eSShreyas Bhatewara 					  &(adapter->rx_queue[i]));
211509c5088eSShreyas Bhatewara 			if (err) {
2116204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
2117204a6e65SStephen Hemminger 					   "Failed to request irq for MSIX, "
2118204a6e65SStephen Hemminger 					   "%s, error %d\n",
211909c5088eSShreyas Bhatewara 					   adapter->rx_queue[i].name, err);
212009c5088eSShreyas Bhatewara 				return err;
212109c5088eSShreyas Bhatewara 			}
212209c5088eSShreyas Bhatewara 
212309c5088eSShreyas Bhatewara 			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
212409c5088eSShreyas Bhatewara 		}
212509c5088eSShreyas Bhatewara 
212609c5088eSShreyas Bhatewara 		sprintf(intr->event_msi_vector_name, "%s-event-%d",
212709c5088eSShreyas Bhatewara 			adapter->netdev->name, vector);
212809c5088eSShreyas Bhatewara 		err = request_irq(intr->msix_entries[vector].vector,
212909c5088eSShreyas Bhatewara 				  vmxnet3_msix_event, 0,
213009c5088eSShreyas Bhatewara 				  intr->event_msi_vector_name, adapter->netdev);
213109c5088eSShreyas Bhatewara 		intr->event_intr_idx = vector;
213209c5088eSShreyas Bhatewara 
213309c5088eSShreyas Bhatewara 	} else if (intr->type == VMXNET3_IT_MSI) {
213409c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
2135d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2136d1a890faSShreyas Bhatewara 				  adapter->netdev->name, adapter->netdev);
213709c5088eSShreyas Bhatewara 	} else {
2138115924b6SShreyas Bhatewara #endif
213909c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
2140d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2141d1a890faSShreyas Bhatewara 				  IRQF_SHARED, adapter->netdev->name,
2142d1a890faSShreyas Bhatewara 				  adapter->netdev);
214309c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
214409c5088eSShreyas Bhatewara 	}
214509c5088eSShreyas Bhatewara #endif
214609c5088eSShreyas Bhatewara 	intr->num_intrs = vector + 1;
214709c5088eSShreyas Bhatewara 	if (err) {
2148204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2149204a6e65SStephen Hemminger 			   "Failed to request irq (intr type:%d), error %d\n",
2150204a6e65SStephen Hemminger 			   intr->type, err);
215109c5088eSShreyas Bhatewara 	} else {
215209c5088eSShreyas Bhatewara 		/* Number of rx queues will not change after this */
215309c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
215409c5088eSShreyas Bhatewara 			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
215509c5088eSShreyas Bhatewara 			rq->qid = i;
215609c5088eSShreyas Bhatewara 			rq->qid2 = i + adapter->num_rx_queues;
215750a5ce3eSShrikrishna Khare 			rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2158d1a890faSShreyas Bhatewara 		}
2159d1a890faSShreyas Bhatewara 
2160d1a890faSShreyas Bhatewara 		/* init our intr settings */
216109c5088eSShreyas Bhatewara 		for (i = 0; i < intr->num_intrs; i++)
216209c5088eSShreyas Bhatewara 			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
216309c5088eSShreyas Bhatewara 		if (adapter->intr.type != VMXNET3_IT_MSIX) {
2164d1a890faSShreyas Bhatewara 			adapter->intr.event_intr_idx = 0;
216509c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++)
216609c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx = 0;
216709c5088eSShreyas Bhatewara 			adapter->rx_queue[0].comp_ring.intr_idx = 0;
216809c5088eSShreyas Bhatewara 		}
2169d1a890faSShreyas Bhatewara 
2170204a6e65SStephen Hemminger 		netdev_info(adapter->netdev,
2171204a6e65SStephen Hemminger 			    "intr type %u, mode %u, %u vectors allocated\n",
2172204a6e65SStephen Hemminger 			    intr->type, intr->mask_mode, intr->num_intrs);
2173d1a890faSShreyas Bhatewara 	}
2174d1a890faSShreyas Bhatewara 
2175d1a890faSShreyas Bhatewara 	return err;
2176d1a890faSShreyas Bhatewara }
2177d1a890faSShreyas Bhatewara 
2178d1a890faSShreyas Bhatewara 
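/*
 * Free the irqs acquired in vmxnet3_request_irqs(), walking the same
 * per-queue MSI-X vector layout or releasing the single MSI/INTx line.
 */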
2179d1a890faSShreyas Bhatewara static void
2180d1a890faSShreyas Bhatewara vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2181d1a890faSShreyas Bhatewara {
218209c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
218309c5088eSShreyas Bhatewara 	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2184d1a890faSShreyas Bhatewara 
218509c5088eSShreyas Bhatewara 	switch (intr->type) {
21868f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
2187d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSIX:
2188d1a890faSShreyas Bhatewara 	{
218909c5088eSShreyas Bhatewara 		int i, vector = 0;
2190d1a890faSShreyas Bhatewara 
219109c5088eSShreyas Bhatewara 		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
219209c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++) {
219309c5088eSShreyas Bhatewara 				free_irq(intr->msix_entries[vector++].vector,
219409c5088eSShreyas Bhatewara 					 &(adapter->tx_queue[i]));
219509c5088eSShreyas Bhatewara 				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
219609c5088eSShreyas Bhatewara 					break;
219709c5088eSShreyas Bhatewara 			}
219809c5088eSShreyas Bhatewara 		}
219909c5088eSShreyas Bhatewara 
220009c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
220109c5088eSShreyas Bhatewara 			free_irq(intr->msix_entries[vector++].vector,
220209c5088eSShreyas Bhatewara 				 &(adapter->rx_queue[i]));
220309c5088eSShreyas Bhatewara 		}
220409c5088eSShreyas Bhatewara 
220509c5088eSShreyas Bhatewara 		free_irq(intr->msix_entries[vector].vector,
2206d1a890faSShreyas Bhatewara 			 adapter->netdev);
220709c5088eSShreyas Bhatewara 		BUG_ON(vector >= intr->num_intrs);
2208d1a890faSShreyas Bhatewara 		break;
2209d1a890faSShreyas Bhatewara 	}
22108f7e524cSRandy Dunlap #endif
2211d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSI:
2212d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
2213d1a890faSShreyas Bhatewara 		break;
2214d1a890faSShreyas Bhatewara 	case VMXNET3_IT_INTX:
2215d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
2216d1a890faSShreyas Bhatewara 		break;
2217d1a890faSShreyas Bhatewara 	default:
2218c068e777SSasha Levin 		BUG();
2219d1a890faSShreyas Bhatewara 	}
2220d1a890faSShreyas Bhatewara }
2221d1a890faSShreyas Bhatewara 
2222d1a890faSShreyas Bhatewara 
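/*
 * Rebuild the VLAN filter table from the active_vlans bitmap. Entry 0 is
 * always set so that untagged packets are accepted.
 */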
2223d1a890faSShreyas Bhatewara static void
2224d1a890faSShreyas Bhatewara vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2225d1a890faSShreyas Bhatewara {
2226d1a890faSShreyas Bhatewara 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
222772e85c45SJesse Gross 	u16 vid;
2228d1a890faSShreyas Bhatewara 
222972e85c45SJesse Gross 	/* allow untagged pkts */
2230d1a890faSShreyas Bhatewara 	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
223172e85c45SJesse Gross 
223272e85c45SJesse Gross 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
223372e85c45SJesse Gross 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2234d1a890faSShreyas Bhatewara }
2235d1a890faSShreyas Bhatewara 
2236d1a890faSShreyas Bhatewara 
22378e586137SJiri Pirko static int
223880d5c368SPatrick McHardy vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2239d1a890faSShreyas Bhatewara {
2240d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2241f6957f88SJesse Gross 
2242f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
2243d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
224483d0feffSShreyas Bhatewara 		unsigned long flags;
2245d1a890faSShreyas Bhatewara 
2246d1a890faSShreyas Bhatewara 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
224783d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
2248d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2249d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
225083d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2251f6957f88SJesse Gross 	}
225272e85c45SJesse Gross 
225372e85c45SJesse Gross 	set_bit(vid, adapter->active_vlans);
22548e586137SJiri Pirko 
22558e586137SJiri Pirko 	return 0;
2256d1a890faSShreyas Bhatewara }
2257d1a890faSShreyas Bhatewara 
2258d1a890faSShreyas Bhatewara 
22598e586137SJiri Pirko static int
226080d5c368SPatrick McHardy vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2261d1a890faSShreyas Bhatewara {
2262d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2263f6957f88SJesse Gross 
2264f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
2265d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
226683d0feffSShreyas Bhatewara 		unsigned long flags;
2267d1a890faSShreyas Bhatewara 
2268d1a890faSShreyas Bhatewara 		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
226983d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
2270d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2271d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
227283d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2273f6957f88SJesse Gross 	}
227472e85c45SJesse Gross 
227572e85c45SJesse Gross 	clear_bit(vid, adapter->active_vlans);
22768e586137SJiri Pirko 
22778e586137SJiri Pirko 	return 0;
2278d1a890faSShreyas Bhatewara }
2279d1a890faSShreyas Bhatewara 
2280d1a890faSShreyas Bhatewara 
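/*
 * Copy the netdev multicast list into a flat array of MAC addresses.
 * Returns NULL if the table would not fit in the 16-bit mfTableLen field
 * or if the allocation fails; the caller must kfree the buffer.
 */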
2281d1a890faSShreyas Bhatewara static u8 *
2282d1a890faSShreyas Bhatewara vmxnet3_copy_mc(struct net_device *netdev)
2283d1a890faSShreyas Bhatewara {
2284d1a890faSShreyas Bhatewara 	u8 *buf = NULL;
22854cd24eafSJiri Pirko 	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2286d1a890faSShreyas Bhatewara 
2287d1a890faSShreyas Bhatewara 	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2288d1a890faSShreyas Bhatewara 	if (sz <= 0xffff) {
2289d1a890faSShreyas Bhatewara 		/* We may be called with BH disabled */
2290d1a890faSShreyas Bhatewara 		buf = kmalloc(sz, GFP_ATOMIC);
2291d1a890faSShreyas Bhatewara 		if (buf) {
229222bedad3SJiri Pirko 			struct netdev_hw_addr *ha;
2293567ec874SJiri Pirko 			int i = 0;
2294d1a890faSShreyas Bhatewara 
229522bedad3SJiri Pirko 			netdev_for_each_mc_addr(ha, netdev)
229622bedad3SJiri Pirko 				memcpy(buf + i++ * ETH_ALEN, ha->addr,
2297d1a890faSShreyas Bhatewara 				       ETH_ALEN);
2298d1a890faSShreyas Bhatewara 		}
2299d1a890faSShreyas Bhatewara 	}
2300d1a890faSShreyas Bhatewara 	return buf;
2301d1a890faSShreyas Bhatewara }
2302d1a890faSShreyas Bhatewara 
2303d1a890faSShreyas Bhatewara 
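/*
 * Program the device rx mode from the netdev flags and, when a multicast
 * list is present, DMA-map a freshly built multicast table for the device.
 * Falls back to ALL_MULTI if the table cannot be copied or mapped.
 */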
2304d1a890faSShreyas Bhatewara static void
2305d1a890faSShreyas Bhatewara vmxnet3_set_mc(struct net_device *netdev)
2306d1a890faSShreyas Bhatewara {
2307d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
230883d0feffSShreyas Bhatewara 	unsigned long flags;
2309d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxFilterConf *rxConf =
2310d1a890faSShreyas Bhatewara 					&adapter->shared->devRead.rxFilterConf;
2311d1a890faSShreyas Bhatewara 	u8 *new_table = NULL;
2312b0eb57cbSAndy King 	dma_addr_t new_table_pa = 0;
2313fb5c6cfaSAlexey Khoroshilov 	bool new_table_pa_valid = false;
2314d1a890faSShreyas Bhatewara 	u32 new_mode = VMXNET3_RXM_UCAST;
2315d1a890faSShreyas Bhatewara 
231672e85c45SJesse Gross 	if (netdev->flags & IFF_PROMISC) {
231772e85c45SJesse Gross 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
231872e85c45SJesse Gross 		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
231972e85c45SJesse Gross 
2320d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_PROMISC;
232172e85c45SJesse Gross 	} else {
232272e85c45SJesse Gross 		vmxnet3_restore_vlan(adapter);
232372e85c45SJesse Gross 	}
2324d1a890faSShreyas Bhatewara 
2325d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_BROADCAST)
2326d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_BCAST;
2327d1a890faSShreyas Bhatewara 
2328d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_ALLMULTI)
2329d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_ALL_MULTI;
2330d1a890faSShreyas Bhatewara 	else
23314cd24eafSJiri Pirko 		if (!netdev_mc_empty(netdev)) {
2332d1a890faSShreyas Bhatewara 			new_table = vmxnet3_copy_mc(netdev);
2333d1a890faSShreyas Bhatewara 			if (new_table) {
2334d37d5ec8SShrikrishna Khare 				size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2335d37d5ec8SShrikrishna Khare 
2336d37d5ec8SShrikrishna Khare 				rxConf->mfTableLen = cpu_to_le16(sz);
2337b0eb57cbSAndy King 				new_table_pa = dma_map_single(
2338b0eb57cbSAndy King 							&adapter->pdev->dev,
2339b0eb57cbSAndy King 							new_table,
2340d37d5ec8SShrikrishna Khare 							sz,
2341b0eb57cbSAndy King 							PCI_DMA_TODEVICE);
23425738a09dSAlexey Khoroshilov 				if (!dma_mapping_error(&adapter->pdev->dev,
23435738a09dSAlexey Khoroshilov 						       new_table_pa)) {
23444ad9a64fSAndy King 					new_mode |= VMXNET3_RXM_MCAST;
2345fb5c6cfaSAlexey Khoroshilov 					new_table_pa_valid = true;
2346fb5c6cfaSAlexey Khoroshilov 					rxConf->mfTablePA = cpu_to_le64(
2347fb5c6cfaSAlexey Khoroshilov 								new_table_pa);
2348fb5c6cfaSAlexey Khoroshilov 				}
2349fb5c6cfaSAlexey Khoroshilov 			}
2350fb5c6cfaSAlexey Khoroshilov 			if (!new_table_pa_valid) {
23514ad9a64fSAndy King 				netdev_info(netdev,
23524ad9a64fSAndy King 					    "failed to copy mcast list, setting ALL_MULTI\n");
2353d1a890faSShreyas Bhatewara 				new_mode |= VMXNET3_RXM_ALL_MULTI;
2354d1a890faSShreyas Bhatewara 			}
2355d1a890faSShreyas Bhatewara 		}
2356d1a890faSShreyas Bhatewara 
2357d1a890faSShreyas Bhatewara 	if (!(new_mode & VMXNET3_RXM_MCAST)) {
2358d1a890faSShreyas Bhatewara 		rxConf->mfTableLen = 0;
2359d1a890faSShreyas Bhatewara 		rxConf->mfTablePA = 0;
2360d1a890faSShreyas Bhatewara 	}
2361d1a890faSShreyas Bhatewara 
236283d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2363d1a890faSShreyas Bhatewara 	if (new_mode != rxConf->rxMode) {
2364115924b6SShreyas Bhatewara 		rxConf->rxMode = cpu_to_le32(new_mode);
2365d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2366d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_RX_MODE);
236772e85c45SJesse Gross 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
236872e85c45SJesse Gross 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2369d1a890faSShreyas Bhatewara 	}
2370d1a890faSShreyas Bhatewara 
2371d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2372d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
237383d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2374d1a890faSShreyas Bhatewara 
2375fb5c6cfaSAlexey Khoroshilov 	if (new_table_pa_valid)
2376b0eb57cbSAndy King 		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2377b0eb57cbSAndy King 				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2378d1a890faSShreyas Bhatewara 	kfree(new_table);
2379d1a890faSShreyas Bhatewara }
2380d1a890faSShreyas Bhatewara 
238109c5088eSShreyas Bhatewara void
238209c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
238309c5088eSShreyas Bhatewara {
238409c5088eSShreyas Bhatewara 	int i;
238509c5088eSShreyas Bhatewara 
238609c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
238709c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
238809c5088eSShreyas Bhatewara }
238909c5088eSShreyas Bhatewara 
2390d1a890faSShreyas Bhatewara 
2391d1a890faSShreyas Bhatewara /*
2392d1a890faSShreyas Bhatewara  *   Set up driver_shared based on settings in adapter.
2393d1a890faSShreyas Bhatewara  */
2394d1a890faSShreyas Bhatewara 
2395d1a890faSShreyas Bhatewara static void
2396d1a890faSShreyas Bhatewara vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2397d1a890faSShreyas Bhatewara {
2398d1a890faSShreyas Bhatewara 	struct Vmxnet3_DriverShared *shared = adapter->shared;
2399d1a890faSShreyas Bhatewara 	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2400d1a890faSShreyas Bhatewara 	struct Vmxnet3_TxQueueConf *tqc;
2401d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxQueueConf *rqc;
2402d1a890faSShreyas Bhatewara 	int i;
2403d1a890faSShreyas Bhatewara 
2404d1a890faSShreyas Bhatewara 	memset(shared, 0, sizeof(*shared));
2405d1a890faSShreyas Bhatewara 
2406d1a890faSShreyas Bhatewara 	/* driver settings */
2407115924b6SShreyas Bhatewara 	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2408115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.version = cpu_to_le32(
2409115924b6SShreyas Bhatewara 						VMXNET3_DRIVER_VERSION_NUM);
2410d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2411d1a890faSShreyas Bhatewara 				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2412d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2413115924b6SShreyas Bhatewara 	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2414115924b6SShreyas Bhatewara 				*((u32 *)&devRead->misc.driverInfo.gos));
2415115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2416115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2417d1a890faSShreyas Bhatewara 
2418b0eb57cbSAndy King 	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2419115924b6SShreyas Bhatewara 	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2420d1a890faSShreyas Bhatewara 
2421d1a890faSShreyas Bhatewara 	/* set up feature flags */
2422a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_RXCSUM)
24233843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2424d1a890faSShreyas Bhatewara 
2425a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_LRO) {
24263843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_LRO;
2427115924b6SShreyas Bhatewara 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2428d1a890faSShreyas Bhatewara 	}
2429f646968fSPatrick McHardy 	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
24303843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2431d1a890faSShreyas Bhatewara 
2432115924b6SShreyas Bhatewara 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2433115924b6SShreyas Bhatewara 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2434115924b6SShreyas Bhatewara 	devRead->misc.queueDescLen = cpu_to_le32(
243509c5088eSShreyas Bhatewara 		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
243609c5088eSShreyas Bhatewara 		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2437d1a890faSShreyas Bhatewara 
2438d1a890faSShreyas Bhatewara 	/* tx queue settings */
243909c5088eSShreyas Bhatewara 	devRead->misc.numTxQueues =  adapter->num_tx_queues;
244009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
244109c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
244209c5088eSShreyas Bhatewara 		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
244309c5088eSShreyas Bhatewara 		tqc = &adapter->tqd_start[i].conf;
244409c5088eSShreyas Bhatewara 		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
244509c5088eSShreyas Bhatewara 		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
244609c5088eSShreyas Bhatewara 		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2447b0eb57cbSAndy King 		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
244809c5088eSShreyas Bhatewara 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
244909c5088eSShreyas Bhatewara 		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
24503c8b3efcSShrikrishna Khare 		tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
245109c5088eSShreyas Bhatewara 		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
245209c5088eSShreyas Bhatewara 		tqc->ddLen          = cpu_to_le32(
245309c5088eSShreyas Bhatewara 					sizeof(struct vmxnet3_tx_buf_info) *
2454115924b6SShreyas Bhatewara 					tqc->txRingSize);
245509c5088eSShreyas Bhatewara 		tqc->intrIdx        = tq->comp_ring.intr_idx;
245609c5088eSShreyas Bhatewara 	}
2457d1a890faSShreyas Bhatewara 
2458d1a890faSShreyas Bhatewara 	/* rx queue settings */
245909c5088eSShreyas Bhatewara 	devRead->misc.numRxQueues = adapter->num_rx_queues;
246009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
246109c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
246209c5088eSShreyas Bhatewara 		rqc = &adapter->rqd_start[i].conf;
246309c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
246409c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
246509c5088eSShreyas Bhatewara 		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
2466b0eb57cbSAndy King 		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
246709c5088eSShreyas Bhatewara 		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
246809c5088eSShreyas Bhatewara 		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
246909c5088eSShreyas Bhatewara 		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
247009c5088eSShreyas Bhatewara 		rqc->ddLen           = cpu_to_le32(
247109c5088eSShreyas Bhatewara 					sizeof(struct vmxnet3_rx_buf_info) *
247209c5088eSShreyas Bhatewara 					(rqc->rxRingSize[0] +
247309c5088eSShreyas Bhatewara 					 rqc->rxRingSize[1]));
247409c5088eSShreyas Bhatewara 		rqc->intrIdx         = rq->comp_ring.intr_idx;
247550a5ce3eSShrikrishna Khare 		if (VMXNET3_VERSION_GE_3(adapter)) {
247650a5ce3eSShrikrishna Khare 			rqc->rxDataRingBasePA =
247750a5ce3eSShrikrishna Khare 				cpu_to_le64(rq->data_ring.basePA);
247850a5ce3eSShrikrishna Khare 			rqc->rxDataRingDescSize =
247950a5ce3eSShrikrishna Khare 				cpu_to_le16(rq->data_ring.desc_size);
248050a5ce3eSShrikrishna Khare 		}
248109c5088eSShreyas Bhatewara 	}
248209c5088eSShreyas Bhatewara 
248309c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
248409c5088eSShreyas Bhatewara 	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
248509c5088eSShreyas Bhatewara 
248609c5088eSShreyas Bhatewara 	if (adapter->rss) {
248709c5088eSShreyas Bhatewara 		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
248866d35910SStephen Hemminger 
248909c5088eSShreyas Bhatewara 		devRead->misc.uptFeatures |= UPT1_F_RSS;
249009c5088eSShreyas Bhatewara 		devRead->misc.numRxQueues = adapter->num_rx_queues;
249109c5088eSShreyas Bhatewara 		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
249209c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV4 |
249309c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
249409c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV6;
249509c5088eSShreyas Bhatewara 		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
249609c5088eSShreyas Bhatewara 		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
249709c5088eSShreyas Bhatewara 		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
24986bf79cddSEric Dumazet 		netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
249966d35910SStephen Hemminger 
250009c5088eSShreyas Bhatewara 		for (i = 0; i < rssConf->indTableSize; i++)
2501278bc429SBen Hutchings 			rssConf->indTable[i] = ethtool_rxfh_indir_default(
2502278bc429SBen Hutchings 				i, adapter->num_rx_queues);
250309c5088eSShreyas Bhatewara 
250409c5088eSShreyas Bhatewara 		devRead->rssConfDesc.confVer = 1;
2505b0eb57cbSAndy King 		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2506b0eb57cbSAndy King 		devRead->rssConfDesc.confPA =
2507b0eb57cbSAndy King 			cpu_to_le64(adapter->rss_conf_pa);
250809c5088eSShreyas Bhatewara 	}
250909c5088eSShreyas Bhatewara 
251009c5088eSShreyas Bhatewara #endif /* VMXNET3_RSS */
2511d1a890faSShreyas Bhatewara 
2512d1a890faSShreyas Bhatewara 	/* intr settings */
2513d1a890faSShreyas Bhatewara 	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2514d1a890faSShreyas Bhatewara 				     VMXNET3_IMM_AUTO;
2515d1a890faSShreyas Bhatewara 	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2516d1a890faSShreyas Bhatewara 	for (i = 0; i < adapter->intr.num_intrs; i++)
2517d1a890faSShreyas Bhatewara 		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2518d1a890faSShreyas Bhatewara 
2519d1a890faSShreyas Bhatewara 	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
25206929fe8aSRonghua Zang 	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2521d1a890faSShreyas Bhatewara 
2522d1a890faSShreyas Bhatewara 	/* rx filter settings */
2523d1a890faSShreyas Bhatewara 	devRead->rxFilterConf.rxMode = 0;
2524d1a890faSShreyas Bhatewara 	vmxnet3_restore_vlan(adapter);
2525f9f25026SShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2526f9f25026SShreyas Bhatewara 
2527d1a890faSShreyas Bhatewara 	/* the rest are already zeroed */
2528d1a890faSShreyas Bhatewara }
2529d1a890faSShreyas Bhatewara 
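/*
 * On version 3 and newer devices, point the device at the coalescing
 * configuration and either query the default settings or apply the
 * cached ones.
 */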
25304edef40eSShrikrishna Khare static void
25314edef40eSShrikrishna Khare vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
25324edef40eSShrikrishna Khare {
25334edef40eSShrikrishna Khare 	struct Vmxnet3_DriverShared *shared = adapter->shared;
25344edef40eSShrikrishna Khare 	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
25354edef40eSShrikrishna Khare 	unsigned long flags;
25364edef40eSShrikrishna Khare 
25374edef40eSShrikrishna Khare 	if (!VMXNET3_VERSION_GE_3(adapter))
25384edef40eSShrikrishna Khare 		return;
25394edef40eSShrikrishna Khare 
25404edef40eSShrikrishna Khare 	spin_lock_irqsave(&adapter->cmd_lock, flags);
25414edef40eSShrikrishna Khare 	cmdInfo->varConf.confVer = 1;
25424edef40eSShrikrishna Khare 	cmdInfo->varConf.confLen =
25434edef40eSShrikrishna Khare 		cpu_to_le32(sizeof(*adapter->coal_conf));
25444edef40eSShrikrishna Khare 	cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);
25454edef40eSShrikrishna Khare 
25464edef40eSShrikrishna Khare 	if (adapter->default_coal_mode) {
25474edef40eSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
25484edef40eSShrikrishna Khare 				       VMXNET3_CMD_GET_COALESCE);
25494edef40eSShrikrishna Khare 	} else {
25504edef40eSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
25514edef40eSShrikrishna Khare 				       VMXNET3_CMD_SET_COALESCE);
25524edef40eSShrikrishna Khare 	}
25534edef40eSShrikrishna Khare 
25544edef40eSShrikrishna Khare 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
25554edef40eSShrikrishna Khare }
2556d1a890faSShreyas Bhatewara 
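/*
 * On version 4 and newer devices, query or program the RSS hash fields,
 * then cache what the device actually applied.
 */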
2557d3a8a9e5SRonak Doshi static void
2558d3a8a9e5SRonak Doshi vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2559d3a8a9e5SRonak Doshi {
2560d3a8a9e5SRonak Doshi 	struct Vmxnet3_DriverShared *shared = adapter->shared;
2561d3a8a9e5SRonak Doshi 	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2562d3a8a9e5SRonak Doshi 	unsigned long flags;
2563d3a8a9e5SRonak Doshi 
2564d3a8a9e5SRonak Doshi 	if (!VMXNET3_VERSION_GE_4(adapter))
2565d3a8a9e5SRonak Doshi 		return;
2566d3a8a9e5SRonak Doshi 
2567d3a8a9e5SRonak Doshi 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2568d3a8a9e5SRonak Doshi 
2569d3a8a9e5SRonak Doshi 	if (adapter->default_rss_fields) {
2570d3a8a9e5SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2571d3a8a9e5SRonak Doshi 				       VMXNET3_CMD_GET_RSS_FIELDS);
2572d3a8a9e5SRonak Doshi 		adapter->rss_fields =
2573d3a8a9e5SRonak Doshi 			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2574d3a8a9e5SRonak Doshi 	} else {
2575d3a8a9e5SRonak Doshi 		cmdInfo->setRssFields = adapter->rss_fields;
2576d3a8a9e5SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2577d3a8a9e5SRonak Doshi 				       VMXNET3_CMD_SET_RSS_FIELDS);
2578d3a8a9e5SRonak Doshi 		/* Not all requested RSS may get applied, so get and
2579d3a8a9e5SRonak Doshi 		 * cache what was actually applied.
2580d3a8a9e5SRonak Doshi 		 */
2581d3a8a9e5SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2582d3a8a9e5SRonak Doshi 				       VMXNET3_CMD_GET_RSS_FIELDS);
2583d3a8a9e5SRonak Doshi 		adapter->rss_fields =
2584d3a8a9e5SRonak Doshi 			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2585d3a8a9e5SRonak Doshi 	}
2586d3a8a9e5SRonak Doshi 
2587d3a8a9e5SRonak Doshi 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2588d3a8a9e5SRonak Doshi }
2589d3a8a9e5SRonak Doshi 
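/*
 * Activate the device: init the tx/rx queues, request irqs, fill in the
 * shared area, issue ACTIVATE_DEV, prime the rx producer registers, apply
 * the rx filter settings and finally enable NAPI and interrupts.
 */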
2590d1a890faSShreyas Bhatewara int
2591d1a890faSShreyas Bhatewara vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2592d1a890faSShreyas Bhatewara {
259309c5088eSShreyas Bhatewara 	int err, i;
2594d1a890faSShreyas Bhatewara 	u32 ret;
259583d0feffSShreyas Bhatewara 	unsigned long flags;
2596d1a890faSShreyas Bhatewara 
2597fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
259809c5088eSShreyas Bhatewara 		" ring sizes %u %u %u\n", adapter->netdev->name,
259909c5088eSShreyas Bhatewara 		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
260009c5088eSShreyas Bhatewara 		adapter->tx_queue[0].tx_ring.size,
260109c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[0].size,
260209c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[1].size);
2603d1a890faSShreyas Bhatewara 
260409c5088eSShreyas Bhatewara 	vmxnet3_tq_init_all(adapter);
260509c5088eSShreyas Bhatewara 	err = vmxnet3_rq_init_all(adapter);
2606d1a890faSShreyas Bhatewara 	if (err) {
2607204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2608204a6e65SStephen Hemminger 			   "Failed to init rx queue error %d\n", err);
2609d1a890faSShreyas Bhatewara 		goto rq_err;
2610d1a890faSShreyas Bhatewara 	}
2611d1a890faSShreyas Bhatewara 
2612d1a890faSShreyas Bhatewara 	err = vmxnet3_request_irqs(adapter);
2613d1a890faSShreyas Bhatewara 	if (err) {
2614204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2615204a6e65SStephen Hemminger 			   "Failed to setup irq for error %d\n", err);
2616d1a890faSShreyas Bhatewara 		goto irq_err;
2617d1a890faSShreyas Bhatewara 	}
2618d1a890faSShreyas Bhatewara 
2619d1a890faSShreyas Bhatewara 	vmxnet3_setup_driver_shared(adapter);
2620d1a890faSShreyas Bhatewara 
2621115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2622115924b6SShreyas Bhatewara 			       adapter->shared_pa));
2623115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2624115924b6SShreyas Bhatewara 			       adapter->shared_pa));
262583d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2626d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2627d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_ACTIVATE_DEV);
2628d1a890faSShreyas Bhatewara 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
262983d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2630d1a890faSShreyas Bhatewara 
2631d1a890faSShreyas Bhatewara 	if (ret != 0) {
2632204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2633204a6e65SStephen Hemminger 			   "Failed to activate dev: error %u\n", ret);
2634d1a890faSShreyas Bhatewara 		err = -EINVAL;
2635d1a890faSShreyas Bhatewara 		goto activate_err;
2636d1a890faSShreyas Bhatewara 	}
263709c5088eSShreyas Bhatewara 
26384edef40eSShrikrishna Khare 	vmxnet3_init_coalesce(adapter);
2639d3a8a9e5SRonak Doshi 	vmxnet3_init_rssfields(adapter);
26404edef40eSShrikrishna Khare 
264109c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
264209c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
264309c5088eSShreyas Bhatewara 				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
264409c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[0].next2fill);
264509c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
264609c5088eSShreyas Bhatewara 				(i * VMXNET3_REG_ALIGN)),
264709c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[1].next2fill);
264809c5088eSShreyas Bhatewara 	}
2649d1a890faSShreyas Bhatewara 
2650d1a890faSShreyas Bhatewara 	/* Apply the rx filter settings last. */
2651d1a890faSShreyas Bhatewara 	vmxnet3_set_mc(adapter->netdev);
2652d1a890faSShreyas Bhatewara 
2653d1a890faSShreyas Bhatewara 	/*
2654d1a890faSShreyas Bhatewara 	 * Check link state when first activating device. It will start the
2655d1a890faSShreyas Bhatewara 	 * tx queue if the link is up.
2656d1a890faSShreyas Bhatewara 	 */
26574a1745fcSShreyas Bhatewara 	vmxnet3_check_link(adapter, true);
265809c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
265909c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
2660d1a890faSShreyas Bhatewara 	vmxnet3_enable_all_intrs(adapter);
2661d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2662d1a890faSShreyas Bhatewara 	return 0;
2663d1a890faSShreyas Bhatewara 
2664d1a890faSShreyas Bhatewara activate_err:
2665d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2666d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2667d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
2668d1a890faSShreyas Bhatewara irq_err:
2669d1a890faSShreyas Bhatewara rq_err:
2670d1a890faSShreyas Bhatewara 	/* free up buffers we allocated */
267109c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
2672d1a890faSShreyas Bhatewara 	return err;
2673d1a890faSShreyas Bhatewara }
2674d1a890faSShreyas Bhatewara 
2675d1a890faSShreyas Bhatewara 
2676d1a890faSShreyas Bhatewara void
2677d1a890faSShreyas Bhatewara vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2678d1a890faSShreyas Bhatewara {
267983d0feffSShreyas Bhatewara 	unsigned long flags;
268083d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2681d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
268283d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2683d1a890faSShreyas Bhatewara }
2684d1a890faSShreyas Bhatewara 
2685d1a890faSShreyas Bhatewara 
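/*
 * Quiesce the device: issue QUIESCE_DEV, disable interrupts, NAPI and the
 * tx queues, then clean up the queue buffers and free the irqs. The
 * QUIESCED state bit makes repeated calls a no-op.
 */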
2686d1a890faSShreyas Bhatewara int
2687d1a890faSShreyas Bhatewara vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2688d1a890faSShreyas Bhatewara {
268909c5088eSShreyas Bhatewara 	int i;
269083d0feffSShreyas Bhatewara 	unsigned long flags;
2691d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2692d1a890faSShreyas Bhatewara 		return 0;
2693d1a890faSShreyas Bhatewara 
2694d1a890faSShreyas Bhatewara 
269583d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2696d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2697d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_QUIESCE_DEV);
269883d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2699d1a890faSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
2700d1a890faSShreyas Bhatewara 
270109c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
270209c5088eSShreyas Bhatewara 		napi_disable(&adapter->rx_queue[i].napi);
2703d1a890faSShreyas Bhatewara 	netif_tx_disable(adapter->netdev);
2704d1a890faSShreyas Bhatewara 	adapter->link_speed = 0;
2705d1a890faSShreyas Bhatewara 	netif_carrier_off(adapter->netdev);
2706d1a890faSShreyas Bhatewara 
270709c5088eSShreyas Bhatewara 	vmxnet3_tq_cleanup_all(adapter);
270809c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
2709d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
2710d1a890faSShreyas Bhatewara 	return 0;
2711d1a890faSShreyas Bhatewara }
2712d1a890faSShreyas Bhatewara 
2713d1a890faSShreyas Bhatewara 
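/* Program the MAC address into the MACL/MACH device registers. */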
2714d1a890faSShreyas Bhatewara static void
2715d1a890faSShreyas Bhatewara vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2716d1a890faSShreyas Bhatewara {
2717d1a890faSShreyas Bhatewara 	u32 tmp;
2718d1a890faSShreyas Bhatewara 
2719d1a890faSShreyas Bhatewara 	tmp = *(u32 *)mac;
2720d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2721d1a890faSShreyas Bhatewara 
2722d1a890faSShreyas Bhatewara 	tmp = (mac[5] << 8) | mac[4];
2723d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2724d1a890faSShreyas Bhatewara }
2725d1a890faSShreyas Bhatewara 
2726d1a890faSShreyas Bhatewara 
2727d1a890faSShreyas Bhatewara static int
2728d1a890faSShreyas Bhatewara vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2729d1a890faSShreyas Bhatewara {
2730d1a890faSShreyas Bhatewara 	struct sockaddr *addr = p;
2731d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2732d1a890faSShreyas Bhatewara 
2733d1a890faSShreyas Bhatewara 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2734d1a890faSShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, addr->sa_data);
2735d1a890faSShreyas Bhatewara 
2736d1a890faSShreyas Bhatewara 	return 0;
2737d1a890faSShreyas Bhatewara }
2738d1a890faSShreyas Bhatewara 
2739d1a890faSShreyas Bhatewara 
2740d1a890faSShreyas Bhatewara /* ==================== initialization and cleanup routines ============ */
2741d1a890faSShreyas Bhatewara 
2742d1a890faSShreyas Bhatewara static int
274361aeeceaShpreg@vmware.com vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
2744d1a890faSShreyas Bhatewara {
2745d1a890faSShreyas Bhatewara 	int err;
2746d1a890faSShreyas Bhatewara 	unsigned long mmio_start, mmio_len;
2747d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = adapter->pdev;
2748d1a890faSShreyas Bhatewara 
2749d1a890faSShreyas Bhatewara 	err = pci_enable_device(pdev);
2750d1a890faSShreyas Bhatewara 	if (err) {
2751204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2752d1a890faSShreyas Bhatewara 		return err;
2753d1a890faSShreyas Bhatewara 	}
2754d1a890faSShreyas Bhatewara 
2755d1a890faSShreyas Bhatewara 	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2756d1a890faSShreyas Bhatewara 					   vmxnet3_driver_name);
2757d1a890faSShreyas Bhatewara 	if (err) {
2758204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
2759204a6e65SStephen Hemminger 			"Failed to request region for adapter: error %d\n", err);
276061aeeceaShpreg@vmware.com 		goto err_enable_device;
2761d1a890faSShreyas Bhatewara 	}
2762d1a890faSShreyas Bhatewara 
2763d1a890faSShreyas Bhatewara 	pci_set_master(pdev);
2764d1a890faSShreyas Bhatewara 
2765d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 0);
2766d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 0);
2767d1a890faSShreyas Bhatewara 	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2768d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr0) {
2769204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar0\n");
2770d1a890faSShreyas Bhatewara 		err = -EIO;
2771d1a890faSShreyas Bhatewara 		goto err_ioremap;
2772d1a890faSShreyas Bhatewara 	}
2773d1a890faSShreyas Bhatewara 
2774d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 1);
2775d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 1);
2776d1a890faSShreyas Bhatewara 	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2777d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr1) {
2778204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar1\n");
2779d1a890faSShreyas Bhatewara 		err = -EIO;
2780d1a890faSShreyas Bhatewara 		goto err_bar1;
2781d1a890faSShreyas Bhatewara 	}
2782d1a890faSShreyas Bhatewara 	return 0;
2783d1a890faSShreyas Bhatewara 
2784d1a890faSShreyas Bhatewara err_bar1:
2785d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
2786d1a890faSShreyas Bhatewara err_ioremap:
2787d1a890faSShreyas Bhatewara 	pci_release_selected_regions(pdev, (1 << 2) - 1);
278861aeeceaShpreg@vmware.com err_enable_device:
2789d1a890faSShreyas Bhatewara 	pci_disable_device(pdev);
2790d1a890faSShreyas Bhatewara 	return err;
2791d1a890faSShreyas Bhatewara }
2792d1a890faSShreyas Bhatewara 
2793d1a890faSShreyas Bhatewara 
2794d1a890faSShreyas Bhatewara static void
2795d1a890faSShreyas Bhatewara vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2796d1a890faSShreyas Bhatewara {
2797d1a890faSShreyas Bhatewara 	BUG_ON(!adapter->pdev);
2798d1a890faSShreyas Bhatewara 
2799d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
2800d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr1);
2801d1a890faSShreyas Bhatewara 	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2802d1a890faSShreyas Bhatewara 	pci_disable_device(adapter->pdev);
2803d1a890faSShreyas Bhatewara }
2804d1a890faSShreyas Bhatewara 
2805d1a890faSShreyas Bhatewara 
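/*
 * Derive skb_buf_size and rx_buf_per_pkt from the current MTU, then round
 * the rx ring sizes to the required multiple of rx_buf_per_pkt times the
 * ring alignment.
 */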
2806d1a890faSShreyas Bhatewara static void
2807d1a890faSShreyas Bhatewara vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2808d1a890faSShreyas Bhatewara {
280909c5088eSShreyas Bhatewara 	size_t sz, i, ring0_size, ring1_size, comp_size;
2810d1a890faSShreyas Bhatewara 	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2811d1a890faSShreyas Bhatewara 				    VMXNET3_MAX_ETH_HDR_SIZE) {
2812d1a890faSShreyas Bhatewara 		adapter->skb_buf_size = adapter->netdev->mtu +
2813d1a890faSShreyas Bhatewara 					VMXNET3_MAX_ETH_HDR_SIZE;
2814d1a890faSShreyas Bhatewara 		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2815d1a890faSShreyas Bhatewara 			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2816d1a890faSShreyas Bhatewara 
2817d1a890faSShreyas Bhatewara 		adapter->rx_buf_per_pkt = 1;
2818d1a890faSShreyas Bhatewara 	} else {
2819d1a890faSShreyas Bhatewara 		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2820d1a890faSShreyas Bhatewara 		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2821d1a890faSShreyas Bhatewara 					    VMXNET3_MAX_ETH_HDR_SIZE;
2822d1a890faSShreyas Bhatewara 		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2823d1a890faSShreyas Bhatewara 	}
2824d1a890faSShreyas Bhatewara 
2825d1a890faSShreyas Bhatewara 	/*
2826d1a890faSShreyas Bhatewara 	 * for simplicity, force the ring0 size to be a multiple of
2827d1a890faSShreyas Bhatewara 	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2828d1a890faSShreyas Bhatewara 	 */
2829d1a890faSShreyas Bhatewara 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
283009c5088eSShreyas Bhatewara 	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
283109c5088eSShreyas Bhatewara 	ring0_size = (ring0_size + sz - 1) / sz * sz;
2832a53255d3SShreyas Bhatewara 	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
283309c5088eSShreyas Bhatewara 			   sz * sz);
283409c5088eSShreyas Bhatewara 	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
283553831aa1SShrikrishna Khare 	ring1_size = (ring1_size + sz - 1) / sz * sz;
283653831aa1SShrikrishna Khare 	ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
283753831aa1SShrikrishna Khare 			   sz * sz);
283809c5088eSShreyas Bhatewara 	comp_size = ring0_size + ring1_size;
283909c5088eSShreyas Bhatewara 
284009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
28415e264e2bSColin Ian King 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
28425e264e2bSColin Ian King 
284309c5088eSShreyas Bhatewara 		rq->rx_ring[0].size = ring0_size;
284409c5088eSShreyas Bhatewara 		rq->rx_ring[1].size = ring1_size;
284509c5088eSShreyas Bhatewara 		rq->comp_ring.size = comp_size;
284609c5088eSShreyas Bhatewara 	}
2847d1a890faSShreyas Bhatewara }
2848d1a890faSShreyas Bhatewara 
2849d1a890faSShreyas Bhatewara 
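/*
 * Allocate tx and rx queues with the requested ring and data-ring sizes.
 * A tx queue failure is fatal; if a later rx queue fails, num_rx_queues is
 * reduced to the queues that were created successfully.
 */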
2850d1a890faSShreyas Bhatewara int
2851d1a890faSShreyas Bhatewara vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
28523c8b3efcSShrikrishna Khare 		      u32 rx_ring_size, u32 rx_ring2_size,
285350a5ce3eSShrikrishna Khare 		      u16 txdata_desc_size, u16 rxdata_desc_size)
2854d1a890faSShreyas Bhatewara {
285509c5088eSShreyas Bhatewara 	int err = 0, i;
2856d1a890faSShreyas Bhatewara 
285709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
285809c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
285909c5088eSShreyas Bhatewara 		tq->tx_ring.size   = tx_ring_size;
286009c5088eSShreyas Bhatewara 		tq->data_ring.size = tx_ring_size;
286109c5088eSShreyas Bhatewara 		tq->comp_ring.size = tx_ring_size;
28623c8b3efcSShrikrishna Khare 		tq->txdata_desc_size = txdata_desc_size;
286309c5088eSShreyas Bhatewara 		tq->shared = &adapter->tqd_start[i].ctrl;
286409c5088eSShreyas Bhatewara 		tq->stopped = true;
286509c5088eSShreyas Bhatewara 		tq->adapter = adapter;
286609c5088eSShreyas Bhatewara 		tq->qid = i;
286709c5088eSShreyas Bhatewara 		err = vmxnet3_tq_create(tq, adapter);
286809c5088eSShreyas Bhatewara 		/*
286909c5088eSShreyas Bhatewara 		 * Too late to change num_tx_queues. We cannot make do with
287009c5088eSShreyas Bhatewara 		 * fewer queues than we asked for.
287109c5088eSShreyas Bhatewara 		 */
2872d1a890faSShreyas Bhatewara 		if (err)
287309c5088eSShreyas Bhatewara 			goto queue_err;
287409c5088eSShreyas Bhatewara 	}
2875d1a890faSShreyas Bhatewara 
287609c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
287709c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2878d1a890faSShreyas Bhatewara 	vmxnet3_adjust_rx_ring_size(adapter);
287950a5ce3eSShrikrishna Khare 
288050a5ce3eSShrikrishna Khare 	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
288109c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
288209c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
288309c5088eSShreyas Bhatewara 		/* qid and qid2 for rx queues will be assigned later, once the
288409c5088eSShreyas Bhatewara 		 * number of rx queues is finalized after allocating intrs */
288509c5088eSShreyas Bhatewara 		rq->shared = &adapter->rqd_start[i].ctrl;
288609c5088eSShreyas Bhatewara 		rq->adapter = adapter;
288750a5ce3eSShrikrishna Khare 		rq->data_ring.desc_size = rxdata_desc_size;
288809c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(rq, adapter);
288909c5088eSShreyas Bhatewara 		if (err) {
289009c5088eSShreyas Bhatewara 			if (i == 0) {
2891204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
2892204a6e65SStephen Hemminger 					   "Could not allocate any rx queues. "
2893204a6e65SStephen Hemminger 					   "Aborting.\n");
289409c5088eSShreyas Bhatewara 				goto queue_err;
289509c5088eSShreyas Bhatewara 			} else {
2896204a6e65SStephen Hemminger 				netdev_info(adapter->netdev,
2897204a6e65SStephen Hemminger 					    "Number of rx queues changed "
289809c5088eSShreyas Bhatewara 					    "to %d.\n", i);
289909c5088eSShreyas Bhatewara 				adapter->num_rx_queues = i;
290009c5088eSShreyas Bhatewara 				err = 0;
290109c5088eSShreyas Bhatewara 				break;
290209c5088eSShreyas Bhatewara 			}
290309c5088eSShreyas Bhatewara 		}
290409c5088eSShreyas Bhatewara 	}
290550a5ce3eSShrikrishna Khare 
290650a5ce3eSShrikrishna Khare 	if (!adapter->rxdataring_enabled)
290750a5ce3eSShrikrishna Khare 		vmxnet3_rq_destroy_all_rxdataring(adapter);
290850a5ce3eSShrikrishna Khare 
290909c5088eSShreyas Bhatewara 	return err;
291009c5088eSShreyas Bhatewara queue_err:
291109c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2912d1a890faSShreyas Bhatewara 	return err;
2913d1a890faSShreyas Bhatewara }
2914d1a890faSShreyas Bhatewara 
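/*
 * ndo_open handler: query the tx data-ring descriptor size on version 3+
 * devices, create the queues and activate the device.
 */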
2915d1a890faSShreyas Bhatewara static int
2916d1a890faSShreyas Bhatewara vmxnet3_open(struct net_device *netdev)
2917d1a890faSShreyas Bhatewara {
2918d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
291909c5088eSShreyas Bhatewara 	int err, i;
2920d1a890faSShreyas Bhatewara 
2921d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
2922d1a890faSShreyas Bhatewara 
292309c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
292409c5088eSShreyas Bhatewara 		spin_lock_init(&adapter->tx_queue[i].tx_lock);
2925d1a890faSShreyas Bhatewara 
29263c8b3efcSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
29273c8b3efcSShrikrishna Khare 		unsigned long flags;
29283c8b3efcSShrikrishna Khare 		u16 txdata_desc_size;
29293c8b3efcSShrikrishna Khare 
29303c8b3efcSShrikrishna Khare 		spin_lock_irqsave(&adapter->cmd_lock, flags);
29313c8b3efcSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
29323c8b3efcSShrikrishna Khare 				       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
29333c8b3efcSShrikrishna Khare 		txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
29343c8b3efcSShrikrishna Khare 							 VMXNET3_REG_CMD);
29353c8b3efcSShrikrishna Khare 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
29363c8b3efcSShrikrishna Khare 
29373c8b3efcSShrikrishna Khare 		if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
29383c8b3efcSShrikrishna Khare 		    (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
29393c8b3efcSShrikrishna Khare 		    (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
29403c8b3efcSShrikrishna Khare 			adapter->txdata_desc_size =
29413c8b3efcSShrikrishna Khare 				sizeof(struct Vmxnet3_TxDataDesc);
29423c8b3efcSShrikrishna Khare 		} else {
29433c8b3efcSShrikrishna Khare 			adapter->txdata_desc_size = txdata_desc_size;
29443c8b3efcSShrikrishna Khare 		}
29453c8b3efcSShrikrishna Khare 	} else {
29463c8b3efcSShrikrishna Khare 		adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
29473c8b3efcSShrikrishna Khare 	}
29483c8b3efcSShrikrishna Khare 
29493c8b3efcSShrikrishna Khare 	err = vmxnet3_create_queues(adapter,
29503c8b3efcSShrikrishna Khare 				    adapter->tx_ring_size,
2951f00e2b0aSNeil Horman 				    adapter->rx_ring_size,
29523c8b3efcSShrikrishna Khare 				    adapter->rx_ring2_size,
295350a5ce3eSShrikrishna Khare 				    adapter->txdata_desc_size,
295450a5ce3eSShrikrishna Khare 				    adapter->rxdata_desc_size);
2955d1a890faSShreyas Bhatewara 	if (err)
2956d1a890faSShreyas Bhatewara 		goto queue_err;
2957d1a890faSShreyas Bhatewara 
2958d1a890faSShreyas Bhatewara 	err = vmxnet3_activate_dev(adapter);
2959d1a890faSShreyas Bhatewara 	if (err)
2960d1a890faSShreyas Bhatewara 		goto activate_err;
2961d1a890faSShreyas Bhatewara 
2962d1a890faSShreyas Bhatewara 	return 0;
2963d1a890faSShreyas Bhatewara 
2964d1a890faSShreyas Bhatewara activate_err:
296509c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
296609c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2967d1a890faSShreyas Bhatewara queue_err:
2968d1a890faSShreyas Bhatewara 	return err;
2969d1a890faSShreyas Bhatewara }
2970d1a890faSShreyas Bhatewara 
2971d1a890faSShreyas Bhatewara 
2972d1a890faSShreyas Bhatewara static int
2973d1a890faSShreyas Bhatewara vmxnet3_close(struct net_device *netdev)
2974d1a890faSShreyas Bhatewara {
2975d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2976d1a890faSShreyas Bhatewara 
2977d1a890faSShreyas Bhatewara 	/*
2978d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device; wait for its
2979d1a890faSShreyas Bhatewara 	 * completion.
2980d1a890faSShreyas Bhatewara 	 */
2981d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
298293c65d13SYueHaibing 		usleep_range(1000, 2000);
2983d1a890faSShreyas Bhatewara 
2984d1a890faSShreyas Bhatewara 	vmxnet3_quiesce_dev(adapter);
2985d1a890faSShreyas Bhatewara 
298609c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
298709c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2988d1a890faSShreyas Bhatewara 
2989d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2990d1a890faSShreyas Bhatewara 
2991d1a890faSShreyas Bhatewara 
2992d1a890faSShreyas Bhatewara 	return 0;
2993d1a890faSShreyas Bhatewara }
2994d1a890faSShreyas Bhatewara 
2995d1a890faSShreyas Bhatewara 
2996d1a890faSShreyas Bhatewara void
2997d1a890faSShreyas Bhatewara vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2998d1a890faSShreyas Bhatewara {
299909c5088eSShreyas Bhatewara 	int i;
300009c5088eSShreyas Bhatewara 
3001d1a890faSShreyas Bhatewara 	/*
3002d1a890faSShreyas Bhatewara 	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3003d1a890faSShreyas Bhatewara 	 * vmxnet3_close() will deadlock.
3004d1a890faSShreyas Bhatewara 	 */
3005d1a890faSShreyas Bhatewara 	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3006d1a890faSShreyas Bhatewara 
3007d1a890faSShreyas Bhatewara 	/* we need to enable NAPI, otherwise dev_close will deadlock */
300809c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
300909c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
30101c4d5f51SNeil Horman 	/*
30111c4d5f51SNeil Horman 	 * Need to clear the quiesce bit to ensure that vmxnet3_close
30121c4d5f51SNeil Horman 	 * can quiesce the device properly
30131c4d5f51SNeil Horman 	 */
30141c4d5f51SNeil Horman 	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3015d1a890faSShreyas Bhatewara 	dev_close(adapter->netdev);
3016d1a890faSShreyas Bhatewara }
3017d1a890faSShreyas Bhatewara 
3018d1a890faSShreyas Bhatewara 
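/*
 * ndo_change_mtu handler: if the interface is running, quiesce and reset
 * the device, re-create the rx queues for the new MTU and re-activate.
 * On failure the device is force-closed.
 */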
3019d1a890faSShreyas Bhatewara static int
3020d1a890faSShreyas Bhatewara vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3021d1a890faSShreyas Bhatewara {
3022d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3023d1a890faSShreyas Bhatewara 	int err = 0;
3024d1a890faSShreyas Bhatewara 
3025d1a890faSShreyas Bhatewara 	netdev->mtu = new_mtu;
3026d1a890faSShreyas Bhatewara 
3027d1a890faSShreyas Bhatewara 	/*
3028d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device; wait for
3029d1a890faSShreyas Bhatewara 	 * its completion.
3030d1a890faSShreyas Bhatewara 	 */
3031d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
303293c65d13SYueHaibing 		usleep_range(1000, 2000);
3033d1a890faSShreyas Bhatewara 
3034d1a890faSShreyas Bhatewara 	if (netif_running(netdev)) {
3035d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
3036d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
3037d1a890faSShreyas Bhatewara 
3038d1a890faSShreyas Bhatewara 		/* we need to re-create the rx queue based on the new mtu */
303909c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy_all(adapter);
3040d1a890faSShreyas Bhatewara 		vmxnet3_adjust_rx_ring_size(adapter);
304109c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create_all(adapter);
3042d1a890faSShreyas Bhatewara 		if (err) {
3043204a6e65SStephen Hemminger 			netdev_err(netdev,
3044204a6e65SStephen Hemminger 				   "failed to re-create rx queues, error %d. Closing it.\n",
3045204a6e65SStephen Hemminger 				   err);
3046d1a890faSShreyas Bhatewara 			goto out;
3047d1a890faSShreyas Bhatewara 		}
3048d1a890faSShreyas Bhatewara 
3049d1a890faSShreyas Bhatewara 		err = vmxnet3_activate_dev(adapter);
3050d1a890faSShreyas Bhatewara 		if (err) {
3051204a6e65SStephen Hemminger 			netdev_err(netdev,
3052204a6e65SStephen Hemminger 				   "failed to re-activate, error %d. Closing it\n",
3053204a6e65SStephen Hemminger 				   err);
3054d1a890faSShreyas Bhatewara 			goto out;
3055d1a890faSShreyas Bhatewara 		}
3056d1a890faSShreyas Bhatewara 	}
3057d1a890faSShreyas Bhatewara 
3058d1a890faSShreyas Bhatewara out:
3059d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3060d1a890faSShreyas Bhatewara 	if (err)
3061d1a890faSShreyas Bhatewara 		vmxnet3_force_close(adapter);
3062d1a890faSShreyas Bhatewara 
3063d1a890faSShreyas Bhatewara 	return err;
3064d1a890faSShreyas Bhatewara }
3065d1a890faSShreyas Bhatewara 
3066d1a890faSShreyas Bhatewara 
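/*
 * Declare the offloads the driver supports (scatter-gather, checksum
 * offload, VLAN tag offload, TSO/TSO6, LRO) and mirror them into
 * vlan_features, minus the VLAN tag offloads themselves.
 */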
3067d1a890faSShreyas Bhatewara static void
3068d1a890faSShreyas Bhatewara vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
3069d1a890faSShreyas Bhatewara {
3070d1a890faSShreyas Bhatewara 	struct net_device *netdev = adapter->netdev;
3071d1a890faSShreyas Bhatewara 
3072a0d2730cSMichał Mirosław 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3073f646968fSPatrick McHardy 		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3074f646968fSPatrick McHardy 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
307572e85c45SJesse Gross 		NETIF_F_LRO;
3076a0d2730cSMichał Mirosław 	if (dma64)
3077ebbf9295SShreyas Bhatewara 		netdev->hw_features |= NETIF_F_HIGHDMA;
307872e85c45SJesse Gross 	netdev->vlan_features = netdev->hw_features &
3079f646968fSPatrick McHardy 				~(NETIF_F_HW_VLAN_CTAG_TX |
3080f646968fSPatrick McHardy 				  NETIF_F_HW_VLAN_CTAG_RX);
3081f646968fSPatrick McHardy 	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3082d1a890faSShreyas Bhatewara }
3083d1a890faSShreyas Bhatewara 
3084d1a890faSShreyas Bhatewara 
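/*
 * Read the current MAC address from the device: the low four bytes come
 * from the MACL register and the remaining two from MACH.
 */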
3085d1a890faSShreyas Bhatewara static void
3086d1a890faSShreyas Bhatewara vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3087d1a890faSShreyas Bhatewara {
3088d1a890faSShreyas Bhatewara 	u32 tmp;
3089d1a890faSShreyas Bhatewara 
3090d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3091d1a890faSShreyas Bhatewara 	*(u32 *)mac = tmp;
3092d1a890faSShreyas Bhatewara 
3093d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3094d1a890faSShreyas Bhatewara 	mac[4] = tmp & 0xff;
3095d1a890faSShreyas Bhatewara 	mac[5] = (tmp >> 8) & 0xff;
3096d1a890faSShreyas Bhatewara }
3097d1a890faSShreyas Bhatewara 
309809c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
309909c5088eSShreyas Bhatewara 
310009c5088eSShreyas Bhatewara /*
310109c5088eSShreyas Bhatewara  * Enable MSI-X vectors.
310209c5088eSShreyas Bhatewara  * Returns:
310325985edcSLucas De Marchi  *	the number of vectors that were enabled on success (at least
3104b60b869dSAlexander Gordeev  *	 VMXNET3_LINUX_MIN_MSIX_VECT), or
3105b60b869dSAlexander Gordeev  *	a negative error code if neither the requested count nor the
310609c5088eSShreyas Bhatewara  *	 minimum fallback could be enabled.
310709c5088eSShreyas Bhatewara  */
310809c5088eSShreyas Bhatewara 
310909c5088eSShreyas Bhatewara static int
3110b60b869dSAlexander Gordeev vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
311109c5088eSShreyas Bhatewara {
3112c0a1be38SAlexander Gordeev 	int ret = pci_enable_msix_range(adapter->pdev,
3113c0a1be38SAlexander Gordeev 					adapter->intr.msix_entries, nvec, nvec);
3114c0a1be38SAlexander Gordeev 
3115c0a1be38SAlexander Gordeev 	if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
31164bad25faSStephen Hemminger 		dev_err(&adapter->netdev->dev,
3117b60b869dSAlexander Gordeev 			"Failed to enable %d MSI-X, trying %d\n",
3118b60b869dSAlexander Gordeev 			nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
311909c5088eSShreyas Bhatewara 
3120c0a1be38SAlexander Gordeev 		ret = pci_enable_msix_range(adapter->pdev,
3121c0a1be38SAlexander Gordeev 					    adapter->intr.msix_entries,
3122c0a1be38SAlexander Gordeev 					    VMXNET3_LINUX_MIN_MSIX_VECT,
3123c0a1be38SAlexander Gordeev 					    VMXNET3_LINUX_MIN_MSIX_VECT);
3124c0a1be38SAlexander Gordeev 	}
3125c0a1be38SAlexander Gordeev 
3126c0a1be38SAlexander Gordeev 	if (ret < 0) {
3127c0a1be38SAlexander Gordeev 		dev_err(&adapter->netdev->dev,
3128c0a1be38SAlexander Gordeev 			"Failed to enable MSI-X, error: %d\n", ret);
3129c0a1be38SAlexander Gordeev 	}
3130c0a1be38SAlexander Gordeev 
3131c0a1be38SAlexander Gordeev 	return ret;
313209c5088eSShreyas Bhatewara }
313309c5088eSShreyas Bhatewara 
313409c5088eSShreyas Bhatewara 
313509c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
3136d1a890faSShreyas Bhatewara 
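/*
 * Pick an interrupt mode: query the device's preferred type, then try
 * MSI-X, falling back to MSI and finally INTx. The non-MSI-X modes are
 * limited to a single rx queue.
 */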
3137d1a890faSShreyas Bhatewara static void
3138d1a890faSShreyas Bhatewara vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3139d1a890faSShreyas Bhatewara {
3140d1a890faSShreyas Bhatewara 	u32 cfg;
3141e328d410SRoland Dreier 	unsigned long flags;
3142d1a890faSShreyas Bhatewara 
3143d1a890faSShreyas Bhatewara 	/* intr settings */
3144e328d410SRoland Dreier 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3145d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3146d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_GET_CONF_INTR);
3147d1a890faSShreyas Bhatewara 	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3148e328d410SRoland Dreier 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3149d1a890faSShreyas Bhatewara 	adapter->intr.type = cfg & 0x3;
3150d1a890faSShreyas Bhatewara 	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3151d1a890faSShreyas Bhatewara 
3152d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_AUTO) {
31530bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSIX;
31540bdc0d70SShreyas Bhatewara 	}
3155d1a890faSShreyas Bhatewara 
31568f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
31570bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
3158b60b869dSAlexander Gordeev 		int i, nvec;
31590bdc0d70SShreyas Bhatewara 
3160b60b869dSAlexander Gordeev 		nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3161b60b869dSAlexander Gordeev 			1 : adapter->num_tx_queues;
3162b60b869dSAlexander Gordeev 		nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3163b60b869dSAlexander Gordeev 			0 : adapter->num_rx_queues;
3164b60b869dSAlexander Gordeev 		nvec += 1;	/* for link event */
3165b60b869dSAlexander Gordeev 		nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3166b60b869dSAlexander Gordeev 		       nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
316709c5088eSShreyas Bhatewara 
3168b60b869dSAlexander Gordeev 		for (i = 0; i < nvec; i++)
3169b60b869dSAlexander Gordeev 			adapter->intr.msix_entries[i].entry = i;
317009c5088eSShreyas Bhatewara 
3171b60b869dSAlexander Gordeev 		nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
3172b60b869dSAlexander Gordeev 		if (nvec < 0)
3173b60b869dSAlexander Gordeev 			goto msix_err;
317409c5088eSShreyas Bhatewara 
317509c5088eSShreyas Bhatewara 		/* If we cannot allocate one MSI-X vector per queue,
317609c5088eSShreyas Bhatewara 		 * limit the number of rx queues to 1.
317709c5088eSShreyas Bhatewara 		 */
3178b60b869dSAlexander Gordeev 		if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
317909c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
31807e96fbf2SShreyas Bhatewara 			    || adapter->num_rx_queues != 1) {
318109c5088eSShreyas Bhatewara 				adapter->share_intr = VMXNET3_INTR_TXSHARE;
3182204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
3183204a6e65SStephen Hemminger 					   "Number of rx queues : 1\n");
318409c5088eSShreyas Bhatewara 				adapter->num_rx_queues = 1;
318509c5088eSShreyas Bhatewara 			}
3186d1a890faSShreyas Bhatewara 		}
3187b60b869dSAlexander Gordeev 
3188b60b869dSAlexander Gordeev 		adapter->intr.num_intrs = nvec;
318909c5088eSShreyas Bhatewara 		return;
319009c5088eSShreyas Bhatewara 
3191b60b869dSAlexander Gordeev msix_err:
319209c5088eSShreyas Bhatewara 		/* If we cannot allocate MSI-X vectors, use only one rx queue */
31934bad25faSStephen Hemminger 		dev_info(&adapter->pdev->dev,
31944bad25faSStephen Hemminger 			 "Failed to enable MSI-X, error %d. "
3195b60b869dSAlexander Gordeev 			 "Limiting #rx queues to 1, try MSI.\n", nvec);
319609c5088eSShreyas Bhatewara 
31970bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSI;
31980bdc0d70SShreyas Bhatewara 	}
3199d1a890faSShreyas Bhatewara 
32000bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSI) {
3201b60b869dSAlexander Gordeev 		if (!pci_enable_msi(adapter->pdev)) {
320209c5088eSShreyas Bhatewara 			adapter->num_rx_queues = 1;
3203d1a890faSShreyas Bhatewara 			adapter->intr.num_intrs = 1;
3204d1a890faSShreyas Bhatewara 			return;
3205d1a890faSShreyas Bhatewara 		}
3206d1a890faSShreyas Bhatewara 	}
32070bdc0d70SShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
3208d1a890faSShreyas Bhatewara 
320909c5088eSShreyas Bhatewara 	adapter->num_rx_queues = 1;
3210204a6e65SStephen Hemminger 	dev_info(&adapter->netdev->dev,
3211204a6e65SStephen Hemminger 		 "Using INTx interrupt, #Rx queues: 1.\n");
3212d1a890faSShreyas Bhatewara 	adapter->intr.type = VMXNET3_IT_INTX;
3213d1a890faSShreyas Bhatewara 
3214d1a890faSShreyas Bhatewara 	/* INT-X related setting */
3215d1a890faSShreyas Bhatewara 	adapter->intr.num_intrs = 1;
3216d1a890faSShreyas Bhatewara }
3217d1a890faSShreyas Bhatewara 
3218d1a890faSShreyas Bhatewara 
3219d1a890faSShreyas Bhatewara static void
3220d1a890faSShreyas Bhatewara vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3221d1a890faSShreyas Bhatewara {
3222d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX)
3223d1a890faSShreyas Bhatewara 		pci_disable_msix(adapter->pdev);
3224d1a890faSShreyas Bhatewara 	else if (adapter->intr.type == VMXNET3_IT_MSI)
3225d1a890faSShreyas Bhatewara 		pci_disable_msi(adapter->pdev);
3226d1a890faSShreyas Bhatewara 	else
3227d1a890faSShreyas Bhatewara 		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3228d1a890faSShreyas Bhatewara }
3229d1a890faSShreyas Bhatewara 
3230d1a890faSShreyas Bhatewara 
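/* ndo_tx_timeout handler: count the hang and schedule the reset work. */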
3231d1a890faSShreyas Bhatewara static void
32320290bd29SMichael S. Tsirkin vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3233d1a890faSShreyas Bhatewara {
3234d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3235d1a890faSShreyas Bhatewara 	adapter->tx_timeout_count++;
3236d1a890faSShreyas Bhatewara 
3237204a6e65SStephen Hemminger 	netdev_err(adapter->netdev, "tx hang\n");
3238d1a890faSShreyas Bhatewara 	schedule_work(&adapter->work);
3239d1a890faSShreyas Bhatewara }
3240d1a890faSShreyas Bhatewara 
3241d1a890faSShreyas Bhatewara 
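/*
 * Deferred reset handler: unless another thread is already resetting the
 * device, quiesce, reset and re-activate it under rtnl_lock while the
 * interface is running.
 */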
3242d1a890faSShreyas Bhatewara static void
3243d1a890faSShreyas Bhatewara vmxnet3_reset_work(struct work_struct *data)
3244d1a890faSShreyas Bhatewara {
3245d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
3246d1a890faSShreyas Bhatewara 
3247d1a890faSShreyas Bhatewara 	adapter = container_of(data, struct vmxnet3_adapter, work);
3248d1a890faSShreyas Bhatewara 
3249d1a890faSShreyas Bhatewara 	/* if another thread is resetting the device, no need to proceed */
3250d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3251d1a890faSShreyas Bhatewara 		return;
3252d1a890faSShreyas Bhatewara 
3253d1a890faSShreyas Bhatewara 	/* if the device is closed, we must leave it alone */
3254d9a5f210SShreyas Bhatewara 	rtnl_lock();
3255d1a890faSShreyas Bhatewara 	if (netif_running(adapter->netdev)) {
3256204a6e65SStephen Hemminger 		netdev_notice(adapter->netdev, "resetting\n");
3257d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
3258d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
3259d1a890faSShreyas Bhatewara 		vmxnet3_activate_dev(adapter);
3260d1a890faSShreyas Bhatewara 	} else {
3261204a6e65SStephen Hemminger 		netdev_info(adapter->netdev, "already closed\n");
3262d1a890faSShreyas Bhatewara 	}
3263d9a5f210SShreyas Bhatewara 	rtnl_unlock();
3264d1a890faSShreyas Bhatewara 
3265277964e1SBenjamin Poirier 	netif_wake_queue(adapter->netdev);
3266d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3267d1a890faSShreyas Bhatewara }
3268d1a890faSShreyas Bhatewara 
3269d1a890faSShreyas Bhatewara 
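/*
 * Probe handler: size the tx/rx queue counts from the number of online
 * CPUs, set up the DMA masks and shared memory regions, negotiate the
 * device revision, allocate interrupt resources and register the netdev.
 */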
32703a4751a3SBill Pemberton static int
3271d1a890faSShreyas Bhatewara vmxnet3_probe_device(struct pci_dev *pdev,
3272d1a890faSShreyas Bhatewara 		     const struct pci_device_id *id)
3273d1a890faSShreyas Bhatewara {
3274d1a890faSShreyas Bhatewara 	static const struct net_device_ops vmxnet3_netdev_ops = {
3275d1a890faSShreyas Bhatewara 		.ndo_open = vmxnet3_open,
3276d1a890faSShreyas Bhatewara 		.ndo_stop = vmxnet3_close,
3277d1a890faSShreyas Bhatewara 		.ndo_start_xmit = vmxnet3_xmit_frame,
3278d1a890faSShreyas Bhatewara 		.ndo_set_mac_address = vmxnet3_set_mac_addr,
3279d1a890faSShreyas Bhatewara 		.ndo_change_mtu = vmxnet3_change_mtu,
32803dd7400bSRonak Doshi 		.ndo_fix_features = vmxnet3_fix_features,
3281a0d2730cSMichał Mirosław 		.ndo_set_features = vmxnet3_set_features,
328295305f6cSstephen hemminger 		.ndo_get_stats64 = vmxnet3_get_stats64,
3283d1a890faSShreyas Bhatewara 		.ndo_tx_timeout = vmxnet3_tx_timeout,
3284afc4b13dSJiri Pirko 		.ndo_set_rx_mode = vmxnet3_set_mc,
3285d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3286d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3287d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
3288d1a890faSShreyas Bhatewara 		.ndo_poll_controller = vmxnet3_netpoll,
3289d1a890faSShreyas Bhatewara #endif
3290d1a890faSShreyas Bhatewara 	};
3291d1a890faSShreyas Bhatewara 	int err;
329261aeeceaShpreg@vmware.com 	bool dma64;
3293d1a890faSShreyas Bhatewara 	u32 ver;
3294d1a890faSShreyas Bhatewara 	struct net_device *netdev;
3295d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
3296d1a890faSShreyas Bhatewara 	u8 mac[ETH_ALEN];
329709c5088eSShreyas Bhatewara 	int size;
329809c5088eSShreyas Bhatewara 	int num_tx_queues;
329909c5088eSShreyas Bhatewara 	int num_rx_queues;
3300d1a890faSShreyas Bhatewara 
3301e154b639SShreyas Bhatewara 	if (!pci_msi_enabled())
3302e154b639SShreyas Bhatewara 		enable_mq = 0;
3303e154b639SShreyas Bhatewara 
330409c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
330509c5088eSShreyas Bhatewara 	if (enable_mq)
330609c5088eSShreyas Bhatewara 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
330709c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
330809c5088eSShreyas Bhatewara 	else
330909c5088eSShreyas Bhatewara #endif
331009c5088eSShreyas Bhatewara 		num_rx_queues = 1;
3311eebb02b1SShreyas Bhatewara 	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
331209c5088eSShreyas Bhatewara 
331309c5088eSShreyas Bhatewara 	if (enable_mq)
331409c5088eSShreyas Bhatewara 		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
331509c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
331609c5088eSShreyas Bhatewara 	else
331709c5088eSShreyas Bhatewara 		num_tx_queues = 1;
331809c5088eSShreyas Bhatewara 
3319eebb02b1SShreyas Bhatewara 	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
332009c5088eSShreyas Bhatewara 	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
332109c5088eSShreyas Bhatewara 				   max(num_tx_queues, num_rx_queues));
3322204a6e65SStephen Hemminger 	dev_info(&pdev->dev,
3323204a6e65SStephen Hemminger 		 "# of Tx queues : %d, # of Rx queues : %d\n",
332409c5088eSShreyas Bhatewara 		 num_tx_queues, num_rx_queues);
332509c5088eSShreyas Bhatewara 
332641de8d4cSJoe Perches 	if (!netdev)
3327d1a890faSShreyas Bhatewara 		return -ENOMEM;
3328d1a890faSShreyas Bhatewara 
3329d1a890faSShreyas Bhatewara 	pci_set_drvdata(pdev, netdev);
3330d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
3331d1a890faSShreyas Bhatewara 	adapter->netdev = netdev;
3332d1a890faSShreyas Bhatewara 	adapter->pdev = pdev;
3333d1a890faSShreyas Bhatewara 
3334f00e2b0aSNeil Horman 	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3335f00e2b0aSNeil Horman 	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
333653831aa1SShrikrishna Khare 	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3337f00e2b0aSNeil Horman 
333861aeeceaShpreg@vmware.com 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
333961aeeceaShpreg@vmware.com 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
334061aeeceaShpreg@vmware.com 			dev_err(&pdev->dev,
334161aeeceaShpreg@vmware.com 				"pci_set_consistent_dma_mask failed\n");
334261aeeceaShpreg@vmware.com 			err = -EIO;
334361aeeceaShpreg@vmware.com 			goto err_set_mask;
334461aeeceaShpreg@vmware.com 		}
334561aeeceaShpreg@vmware.com 		dma64 = true;
334661aeeceaShpreg@vmware.com 	} else {
334761aeeceaShpreg@vmware.com 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
334861aeeceaShpreg@vmware.com 			dev_err(&pdev->dev,
334961aeeceaShpreg@vmware.com 				"pci_set_dma_mask failed\n");
335061aeeceaShpreg@vmware.com 			err = -EIO;
335161aeeceaShpreg@vmware.com 			goto err_set_mask;
335261aeeceaShpreg@vmware.com 		}
335361aeeceaShpreg@vmware.com 		dma64 = false;
335461aeeceaShpreg@vmware.com 	}
335561aeeceaShpreg@vmware.com 
335683d0feffSShreyas Bhatewara 	spin_lock_init(&adapter->cmd_lock);
3357b0eb57cbSAndy King 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3358b0eb57cbSAndy King 					     sizeof(struct vmxnet3_adapter),
3359b0eb57cbSAndy King 					     PCI_DMA_TODEVICE);
33605738a09dSAlexey Khoroshilov 	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
33615738a09dSAlexey Khoroshilov 		dev_err(&pdev->dev, "Failed to map dma\n");
33625738a09dSAlexey Khoroshilov 		err = -EFAULT;
336361aeeceaShpreg@vmware.com 		goto err_set_mask;
33645738a09dSAlexey Khoroshilov 	}
3365b0eb57cbSAndy King 	adapter->shared = dma_alloc_coherent(
3366b0eb57cbSAndy King 				&adapter->pdev->dev,
3367d1a890faSShreyas Bhatewara 				sizeof(struct Vmxnet3_DriverShared),
3368b0eb57cbSAndy King 				&adapter->shared_pa, GFP_KERNEL);
3369d1a890faSShreyas Bhatewara 	if (!adapter->shared) {
3370204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to allocate memory\n");
3371d1a890faSShreyas Bhatewara 		err = -ENOMEM;
3372d1a890faSShreyas Bhatewara 		goto err_alloc_shared;
3373d1a890faSShreyas Bhatewara 	}
3374d1a890faSShreyas Bhatewara 
337509c5088eSShreyas Bhatewara 	adapter->num_rx_queues = num_rx_queues;
337609c5088eSShreyas Bhatewara 	adapter->num_tx_queues = num_tx_queues;
3377e4fabf2bSBhavesh Davda 	adapter->rx_buf_per_pkt = 1;
337809c5088eSShreyas Bhatewara 
337909c5088eSShreyas Bhatewara 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
338009c5088eSShreyas Bhatewara 	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3381b0eb57cbSAndy King 	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3382b0eb57cbSAndy King 						&adapter->queue_desc_pa,
3383b0eb57cbSAndy King 						GFP_KERNEL);
3384d1a890faSShreyas Bhatewara 
3385d1a890faSShreyas Bhatewara 	if (!adapter->tqd_start) {
3386204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to allocate memory\n");
3387d1a890faSShreyas Bhatewara 		err = -ENOMEM;
3388d1a890faSShreyas Bhatewara 		goto err_alloc_queue_desc;
3389d1a890faSShreyas Bhatewara 	}
339009c5088eSShreyas Bhatewara 	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
339109c5088eSShreyas Bhatewara 							    adapter->num_tx_queues);
3392d1a890faSShreyas Bhatewara 
3393b0eb57cbSAndy King 	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3394b0eb57cbSAndy King 					      sizeof(struct Vmxnet3_PMConf),
3395b0eb57cbSAndy King 					      &adapter->pm_conf_pa,
3396b0eb57cbSAndy King 					      GFP_KERNEL);
3397d1a890faSShreyas Bhatewara 	if (adapter->pm_conf == NULL) {
3398d1a890faSShreyas Bhatewara 		err = -ENOMEM;
3399d1a890faSShreyas Bhatewara 		goto err_alloc_pm;
3400d1a890faSShreyas Bhatewara 	}
3401d1a890faSShreyas Bhatewara 
340209c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
340309c5088eSShreyas Bhatewara 
3404b0eb57cbSAndy King 	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3405b0eb57cbSAndy King 					       sizeof(struct UPT1_RSSConf),
3406b0eb57cbSAndy King 					       &adapter->rss_conf_pa,
3407b0eb57cbSAndy King 					       GFP_KERNEL);
340809c5088eSShreyas Bhatewara 	if (adapter->rss_conf == NULL) {
340909c5088eSShreyas Bhatewara 		err = -ENOMEM;
341009c5088eSShreyas Bhatewara 		goto err_alloc_rss;
341109c5088eSShreyas Bhatewara 	}
341209c5088eSShreyas Bhatewara #endif /* VMXNET3_RSS */
341309c5088eSShreyas Bhatewara 
341461aeeceaShpreg@vmware.com 	err = vmxnet3_alloc_pci_resources(adapter);
3415d1a890faSShreyas Bhatewara 	if (err < 0)
3416d1a890faSShreyas Bhatewara 		goto err_alloc_pci;
3417d1a890faSShreyas Bhatewara 
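	/* Negotiate the highest device revision supported by both sides:
	 * read the revision bitmap from VRRS and acknowledge the chosen
	 * revision back to the device.
	 */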
3418d1a890faSShreyas Bhatewara 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
34196af9d787SShrikrishna Khare 	if (ver & (1 << VMXNET3_REV_3)) {
34206af9d787SShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter,
34216af9d787SShrikrishna Khare 				       VMXNET3_REG_VRRS,
34226af9d787SShrikrishna Khare 				       1 << VMXNET3_REV_3);
34236af9d787SShrikrishna Khare 		adapter->version = VMXNET3_REV_3 + 1;
34246af9d787SShrikrishna Khare 	} else if (ver & (1 << VMXNET3_REV_2)) {
3425190af10fSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter,
3426190af10fSShrikrishna Khare 				       VMXNET3_REG_VRRS,
3427190af10fSShrikrishna Khare 				       1 << VMXNET3_REV_2);
3428190af10fSShrikrishna Khare 		adapter->version = VMXNET3_REV_2 + 1;
3429190af10fSShrikrishna Khare 	} else if (ver & (1 << VMXNET3_REV_1)) {
3430190af10fSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter,
3431190af10fSShrikrishna Khare 				       VMXNET3_REG_VRRS,
3432190af10fSShrikrishna Khare 				       1 << VMXNET3_REV_1);
3433190af10fSShrikrishna Khare 		adapter->version = VMXNET3_REV_1 + 1;
3434d1a890faSShreyas Bhatewara 	} else {
3435204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
3436204a6e65SStephen Hemminger 			"Incompatible h/w version (0x%x) for adapter\n", ver);
3437d1a890faSShreyas Bhatewara 		err = -EBUSY;
3438d1a890faSShreyas Bhatewara 		goto err_ver;
3439d1a890faSShreyas Bhatewara 	}
344045dac1d6SShreyas Bhatewara 	dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3441d1a890faSShreyas Bhatewara 
3442d1a890faSShreyas Bhatewara 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3443d1a890faSShreyas Bhatewara 	if (ver & 1) {
3444d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3445d1a890faSShreyas Bhatewara 	} else {
3446204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
3447204a6e65SStephen Hemminger 			"Incompatible upt version (0x%x) for adapter\n", ver);
3448d1a890faSShreyas Bhatewara 		err = -EBUSY;
3449d1a890faSShreyas Bhatewara 		goto err_ver;
3450d1a890faSShreyas Bhatewara 	}
3451d1a890faSShreyas Bhatewara 
34524edef40eSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
34534edef40eSShrikrishna Khare 		adapter->coal_conf =
34544edef40eSShrikrishna Khare 			dma_alloc_coherent(&adapter->pdev->dev,
34554edef40eSShrikrishna Khare 					   sizeof(struct Vmxnet3_CoalesceScheme),
34574edef40eSShrikrishna Khare 					   &adapter->coal_conf_pa,
34584edef40eSShrikrishna Khare 					   GFP_KERNEL);
34594edef40eSShrikrishna Khare 		if (!adapter->coal_conf) {
34604edef40eSShrikrishna Khare 			err = -ENOMEM;
34614edef40eSShrikrishna Khare 			goto err_ver;
34624edef40eSShrikrishna Khare 		}
34634edef40eSShrikrishna Khare 		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
34644edef40eSShrikrishna Khare 		adapter->default_coal_mode = true;
34654edef40eSShrikrishna Khare 	}
34664edef40eSShrikrishna Khare 
3467d3a8a9e5SRonak Doshi 	if (VMXNET3_VERSION_GE_4(adapter)) {
3468d3a8a9e5SRonak Doshi 		adapter->default_rss_fields = true;
3469d3a8a9e5SRonak Doshi 		adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
3470d3a8a9e5SRonak Doshi 	}
3471d3a8a9e5SRonak Doshi 
3472e101e7ddSShreyas Bhatewara 	SET_NETDEV_DEV(netdev, &pdev->dev);
3473d1a890faSShreyas Bhatewara 	vmxnet3_declare_features(adapter, dma64);
3474d1a890faSShreyas Bhatewara 
347550a5ce3eSShrikrishna Khare 	adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
347650a5ce3eSShrikrishna Khare 		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
347750a5ce3eSShrikrishna Khare 
34784db37a78SStephen Hemminger 	if (adapter->num_tx_queues == adapter->num_rx_queues)
34794db37a78SStephen Hemminger 		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
34804db37a78SStephen Hemminger 	else
348109c5088eSShreyas Bhatewara 		adapter->share_intr = VMXNET3_INTR_DONTSHARE;
348209c5088eSShreyas Bhatewara 
3483d1a890faSShreyas Bhatewara 	vmxnet3_alloc_intr_resources(adapter);
3484d1a890faSShreyas Bhatewara 
348509c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
348609c5088eSShreyas Bhatewara 	if (adapter->num_rx_queues > 1 &&
348709c5088eSShreyas Bhatewara 	    adapter->intr.type == VMXNET3_IT_MSIX) {
348809c5088eSShreyas Bhatewara 		adapter->rss = true;
34897db11f75SStephen Hemminger 		netdev->hw_features |= NETIF_F_RXHASH;
34907db11f75SStephen Hemminger 		netdev->features |= NETIF_F_RXHASH;
3491204a6e65SStephen Hemminger 		dev_dbg(&pdev->dev, "RSS is enabled.\n");
349209c5088eSShreyas Bhatewara 	} else {
349309c5088eSShreyas Bhatewara 		adapter->rss = false;
349409c5088eSShreyas Bhatewara 	}
349509c5088eSShreyas Bhatewara #endif
349609c5088eSShreyas Bhatewara 
3497d1a890faSShreyas Bhatewara 	vmxnet3_read_mac_addr(adapter, mac);
3498d1a890faSShreyas Bhatewara 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
3499d1a890faSShreyas Bhatewara 
3500d1a890faSShreyas Bhatewara 	netdev->netdev_ops = &vmxnet3_netdev_ops;
3501d1a890faSShreyas Bhatewara 	vmxnet3_set_ethtool_ops(netdev);
350209c5088eSShreyas Bhatewara 	netdev->watchdog_timeo = 5 * HZ;
3503d1a890faSShreyas Bhatewara 
3504d0c2c997SJarod Wilson 	/* MTU range: 60 - 9000 */
3505d0c2c997SJarod Wilson 	netdev->min_mtu = VMXNET3_MIN_MTU;
3506d0c2c997SJarod Wilson 	netdev->max_mtu = VMXNET3_MAX_MTU;
3507d0c2c997SJarod Wilson 
3508d1a890faSShreyas Bhatewara 	INIT_WORK(&adapter->work, vmxnet3_reset_work);
3509e3bc4ffbSSteve Hodgson 	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3510d1a890faSShreyas Bhatewara 
351109c5088eSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
351209c5088eSShreyas Bhatewara 		int i;
351309c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
351409c5088eSShreyas Bhatewara 			netif_napi_add(adapter->netdev,
351509c5088eSShreyas Bhatewara 				       &adapter->rx_queue[i].napi,
351609c5088eSShreyas Bhatewara 				       vmxnet3_poll_rx_only, 64);
351709c5088eSShreyas Bhatewara 		}
351809c5088eSShreyas Bhatewara 	} else {
351909c5088eSShreyas Bhatewara 		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
352009c5088eSShreyas Bhatewara 			       vmxnet3_poll, 64);
352109c5088eSShreyas Bhatewara 	}
352209c5088eSShreyas Bhatewara 
352309c5088eSShreyas Bhatewara 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
352409c5088eSShreyas Bhatewara 	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
352509c5088eSShreyas Bhatewara 
35266cdd20c3SNeil Horman 	netif_carrier_off(netdev);
3527d1a890faSShreyas Bhatewara 	err = register_netdev(netdev);
3528d1a890faSShreyas Bhatewara 
3529d1a890faSShreyas Bhatewara 	if (err) {
3530204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to register adapter\n");
3531d1a890faSShreyas Bhatewara 		goto err_register;
3532d1a890faSShreyas Bhatewara 	}
3533d1a890faSShreyas Bhatewara 
35344a1745fcSShreyas Bhatewara 	vmxnet3_check_link(adapter, false);
3535d1a890faSShreyas Bhatewara 	return 0;
3536d1a890faSShreyas Bhatewara 
3537d1a890faSShreyas Bhatewara err_register:
35384edef40eSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
35394edef40eSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
35404edef40eSShrikrishna Khare 				  sizeof(struct Vmxnet3_CoalesceScheme),
35414edef40eSShrikrishna Khare 				  adapter->coal_conf, adapter->coal_conf_pa);
35424edef40eSShrikrishna Khare 	}
3543d1a890faSShreyas Bhatewara 	vmxnet3_free_intr_resources(adapter);
3544d1a890faSShreyas Bhatewara err_ver:
3545d1a890faSShreyas Bhatewara 	vmxnet3_free_pci_resources(adapter);
3546d1a890faSShreyas Bhatewara err_alloc_pci:
354709c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
3548b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3549b0eb57cbSAndy King 			  adapter->rss_conf, adapter->rss_conf_pa);
355009c5088eSShreyas Bhatewara err_alloc_rss:
355109c5088eSShreyas Bhatewara #endif
3552b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3553b0eb57cbSAndy King 			  adapter->pm_conf, adapter->pm_conf_pa);
3554d1a890faSShreyas Bhatewara err_alloc_pm:
3555b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
355609c5088eSShreyas Bhatewara 			  adapter->queue_desc_pa);
3557d1a890faSShreyas Bhatewara err_alloc_queue_desc:
3558b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev,
3559b0eb57cbSAndy King 			  sizeof(struct Vmxnet3_DriverShared),
3560d1a890faSShreyas Bhatewara 			  adapter->shared, adapter->shared_pa);
3561d1a890faSShreyas Bhatewara err_alloc_shared:
3562b0eb57cbSAndy King 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3563b0eb57cbSAndy King 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
356461aeeceaShpreg@vmware.com err_set_mask:
3565d1a890faSShreyas Bhatewara 	free_netdev(netdev);
3566d1a890faSShreyas Bhatewara 	return err;
3567d1a890faSShreyas Bhatewara }
3568d1a890faSShreyas Bhatewara 
3569d1a890faSShreyas Bhatewara 
35703a4751a3SBill Pemberton static void
3571d1a890faSShreyas Bhatewara vmxnet3_remove_device(struct pci_dev *pdev)
3572d1a890faSShreyas Bhatewara {
3573d1a890faSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
3574d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
357509c5088eSShreyas Bhatewara 	int size = 0;
357609c5088eSShreyas Bhatewara 	int num_rx_queues;
357709c5088eSShreyas Bhatewara 
357809c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
357909c5088eSShreyas Bhatewara 	if (enable_mq)
358009c5088eSShreyas Bhatewara 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
358109c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
358209c5088eSShreyas Bhatewara 	else
358309c5088eSShreyas Bhatewara #endif
358409c5088eSShreyas Bhatewara 		num_rx_queues = 1;
3585eebb02b1SShreyas Bhatewara 	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3586d1a890faSShreyas Bhatewara 
358723f333a2STejun Heo 	cancel_work_sync(&adapter->work);
3588d1a890faSShreyas Bhatewara 
3589d1a890faSShreyas Bhatewara 	unregister_netdev(netdev);
3590d1a890faSShreyas Bhatewara 
3591d1a890faSShreyas Bhatewara 	vmxnet3_free_intr_resources(adapter);
3592d1a890faSShreyas Bhatewara 	vmxnet3_free_pci_resources(adapter);
35934edef40eSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
35944edef40eSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
35954edef40eSShrikrishna Khare 				  sizeof(struct Vmxnet3_CoalesceScheme),
35964edef40eSShrikrishna Khare 				  adapter->coal_conf, adapter->coal_conf_pa);
35974edef40eSShrikrishna Khare 	}
359809c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
3599b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3600b0eb57cbSAndy King 			  adapter->rss_conf, adapter->rss_conf_pa);
360109c5088eSShreyas Bhatewara #endif
3602b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3603b0eb57cbSAndy King 			  adapter->pm_conf, adapter->pm_conf_pa);
360409c5088eSShreyas Bhatewara 
360509c5088eSShreyas Bhatewara 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
360609c5088eSShreyas Bhatewara 	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3607b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
360809c5088eSShreyas Bhatewara 			  adapter->queue_desc_pa);
3609b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev,
3610b0eb57cbSAndy King 			  sizeof(struct Vmxnet3_DriverShared),
3611d1a890faSShreyas Bhatewara 			  adapter->shared, adapter->shared_pa);
3612b0eb57cbSAndy King 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3613b0eb57cbSAndy King 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3614d1a890faSShreyas Bhatewara 	free_netdev(netdev);
3615d1a890faSShreyas Bhatewara }
3616d1a890faSShreyas Bhatewara 
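/*
 * PCI shutdown handler: wait for any in-flight reset work, then quiesce
 * the device and disable all interrupts unless it is already quiesced.
 */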
3617e9ba47bfSShreyas Bhatewara static void vmxnet3_shutdown_device(struct pci_dev *pdev)
3618e9ba47bfSShreyas Bhatewara {
3619e9ba47bfSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
3620e9ba47bfSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3621e9ba47bfSShreyas Bhatewara 	unsigned long flags;
3622e9ba47bfSShreyas Bhatewara 
3623e9ba47bfSShreyas Bhatewara 	/* Reset_work may be in the middle of resetting the device; wait for
3624e9ba47bfSShreyas Bhatewara 	 * its completion.
3625e9ba47bfSShreyas Bhatewara 	 */
3626e9ba47bfSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
362793c65d13SYueHaibing 		usleep_range(1000, 2000);
3628e9ba47bfSShreyas Bhatewara 
3629e9ba47bfSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
3630e9ba47bfSShreyas Bhatewara 			     &adapter->state)) {
3631e9ba47bfSShreyas Bhatewara 		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3632e9ba47bfSShreyas Bhatewara 		return;
3633e9ba47bfSShreyas Bhatewara 	}
3634e9ba47bfSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3635e9ba47bfSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3636e9ba47bfSShreyas Bhatewara 			       VMXNET3_CMD_QUIESCE_DEV);
3637e9ba47bfSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3638e9ba47bfSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
3639e9ba47bfSShreyas Bhatewara 
3640e9ba47bfSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3641e9ba47bfSShreyas Bhatewara }
3642e9ba47bfSShreyas Bhatewara 
3643d1a890faSShreyas Bhatewara 
3644d1a890faSShreyas Bhatewara #ifdef CONFIG_PM
3645d1a890faSShreyas Bhatewara 
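/*
 * Suspend handler: stop NAPI and release interrupt resources, program
 * wake-up pattern filters for the configured WoL modes, then hand the
 * device over to PCI power management.
 */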
3646d1a890faSShreyas Bhatewara static int
3647d1a890faSShreyas Bhatewara vmxnet3_suspend(struct device *device)
3648d1a890faSShreyas Bhatewara {
3649d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = to_pci_dev(device);
3650d1a890faSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
3651d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3652d1a890faSShreyas Bhatewara 	struct Vmxnet3_PMConf *pmConf;
3653d1a890faSShreyas Bhatewara 	struct ethhdr *ehdr;
3654d1a890faSShreyas Bhatewara 	struct arphdr *ahdr;
3655d1a890faSShreyas Bhatewara 	u8 *arpreq;
3656d1a890faSShreyas Bhatewara 	struct in_device *in_dev;
3657d1a890faSShreyas Bhatewara 	struct in_ifaddr *ifa;
365883d0feffSShreyas Bhatewara 	unsigned long flags;
3659d1a890faSShreyas Bhatewara 	int i = 0;
3660d1a890faSShreyas Bhatewara 
3661d1a890faSShreyas Bhatewara 	if (!netif_running(netdev))
3662d1a890faSShreyas Bhatewara 		return 0;
3663d1a890faSShreyas Bhatewara 
366451956cd6SShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
366551956cd6SShreyas Bhatewara 		napi_disable(&adapter->rx_queue[i].napi);
366651956cd6SShreyas Bhatewara 
3667d1a890faSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
3668d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
3669d1a890faSShreyas Bhatewara 	vmxnet3_free_intr_resources(adapter);
3670d1a890faSShreyas Bhatewara 
3671d1a890faSShreyas Bhatewara 	netif_device_detach(netdev);
367209c5088eSShreyas Bhatewara 	netif_tx_stop_all_queues(netdev);
3673d1a890faSShreyas Bhatewara 
3674d1a890faSShreyas Bhatewara 	/* Create wake-up filters. */
3675d1a890faSShreyas Bhatewara 	pmConf = adapter->pm_conf;
3676d1a890faSShreyas Bhatewara 	memset(pmConf, 0, sizeof(*pmConf));
3677d1a890faSShreyas Bhatewara 
3678d1a890faSShreyas Bhatewara 	if (adapter->wol & WAKE_UCAST) {
3679d1a890faSShreyas Bhatewara 		pmConf->filters[i].patternSize = ETH_ALEN;
3680d1a890faSShreyas Bhatewara 		pmConf->filters[i].maskSize = 1;
3681d1a890faSShreyas Bhatewara 		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3682d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3683d1a890faSShreyas Bhatewara 
36843843e515SHarvey Harrison 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3685d1a890faSShreyas Bhatewara 		i++;
3686d1a890faSShreyas Bhatewara 	}
3687d1a890faSShreyas Bhatewara 
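	/* Wake on an ARP request for this interface's primary IPv4 address:
	 * the pattern covers the Ethernet type, the ARP opcode and the
	 * target IP, and the mask selects only those bytes.
	 */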
3688d1a890faSShreyas Bhatewara 	if (adapter->wol & WAKE_ARP) {
36892638eb8bSFlorian Westphal 		rcu_read_lock();
3690d1a890faSShreyas Bhatewara 
36912638eb8bSFlorian Westphal 		in_dev = __in_dev_get_rcu(netdev);
36922638eb8bSFlorian Westphal 		if (!in_dev) {
36932638eb8bSFlorian Westphal 			rcu_read_unlock();
3694d1a890faSShreyas Bhatewara 			goto skip_arp;
36952638eb8bSFlorian Westphal 		}
36962638eb8bSFlorian Westphal 
36972638eb8bSFlorian Westphal 		ifa = rcu_dereference(in_dev->ifa_list);
36982638eb8bSFlorian Westphal 		if (!ifa) {
36992638eb8bSFlorian Westphal 			rcu_read_unlock();
37002638eb8bSFlorian Westphal 			goto skip_arp;
37012638eb8bSFlorian Westphal 		}
3702d1a890faSShreyas Bhatewara 
3703d1a890faSShreyas Bhatewara 		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3704d1a890faSShreyas Bhatewara 			sizeof(struct arphdr) +		/* ARP header */
3705d1a890faSShreyas Bhatewara 			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
3706d1a890faSShreyas Bhatewara 			2 * sizeof(u32);	/*2 IPv4 addresses */
3707d1a890faSShreyas Bhatewara 		pmConf->filters[i].maskSize =
3708d1a890faSShreyas Bhatewara 			(pmConf->filters[i].patternSize - 1) / 8 + 1;
3709d1a890faSShreyas Bhatewara 
3710d1a890faSShreyas Bhatewara 		/* ETH_P_ARP in Ethernet header. */
3711d1a890faSShreyas Bhatewara 		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3712d1a890faSShreyas Bhatewara 		ehdr->h_proto = htons(ETH_P_ARP);
3713d1a890faSShreyas Bhatewara 
3714d1a890faSShreyas Bhatewara 		/* ARPOP_REQUEST in ARP header. */
3715d1a890faSShreyas Bhatewara 		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3716d1a890faSShreyas Bhatewara 		ahdr->ar_op = htons(ARPOP_REQUEST);
3717d1a890faSShreyas Bhatewara 		arpreq = (u8 *)(ahdr + 1);
3718d1a890faSShreyas Bhatewara 
3719d1a890faSShreyas Bhatewara 		/* The Unicast IPv4 address in 'tip' field. */
3720d1a890faSShreyas Bhatewara 		arpreq += 2 * ETH_ALEN + sizeof(u32);
37212638eb8bSFlorian Westphal 		*(__be32 *)arpreq = ifa->ifa_address;
37222638eb8bSFlorian Westphal 
37232638eb8bSFlorian Westphal 		rcu_read_unlock();
3724d1a890faSShreyas Bhatewara 
3725d1a890faSShreyas Bhatewara 		/* The mask for the relevant bits. */
3726d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[0] = 0x00;
3727d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3728d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3729d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[3] = 0x00;
3730d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3731d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3732d1a890faSShreyas Bhatewara 
37333843e515SHarvey Harrison 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3734d1a890faSShreyas Bhatewara 		i++;
3735d1a890faSShreyas Bhatewara 	}
3736d1a890faSShreyas Bhatewara 
3737d1a890faSShreyas Bhatewara skip_arp:
3738d1a890faSShreyas Bhatewara 	if (adapter->wol & WAKE_MAGIC)
37393843e515SHarvey Harrison 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3740d1a890faSShreyas Bhatewara 
3741d1a890faSShreyas Bhatewara 	pmConf->numFilters = i;
3742d1a890faSShreyas Bhatewara 
3743115924b6SShreyas Bhatewara 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3744115924b6SShreyas Bhatewara 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3745115924b6SShreyas Bhatewara 								  *pmConf));
3746b0eb57cbSAndy King 	adapter->shared->devRead.pmConfDesc.confPA =
3747b0eb57cbSAndy King 		cpu_to_le64(adapter->pm_conf_pa);
3748d1a890faSShreyas Bhatewara 
374983d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3750d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3751d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_UPDATE_PMCFG);
375283d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3753d1a890faSShreyas Bhatewara 
3754d1a890faSShreyas Bhatewara 	pci_save_state(pdev);
3755d1a890faSShreyas Bhatewara 	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3756d1a890faSShreyas Bhatewara 			adapter->wol);
3757d1a890faSShreyas Bhatewara 	pci_disable_device(pdev);
3758d1a890faSShreyas Bhatewara 	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3759d1a890faSShreyas Bhatewara 
3760d1a890faSShreyas Bhatewara 	return 0;
3761d1a890faSShreyas Bhatewara }
3762d1a890faSShreyas Bhatewara 
3763d1a890faSShreyas Bhatewara 
3764d1a890faSShreyas Bhatewara static int
3765d1a890faSShreyas Bhatewara vmxnet3_resume(struct device *device)
3766d1a890faSShreyas Bhatewara {
37675ec82c1eSShrikrishna Khare 	int err;
376883d0feffSShreyas Bhatewara 	unsigned long flags;
3769d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = to_pci_dev(device);
3770d1a890faSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
3771d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3772d1a890faSShreyas Bhatewara 
3773d1a890faSShreyas Bhatewara 	if (!netif_running(netdev))
3774d1a890faSShreyas Bhatewara 		return 0;
3775d1a890faSShreyas Bhatewara 
3776d1a890faSShreyas Bhatewara 	pci_set_power_state(pdev, PCI_D0);
3777d1a890faSShreyas Bhatewara 	pci_restore_state(pdev);
3778d1a890faSShreyas Bhatewara 	err = pci_enable_device_mem(pdev);
3779d1a890faSShreyas Bhatewara 	if (err != 0)
3780d1a890faSShreyas Bhatewara 		return err;
3781d1a890faSShreyas Bhatewara 
3782d1a890faSShreyas Bhatewara 	pci_enable_wake(pdev, PCI_D0, 0);
3783d1a890faSShreyas Bhatewara 
37845ec82c1eSShrikrishna Khare 	vmxnet3_alloc_intr_resources(adapter);
37855ec82c1eSShrikrishna Khare 
37865ec82c1eSShrikrishna Khare 	/* The device has to be reinitialized on resume from hibernate or
37875ec82c1eSShrikrishna Khare 	 * suspend, since its state is not guaranteed to have been preserved.
37885ec82c1eSShrikrishna Khare 	 */
37895ec82c1eSShrikrishna Khare 
37905ec82c1eSShrikrishna Khare 	/* There is no need to check the adapter state here, as no other
37915ec82c1eSShrikrishna Khare 	 * reset task can run during device resume.
37925ec82c1eSShrikrishna Khare 	 */
379383d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3794d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
37955ec82c1eSShrikrishna Khare 			       VMXNET3_CMD_QUIESCE_DEV);
379683d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
37975ec82c1eSShrikrishna Khare 	vmxnet3_tq_cleanup_all(adapter);
37985ec82c1eSShrikrishna Khare 	vmxnet3_rq_cleanup_all(adapter);
37995ec82c1eSShrikrishna Khare 
38005ec82c1eSShrikrishna Khare 	vmxnet3_reset_dev(adapter);
38015ec82c1eSShrikrishna Khare 	err = vmxnet3_activate_dev(adapter);
38025ec82c1eSShrikrishna Khare 	if (err != 0) {
38035ec82c1eSShrikrishna Khare 		netdev_err(netdev,
38045ec82c1eSShrikrishna Khare 			   "failed to re-activate on resume, error: %d", err);
38055ec82c1eSShrikrishna Khare 		vmxnet3_force_close(adapter);
38065ec82c1eSShrikrishna Khare 		return err;
38075ec82c1eSShrikrishna Khare 	}
38085ec82c1eSShrikrishna Khare 	netif_device_attach(netdev);
3809d1a890faSShreyas Bhatewara 
3810d1a890faSShreyas Bhatewara 	return 0;
3811d1a890faSShreyas Bhatewara }
3812d1a890faSShreyas Bhatewara 
381347145210SAlexey Dobriyan static const struct dev_pm_ops vmxnet3_pm_ops = {
3814d1a890faSShreyas Bhatewara 	.suspend = vmxnet3_suspend,
3815d1a890faSShreyas Bhatewara 	.resume = vmxnet3_resume,
38165ec82c1eSShrikrishna Khare 	.freeze = vmxnet3_suspend,
38175ec82c1eSShrikrishna Khare 	.restore = vmxnet3_resume,
3818d1a890faSShreyas Bhatewara };
3819d1a890faSShreyas Bhatewara #endif
3820d1a890faSShreyas Bhatewara 
3821d1a890faSShreyas Bhatewara static struct pci_driver vmxnet3_driver = {
3822d1a890faSShreyas Bhatewara 	.name		= vmxnet3_driver_name,
3823d1a890faSShreyas Bhatewara 	.id_table	= vmxnet3_pciid_table,
3824d1a890faSShreyas Bhatewara 	.probe		= vmxnet3_probe_device,
38253a4751a3SBill Pemberton 	.remove		= vmxnet3_remove_device,
3826e9ba47bfSShreyas Bhatewara 	.shutdown	= vmxnet3_shutdown_device,
3827d1a890faSShreyas Bhatewara #ifdef CONFIG_PM
3828d1a890faSShreyas Bhatewara 	.driver.pm	= &vmxnet3_pm_ops,
3829d1a890faSShreyas Bhatewara #endif
3830d1a890faSShreyas Bhatewara };
3831d1a890faSShreyas Bhatewara 
3832d1a890faSShreyas Bhatewara 
3833d1a890faSShreyas Bhatewara static int __init
3834d1a890faSShreyas Bhatewara vmxnet3_init_module(void)
3835d1a890faSShreyas Bhatewara {
3836204a6e65SStephen Hemminger 	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
3837d1a890faSShreyas Bhatewara 		VMXNET3_DRIVER_VERSION_REPORT);
3838d1a890faSShreyas Bhatewara 	return pci_register_driver(&vmxnet3_driver);
3839d1a890faSShreyas Bhatewara }
3840d1a890faSShreyas Bhatewara 
3841d1a890faSShreyas Bhatewara module_init(vmxnet3_init_module);
3842d1a890faSShreyas Bhatewara 
3843d1a890faSShreyas Bhatewara 
3844d1a890faSShreyas Bhatewara static void
3845d1a890faSShreyas Bhatewara vmxnet3_exit_module(void)
3846d1a890faSShreyas Bhatewara {
3847d1a890faSShreyas Bhatewara 	pci_unregister_driver(&vmxnet3_driver);
3848d1a890faSShreyas Bhatewara }
3849d1a890faSShreyas Bhatewara 
3850d1a890faSShreyas Bhatewara module_exit(vmxnet3_exit_module);
3851d1a890faSShreyas Bhatewara 
3852d1a890faSShreyas Bhatewara MODULE_AUTHOR("VMware, Inc.");
3853d1a890faSShreyas Bhatewara MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3854d1a890faSShreyas Bhatewara MODULE_LICENSE("GPL v2");
3855d1a890faSShreyas Bhatewara MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
3856