1d1a890faSShreyas Bhatewara /*
2d1a890faSShreyas Bhatewara  * Linux driver for VMware's vmxnet3 ethernet NIC.
3d1a890faSShreyas Bhatewara  *
469dbef0dSRonak Doshi  * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
5d1a890faSShreyas Bhatewara  *
6d1a890faSShreyas Bhatewara  * This program is free software; you can redistribute it and/or modify it
7d1a890faSShreyas Bhatewara  * under the terms of the GNU General Public License as published by the
8d1a890faSShreyas Bhatewara  * Free Software Foundation; version 2 of the License and no later version.
9d1a890faSShreyas Bhatewara  *
10d1a890faSShreyas Bhatewara  * This program is distributed in the hope that it will be useful, but
11d1a890faSShreyas Bhatewara  * WITHOUT ANY WARRANTY; without even the implied warranty of
12d1a890faSShreyas Bhatewara  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13d1a890faSShreyas Bhatewara  * NON INFRINGEMENT. See the GNU General Public License for more
14d1a890faSShreyas Bhatewara  * details.
15d1a890faSShreyas Bhatewara  *
16d1a890faSShreyas Bhatewara  * You should have received a copy of the GNU General Public License
17d1a890faSShreyas Bhatewara  * along with this program; if not, write to the Free Software
18d1a890faSShreyas Bhatewara  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19d1a890faSShreyas Bhatewara  *
20d1a890faSShreyas Bhatewara  * The full GNU General Public License is included in this distribution in
21d1a890faSShreyas Bhatewara  * the file called "COPYING".
22d1a890faSShreyas Bhatewara  *
23190af10fSShrikrishna Khare  * Maintained by: pv-drivers@vmware.com
24d1a890faSShreyas Bhatewara  *
25d1a890faSShreyas Bhatewara  */
26d1a890faSShreyas Bhatewara 
279d9779e7SPaul Gortmaker #include <linux/module.h>
28b038b040SStephen Rothwell #include <net/ip6_checksum.h>
29b038b040SStephen Rothwell 
30d1a890faSShreyas Bhatewara #include "vmxnet3_int.h"
31d1a890faSShreyas Bhatewara 
32d1a890faSShreyas Bhatewara char vmxnet3_driver_name[] = "vmxnet3";
33d1a890faSShreyas Bhatewara #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
34d1a890faSShreyas Bhatewara 
35d1a890faSShreyas Bhatewara /*
36d1a890faSShreyas Bhatewara  * PCI Device ID Table
37d1a890faSShreyas Bhatewara  * Last entry must be all 0s
38d1a890faSShreyas Bhatewara  */
399baa3c34SBenoit Taine static const struct pci_device_id vmxnet3_pciid_table[] = {
40d1a890faSShreyas Bhatewara 	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
41d1a890faSShreyas Bhatewara 	{0}
42d1a890faSShreyas Bhatewara };
43d1a890faSShreyas Bhatewara 
44d1a890faSShreyas Bhatewara MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
45d1a890faSShreyas Bhatewara 
4609c5088eSShreyas Bhatewara static int enable_mq = 1;
47d1a890faSShreyas Bhatewara 
48f9f25026SShreyas Bhatewara static void
49f9f25026SShreyas Bhatewara vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
50f9f25026SShreyas Bhatewara 
51d1a890faSShreyas Bhatewara /*
52d1a890faSShreyas Bhatewara  *    Enable/Disable the given intr
53d1a890faSShreyas Bhatewara  */
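/*
 * Each vector has its own IMR (interrupt mask) register in BAR0, laid out
 * 8 bytes apart; writing 0 to it unmasks the vector and writing 1 masks it,
 * which is what the two helpers below do.
 */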
54d1a890faSShreyas Bhatewara static void
55d1a890faSShreyas Bhatewara vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
56d1a890faSShreyas Bhatewara {
57d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
58d1a890faSShreyas Bhatewara }
59d1a890faSShreyas Bhatewara 
60d1a890faSShreyas Bhatewara 
61d1a890faSShreyas Bhatewara static void
62d1a890faSShreyas Bhatewara vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
63d1a890faSShreyas Bhatewara {
64d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
65d1a890faSShreyas Bhatewara }
66d1a890faSShreyas Bhatewara 
67d1a890faSShreyas Bhatewara 
68d1a890faSShreyas Bhatewara /*
69d1a890faSShreyas Bhatewara  *    Enable/Disable all intrs used by the device
70d1a890faSShreyas Bhatewara  */
71d1a890faSShreyas Bhatewara static void
72d1a890faSShreyas Bhatewara vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
73d1a890faSShreyas Bhatewara {
74d1a890faSShreyas Bhatewara 	int i;
75d1a890faSShreyas Bhatewara 
76d1a890faSShreyas Bhatewara 	for (i = 0; i < adapter->intr.num_intrs; i++)
77d1a890faSShreyas Bhatewara 		vmxnet3_enable_intr(adapter, i);
786929fe8aSRonghua Zang 	adapter->shared->devRead.intrConf.intrCtrl &=
796929fe8aSRonghua Zang 					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
80d1a890faSShreyas Bhatewara }
81d1a890faSShreyas Bhatewara 
82d1a890faSShreyas Bhatewara 
83d1a890faSShreyas Bhatewara static void
84d1a890faSShreyas Bhatewara vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
85d1a890faSShreyas Bhatewara {
86d1a890faSShreyas Bhatewara 	int i;
87d1a890faSShreyas Bhatewara 
886929fe8aSRonghua Zang 	adapter->shared->devRead.intrConf.intrCtrl |=
896929fe8aSRonghua Zang 					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
90d1a890faSShreyas Bhatewara 	for (i = 0; i < adapter->intr.num_intrs; i++)
91d1a890faSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, i);
92d1a890faSShreyas Bhatewara }
93d1a890faSShreyas Bhatewara 
94d1a890faSShreyas Bhatewara 
95d1a890faSShreyas Bhatewara static void
96d1a890faSShreyas Bhatewara vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
97d1a890faSShreyas Bhatewara {
98d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
99d1a890faSShreyas Bhatewara }
100d1a890faSShreyas Bhatewara 
101d1a890faSShreyas Bhatewara 
102d1a890faSShreyas Bhatewara static bool
103d1a890faSShreyas Bhatewara vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
104d1a890faSShreyas Bhatewara {
10509c5088eSShreyas Bhatewara 	return tq->stopped;
106d1a890faSShreyas Bhatewara }
107d1a890faSShreyas Bhatewara 
108d1a890faSShreyas Bhatewara 
109d1a890faSShreyas Bhatewara static void
110d1a890faSShreyas Bhatewara vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
111d1a890faSShreyas Bhatewara {
112d1a890faSShreyas Bhatewara 	tq->stopped = false;
11309c5088eSShreyas Bhatewara 	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
114d1a890faSShreyas Bhatewara }
115d1a890faSShreyas Bhatewara 
116d1a890faSShreyas Bhatewara 
117d1a890faSShreyas Bhatewara static void
118d1a890faSShreyas Bhatewara vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
119d1a890faSShreyas Bhatewara {
120d1a890faSShreyas Bhatewara 	tq->stopped = false;
12109c5088eSShreyas Bhatewara 	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
122d1a890faSShreyas Bhatewara }
123d1a890faSShreyas Bhatewara 
124d1a890faSShreyas Bhatewara 
125d1a890faSShreyas Bhatewara static void
126d1a890faSShreyas Bhatewara vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
127d1a890faSShreyas Bhatewara {
128d1a890faSShreyas Bhatewara 	tq->stopped = true;
129d1a890faSShreyas Bhatewara 	tq->num_stop++;
13009c5088eSShreyas Bhatewara 	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
131d1a890faSShreyas Bhatewara }
132d1a890faSShreyas Bhatewara 
133d1a890faSShreyas Bhatewara 
134d1a890faSShreyas Bhatewara /*
135d1a890faSShreyas Bhatewara  * Check the link state. This may start or stop the tx queue.
136d1a890faSShreyas Bhatewara  */
137d1a890faSShreyas Bhatewara static void
1384a1745fcSShreyas Bhatewara vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
139d1a890faSShreyas Bhatewara {
140d1a890faSShreyas Bhatewara 	u32 ret;
14109c5088eSShreyas Bhatewara 	int i;
14283d0feffSShreyas Bhatewara 	unsigned long flags;
143d1a890faSShreyas Bhatewara 
14483d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
145d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
146d1a890faSShreyas Bhatewara 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
14783d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
14883d0feffSShreyas Bhatewara 
149d1a890faSShreyas Bhatewara 	adapter->link_speed = ret >> 16;
150d1a890faSShreyas Bhatewara 	if (ret & 1) { /* Link is up. */
151204a6e65SStephen Hemminger 		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
152204a6e65SStephen Hemminger 			    adapter->link_speed);
153d1a890faSShreyas Bhatewara 		netif_carrier_on(adapter->netdev);
154d1a890faSShreyas Bhatewara 
15509c5088eSShreyas Bhatewara 		if (affectTxQueue) {
15609c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++)
15709c5088eSShreyas Bhatewara 				vmxnet3_tq_start(&adapter->tx_queue[i],
15809c5088eSShreyas Bhatewara 						 adapter);
15909c5088eSShreyas Bhatewara 		}
160d1a890faSShreyas Bhatewara 	} else {
161204a6e65SStephen Hemminger 		netdev_info(adapter->netdev, "NIC Link is Down\n");
162d1a890faSShreyas Bhatewara 		netif_carrier_off(adapter->netdev);
163d1a890faSShreyas Bhatewara 
16409c5088eSShreyas Bhatewara 		if (affectTxQueue) {
16509c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++)
16609c5088eSShreyas Bhatewara 				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
16709c5088eSShreyas Bhatewara 		}
168d1a890faSShreyas Bhatewara 	}
169d1a890faSShreyas Bhatewara }
170d1a890faSShreyas Bhatewara 
171d1a890faSShreyas Bhatewara static void
172d1a890faSShreyas Bhatewara vmxnet3_process_events(struct vmxnet3_adapter *adapter)
173d1a890faSShreyas Bhatewara {
17409c5088eSShreyas Bhatewara 	int i;
175e328d410SRoland Dreier 	unsigned long flags;
176115924b6SShreyas Bhatewara 	u32 events = le32_to_cpu(adapter->shared->ecr);
177d1a890faSShreyas Bhatewara 	if (!events)
178d1a890faSShreyas Bhatewara 		return;
179d1a890faSShreyas Bhatewara 
180d1a890faSShreyas Bhatewara 	vmxnet3_ack_events(adapter, events);
181d1a890faSShreyas Bhatewara 
182d1a890faSShreyas Bhatewara 	/* Check if link state has changed */
183d1a890faSShreyas Bhatewara 	if (events & VMXNET3_ECR_LINK)
1844a1745fcSShreyas Bhatewara 		vmxnet3_check_link(adapter, true);
185d1a890faSShreyas Bhatewara 
186d1a890faSShreyas Bhatewara 	/* Check if there is an error on xmit/recv queues */
187d1a890faSShreyas Bhatewara 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
188e328d410SRoland Dreier 		spin_lock_irqsave(&adapter->cmd_lock, flags);
189d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
190d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_GET_QUEUE_STATUS);
191e328d410SRoland Dreier 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
192d1a890faSShreyas Bhatewara 
19309c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++)
19409c5088eSShreyas Bhatewara 			if (adapter->tqd_start[i].status.stopped)
19509c5088eSShreyas Bhatewara 				dev_err(&adapter->netdev->dev,
19609c5088eSShreyas Bhatewara 					"%s: tq[%d] error 0x%x\n",
19709c5088eSShreyas Bhatewara 					adapter->netdev->name, i, le32_to_cpu(
19809c5088eSShreyas Bhatewara 					adapter->tqd_start[i].status.error));
19909c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++)
20009c5088eSShreyas Bhatewara 			if (adapter->rqd_start[i].status.stopped)
20109c5088eSShreyas Bhatewara 				dev_err(&adapter->netdev->dev,
20209c5088eSShreyas Bhatewara 					"%s: rq[%d] error 0x%x\n",
20309c5088eSShreyas Bhatewara 					adapter->netdev->name, i,
20409c5088eSShreyas Bhatewara 					adapter->rqd_start[i].status.error);
205d1a890faSShreyas Bhatewara 
206d1a890faSShreyas Bhatewara 		schedule_work(&adapter->work);
207d1a890faSShreyas Bhatewara 	}
208d1a890faSShreyas Bhatewara }
209d1a890faSShreyas Bhatewara 
210115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
211115924b6SShreyas Bhatewara /*
212115924b6SShreyas Bhatewara  * The device expects the bitfields in shared structures to be written in
213115924b6SShreyas Bhatewara  * little-endian order. When the CPU is big endian, the following routines
214115924b6SShreyas Bhatewara  * are used to read from and write to the device ABI correctly.
215115924b6SShreyas Bhatewara  * The general technique used here is: double-word bitfields are defined in
216115924b6SShreyas Bhatewara  * the opposite order for big-endian architectures. Before the driver reads
217115924b6SShreyas Bhatewara  * them, the complete double word is translated using le32_to_cpu. Similarly,
218115924b6SShreyas Bhatewara  * after the driver writes into the bitfields, cpu_to_le32 is used to
219115924b6SShreyas Bhatewara  * translate the double words into the required format.
220115924b6SShreyas Bhatewara  * To avoid touching bits in the shared structure more than once, temporary
221115924b6SShreyas Bhatewara  * descriptors are used. These are passed as srcDesc to the following functions.
222115924b6SShreyas Bhatewara  */
223115924b6SShreyas Bhatewara static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
224115924b6SShreyas Bhatewara 				struct Vmxnet3_RxDesc *dstDesc)
225115924b6SShreyas Bhatewara {
226115924b6SShreyas Bhatewara 	u32 *src = (u32 *)srcDesc + 2;
227115924b6SShreyas Bhatewara 	u32 *dst = (u32 *)dstDesc + 2;
228115924b6SShreyas Bhatewara 	dstDesc->addr = le64_to_cpu(srcDesc->addr);
229115924b6SShreyas Bhatewara 	*dst = le32_to_cpu(*src);
230115924b6SShreyas Bhatewara 	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
231115924b6SShreyas Bhatewara }
232115924b6SShreyas Bhatewara 
233115924b6SShreyas Bhatewara static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
234115924b6SShreyas Bhatewara 			       struct Vmxnet3_TxDesc *dstDesc)
235115924b6SShreyas Bhatewara {
236115924b6SShreyas Bhatewara 	int i;
237115924b6SShreyas Bhatewara 	u32 *src = (u32 *)(srcDesc + 1);
238115924b6SShreyas Bhatewara 	u32 *dst = (u32 *)(dstDesc + 1);
239115924b6SShreyas Bhatewara 
240115924b6SShreyas Bhatewara 	/* Working backwards so that the gen bit is set at the end. */
241115924b6SShreyas Bhatewara 	for (i = 2; i > 0; i--) {
242115924b6SShreyas Bhatewara 		src--;
243115924b6SShreyas Bhatewara 		dst--;
244115924b6SShreyas Bhatewara 		*dst = cpu_to_le32(*src);
245115924b6SShreyas Bhatewara 	}
246115924b6SShreyas Bhatewara }
247115924b6SShreyas Bhatewara 
248115924b6SShreyas Bhatewara 
249115924b6SShreyas Bhatewara static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
250115924b6SShreyas Bhatewara 				struct Vmxnet3_RxCompDesc *dstDesc)
251115924b6SShreyas Bhatewara {
252115924b6SShreyas Bhatewara 	int i = 0;
253115924b6SShreyas Bhatewara 	u32 *src = (u32 *)srcDesc;
254115924b6SShreyas Bhatewara 	u32 *dst = (u32 *)dstDesc;
255115924b6SShreyas Bhatewara 	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
256115924b6SShreyas Bhatewara 		*dst = le32_to_cpu(*src);
257115924b6SShreyas Bhatewara 		src++;
258115924b6SShreyas Bhatewara 		dst++;
259115924b6SShreyas Bhatewara 	}
260115924b6SShreyas Bhatewara }
261115924b6SShreyas Bhatewara 
262115924b6SShreyas Bhatewara 
263115924b6SShreyas Bhatewara /* Used to read bitfield values from double words. */
264115924b6SShreyas Bhatewara static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
265115924b6SShreyas Bhatewara {
266115924b6SShreyas Bhatewara 	u32 temp = le32_to_cpu(*bitfield);
267115924b6SShreyas Bhatewara 	u32 mask = ((1 << size) - 1) << pos;
268115924b6SShreyas Bhatewara 	temp &= mask;
269115924b6SShreyas Bhatewara 	temp >>= pos;
270115924b6SShreyas Bhatewara 	return temp;
271115924b6SShreyas Bhatewara }
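
/*
 * Illustrative example (not used by the driver): extracting a one-bit field
 * at bit position 31 from a descriptor dword whose little-endian value is
 * 0x80000000 gives mask = ((1 << 1) - 1) << 31 = 0x80000000, so
 * get_bitfield32() returns (0x80000000 & mask) >> 31 = 1. The accessor
 * macros below simply plug in the per-field SHIFT/SIZE constants.
 */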
272115924b6SShreyas Bhatewara 
273115924b6SShreyas Bhatewara 
274115924b6SShreyas Bhatewara 
275115924b6SShreyas Bhatewara #endif  /* __BIG_ENDIAN_BITFIELD */
276115924b6SShreyas Bhatewara 
277115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
278115924b6SShreyas Bhatewara 
279115924b6SShreyas Bhatewara #   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
280115924b6SShreyas Bhatewara 			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
281115924b6SShreyas Bhatewara 			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
282115924b6SShreyas Bhatewara #   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
283115924b6SShreyas Bhatewara 			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
284115924b6SShreyas Bhatewara 			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
285115924b6SShreyas Bhatewara #   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
286115924b6SShreyas Bhatewara 			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
287115924b6SShreyas Bhatewara 			VMXNET3_TCD_GEN_SIZE)
288115924b6SShreyas Bhatewara #   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
289115924b6SShreyas Bhatewara 			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
290115924b6SShreyas Bhatewara #   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
291115924b6SShreyas Bhatewara 			(dstrcd) = (tmp); \
292115924b6SShreyas Bhatewara 			vmxnet3_RxCompToCPU((rcd), (tmp)); \
293115924b6SShreyas Bhatewara 		} while (0)
294115924b6SShreyas Bhatewara #   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
295115924b6SShreyas Bhatewara 			(dstrxd) = (tmp); \
296115924b6SShreyas Bhatewara 			vmxnet3_RxDescToCPU((rxd), (tmp)); \
297115924b6SShreyas Bhatewara 		} while (0)
298115924b6SShreyas Bhatewara 
299115924b6SShreyas Bhatewara #else
300115924b6SShreyas Bhatewara 
301115924b6SShreyas Bhatewara #   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
302115924b6SShreyas Bhatewara #   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
303115924b6SShreyas Bhatewara #   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
304115924b6SShreyas Bhatewara #   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
305115924b6SShreyas Bhatewara #   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
306115924b6SShreyas Bhatewara #   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
307115924b6SShreyas Bhatewara 
308115924b6SShreyas Bhatewara #endif /* __BIG_ENDIAN_BITFIELD  */
309115924b6SShreyas Bhatewara 
310d1a890faSShreyas Bhatewara 
311d1a890faSShreyas Bhatewara static void
312d1a890faSShreyas Bhatewara vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
313d1a890faSShreyas Bhatewara 		     struct pci_dev *pdev)
314d1a890faSShreyas Bhatewara {
315d1a890faSShreyas Bhatewara 	if (tbi->map_type == VMXNET3_MAP_SINGLE)
316b0eb57cbSAndy King 		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
317d1a890faSShreyas Bhatewara 				 PCI_DMA_TODEVICE);
318d1a890faSShreyas Bhatewara 	else if (tbi->map_type == VMXNET3_MAP_PAGE)
319b0eb57cbSAndy King 		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
320d1a890faSShreyas Bhatewara 			       PCI_DMA_TODEVICE);
321d1a890faSShreyas Bhatewara 	else
322d1a890faSShreyas Bhatewara 		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
323d1a890faSShreyas Bhatewara 
324d1a890faSShreyas Bhatewara 	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
325d1a890faSShreyas Bhatewara }
326d1a890faSShreyas Bhatewara 
327d1a890faSShreyas Bhatewara 
328d1a890faSShreyas Bhatewara static int
329d1a890faSShreyas Bhatewara vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
330d1a890faSShreyas Bhatewara 		  struct pci_dev *pdev,	struct vmxnet3_adapter *adapter)
331d1a890faSShreyas Bhatewara {
332d1a890faSShreyas Bhatewara 	struct sk_buff *skb;
333d1a890faSShreyas Bhatewara 	int entries = 0;
334d1a890faSShreyas Bhatewara 
335d1a890faSShreyas Bhatewara 	/* no out of order completion */
336d1a890faSShreyas Bhatewara 	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
337115924b6SShreyas Bhatewara 	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
338d1a890faSShreyas Bhatewara 
339d1a890faSShreyas Bhatewara 	skb = tq->buf_info[eop_idx].skb;
340d1a890faSShreyas Bhatewara 	BUG_ON(skb == NULL);
341d1a890faSShreyas Bhatewara 	tq->buf_info[eop_idx].skb = NULL;
342d1a890faSShreyas Bhatewara 
343d1a890faSShreyas Bhatewara 	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
344d1a890faSShreyas Bhatewara 
345d1a890faSShreyas Bhatewara 	while (tq->tx_ring.next2comp != eop_idx) {
346d1a890faSShreyas Bhatewara 		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
347d1a890faSShreyas Bhatewara 				     pdev);
348d1a890faSShreyas Bhatewara 
349d1a890faSShreyas Bhatewara 		/* update next2comp w/o tx_lock. Since we are marking more,
350d1a890faSShreyas Bhatewara 		 * not fewer, tx ring entries available, the worst case is
351d1a890faSShreyas Bhatewara 		 * that the tx routine incorrectly re-queues a pkt due to
352d1a890faSShreyas Bhatewara 		 * insufficient tx ring entries.
353d1a890faSShreyas Bhatewara 		 */
354d1a890faSShreyas Bhatewara 		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
355d1a890faSShreyas Bhatewara 		entries++;
356d1a890faSShreyas Bhatewara 	}
357d1a890faSShreyas Bhatewara 
358d1a890faSShreyas Bhatewara 	dev_kfree_skb_any(skb);
359d1a890faSShreyas Bhatewara 	return entries;
360d1a890faSShreyas Bhatewara }
361d1a890faSShreyas Bhatewara 
362d1a890faSShreyas Bhatewara 
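/*
 * A sketch of the completion-ring ownership scheme, as inferred from the
 * code below: the device writes completion descriptors carrying the ring's
 * current generation value, and the driver consumes entries for as long as
 * the descriptor's gen field matches tq->comp_ring.gen; wrapping next2proc
 * around the end of the ring toggles the expected generation, so stale
 * descriptors from the previous pass are never mistaken for new completions.
 */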
363d1a890faSShreyas Bhatewara static int
364d1a890faSShreyas Bhatewara vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
365d1a890faSShreyas Bhatewara 			struct vmxnet3_adapter *adapter)
366d1a890faSShreyas Bhatewara {
367d1a890faSShreyas Bhatewara 	int completed = 0;
368d1a890faSShreyas Bhatewara 	union Vmxnet3_GenericDesc *gdesc;
369d1a890faSShreyas Bhatewara 
370d1a890faSShreyas Bhatewara 	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
371115924b6SShreyas Bhatewara 	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
372f3002c13Shpreg@vmware.com 		/* Prevent any &gdesc->tcd field from being (speculatively)
373f3002c13Shpreg@vmware.com 		 * read before (&gdesc->tcd)->gen is read.
374f3002c13Shpreg@vmware.com 		 */
375f3002c13Shpreg@vmware.com 		dma_rmb();
376f3002c13Shpreg@vmware.com 
377115924b6SShreyas Bhatewara 		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
378115924b6SShreyas Bhatewara 					       &gdesc->tcd), tq, adapter->pdev,
379115924b6SShreyas Bhatewara 					       adapter);
380d1a890faSShreyas Bhatewara 
381d1a890faSShreyas Bhatewara 		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
382d1a890faSShreyas Bhatewara 		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
383d1a890faSShreyas Bhatewara 	}
384d1a890faSShreyas Bhatewara 
385d1a890faSShreyas Bhatewara 	if (completed) {
386d1a890faSShreyas Bhatewara 		spin_lock(&tq->tx_lock);
387d1a890faSShreyas Bhatewara 		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
388d1a890faSShreyas Bhatewara 			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
389d1a890faSShreyas Bhatewara 			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
390d1a890faSShreyas Bhatewara 			     netif_carrier_ok(adapter->netdev))) {
391d1a890faSShreyas Bhatewara 			vmxnet3_tq_wake(tq, adapter);
392d1a890faSShreyas Bhatewara 		}
393d1a890faSShreyas Bhatewara 		spin_unlock(&tq->tx_lock);
394d1a890faSShreyas Bhatewara 	}
395d1a890faSShreyas Bhatewara 	return completed;
396d1a890faSShreyas Bhatewara }
397d1a890faSShreyas Bhatewara 
398d1a890faSShreyas Bhatewara 
399d1a890faSShreyas Bhatewara static void
400d1a890faSShreyas Bhatewara vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
401d1a890faSShreyas Bhatewara 		   struct vmxnet3_adapter *adapter)
402d1a890faSShreyas Bhatewara {
403d1a890faSShreyas Bhatewara 	int i;
404d1a890faSShreyas Bhatewara 
405d1a890faSShreyas Bhatewara 	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
406d1a890faSShreyas Bhatewara 		struct vmxnet3_tx_buf_info *tbi;
407d1a890faSShreyas Bhatewara 
408d1a890faSShreyas Bhatewara 		tbi = tq->buf_info + tq->tx_ring.next2comp;
409d1a890faSShreyas Bhatewara 
410d1a890faSShreyas Bhatewara 		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
411d1a890faSShreyas Bhatewara 		if (tbi->skb) {
412d1a890faSShreyas Bhatewara 			dev_kfree_skb_any(tbi->skb);
413d1a890faSShreyas Bhatewara 			tbi->skb = NULL;
414d1a890faSShreyas Bhatewara 		}
415d1a890faSShreyas Bhatewara 		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
416d1a890faSShreyas Bhatewara 	}
417d1a890faSShreyas Bhatewara 
418d1a890faSShreyas Bhatewara 	/* sanity check, verify all buffers are indeed unmapped and freed */
419d1a890faSShreyas Bhatewara 	for (i = 0; i < tq->tx_ring.size; i++) {
420d1a890faSShreyas Bhatewara 		BUG_ON(tq->buf_info[i].skb != NULL ||
421d1a890faSShreyas Bhatewara 		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
422d1a890faSShreyas Bhatewara 	}
423d1a890faSShreyas Bhatewara 
424d1a890faSShreyas Bhatewara 	tq->tx_ring.gen = VMXNET3_INIT_GEN;
425d1a890faSShreyas Bhatewara 	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
426d1a890faSShreyas Bhatewara 
427d1a890faSShreyas Bhatewara 	tq->comp_ring.gen = VMXNET3_INIT_GEN;
428d1a890faSShreyas Bhatewara 	tq->comp_ring.next2proc = 0;
429d1a890faSShreyas Bhatewara }
430d1a890faSShreyas Bhatewara 
431d1a890faSShreyas Bhatewara 
43209c5088eSShreyas Bhatewara static void
433d1a890faSShreyas Bhatewara vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
434d1a890faSShreyas Bhatewara 		   struct vmxnet3_adapter *adapter)
435d1a890faSShreyas Bhatewara {
436d1a890faSShreyas Bhatewara 	if (tq->tx_ring.base) {
437b0eb57cbSAndy King 		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
438d1a890faSShreyas Bhatewara 				  sizeof(struct Vmxnet3_TxDesc),
439d1a890faSShreyas Bhatewara 				  tq->tx_ring.base, tq->tx_ring.basePA);
440d1a890faSShreyas Bhatewara 		tq->tx_ring.base = NULL;
441d1a890faSShreyas Bhatewara 	}
442d1a890faSShreyas Bhatewara 	if (tq->data_ring.base) {
4433c8b3efcSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
4443c8b3efcSShrikrishna Khare 				  tq->data_ring.size * tq->txdata_desc_size,
445d1a890faSShreyas Bhatewara 				  tq->data_ring.base, tq->data_ring.basePA);
446d1a890faSShreyas Bhatewara 		tq->data_ring.base = NULL;
447d1a890faSShreyas Bhatewara 	}
448d1a890faSShreyas Bhatewara 	if (tq->comp_ring.base) {
449b0eb57cbSAndy King 		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
450d1a890faSShreyas Bhatewara 				  sizeof(struct Vmxnet3_TxCompDesc),
451d1a890faSShreyas Bhatewara 				  tq->comp_ring.base, tq->comp_ring.basePA);
452d1a890faSShreyas Bhatewara 		tq->comp_ring.base = NULL;
453d1a890faSShreyas Bhatewara 	}
454de1da8bcSRonak Doshi 	kfree(tq->buf_info);
455d1a890faSShreyas Bhatewara 	tq->buf_info = NULL;
456d1a890faSShreyas Bhatewara }
457d1a890faSShreyas Bhatewara 
458d1a890faSShreyas Bhatewara 
45909c5088eSShreyas Bhatewara /* Destroy all tx queues */
46009c5088eSShreyas Bhatewara void
46109c5088eSShreyas Bhatewara vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
46209c5088eSShreyas Bhatewara {
46309c5088eSShreyas Bhatewara 	int i;
46409c5088eSShreyas Bhatewara 
46509c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
46609c5088eSShreyas Bhatewara 		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
46709c5088eSShreyas Bhatewara }
46809c5088eSShreyas Bhatewara 
46909c5088eSShreyas Bhatewara 
470d1a890faSShreyas Bhatewara static void
471d1a890faSShreyas Bhatewara vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
472d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter *adapter)
473d1a890faSShreyas Bhatewara {
474d1a890faSShreyas Bhatewara 	int i;
475d1a890faSShreyas Bhatewara 
476d1a890faSShreyas Bhatewara 	/* reset the tx ring contents to 0 and reset the tx ring states */
477d1a890faSShreyas Bhatewara 	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
478d1a890faSShreyas Bhatewara 	       sizeof(struct Vmxnet3_TxDesc));
479d1a890faSShreyas Bhatewara 	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
480d1a890faSShreyas Bhatewara 	tq->tx_ring.gen = VMXNET3_INIT_GEN;
481d1a890faSShreyas Bhatewara 
4823c8b3efcSShrikrishna Khare 	memset(tq->data_ring.base, 0,
4833c8b3efcSShrikrishna Khare 	       tq->data_ring.size * tq->txdata_desc_size);
484d1a890faSShreyas Bhatewara 
485d1a890faSShreyas Bhatewara 	/* reset the tx comp ring contents to 0 and reset comp ring states */
486d1a890faSShreyas Bhatewara 	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
487d1a890faSShreyas Bhatewara 	       sizeof(struct Vmxnet3_TxCompDesc));
488d1a890faSShreyas Bhatewara 	tq->comp_ring.next2proc = 0;
489d1a890faSShreyas Bhatewara 	tq->comp_ring.gen = VMXNET3_INIT_GEN;
490d1a890faSShreyas Bhatewara 
491d1a890faSShreyas Bhatewara 	/* reset the bookkeeping data */
492d1a890faSShreyas Bhatewara 	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
493d1a890faSShreyas Bhatewara 	for (i = 0; i < tq->tx_ring.size; i++)
494d1a890faSShreyas Bhatewara 		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
495d1a890faSShreyas Bhatewara 
496d1a890faSShreyas Bhatewara 	/* stats are not reset */
497d1a890faSShreyas Bhatewara }
498d1a890faSShreyas Bhatewara 
499d1a890faSShreyas Bhatewara 
500d1a890faSShreyas Bhatewara static int
501d1a890faSShreyas Bhatewara vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
502d1a890faSShreyas Bhatewara 		  struct vmxnet3_adapter *adapter)
503d1a890faSShreyas Bhatewara {
504d1a890faSShreyas Bhatewara 	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
505d1a890faSShreyas Bhatewara 	       tq->comp_ring.base || tq->buf_info);
506d1a890faSShreyas Bhatewara 
507b0eb57cbSAndy King 	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
508b0eb57cbSAndy King 			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
509b0eb57cbSAndy King 			&tq->tx_ring.basePA, GFP_KERNEL);
510d1a890faSShreyas Bhatewara 	if (!tq->tx_ring.base) {
511204a6e65SStephen Hemminger 		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
512d1a890faSShreyas Bhatewara 		goto err;
513d1a890faSShreyas Bhatewara 	}
514d1a890faSShreyas Bhatewara 
515b0eb57cbSAndy King 	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
5163c8b3efcSShrikrishna Khare 			tq->data_ring.size * tq->txdata_desc_size,
517b0eb57cbSAndy King 			&tq->data_ring.basePA, GFP_KERNEL);
518d1a890faSShreyas Bhatewara 	if (!tq->data_ring.base) {
5193c8b3efcSShrikrishna Khare 		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
520d1a890faSShreyas Bhatewara 		goto err;
521d1a890faSShreyas Bhatewara 	}
522d1a890faSShreyas Bhatewara 
523b0eb57cbSAndy King 	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
524b0eb57cbSAndy King 			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
525b0eb57cbSAndy King 			&tq->comp_ring.basePA, GFP_KERNEL);
526d1a890faSShreyas Bhatewara 	if (!tq->comp_ring.base) {
527204a6e65SStephen Hemminger 		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
528d1a890faSShreyas Bhatewara 		goto err;
529d1a890faSShreyas Bhatewara 	}
530d1a890faSShreyas Bhatewara 
531de1da8bcSRonak Doshi 	tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
532de1da8bcSRonak Doshi 				    GFP_KERNEL,
533de1da8bcSRonak Doshi 				    dev_to_node(&adapter->pdev->dev));
534e404decbSJoe Perches 	if (!tq->buf_info)
535d1a890faSShreyas Bhatewara 		goto err;
536d1a890faSShreyas Bhatewara 
537d1a890faSShreyas Bhatewara 	return 0;
538d1a890faSShreyas Bhatewara 
539d1a890faSShreyas Bhatewara err:
540d1a890faSShreyas Bhatewara 	vmxnet3_tq_destroy(tq, adapter);
541d1a890faSShreyas Bhatewara 	return -ENOMEM;
542d1a890faSShreyas Bhatewara }
543d1a890faSShreyas Bhatewara 
54409c5088eSShreyas Bhatewara static void
54509c5088eSShreyas Bhatewara vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
54609c5088eSShreyas Bhatewara {
54709c5088eSShreyas Bhatewara 	int i;
54809c5088eSShreyas Bhatewara 
54909c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
55009c5088eSShreyas Bhatewara 		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
55109c5088eSShreyas Bhatewara }
552d1a890faSShreyas Bhatewara 
553d1a890faSShreyas Bhatewara /*
554d1a890faSShreyas Bhatewara  *    Starting from ring->next2fill, allocate rx buffers for the given ring
555d1a890faSShreyas Bhatewara  *    of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
556d1a890faSShreyas Bhatewara  *    are allocated or allocation fails
557d1a890faSShreyas Bhatewara  */
558d1a890faSShreyas Bhatewara 
559d1a890faSShreyas Bhatewara static int
560d1a890faSShreyas Bhatewara vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
561d1a890faSShreyas Bhatewara 			int num_to_alloc, struct vmxnet3_adapter *adapter)
562d1a890faSShreyas Bhatewara {
563d1a890faSShreyas Bhatewara 	int num_allocated = 0;
564d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
565d1a890faSShreyas Bhatewara 	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
566d1a890faSShreyas Bhatewara 	u32 val;
567d1a890faSShreyas Bhatewara 
5685318d809SShreyas Bhatewara 	while (num_allocated <= num_to_alloc) {
569d1a890faSShreyas Bhatewara 		struct vmxnet3_rx_buf_info *rbi;
570d1a890faSShreyas Bhatewara 		union Vmxnet3_GenericDesc *gd;
571d1a890faSShreyas Bhatewara 
572d1a890faSShreyas Bhatewara 		rbi = rbi_base + ring->next2fill;
573d1a890faSShreyas Bhatewara 		gd = ring->base + ring->next2fill;
574d1a890faSShreyas Bhatewara 
575d1a890faSShreyas Bhatewara 		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
576d1a890faSShreyas Bhatewara 			if (rbi->skb == NULL) {
5770d735f13SStephen Hemminger 				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
5780d735f13SStephen Hemminger 								       rbi->len,
5790d735f13SStephen Hemminger 								       GFP_KERNEL);
580d1a890faSShreyas Bhatewara 				if (unlikely(rbi->skb == NULL)) {
581d1a890faSShreyas Bhatewara 					rq->stats.rx_buf_alloc_failure++;
582d1a890faSShreyas Bhatewara 					break;
583d1a890faSShreyas Bhatewara 				}
584d1a890faSShreyas Bhatewara 
585b0eb57cbSAndy King 				rbi->dma_addr = dma_map_single(
586b0eb57cbSAndy King 						&adapter->pdev->dev,
587d1a890faSShreyas Bhatewara 						rbi->skb->data, rbi->len,
588d1a890faSShreyas Bhatewara 						PCI_DMA_FROMDEVICE);
5895738a09dSAlexey Khoroshilov 				if (dma_mapping_error(&adapter->pdev->dev,
5905738a09dSAlexey Khoroshilov 						      rbi->dma_addr)) {
5915738a09dSAlexey Khoroshilov 					dev_kfree_skb_any(rbi->skb);
5925738a09dSAlexey Khoroshilov 					rq->stats.rx_buf_alloc_failure++;
5935738a09dSAlexey Khoroshilov 					break;
5945738a09dSAlexey Khoroshilov 				}
595d1a890faSShreyas Bhatewara 			} else {
596d1a890faSShreyas Bhatewara 				/* rx buffer skipped by the device */
597d1a890faSShreyas Bhatewara 			}
598d1a890faSShreyas Bhatewara 			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
599d1a890faSShreyas Bhatewara 		} else {
600d1a890faSShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
601d1a890faSShreyas Bhatewara 			       rbi->len  != PAGE_SIZE);
602d1a890faSShreyas Bhatewara 
603d1a890faSShreyas Bhatewara 			if (rbi->page == NULL) {
604d1a890faSShreyas Bhatewara 				rbi->page = alloc_page(GFP_ATOMIC);
605d1a890faSShreyas Bhatewara 				if (unlikely(rbi->page == NULL)) {
606d1a890faSShreyas Bhatewara 					rq->stats.rx_buf_alloc_failure++;
607d1a890faSShreyas Bhatewara 					break;
608d1a890faSShreyas Bhatewara 				}
609b0eb57cbSAndy King 				rbi->dma_addr = dma_map_page(
610b0eb57cbSAndy King 						&adapter->pdev->dev,
611d1a890faSShreyas Bhatewara 						rbi->page, 0, PAGE_SIZE,
612d1a890faSShreyas Bhatewara 						PCI_DMA_FROMDEVICE);
6135738a09dSAlexey Khoroshilov 				if (dma_mapping_error(&adapter->pdev->dev,
6145738a09dSAlexey Khoroshilov 						      rbi->dma_addr)) {
6155738a09dSAlexey Khoroshilov 					put_page(rbi->page);
6165738a09dSAlexey Khoroshilov 					rq->stats.rx_buf_alloc_failure++;
6175738a09dSAlexey Khoroshilov 					break;
6185738a09dSAlexey Khoroshilov 				}
619d1a890faSShreyas Bhatewara 			} else {
620d1a890faSShreyas Bhatewara 				/* rx buffers skipped by the device */
621d1a890faSShreyas Bhatewara 			}
622d1a890faSShreyas Bhatewara 			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
623d1a890faSShreyas Bhatewara 		}
624d1a890faSShreyas Bhatewara 
625115924b6SShreyas Bhatewara 		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
6265318d809SShreyas Bhatewara 		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
627115924b6SShreyas Bhatewara 					   | val | rbi->len);
628d1a890faSShreyas Bhatewara 
6295318d809SShreyas Bhatewara 		/* Fill the last buffer but don't mark it ready, or else the
6305318d809SShreyas Bhatewara 		 * device will think that the queue is full */
6315318d809SShreyas Bhatewara 		if (num_allocated == num_to_alloc)
6325318d809SShreyas Bhatewara 			break;
6335318d809SShreyas Bhatewara 
6345318d809SShreyas Bhatewara 		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
635d1a890faSShreyas Bhatewara 		num_allocated++;
636d1a890faSShreyas Bhatewara 		vmxnet3_cmd_ring_adv_next2fill(ring);
637d1a890faSShreyas Bhatewara 	}
638d1a890faSShreyas Bhatewara 
639fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev,
64069b9a712SStephen Hemminger 		"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
64169b9a712SStephen Hemminger 		num_allocated, ring->next2fill, ring->next2comp);
642d1a890faSShreyas Bhatewara 
643d1a890faSShreyas Bhatewara 	/* so that the device can distinguish a full ring from an empty ring */
644d1a890faSShreyas Bhatewara 	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
645d1a890faSShreyas Bhatewara 
646d1a890faSShreyas Bhatewara 	return num_allocated;
647d1a890faSShreyas Bhatewara }
648d1a890faSShreyas Bhatewara 
649d1a890faSShreyas Bhatewara 
650d1a890faSShreyas Bhatewara static void
651d1a890faSShreyas Bhatewara vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
652d1a890faSShreyas Bhatewara 		    struct vmxnet3_rx_buf_info *rbi)
653d1a890faSShreyas Bhatewara {
654d7840976SMatthew Wilcox (Oracle) 	skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
655d1a890faSShreyas Bhatewara 
656d1a890faSShreyas Bhatewara 	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
657d1a890faSShreyas Bhatewara 
6580e0634d2SIan Campbell 	__skb_frag_set_page(frag, rbi->page);
659b54c9d5bSJonathan Lemon 	skb_frag_off_set(frag, 0);
6609e903e08SEric Dumazet 	skb_frag_size_set(frag, rcd->len);
6619e903e08SEric Dumazet 	skb->data_len += rcd->len;
6625e6c355cSEric Dumazet 	skb->truesize += PAGE_SIZE;
663d1a890faSShreyas Bhatewara 	skb_shinfo(skb)->nr_frags++;
664d1a890faSShreyas Bhatewara }
665d1a890faSShreyas Bhatewara 
666d1a890faSShreyas Bhatewara 
6675738a09dSAlexey Khoroshilov static int
668d1a890faSShreyas Bhatewara vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
669d1a890faSShreyas Bhatewara 		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
670d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter *adapter)
671d1a890faSShreyas Bhatewara {
672d1a890faSShreyas Bhatewara 	u32 dw2, len;
673d1a890faSShreyas Bhatewara 	unsigned long buf_offset;
674d1a890faSShreyas Bhatewara 	int i;
675d1a890faSShreyas Bhatewara 	union Vmxnet3_GenericDesc *gdesc;
676d1a890faSShreyas Bhatewara 	struct vmxnet3_tx_buf_info *tbi = NULL;
677d1a890faSShreyas Bhatewara 
678d1a890faSShreyas Bhatewara 	BUG_ON(ctx->copy_size > skb_headlen(skb));
679d1a890faSShreyas Bhatewara 
680d1a890faSShreyas Bhatewara 	/* use the previous gen bit for the SOP desc */
681d1a890faSShreyas Bhatewara 	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
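	/* Holding back the current generation here keeps the device from
	 * picking up the SOP descriptor; the SOP gen bit is only flipped to
	 * the current generation once every descriptor of the packet has
	 * been written.
	 */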
682d1a890faSShreyas Bhatewara 
683d1a890faSShreyas Bhatewara 	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
684d1a890faSShreyas Bhatewara 	gdesc = ctx->sop_txd; /* both loops below can be skipped */
685d1a890faSShreyas Bhatewara 
686d1a890faSShreyas Bhatewara 	/* no need to map the buffer if headers are copied */
687d1a890faSShreyas Bhatewara 	if (ctx->copy_size) {
688115924b6SShreyas Bhatewara 		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
689d1a890faSShreyas Bhatewara 					tq->tx_ring.next2fill *
6903c8b3efcSShrikrishna Khare 					tq->txdata_desc_size);
691115924b6SShreyas Bhatewara 		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
692d1a890faSShreyas Bhatewara 		ctx->sop_txd->dword[3] = 0;
693d1a890faSShreyas Bhatewara 
694d1a890faSShreyas Bhatewara 		tbi = tq->buf_info + tq->tx_ring.next2fill;
695d1a890faSShreyas Bhatewara 		tbi->map_type = VMXNET3_MAP_NONE;
696d1a890faSShreyas Bhatewara 
697fdcd79b9SStephen Hemminger 		netdev_dbg(adapter->netdev,
698f6965582SRandy Dunlap 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
699115924b6SShreyas Bhatewara 			tq->tx_ring.next2fill,
700115924b6SShreyas Bhatewara 			le64_to_cpu(ctx->sop_txd->txd.addr),
701d1a890faSShreyas Bhatewara 			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
702d1a890faSShreyas Bhatewara 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
703d1a890faSShreyas Bhatewara 
704d1a890faSShreyas Bhatewara 		/* use the right gen for non-SOP desc */
705d1a890faSShreyas Bhatewara 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
706d1a890faSShreyas Bhatewara 	}
707d1a890faSShreyas Bhatewara 
708d1a890faSShreyas Bhatewara 	/* linear part can use multiple tx desc if it's big */
709d1a890faSShreyas Bhatewara 	len = skb_headlen(skb) - ctx->copy_size;
710d1a890faSShreyas Bhatewara 	buf_offset = ctx->copy_size;
711d1a890faSShreyas Bhatewara 	while (len) {
712d1a890faSShreyas Bhatewara 		u32 buf_size;
713d1a890faSShreyas Bhatewara 
7141f4b1612SBhavesh Davda 		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
7151f4b1612SBhavesh Davda 			buf_size = len;
7161f4b1612SBhavesh Davda 			dw2 |= len;
7171f4b1612SBhavesh Davda 		} else {
7181f4b1612SBhavesh Davda 			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
7191f4b1612SBhavesh Davda 			/* spec says that for TxDesc.len, 0 == 2^14 */
7201f4b1612SBhavesh Davda 		}
721d1a890faSShreyas Bhatewara 
722d1a890faSShreyas Bhatewara 		tbi = tq->buf_info + tq->tx_ring.next2fill;
723d1a890faSShreyas Bhatewara 		tbi->map_type = VMXNET3_MAP_SINGLE;
724b0eb57cbSAndy King 		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
725d1a890faSShreyas Bhatewara 				skb->data + buf_offset, buf_size,
726d1a890faSShreyas Bhatewara 				PCI_DMA_TODEVICE);
7275738a09dSAlexey Khoroshilov 		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
7285738a09dSAlexey Khoroshilov 			return -EFAULT;
729d1a890faSShreyas Bhatewara 
7301f4b1612SBhavesh Davda 		tbi->len = buf_size;
731d1a890faSShreyas Bhatewara 
732d1a890faSShreyas Bhatewara 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
733d1a890faSShreyas Bhatewara 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
734d1a890faSShreyas Bhatewara 
735115924b6SShreyas Bhatewara 		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
7361f4b1612SBhavesh Davda 		gdesc->dword[2] = cpu_to_le32(dw2);
737d1a890faSShreyas Bhatewara 		gdesc->dword[3] = 0;
738d1a890faSShreyas Bhatewara 
739fdcd79b9SStephen Hemminger 		netdev_dbg(adapter->netdev,
740f6965582SRandy Dunlap 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
741115924b6SShreyas Bhatewara 			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
742115924b6SShreyas Bhatewara 			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
743d1a890faSShreyas Bhatewara 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
744d1a890faSShreyas Bhatewara 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
745d1a890faSShreyas Bhatewara 
746d1a890faSShreyas Bhatewara 		len -= buf_size;
747d1a890faSShreyas Bhatewara 		buf_offset += buf_size;
748d1a890faSShreyas Bhatewara 	}
749d1a890faSShreyas Bhatewara 
750d1a890faSShreyas Bhatewara 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
751d7840976SMatthew Wilcox (Oracle) 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
752a4d7e485SEric Dumazet 		u32 buf_size;
753d1a890faSShreyas Bhatewara 
754a4d7e485SEric Dumazet 		buf_offset = 0;
755a4d7e485SEric Dumazet 		len = skb_frag_size(frag);
756a4d7e485SEric Dumazet 		while (len) {
757d1a890faSShreyas Bhatewara 			tbi = tq->buf_info + tq->tx_ring.next2fill;
758a4d7e485SEric Dumazet 			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
759a4d7e485SEric Dumazet 				buf_size = len;
760a4d7e485SEric Dumazet 				dw2 |= len;
761a4d7e485SEric Dumazet 			} else {
762a4d7e485SEric Dumazet 				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
763a4d7e485SEric Dumazet 				/* spec says that for TxDesc.len, 0 == 2^14 */
764a4d7e485SEric Dumazet 			}
765d1a890faSShreyas Bhatewara 			tbi->map_type = VMXNET3_MAP_PAGE;
7660e0634d2SIan Campbell 			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
767a4d7e485SEric Dumazet 							 buf_offset, buf_size,
7685d6bcdfeSIan Campbell 							 DMA_TO_DEVICE);
7695738a09dSAlexey Khoroshilov 			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
7705738a09dSAlexey Khoroshilov 				return -EFAULT;
771d1a890faSShreyas Bhatewara 
772a4d7e485SEric Dumazet 			tbi->len = buf_size;
773d1a890faSShreyas Bhatewara 
774d1a890faSShreyas Bhatewara 			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
775d1a890faSShreyas Bhatewara 			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
776d1a890faSShreyas Bhatewara 
777115924b6SShreyas Bhatewara 			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
778a4d7e485SEric Dumazet 			gdesc->dword[2] = cpu_to_le32(dw2);
779d1a890faSShreyas Bhatewara 			gdesc->dword[3] = 0;
780d1a890faSShreyas Bhatewara 
781fdcd79b9SStephen Hemminger 			netdev_dbg(adapter->netdev,
7828b429468SHans Wennborg 				"txd[%u]: 0x%llx %u %u\n",
783115924b6SShreyas Bhatewara 				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
784115924b6SShreyas Bhatewara 				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
785d1a890faSShreyas Bhatewara 			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
786d1a890faSShreyas Bhatewara 			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
787a4d7e485SEric Dumazet 
788a4d7e485SEric Dumazet 			len -= buf_size;
789a4d7e485SEric Dumazet 			buf_offset += buf_size;
790a4d7e485SEric Dumazet 		}
791d1a890faSShreyas Bhatewara 	}
792d1a890faSShreyas Bhatewara 
793d1a890faSShreyas Bhatewara 	ctx->eop_txd = gdesc;
794d1a890faSShreyas Bhatewara 
795d1a890faSShreyas Bhatewara 	/* set the last buf_info for the pkt */
796d1a890faSShreyas Bhatewara 	tbi->skb = skb;
797d1a890faSShreyas Bhatewara 	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
7985738a09dSAlexey Khoroshilov 
7995738a09dSAlexey Khoroshilov 	return 0;
800d1a890faSShreyas Bhatewara }
801d1a890faSShreyas Bhatewara 
802d1a890faSShreyas Bhatewara 
80309c5088eSShreyas Bhatewara /* Init all tx queues */
80409c5088eSShreyas Bhatewara static void
80509c5088eSShreyas Bhatewara vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
80609c5088eSShreyas Bhatewara {
80709c5088eSShreyas Bhatewara 	int i;
80809c5088eSShreyas Bhatewara 
80909c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
81009c5088eSShreyas Bhatewara 		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
81109c5088eSShreyas Bhatewara }
81209c5088eSShreyas Bhatewara 
81309c5088eSShreyas Bhatewara 
814d1a890faSShreyas Bhatewara /*
815cec05562SNeil Horman  *    parse relevant protocol headers:
816d1a890faSShreyas Bhatewara  *      For a tso pkt, relevant headers are L2/3/4 including options
817d1a890faSShreyas Bhatewara  *      For a pkt requesting csum offloading, they are L2/3 and may include L4
818d1a890faSShreyas Bhatewara  *      if it's a TCP/UDP pkt
819d1a890faSShreyas Bhatewara  *
820d1a890faSShreyas Bhatewara  * Returns:
821d1a890faSShreyas Bhatewara  *    -1:  an error occurred during parsing
822d1a890faSShreyas Bhatewara  *     0:  protocol headers parsed, but too big to be copied
823d1a890faSShreyas Bhatewara  *     1:  protocol headers parsed and copied
824d1a890faSShreyas Bhatewara  *
825d1a890faSShreyas Bhatewara  * Other effects:
826d1a890faSShreyas Bhatewara  *    1. related *ctx fields are updated.
827d1a890faSShreyas Bhatewara  *    2. ctx->copy_size is # of bytes copied
828cec05562SNeil Horman  *    3. the portion to be copied is guaranteed to be in the linear part
829d1a890faSShreyas Bhatewara  *
830d1a890faSShreyas Bhatewara  */
831d1a890faSShreyas Bhatewara static int
832cec05562SNeil Horman vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
833d1a890faSShreyas Bhatewara 		  struct vmxnet3_tx_ctx *ctx,
834d1a890faSShreyas Bhatewara 		  struct vmxnet3_adapter *adapter)
835d1a890faSShreyas Bhatewara {
836759c9359SShrikrishna Khare 	u8 protocol = 0;
837d1a890faSShreyas Bhatewara 
8380d0b1672SMichał Mirosław 	if (ctx->mss) {	/* TSO */
839dacce2beSRonak Doshi 		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
840dacce2beSRonak Doshi 			ctx->l4_offset = skb_inner_transport_offset(skb);
841dacce2beSRonak Doshi 			ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
842dacce2beSRonak Doshi 			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
843dacce2beSRonak Doshi 		} else {
844dacce2beSRonak Doshi 			ctx->l4_offset = skb_transport_offset(skb);
8458bca5d1eSEric Dumazet 			ctx->l4_hdr_size = tcp_hdrlen(skb);
846dacce2beSRonak Doshi 			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
847dacce2beSRonak Doshi 		}
848d1a890faSShreyas Bhatewara 	} else {
849d1a890faSShreyas Bhatewara 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
850dacce2beSRonak Doshi 			/* For encap packets, skb_checksum_start_offset refers
851dacce2beSRonak Doshi 			 * to inner L4 offset. Thus, below works for encap as
852dacce2beSRonak Doshi 			 * well as non-encap case
853dacce2beSRonak Doshi 			 */
854dacce2beSRonak Doshi 			ctx->l4_offset = skb_checksum_start_offset(skb);
855d1a890faSShreyas Bhatewara 
85636432797SRonak Doshi 			if (VMXNET3_VERSION_GE_4(adapter) &&
85736432797SRonak Doshi 			    skb->encapsulation) {
85836432797SRonak Doshi 				struct iphdr *iph = inner_ip_hdr(skb);
85936432797SRonak Doshi 
86036432797SRonak Doshi 				if (iph->version == 4) {
86136432797SRonak Doshi 					protocol = iph->protocol;
86236432797SRonak Doshi 				} else {
86336432797SRonak Doshi 					const struct ipv6hdr *ipv6h;
86436432797SRonak Doshi 
86536432797SRonak Doshi 					ipv6h = inner_ipv6_hdr(skb);
86636432797SRonak Doshi 					protocol = ipv6h->nexthdr;
86736432797SRonak Doshi 				}
86836432797SRonak Doshi 			} else {
869d1a890faSShreyas Bhatewara 				if (ctx->ipv4) {
8708bca5d1eSEric Dumazet 					const struct iphdr *iph = ip_hdr(skb);
8718bca5d1eSEric Dumazet 
872759c9359SShrikrishna Khare 					protocol = iph->protocol;
873759c9359SShrikrishna Khare 				} else if (ctx->ipv6) {
87436432797SRonak Doshi 					const struct ipv6hdr *ipv6h;
875759c9359SShrikrishna Khare 
87636432797SRonak Doshi 					ipv6h = ipv6_hdr(skb);
877759c9359SShrikrishna Khare 					protocol = ipv6h->nexthdr;
878d1a890faSShreyas Bhatewara 				}
87936432797SRonak Doshi 			}
880759c9359SShrikrishna Khare 
881759c9359SShrikrishna Khare 			switch (protocol) {
882759c9359SShrikrishna Khare 			case IPPROTO_TCP:
8838a7f280fSRonak Doshi 				ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
8848a7f280fSRonak Doshi 						   tcp_hdrlen(skb);
885759c9359SShrikrishna Khare 				break;
886759c9359SShrikrishna Khare 			case IPPROTO_UDP:
887759c9359SShrikrishna Khare 				ctx->l4_hdr_size = sizeof(struct udphdr);
888759c9359SShrikrishna Khare 				break;
889759c9359SShrikrishna Khare 			default:
890759c9359SShrikrishna Khare 				ctx->l4_hdr_size = 0;
891759c9359SShrikrishna Khare 				break;
892759c9359SShrikrishna Khare 			}
893759c9359SShrikrishna Khare 
894dacce2beSRonak Doshi 			ctx->copy_size = min(ctx->l4_offset +
895b203262dSNeil Horman 					 ctx->l4_hdr_size, skb->len);
896d1a890faSShreyas Bhatewara 		} else {
897dacce2beSRonak Doshi 			ctx->l4_offset = 0;
898d1a890faSShreyas Bhatewara 			ctx->l4_hdr_size = 0;
899d1a890faSShreyas Bhatewara 			/* copy as much as allowed */
9003c8b3efcSShrikrishna Khare 			ctx->copy_size = min_t(unsigned int,
9013c8b3efcSShrikrishna Khare 					       tq->txdata_desc_size,
9023c8b3efcSShrikrishna Khare 					       skb_headlen(skb));
903d1a890faSShreyas Bhatewara 		}
904d1a890faSShreyas Bhatewara 
905c41fcce9SShreyas Bhatewara 		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
906c41fcce9SShreyas Bhatewara 			ctx->copy_size = skb->len;
907c41fcce9SShreyas Bhatewara 
908d1a890faSShreyas Bhatewara 		/* make sure headers are accessible directly */
909d1a890faSShreyas Bhatewara 		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
910d1a890faSShreyas Bhatewara 			goto err;
911d1a890faSShreyas Bhatewara 	}
912d1a890faSShreyas Bhatewara 
9133c8b3efcSShrikrishna Khare 	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
914d1a890faSShreyas Bhatewara 		tq->stats.oversized_hdr++;
915d1a890faSShreyas Bhatewara 		ctx->copy_size = 0;
916d1a890faSShreyas Bhatewara 		return 0;
917d1a890faSShreyas Bhatewara 	}
918d1a890faSShreyas Bhatewara 
919cec05562SNeil Horman 	return 1;
920cec05562SNeil Horman err:
921cec05562SNeil Horman 	return -1;
922cec05562SNeil Horman }
923cec05562SNeil Horman 
924cec05562SNeil Horman /*
925cec05562SNeil Horman  *    copy relevant protocol headers to the tx data ring:
926cec05562SNeil Horman  *      For a tso pkt, relevant headers are L2/3/4 including options
927cec05562SNeil Horman  *      For a pkt requesting csum offloading, they are L2/3 and may include L4
928cec05562SNeil Horman  *      if it's a TCP/UDP pkt
929cec05562SNeil Horman  *
930cec05562SNeil Horman  *
931cec05562SNeil Horman  *    Note that this requires vmxnet3_parse_hdr to have been called first to
932cec05562SNeil Horman  *      set the appropriate bits in ctx
933cec05562SNeil Horman  */
934cec05562SNeil Horman static void
935cec05562SNeil Horman vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
936cec05562SNeil Horman 		 struct vmxnet3_tx_ctx *ctx,
937cec05562SNeil Horman 		 struct vmxnet3_adapter *adapter)
938cec05562SNeil Horman {
939cec05562SNeil Horman 	struct Vmxnet3_TxDataDesc *tdd;
940cec05562SNeil Horman 
941ff2e7d5dSShrikrishna Khare 	tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
942ff2e7d5dSShrikrishna Khare 					    tq->tx_ring.next2fill *
943ff2e7d5dSShrikrishna Khare 					    tq->txdata_desc_size);
944d1a890faSShreyas Bhatewara 
945d1a890faSShreyas Bhatewara 	memcpy(tdd->data, skb->data, ctx->copy_size);
946fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev,
947f6965582SRandy Dunlap 		"copy %u bytes to dataRing[%u]\n",
948d1a890faSShreyas Bhatewara 		ctx->copy_size, tq->tx_ring.next2fill);
949d1a890faSShreyas Bhatewara }
950d1a890faSShreyas Bhatewara 
951d1a890faSShreyas Bhatewara 
952d1a890faSShreyas Bhatewara static void
953dacce2beSRonak Doshi vmxnet3_prepare_inner_tso(struct sk_buff *skb,
954dacce2beSRonak Doshi 			  struct vmxnet3_tx_ctx *ctx)
955dacce2beSRonak Doshi {
956dacce2beSRonak Doshi 	struct tcphdr *tcph = inner_tcp_hdr(skb);
957dacce2beSRonak Doshi 	struct iphdr *iph = inner_ip_hdr(skb);
958dacce2beSRonak Doshi 
95936432797SRonak Doshi 	if (iph->version == 4) {
960dacce2beSRonak Doshi 		iph->check = 0;
961dacce2beSRonak Doshi 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
962dacce2beSRonak Doshi 						 IPPROTO_TCP, 0);
96336432797SRonak Doshi 	} else {
964dacce2beSRonak Doshi 		struct ipv6hdr *iph = inner_ipv6_hdr(skb);
965dacce2beSRonak Doshi 
966dacce2beSRonak Doshi 		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
967dacce2beSRonak Doshi 					       IPPROTO_TCP, 0);
968dacce2beSRonak Doshi 	}
969dacce2beSRonak Doshi }
970dacce2beSRonak Doshi 
971dacce2beSRonak Doshi static void
972d1a890faSShreyas Bhatewara vmxnet3_prepare_tso(struct sk_buff *skb,
973d1a890faSShreyas Bhatewara 		    struct vmxnet3_tx_ctx *ctx)
974d1a890faSShreyas Bhatewara {
9758bca5d1eSEric Dumazet 	struct tcphdr *tcph = tcp_hdr(skb);
9768bca5d1eSEric Dumazet 
977d1a890faSShreyas Bhatewara 	if (ctx->ipv4) {
9788bca5d1eSEric Dumazet 		struct iphdr *iph = ip_hdr(skb);
9798bca5d1eSEric Dumazet 
980d1a890faSShreyas Bhatewara 		iph->check = 0;
981d1a890faSShreyas Bhatewara 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
982d1a890faSShreyas Bhatewara 						 IPPROTO_TCP, 0);
983759c9359SShrikrishna Khare 	} else if (ctx->ipv6) {
984091c9f82SHeiner Kallweit 		tcp_v6_gso_csum_prep(skb);
985d1a890faSShreyas Bhatewara 	}
986d1a890faSShreyas Bhatewara }
987d1a890faSShreyas Bhatewara 
988a4d7e485SEric Dumazet static int txd_estimate(const struct sk_buff *skb)
989a4d7e485SEric Dumazet {
990a4d7e485SEric Dumazet 	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
991a4d7e485SEric Dumazet 	int i;
992a4d7e485SEric Dumazet 
993a4d7e485SEric Dumazet 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
994d7840976SMatthew Wilcox (Oracle) 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
995a4d7e485SEric Dumazet 
996a4d7e485SEric Dumazet 		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
997a4d7e485SEric Dumazet 	}
998a4d7e485SEric Dumazet 	return count;
999a4d7e485SEric Dumazet }
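
/*
 * Rough sizing example (assuming VMXNET3_TXD_NEEDED() rounds a length up to
 * whole VMXNET3_MAX_TX_BUF_SIZE chunks of 2^14 bytes, per the "0 == 2^14"
 * note further up): a packet with a 3000-byte linear area and one
 * 20000-byte frag is estimated as 1 (linear) + 1 (headroom, e.g. for the
 * copied-header descriptor) + 2 (frag) = 4 tx descriptors.
 */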
1000d1a890faSShreyas Bhatewara 
1001d1a890faSShreyas Bhatewara /*
1002d1a890faSShreyas Bhatewara  * Transmits a pkt thru a given tq
1003d1a890faSShreyas Bhatewara  * Returns:
1004d1a890faSShreyas Bhatewara  *    NETDEV_TX_OK:      descriptors are set up successfully
100525985edcSLucas De Marchi  *    NETDEV_TX_OK:      error occurred, the pkt is dropped
1006d1a890faSShreyas Bhatewara  *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
1007d1a890faSShreyas Bhatewara  *
1008d1a890faSShreyas Bhatewara  * Side-effects:
1009d1a890faSShreyas Bhatewara  *    1. tx ring may be changed
1010d1a890faSShreyas Bhatewara  *    2. tq stats may be updated accordingly
1011d1a890faSShreyas Bhatewara  *    3. shared->txNumDeferred may be updated
1012d1a890faSShreyas Bhatewara  */
1013d1a890faSShreyas Bhatewara 
1014d1a890faSShreyas Bhatewara static int
1015d1a890faSShreyas Bhatewara vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1016d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter *adapter, struct net_device *netdev)
1017d1a890faSShreyas Bhatewara {
1018d1a890faSShreyas Bhatewara 	int ret;
1019d1a890faSShreyas Bhatewara 	u32 count;
10207a4c003dSRonak Doshi 	int num_pkts;
10217a4c003dSRonak Doshi 	int tx_num_deferred;
1022d1a890faSShreyas Bhatewara 	unsigned long flags;
1023d1a890faSShreyas Bhatewara 	struct vmxnet3_tx_ctx ctx;
1024d1a890faSShreyas Bhatewara 	union Vmxnet3_GenericDesc *gdesc;
1025115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1026115924b6SShreyas Bhatewara 	/* Use temporary descriptor to avoid touching bits multiple times */
1027115924b6SShreyas Bhatewara 	union Vmxnet3_GenericDesc tempTxDesc;
1028115924b6SShreyas Bhatewara #endif
1029d1a890faSShreyas Bhatewara 
1030a4d7e485SEric Dumazet 	count = txd_estimate(skb);
1031d1a890faSShreyas Bhatewara 
103272e85c45SJesse Gross 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1033759c9359SShrikrishna Khare 	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1034d1a890faSShreyas Bhatewara 
1035d1a890faSShreyas Bhatewara 	ctx.mss = skb_shinfo(skb)->gso_size;
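	/* A non-zero gso_size marks a TSO packet: make the headers writable
	 * and prime the IP/TCP checksums. A non-TSO packet that would span
	 * too many descriptors is linearized instead.
	 */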
1036d1a890faSShreyas Bhatewara 	if (ctx.mss) {
1037d1a890faSShreyas Bhatewara 		if (skb_header_cloned(skb)) {
1038d1a890faSShreyas Bhatewara 			if (unlikely(pskb_expand_head(skb, 0, 0,
1039d1a890faSShreyas Bhatewara 						      GFP_ATOMIC) != 0)) {
1040d1a890faSShreyas Bhatewara 				tq->stats.drop_tso++;
1041d1a890faSShreyas Bhatewara 				goto drop_pkt;
1042d1a890faSShreyas Bhatewara 			}
1043d1a890faSShreyas Bhatewara 			tq->stats.copy_skb_header++;
1044d1a890faSShreyas Bhatewara 		}
1045dacce2beSRonak Doshi 		if (skb->encapsulation) {
1046dacce2beSRonak Doshi 			vmxnet3_prepare_inner_tso(skb, &ctx);
1047dacce2beSRonak Doshi 		} else {
1048d1a890faSShreyas Bhatewara 			vmxnet3_prepare_tso(skb, &ctx);
1049dacce2beSRonak Doshi 		}
1050d1a890faSShreyas Bhatewara 	} else {
1051d1a890faSShreyas Bhatewara 		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1052d1a890faSShreyas Bhatewara 
1053d1a890faSShreyas Bhatewara 			/* non-tso pkts must not use more than
1054d1a890faSShreyas Bhatewara 			 * VMXNET3_MAX_TXD_PER_PKT entries
1055d1a890faSShreyas Bhatewara 			 */
1056d1a890faSShreyas Bhatewara 			if (skb_linearize(skb) != 0) {
1057d1a890faSShreyas Bhatewara 				tq->stats.drop_too_many_frags++;
1058d1a890faSShreyas Bhatewara 				goto drop_pkt;
1059d1a890faSShreyas Bhatewara 			}
1060d1a890faSShreyas Bhatewara 			tq->stats.linearized++;
1061d1a890faSShreyas Bhatewara 
1062d1a890faSShreyas Bhatewara 			/* recalculate the # of descriptors to use */
1063d1a890faSShreyas Bhatewara 			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1064d1a890faSShreyas Bhatewara 		}
1065d1a890faSShreyas Bhatewara 	}
1066d1a890faSShreyas Bhatewara 
1067cec05562SNeil Horman 	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1068d1a890faSShreyas Bhatewara 	if (ret >= 0) {
1069d1a890faSShreyas Bhatewara 		BUG_ON(ret <= 0 && ctx.copy_size != 0);
1070d1a890faSShreyas Bhatewara 		/* hdrs parsed, check against other limits */
1071d1a890faSShreyas Bhatewara 		if (ctx.mss) {
1072dacce2beSRonak Doshi 			if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1073d1a890faSShreyas Bhatewara 				     VMXNET3_MAX_TX_BUF_SIZE)) {
1074efc21d95SArnd Bergmann 				tq->stats.drop_oversized_hdr++;
1075efc21d95SArnd Bergmann 				goto drop_pkt;
1076d1a890faSShreyas Bhatewara 			}
1077d1a890faSShreyas Bhatewara 		} else {
1078d1a890faSShreyas Bhatewara 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
1079dacce2beSRonak Doshi 				if (unlikely(ctx.l4_offset +
1080d1a890faSShreyas Bhatewara 					     skb->csum_offset >
1081d1a890faSShreyas Bhatewara 					     VMXNET3_MAX_CSUM_OFFSET)) {
1082efc21d95SArnd Bergmann 					tq->stats.drop_oversized_hdr++;
1083efc21d95SArnd Bergmann 					goto drop_pkt;
1084d1a890faSShreyas Bhatewara 				}
1085d1a890faSShreyas Bhatewara 			}
1086d1a890faSShreyas Bhatewara 		}
1087d1a890faSShreyas Bhatewara 	} else {
1088d1a890faSShreyas Bhatewara 		tq->stats.drop_hdr_inspect_err++;
1089cec05562SNeil Horman 		goto drop_pkt;
1090d1a890faSShreyas Bhatewara 	}
1091d1a890faSShreyas Bhatewara 
1092cec05562SNeil Horman 	spin_lock_irqsave(&tq->tx_lock, flags);
1093cec05562SNeil Horman 
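	/* Stop the queue if the ring cannot hold the worst-case number of
	 * descriptors for this packet; it is woken again once tx completion
	 * processing frees up ring entries.
	 */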
1094cec05562SNeil Horman 	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1095cec05562SNeil Horman 		tq->stats.tx_ring_full++;
1096cec05562SNeil Horman 		netdev_dbg(adapter->netdev,
1097cec05562SNeil Horman 			"tx queue stopped on %s, next2comp %u"
1098cec05562SNeil Horman 			" next2fill %u\n", adapter->netdev->name,
1099cec05562SNeil Horman 			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1100cec05562SNeil Horman 
1101cec05562SNeil Horman 		vmxnet3_tq_stop(tq, adapter);
1102cec05562SNeil Horman 		spin_unlock_irqrestore(&tq->tx_lock, flags);
1103cec05562SNeil Horman 		return NETDEV_TX_BUSY;
1104cec05562SNeil Horman 	}
1105cec05562SNeil Horman 
1106cec05562SNeil Horman 
1107cec05562SNeil Horman 	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1108cec05562SNeil Horman 
1109d1a890faSShreyas Bhatewara 	/* fill tx descs related to addr & len */
11105738a09dSAlexey Khoroshilov 	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
11115738a09dSAlexey Khoroshilov 		goto unlock_drop_pkt;
1112d1a890faSShreyas Bhatewara 
1113d1a890faSShreyas Bhatewara 	/* setup the EOP desc */
1114115924b6SShreyas Bhatewara 	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1115d1a890faSShreyas Bhatewara 
1116d1a890faSShreyas Bhatewara 	/* setup the SOP desc */
1117115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1118115924b6SShreyas Bhatewara 	gdesc = &tempTxDesc;
1119115924b6SShreyas Bhatewara 	gdesc->dword[2] = ctx.sop_txd->dword[2];
1120115924b6SShreyas Bhatewara 	gdesc->dword[3] = ctx.sop_txd->dword[3];
1121115924b6SShreyas Bhatewara #else
1122d1a890faSShreyas Bhatewara 	gdesc = ctx.sop_txd;
1123115924b6SShreyas Bhatewara #endif
11247a4c003dSRonak Doshi 	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
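	/* Program the offload fields of the SOP descriptor: hlen/om/msscof
	 * select TSO or checksum offload (optionally on the inner headers of
	 * an encapsulated packet), and num_pkts is roughly
	 * DIV_ROUND_UP(payload_len, mss), i.e. the number of packets this
	 * descriptor chain will put on the wire.
	 */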
1125d1a890faSShreyas Bhatewara 	if (ctx.mss) {
1126dacce2beSRonak Doshi 		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1127dacce2beSRonak Doshi 			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1128dacce2beSRonak Doshi 			gdesc->txd.om = VMXNET3_OM_ENCAP;
1129dacce2beSRonak Doshi 			gdesc->txd.msscof = ctx.mss;
1130dacce2beSRonak Doshi 
11311dac3b1bSRonak Doshi 			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1132dacce2beSRonak Doshi 				gdesc->txd.oco = 1;
1133dacce2beSRonak Doshi 		} else {
1134dacce2beSRonak Doshi 			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1135d1a890faSShreyas Bhatewara 			gdesc->txd.om = VMXNET3_OM_TSO;
1136d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = ctx.mss;
1137dacce2beSRonak Doshi 		}
11387a4c003dSRonak Doshi 		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1139d1a890faSShreyas Bhatewara 	} else {
1140d1a890faSShreyas Bhatewara 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1141dacce2beSRonak Doshi 			if (VMXNET3_VERSION_GE_4(adapter) &&
1142dacce2beSRonak Doshi 			    skb->encapsulation) {
1143dacce2beSRonak Doshi 				gdesc->txd.hlen = ctx.l4_offset +
1144dacce2beSRonak Doshi 						  ctx.l4_hdr_size;
1145dacce2beSRonak Doshi 				gdesc->txd.om = VMXNET3_OM_ENCAP;
1146dacce2beSRonak Doshi 				gdesc->txd.msscof = 0;		/* Reserved */
1147dacce2beSRonak Doshi 			} else {
1148dacce2beSRonak Doshi 				gdesc->txd.hlen = ctx.l4_offset;
1149d1a890faSShreyas Bhatewara 				gdesc->txd.om = VMXNET3_OM_CSUM;
1150dacce2beSRonak Doshi 				gdesc->txd.msscof = ctx.l4_offset +
1151d1a890faSShreyas Bhatewara 						    skb->csum_offset;
1152dacce2beSRonak Doshi 			}
1153d1a890faSShreyas Bhatewara 		} else {
1154d1a890faSShreyas Bhatewara 			gdesc->txd.om = 0;
1155d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = 0;
1156d1a890faSShreyas Bhatewara 		}
11577a4c003dSRonak Doshi 		num_pkts = 1;
1158d1a890faSShreyas Bhatewara 	}
11597a4c003dSRonak Doshi 	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
11607a4c003dSRonak Doshi 	tx_num_deferred += num_pkts;
1161d1a890faSShreyas Bhatewara 
1162df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
1163d1a890faSShreyas Bhatewara 		gdesc->txd.ti = 1;
1164df8a39deSJiri Pirko 		gdesc->txd.tci = skb_vlan_tag_get(skb);
1165d1a890faSShreyas Bhatewara 	}
1166d1a890faSShreyas Bhatewara 
1167f3002c13Shpreg@vmware.com 	/* Ensure that the write to (&gdesc->txd)->gen will be observed after
1168f3002c13Shpreg@vmware.com 	 * all other writes to &gdesc->txd.
1169f3002c13Shpreg@vmware.com 	 */
1170f3002c13Shpreg@vmware.com 	dma_wmb();
1171f3002c13Shpreg@vmware.com 
1172115924b6SShreyas Bhatewara 	/* finally flips the GEN bit of the SOP desc. */
1173115924b6SShreyas Bhatewara 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1174115924b6SShreyas Bhatewara 						  VMXNET3_TXD_GEN);
1175115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1176115924b6SShreyas Bhatewara 	/* Finished updating in bitfields of Tx Desc, so write them in original
1177115924b6SShreyas Bhatewara 	 * place.
1178115924b6SShreyas Bhatewara 	 */
1179115924b6SShreyas Bhatewara 	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1180115924b6SShreyas Bhatewara 			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1181115924b6SShreyas Bhatewara 	gdesc = ctx.sop_txd;
1182115924b6SShreyas Bhatewara #endif
1183fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev,
1184f6965582SRandy Dunlap 		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1185c2fd03a0SJoe Perches 		(u32)(ctx.sop_txd -
1186115924b6SShreyas Bhatewara 		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1187115924b6SShreyas Bhatewara 		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1188d1a890faSShreyas Bhatewara 
1189d1a890faSShreyas Bhatewara 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1190d1a890faSShreyas Bhatewara 
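	/* Ring the TXPROD doorbell only after txThreshold packets have been
	 * deferred, so that the relatively expensive device register write is
	 * not paid for every single packet.
	 */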
11917a4c003dSRonak Doshi 	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1192d1a890faSShreyas Bhatewara 		tq->shared->txNumDeferred = 0;
119309c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
119409c5088eSShreyas Bhatewara 				       VMXNET3_REG_TXPROD + tq->qid * 8,
1195d1a890faSShreyas Bhatewara 				       tq->tx_ring.next2fill);
1196d1a890faSShreyas Bhatewara 	}
1197d1a890faSShreyas Bhatewara 
1198d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1199d1a890faSShreyas Bhatewara 
1200f955e141SDan Carpenter unlock_drop_pkt:
1201f955e141SDan Carpenter 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1202d1a890faSShreyas Bhatewara drop_pkt:
1203d1a890faSShreyas Bhatewara 	tq->stats.drop_total++;
1204b1b71817SEric W. Biederman 	dev_kfree_skb_any(skb);
1205d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1206d1a890faSShreyas Bhatewara }
1207d1a890faSShreyas Bhatewara 
1208d1a890faSShreyas Bhatewara 
1209d1a890faSShreyas Bhatewara static netdev_tx_t
1210d1a890faSShreyas Bhatewara vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1211d1a890faSShreyas Bhatewara {
1212d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1213d1a890faSShreyas Bhatewara 
121409c5088eSShreyas Bhatewara 	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
121509c5088eSShreyas Bhatewara 	return vmxnet3_tq_xmit(skb,
121609c5088eSShreyas Bhatewara 			       &adapter->tx_queue[skb->queue_mapping],
121709c5088eSShreyas Bhatewara 			       adapter, netdev);
1218d1a890faSShreyas Bhatewara }
1219d1a890faSShreyas Bhatewara 
1220d1a890faSShreyas Bhatewara 
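/*
 * Translate the checksum bits of the rx completion descriptor into
 * skb->ip_summed: CHECKSUM_UNNECESSARY when the device has verified the
 * TCP/UDP checksum of an IPv4/IPv6 packet, otherwise pass whatever raw
 * checksum the device reported (if any) up to the stack.
 */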
1221d1a890faSShreyas Bhatewara static void
1222d1a890faSShreyas Bhatewara vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1223d1a890faSShreyas Bhatewara 		struct sk_buff *skb,
1224d1a890faSShreyas Bhatewara 		union Vmxnet3_GenericDesc *gdesc)
1225d1a890faSShreyas Bhatewara {
1226a0d2730cSMichał Mirosław 	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1227f0d43780SShrikrishna Khare 		if (gdesc->rcd.v4 &&
1228f0d43780SShrikrishna Khare 		    (le32_to_cpu(gdesc->dword[3]) &
1229f0d43780SShrikrishna Khare 		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1230d1a890faSShreyas Bhatewara 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1231dacce2beSRonak Doshi 			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1232dacce2beSRonak Doshi 				     !(le32_to_cpu(gdesc->dword[0]) &
1233dacce2beSRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1234dacce2beSRonak Doshi 			WARN_ON_ONCE(gdesc->rcd.frg &&
1235dacce2beSRonak Doshi 				     !(le32_to_cpu(gdesc->dword[0]) &
1236dacce2beSRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1237f0d43780SShrikrishna Khare 		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1238f0d43780SShrikrishna Khare 					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
1239f0d43780SShrikrishna Khare 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1240dacce2beSRonak Doshi 			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1241dacce2beSRonak Doshi 				     !(le32_to_cpu(gdesc->dword[0]) &
1242dacce2beSRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1243dacce2beSRonak Doshi 			WARN_ON_ONCE(gdesc->rcd.frg &&
1244dacce2beSRonak Doshi 				     !(le32_to_cpu(gdesc->dword[0]) &
1245dacce2beSRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1246d1a890faSShreyas Bhatewara 		} else {
1247d1a890faSShreyas Bhatewara 			if (gdesc->rcd.csum) {
1248d1a890faSShreyas Bhatewara 				skb->csum = htons(gdesc->rcd.csum);
1249d1a890faSShreyas Bhatewara 				skb->ip_summed = CHECKSUM_PARTIAL;
1250d1a890faSShreyas Bhatewara 			} else {
1251bc8acf2cSEric Dumazet 				skb_checksum_none_assert(skb);
1252d1a890faSShreyas Bhatewara 			}
1253d1a890faSShreyas Bhatewara 		}
1254d1a890faSShreyas Bhatewara 	} else {
1255bc8acf2cSEric Dumazet 		skb_checksum_none_assert(skb);
1256d1a890faSShreyas Bhatewara 	}
1257d1a890faSShreyas Bhatewara }
1258d1a890faSShreyas Bhatewara 
1259d1a890faSShreyas Bhatewara 
1260d1a890faSShreyas Bhatewara static void
1261d1a890faSShreyas Bhatewara vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1262d1a890faSShreyas Bhatewara 		 struct vmxnet3_rx_ctx *ctx,  struct vmxnet3_adapter *adapter)
1263d1a890faSShreyas Bhatewara {
1264d1a890faSShreyas Bhatewara 	rq->stats.drop_err++;
1265d1a890faSShreyas Bhatewara 	if (!rcd->fcs)
1266d1a890faSShreyas Bhatewara 		rq->stats.drop_fcs++;
1267d1a890faSShreyas Bhatewara 
1268d1a890faSShreyas Bhatewara 	rq->stats.drop_total++;
1269d1a890faSShreyas Bhatewara 
1270d1a890faSShreyas Bhatewara 	/*
1271d1a890faSShreyas Bhatewara 	 * We do not unmap and chain the rx buffer to the skb.
1272d1a890faSShreyas Bhatewara 	 * We basically pretend this buffer is not used and will be recycled
1273d1a890faSShreyas Bhatewara 	 * by vmxnet3_rq_alloc_rx_buf()
1274d1a890faSShreyas Bhatewara 	 */
1275d1a890faSShreyas Bhatewara 
1276d1a890faSShreyas Bhatewara 	/*
1277d1a890faSShreyas Bhatewara 	 * ctx->skb may be NULL if this is the first and the only one
1278d1a890faSShreyas Bhatewara 	 * ctx->skb may be NULL if this is the first and only desc
1279d1a890faSShreyas Bhatewara 	 * for the pkt
1280d1a890faSShreyas Bhatewara 	if (ctx->skb)
1281d1a890faSShreyas Bhatewara 		dev_kfree_skb_irq(ctx->skb);
1282d1a890faSShreyas Bhatewara 
1283d1a890faSShreyas Bhatewara 	ctx->skb = NULL;
1284d1a890faSShreyas Bhatewara }
1285d1a890faSShreyas Bhatewara 
1286d1a890faSShreyas Bhatewara 
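/*
 * Estimate the total header length (L2 + L3 + TCP) of an LRO-coalesced
 * packet so that a gso_size can be derived when the device did not report
 * an MSS. Returns 0 if the headers cannot be parsed from the linear part
 * of the skb.
 */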
128745dac1d6SShreyas Bhatewara static u32
128845dac1d6SShreyas Bhatewara vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
128945dac1d6SShreyas Bhatewara 		    union Vmxnet3_GenericDesc *gdesc)
129045dac1d6SShreyas Bhatewara {
129145dac1d6SShreyas Bhatewara 	u32 hlen, maplen;
129245dac1d6SShreyas Bhatewara 	union {
129345dac1d6SShreyas Bhatewara 		void *ptr;
129445dac1d6SShreyas Bhatewara 		struct ethhdr *eth;
129565ec0bd1SRonak Doshi 		struct vlan_ethhdr *veth;
129645dac1d6SShreyas Bhatewara 		struct iphdr *ipv4;
129745dac1d6SShreyas Bhatewara 		struct ipv6hdr *ipv6;
129845dac1d6SShreyas Bhatewara 		struct tcphdr *tcp;
129945dac1d6SShreyas Bhatewara 	} hdr;
130045dac1d6SShreyas Bhatewara 	BUG_ON(gdesc->rcd.tcp == 0);
130145dac1d6SShreyas Bhatewara 
130245dac1d6SShreyas Bhatewara 	maplen = skb_headlen(skb);
130345dac1d6SShreyas Bhatewara 	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
130445dac1d6SShreyas Bhatewara 		return 0;
130545dac1d6SShreyas Bhatewara 
130665ec0bd1SRonak Doshi 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
130765ec0bd1SRonak Doshi 	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
130865ec0bd1SRonak Doshi 		hlen = sizeof(struct vlan_ethhdr);
130965ec0bd1SRonak Doshi 	else
131065ec0bd1SRonak Doshi 		hlen = sizeof(struct ethhdr);
131165ec0bd1SRonak Doshi 
131245dac1d6SShreyas Bhatewara 	hdr.eth = eth_hdr(skb);
131345dac1d6SShreyas Bhatewara 	if (gdesc->rcd.v4) {
131465ec0bd1SRonak Doshi 		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
131565ec0bd1SRonak Doshi 		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
131665ec0bd1SRonak Doshi 		hdr.ptr += hlen;
131745dac1d6SShreyas Bhatewara 		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
131845dac1d6SShreyas Bhatewara 		hlen = hdr.ipv4->ihl << 2;
131945dac1d6SShreyas Bhatewara 		hdr.ptr += hdr.ipv4->ihl << 2;
132045dac1d6SShreyas Bhatewara 	} else if (gdesc->rcd.v6) {
132165ec0bd1SRonak Doshi 		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
132265ec0bd1SRonak Doshi 		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
132365ec0bd1SRonak Doshi 		hdr.ptr += hlen;
132445dac1d6SShreyas Bhatewara 		/* Use an estimated value, since we also need to handle
132545dac1d6SShreyas Bhatewara 		 * the TSO case.
132645dac1d6SShreyas Bhatewara 		 */
132745dac1d6SShreyas Bhatewara 		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
132845dac1d6SShreyas Bhatewara 			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
132945dac1d6SShreyas Bhatewara 		hlen = sizeof(struct ipv6hdr);
133045dac1d6SShreyas Bhatewara 		hdr.ptr += sizeof(struct ipv6hdr);
133145dac1d6SShreyas Bhatewara 	} else {
133245dac1d6SShreyas Bhatewara 		/* Non-IP pkt, don't estimate header length */
133345dac1d6SShreyas Bhatewara 		return 0;
133445dac1d6SShreyas Bhatewara 	}
133545dac1d6SShreyas Bhatewara 
133645dac1d6SShreyas Bhatewara 	if (hlen + sizeof(struct tcphdr) > maplen)
133745dac1d6SShreyas Bhatewara 		return 0;
133845dac1d6SShreyas Bhatewara 
133945dac1d6SShreyas Bhatewara 	return (hlen + (hdr.tcp->doff << 2));
134045dac1d6SShreyas Bhatewara }
134145dac1d6SShreyas Bhatewara 
1342d1a890faSShreyas Bhatewara static int
1343d1a890faSShreyas Bhatewara vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1344d1a890faSShreyas Bhatewara 		       struct vmxnet3_adapter *adapter, int quota)
1345d1a890faSShreyas Bhatewara {
1346215faf9cSJoe Perches 	static const u32 rxprod_reg[2] = {
1347215faf9cSJoe Perches 		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1348215faf9cSJoe Perches 	};
13490769636cSNeil Horman 	u32 num_pkts = 0;
13505318d809SShreyas Bhatewara 	bool skip_page_frags = false;
1351d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxCompDesc *rcd;
1352d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
135345dac1d6SShreyas Bhatewara 	u16 segCnt = 0, mss = 0;
1354115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1355115924b6SShreyas Bhatewara 	struct Vmxnet3_RxDesc rxCmdDesc;
1356115924b6SShreyas Bhatewara 	struct Vmxnet3_RxCompDesc rxComp;
1357115924b6SShreyas Bhatewara #endif
1358115924b6SShreyas Bhatewara 	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1359115924b6SShreyas Bhatewara 			  &rxComp);
1360d1a890faSShreyas Bhatewara 	while (rcd->gen == rq->comp_ring.gen) {
1361d1a890faSShreyas Bhatewara 		struct vmxnet3_rx_buf_info *rbi;
13625318d809SShreyas Bhatewara 		struct sk_buff *skb, *new_skb = NULL;
13635318d809SShreyas Bhatewara 		struct page *new_page = NULL;
13645738a09dSAlexey Khoroshilov 		dma_addr_t new_dma_addr;
1365d1a890faSShreyas Bhatewara 		int num_to_alloc;
1366d1a890faSShreyas Bhatewara 		struct Vmxnet3_RxDesc *rxd;
1367d1a890faSShreyas Bhatewara 		u32 idx, ring_idx;
13685318d809SShreyas Bhatewara 		struct vmxnet3_cmd_ring	*ring = NULL;
13690769636cSNeil Horman 		if (num_pkts >= quota) {
1370d1a890faSShreyas Bhatewara 			/* we may stop even before we see the EOP desc of
1371d1a890faSShreyas Bhatewara 			 * the current pkt
1372d1a890faSShreyas Bhatewara 			 */
1373d1a890faSShreyas Bhatewara 			break;
1374d1a890faSShreyas Bhatewara 		}
1375f3002c13Shpreg@vmware.com 
1376f3002c13Shpreg@vmware.com 		/* Prevent any rcd field from being (speculatively) read before
1377f3002c13Shpreg@vmware.com 		 * rcd->gen is read.
1378f3002c13Shpreg@vmware.com 		 */
1379f3002c13Shpreg@vmware.com 		dma_rmb();
1380f3002c13Shpreg@vmware.com 
138150a5ce3eSShrikrishna Khare 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
138250a5ce3eSShrikrishna Khare 		       rcd->rqID != rq->dataRingQid);
1383d1a890faSShreyas Bhatewara 		idx = rcd->rxdIdx;
138450a5ce3eSShrikrishna Khare 		ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
13855318d809SShreyas Bhatewara 		ring = rq->rx_ring + ring_idx;
1386115924b6SShreyas Bhatewara 		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1387115924b6SShreyas Bhatewara 				  &rxCmdDesc);
1388d1a890faSShreyas Bhatewara 		rbi = rq->buf_info[ring_idx] + idx;
1389d1a890faSShreyas Bhatewara 
1390115924b6SShreyas Bhatewara 		BUG_ON(rxd->addr != rbi->dma_addr ||
1391115924b6SShreyas Bhatewara 		       rxd->len != rbi->len);
1392d1a890faSShreyas Bhatewara 
1393d1a890faSShreyas Bhatewara 		if (unlikely(rcd->eop && rcd->err)) {
1394d1a890faSShreyas Bhatewara 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
1395d1a890faSShreyas Bhatewara 			goto rcd_done;
1396d1a890faSShreyas Bhatewara 		}
1397d1a890faSShreyas Bhatewara 
1398d1a890faSShreyas Bhatewara 		if (rcd->sop) { /* first buf of the pkt */
139950a5ce3eSShrikrishna Khare 			bool rxDataRingUsed;
140050a5ce3eSShrikrishna Khare 			u16 len;
140150a5ce3eSShrikrishna Khare 
1402d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
140350a5ce3eSShrikrishna Khare 			       (rcd->rqID != rq->qid &&
140450a5ce3eSShrikrishna Khare 				rcd->rqID != rq->dataRingQid));
1405d1a890faSShreyas Bhatewara 
1406d1a890faSShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1407d1a890faSShreyas Bhatewara 			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1408d1a890faSShreyas Bhatewara 
1409d1a890faSShreyas Bhatewara 			if (unlikely(rcd->len == 0)) {
1410d1a890faSShreyas Bhatewara 				/* Pretend the rx buffer is skipped. */
1411d1a890faSShreyas Bhatewara 				BUG_ON(!(rcd->sop && rcd->eop));
1412fdcd79b9SStephen Hemminger 				netdev_dbg(adapter->netdev,
1413f6965582SRandy Dunlap 					"rxRing[%u][%u] 0 length\n",
1414d1a890faSShreyas Bhatewara 					ring_idx, idx);
1415d1a890faSShreyas Bhatewara 				goto rcd_done;
1416d1a890faSShreyas Bhatewara 			}
1417d1a890faSShreyas Bhatewara 
14185318d809SShreyas Bhatewara 			skip_page_frags = false;
1419d1a890faSShreyas Bhatewara 			ctx->skb = rbi->skb;
142050a5ce3eSShrikrishna Khare 
142150a5ce3eSShrikrishna Khare 			rxDataRingUsed =
142250a5ce3eSShrikrishna Khare 				VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
142350a5ce3eSShrikrishna Khare 			len = rxDataRingUsed ? rcd->len : rbi->len;
14240d735f13SStephen Hemminger 			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
142550a5ce3eSShrikrishna Khare 							    len);
14265318d809SShreyas Bhatewara 			if (new_skb == NULL) {
14275318d809SShreyas Bhatewara 				/* Skb allocation failed, do not hand over this
14285318d809SShreyas Bhatewara 				 * skb to the stack. Reuse it. Drop the existing pkt
14295318d809SShreyas Bhatewara 				 */
14305318d809SShreyas Bhatewara 				rq->stats.rx_buf_alloc_failure++;
14315318d809SShreyas Bhatewara 				ctx->skb = NULL;
14325318d809SShreyas Bhatewara 				rq->stats.drop_total++;
14335318d809SShreyas Bhatewara 				skip_page_frags = true;
14345318d809SShreyas Bhatewara 				goto rcd_done;
14355318d809SShreyas Bhatewara 			}
143650a5ce3eSShrikrishna Khare 
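			/* Small packets may be delivered through the rx data
			 * ring: their payload is simply copied into the newly
			 * allocated skb. Otherwise the skb that was posted to
			 * the ring is handed up and the new skb refills the
			 * descriptor in its place.
			 */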
143750a5ce3eSShrikrishna Khare 			if (rxDataRingUsed) {
143850a5ce3eSShrikrishna Khare 				size_t sz;
143950a5ce3eSShrikrishna Khare 
144050a5ce3eSShrikrishna Khare 				BUG_ON(rcd->len > rq->data_ring.desc_size);
144150a5ce3eSShrikrishna Khare 
144250a5ce3eSShrikrishna Khare 				ctx->skb = new_skb;
144350a5ce3eSShrikrishna Khare 				sz = rcd->rxdIdx * rq->data_ring.desc_size;
144450a5ce3eSShrikrishna Khare 				memcpy(new_skb->data,
144550a5ce3eSShrikrishna Khare 				       &rq->data_ring.base[sz], rcd->len);
144650a5ce3eSShrikrishna Khare 			} else {
144750a5ce3eSShrikrishna Khare 				ctx->skb = rbi->skb;
144850a5ce3eSShrikrishna Khare 
144950a5ce3eSShrikrishna Khare 				new_dma_addr =
145050a5ce3eSShrikrishna Khare 					dma_map_single(&adapter->pdev->dev,
14515738a09dSAlexey Khoroshilov 						       new_skb->data, rbi->len,
14525738a09dSAlexey Khoroshilov 						       PCI_DMA_FROMDEVICE);
14535738a09dSAlexey Khoroshilov 				if (dma_mapping_error(&adapter->pdev->dev,
14545738a09dSAlexey Khoroshilov 						      new_dma_addr)) {
14555738a09dSAlexey Khoroshilov 					dev_kfree_skb(new_skb);
145650a5ce3eSShrikrishna Khare 					/* Skb allocation failed, do not
145750a5ce3eSShrikrishna Khare 					 * hand this skb over to the stack.
145850a5ce3eSShrikrishna Khare 					 * Reuse it. Drop the existing pkt.
14595738a09dSAlexey Khoroshilov 					 */
14605738a09dSAlexey Khoroshilov 					rq->stats.rx_buf_alloc_failure++;
14615738a09dSAlexey Khoroshilov 					ctx->skb = NULL;
14625738a09dSAlexey Khoroshilov 					rq->stats.drop_total++;
14635738a09dSAlexey Khoroshilov 					skip_page_frags = true;
14645738a09dSAlexey Khoroshilov 					goto rcd_done;
14655738a09dSAlexey Khoroshilov 				}
1466d1a890faSShreyas Bhatewara 
146750a5ce3eSShrikrishna Khare 				dma_unmap_single(&adapter->pdev->dev,
146850a5ce3eSShrikrishna Khare 						 rbi->dma_addr,
1469b0eb57cbSAndy King 						 rbi->len,
1470d1a890faSShreyas Bhatewara 						 PCI_DMA_FROMDEVICE);
1471d1a890faSShreyas Bhatewara 
147250a5ce3eSShrikrishna Khare 				/* Immediate refill */
147350a5ce3eSShrikrishna Khare 				rbi->skb = new_skb;
147450a5ce3eSShrikrishna Khare 				rbi->dma_addr = new_dma_addr;
147550a5ce3eSShrikrishna Khare 				rxd->addr = cpu_to_le64(rbi->dma_addr);
147650a5ce3eSShrikrishna Khare 				rxd->len = rbi->len;
147750a5ce3eSShrikrishna Khare 			}
147850a5ce3eSShrikrishna Khare 
14797db11f75SStephen Hemminger #ifdef VMXNET3_RSS
14807db11f75SStephen Hemminger 			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
14817db11f75SStephen Hemminger 			    (adapter->netdev->features & NETIF_F_RXHASH))
14822c15a154SMichal Schmidt 				skb_set_hash(ctx->skb,
14832c15a154SMichal Schmidt 					     le32_to_cpu(rcd->rssHash),
14840b680703STom Herbert 					     PKT_HASH_TYPE_L3);
14857db11f75SStephen Hemminger #endif
1486d1a890faSShreyas Bhatewara 			skb_put(ctx->skb, rcd->len);
14875318d809SShreyas Bhatewara 
1488190af10fSShrikrishna Khare 			if (VMXNET3_VERSION_GE_2(adapter) &&
148945dac1d6SShreyas Bhatewara 			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
149045dac1d6SShreyas Bhatewara 				struct Vmxnet3_RxCompDescExt *rcdlro;
149145dac1d6SShreyas Bhatewara 				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
14925318d809SShreyas Bhatewara 
149345dac1d6SShreyas Bhatewara 				segCnt = rcdlro->segCnt;
149450219538SShrikrishna Khare 				WARN_ON_ONCE(segCnt == 0);
149545dac1d6SShreyas Bhatewara 				mss = rcdlro->mss;
149645dac1d6SShreyas Bhatewara 				if (unlikely(segCnt <= 1))
149745dac1d6SShreyas Bhatewara 					segCnt = 0;
149845dac1d6SShreyas Bhatewara 			} else {
149945dac1d6SShreyas Bhatewara 				segCnt = 0;
150045dac1d6SShreyas Bhatewara 			}
1501d1a890faSShreyas Bhatewara 		} else {
15025318d809SShreyas Bhatewara 			BUG_ON(ctx->skb == NULL && !skip_page_frags);
15035318d809SShreyas Bhatewara 
1504d1a890faSShreyas Bhatewara 			/* non-SOP buffer must be type 1 in most cases */
15055318d809SShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1506d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1507d1a890faSShreyas Bhatewara 
15085318d809SShreyas Bhatewara 			/* If an sop buffer was dropped, skip all
15095318d809SShreyas Bhatewara 			 * following non-sop fragments. They will be reused.
15105318d809SShreyas Bhatewara 			 */
15115318d809SShreyas Bhatewara 			if (skip_page_frags)
15125318d809SShreyas Bhatewara 				goto rcd_done;
15135318d809SShreyas Bhatewara 
1514c41fcce9SShreyas Bhatewara 			if (rcd->len) {
15155318d809SShreyas Bhatewara 				new_page = alloc_page(GFP_ATOMIC);
15165318d809SShreyas Bhatewara 				/* Replacement page frag could not be allocated.
15175318d809SShreyas Bhatewara 				 * Reuse this page. Drop the pkt and free the
15185318d809SShreyas Bhatewara 				 * skb which contained this page as a frag. Skip
15195318d809SShreyas Bhatewara 				 * processing all the following non-sop frags.
15205318d809SShreyas Bhatewara 				 */
1521c41fcce9SShreyas Bhatewara 				if (unlikely(!new_page)) {
15225318d809SShreyas Bhatewara 					rq->stats.rx_buf_alloc_failure++;
15235318d809SShreyas Bhatewara 					dev_kfree_skb(ctx->skb);
15245318d809SShreyas Bhatewara 					ctx->skb = NULL;
15255318d809SShreyas Bhatewara 					skip_page_frags = true;
15265318d809SShreyas Bhatewara 					goto rcd_done;
15275318d809SShreyas Bhatewara 				}
152858caf637SShrikrishna Khare 				new_dma_addr = dma_map_page(&adapter->pdev->dev,
152958caf637SShrikrishna Khare 							    new_page,
15305738a09dSAlexey Khoroshilov 							    0, PAGE_SIZE,
15315738a09dSAlexey Khoroshilov 							    PCI_DMA_FROMDEVICE);
15325738a09dSAlexey Khoroshilov 				if (dma_mapping_error(&adapter->pdev->dev,
15335738a09dSAlexey Khoroshilov 						      new_dma_addr)) {
15345738a09dSAlexey Khoroshilov 					put_page(new_page);
15355738a09dSAlexey Khoroshilov 					rq->stats.rx_buf_alloc_failure++;
15365738a09dSAlexey Khoroshilov 					dev_kfree_skb(ctx->skb);
15375738a09dSAlexey Khoroshilov 					ctx->skb = NULL;
15385738a09dSAlexey Khoroshilov 					skip_page_frags = true;
15395738a09dSAlexey Khoroshilov 					goto rcd_done;
15405738a09dSAlexey Khoroshilov 				}
15415318d809SShreyas Bhatewara 
1542b0eb57cbSAndy King 				dma_unmap_page(&adapter->pdev->dev,
1543d1a890faSShreyas Bhatewara 					       rbi->dma_addr, rbi->len,
1544d1a890faSShreyas Bhatewara 					       PCI_DMA_FROMDEVICE);
1545d1a890faSShreyas Bhatewara 
1546d1a890faSShreyas Bhatewara 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
15475318d809SShreyas Bhatewara 
15485318d809SShreyas Bhatewara 				/* Immediate refill */
15495318d809SShreyas Bhatewara 				rbi->page = new_page;
15505738a09dSAlexey Khoroshilov 				rbi->dma_addr = new_dma_addr;
15515318d809SShreyas Bhatewara 				rxd->addr = cpu_to_le64(rbi->dma_addr);
15525318d809SShreyas Bhatewara 				rxd->len = rbi->len;
1553d1a890faSShreyas Bhatewara 			}
1554c41fcce9SShreyas Bhatewara 		}
15555318d809SShreyas Bhatewara 
1556d1a890faSShreyas Bhatewara 
1557d1a890faSShreyas Bhatewara 		skb = ctx->skb;
1558d1a890faSShreyas Bhatewara 		if (rcd->eop) {
155945dac1d6SShreyas Bhatewara 			u32 mtu = adapter->netdev->mtu;
1560d1a890faSShreyas Bhatewara 			skb->len += skb->data_len;
1561d1a890faSShreyas Bhatewara 
1562d1a890faSShreyas Bhatewara 			vmxnet3_rx_csum(adapter, skb,
1563d1a890faSShreyas Bhatewara 					(union Vmxnet3_GenericDesc *)rcd);
1564d1a890faSShreyas Bhatewara 			skb->protocol = eth_type_trans(skb, adapter->netdev);
1565034f4057SRonak Doshi 			if (!rcd->tcp ||
1566034f4057SRonak Doshi 			    !(adapter->netdev->features & NETIF_F_LRO))
156745dac1d6SShreyas Bhatewara 				goto not_lro;
1568d1a890faSShreyas Bhatewara 
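			/* For packets coalesced by the device (LRO), fill in
			 * gso_size/gso_segs so the stack can resegment them;
			 * if the device did not report an MSS, derive one from
			 * the estimated header length.
			 */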
156945dac1d6SShreyas Bhatewara 			if (segCnt != 0 && mss != 0) {
157045dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_type = rcd->v4 ?
157145dac1d6SShreyas Bhatewara 					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
157245dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_size = mss;
157345dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_segs = segCnt;
157445dac1d6SShreyas Bhatewara 			} else if (segCnt != 0 || skb->len > mtu) {
157545dac1d6SShreyas Bhatewara 				u32 hlen;
157645dac1d6SShreyas Bhatewara 
157745dac1d6SShreyas Bhatewara 				hlen = vmxnet3_get_hdr_len(adapter, skb,
157845dac1d6SShreyas Bhatewara 					(union Vmxnet3_GenericDesc *)rcd);
157945dac1d6SShreyas Bhatewara 				if (hlen == 0)
158045dac1d6SShreyas Bhatewara 					goto not_lro;
158145dac1d6SShreyas Bhatewara 
158245dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_type =
158345dac1d6SShreyas Bhatewara 					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
158445dac1d6SShreyas Bhatewara 				if (segCnt != 0) {
158545dac1d6SShreyas Bhatewara 					skb_shinfo(skb)->gso_segs = segCnt;
158645dac1d6SShreyas Bhatewara 					skb_shinfo(skb)->gso_size =
158745dac1d6SShreyas Bhatewara 						DIV_ROUND_UP(skb->len -
158845dac1d6SShreyas Bhatewara 							hlen, segCnt);
158945dac1d6SShreyas Bhatewara 				} else {
159045dac1d6SShreyas Bhatewara 					skb_shinfo(skb)->gso_size = mtu - hlen;
159145dac1d6SShreyas Bhatewara 				}
159245dac1d6SShreyas Bhatewara 			}
159345dac1d6SShreyas Bhatewara not_lro:
159472e85c45SJesse Gross 			if (unlikely(rcd->ts))
159586a9bad3SPatrick McHardy 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
159672e85c45SJesse Gross 
1597213ade8cSJesse Gross 			if (adapter->netdev->features & NETIF_F_LRO)
1598d1a890faSShreyas Bhatewara 				netif_receive_skb(skb);
1599213ade8cSJesse Gross 			else
1600213ade8cSJesse Gross 				napi_gro_receive(&rq->napi, skb);
1601d1a890faSShreyas Bhatewara 
1602d1a890faSShreyas Bhatewara 			ctx->skb = NULL;
16030769636cSNeil Horman 			num_pkts++;
1604d1a890faSShreyas Bhatewara 		}
1605d1a890faSShreyas Bhatewara 
1606d1a890faSShreyas Bhatewara rcd_done:
16075318d809SShreyas Bhatewara 		/* device may have skipped some rx descs */
16085318d809SShreyas Bhatewara 		ring->next2comp = idx;
16095318d809SShreyas Bhatewara 		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
16105318d809SShreyas Bhatewara 		ring = rq->rx_ring + ring_idx;
1611f3002c13Shpreg@vmware.com 
1612f3002c13Shpreg@vmware.com 		/* Ensure that the writes to rxd->gen bits will be observed
1613f3002c13Shpreg@vmware.com 		 * after all other writes to rxd objects.
1614f3002c13Shpreg@vmware.com 		 */
1615f3002c13Shpreg@vmware.com 		dma_wmb();
1616f3002c13Shpreg@vmware.com 
16175318d809SShreyas Bhatewara 		while (num_to_alloc) {
16185318d809SShreyas Bhatewara 			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
16195318d809SShreyas Bhatewara 					  &rxCmdDesc);
16205318d809SShreyas Bhatewara 			BUG_ON(!rxd->addr);
1621d1a890faSShreyas Bhatewara 
16225318d809SShreyas Bhatewara 			/* Recv desc is ready to be used by the device */
16235318d809SShreyas Bhatewara 			rxd->gen = ring->gen;
16245318d809SShreyas Bhatewara 			vmxnet3_cmd_ring_adv_next2fill(ring);
16255318d809SShreyas Bhatewara 			num_to_alloc--;
16265318d809SShreyas Bhatewara 		}
1627d1a890faSShreyas Bhatewara 
1628d1a890faSShreyas Bhatewara 		/* if needed, update the register */
1629d1a890faSShreyas Bhatewara 		if (unlikely(rq->shared->updateRxProd)) {
1630d1a890faSShreyas Bhatewara 			VMXNET3_WRITE_BAR0_REG(adapter,
1631d1a890faSShreyas Bhatewara 					       rxprod_reg[ring_idx] + rq->qid * 8,
16325318d809SShreyas Bhatewara 					       ring->next2fill);
1633d1a890faSShreyas Bhatewara 		}
1634d1a890faSShreyas Bhatewara 
1635d1a890faSShreyas Bhatewara 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1636115924b6SShreyas Bhatewara 		vmxnet3_getRxComp(rcd,
1637115924b6SShreyas Bhatewara 				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1638d1a890faSShreyas Bhatewara 	}
1639d1a890faSShreyas Bhatewara 
16400769636cSNeil Horman 	return num_pkts;
1641d1a890faSShreyas Bhatewara }
1642d1a890faSShreyas Bhatewara 
1643d1a890faSShreyas Bhatewara 
1644d1a890faSShreyas Bhatewara static void
1645d1a890faSShreyas Bhatewara vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1646d1a890faSShreyas Bhatewara 		   struct vmxnet3_adapter *adapter)
1647d1a890faSShreyas Bhatewara {
1648d1a890faSShreyas Bhatewara 	u32 i, ring_idx;
1649d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxDesc *rxd;
1650d1a890faSShreyas Bhatewara 
1651d1a890faSShreyas Bhatewara 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1652d1a890faSShreyas Bhatewara 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1653115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1654115924b6SShreyas Bhatewara 			struct Vmxnet3_RxDesc rxDesc;
1655115924b6SShreyas Bhatewara #endif
1656115924b6SShreyas Bhatewara 			vmxnet3_getRxDesc(rxd,
1657115924b6SShreyas Bhatewara 				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1658d1a890faSShreyas Bhatewara 
1659d1a890faSShreyas Bhatewara 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1660d1a890faSShreyas Bhatewara 					rq->buf_info[ring_idx][i].skb) {
1661b0eb57cbSAndy King 				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1662d1a890faSShreyas Bhatewara 						 rxd->len, PCI_DMA_FROMDEVICE);
1663d1a890faSShreyas Bhatewara 				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1664d1a890faSShreyas Bhatewara 				rq->buf_info[ring_idx][i].skb = NULL;
1665d1a890faSShreyas Bhatewara 			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1666d1a890faSShreyas Bhatewara 					rq->buf_info[ring_idx][i].page) {
1667b0eb57cbSAndy King 				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1668d1a890faSShreyas Bhatewara 					       rxd->len, PCI_DMA_FROMDEVICE);
1669d1a890faSShreyas Bhatewara 				put_page(rq->buf_info[ring_idx][i].page);
1670d1a890faSShreyas Bhatewara 				rq->buf_info[ring_idx][i].page = NULL;
1671d1a890faSShreyas Bhatewara 			}
1672d1a890faSShreyas Bhatewara 		}
1673d1a890faSShreyas Bhatewara 
1674d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1675d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].next2fill =
1676d1a890faSShreyas Bhatewara 					rq->rx_ring[ring_idx].next2comp = 0;
1677d1a890faSShreyas Bhatewara 	}
1678d1a890faSShreyas Bhatewara 
1679d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1680d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
1681d1a890faSShreyas Bhatewara }
1682d1a890faSShreyas Bhatewara 
1683d1a890faSShreyas Bhatewara 
168409c5088eSShreyas Bhatewara static void
168509c5088eSShreyas Bhatewara vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
168609c5088eSShreyas Bhatewara {
168709c5088eSShreyas Bhatewara 	int i;
168809c5088eSShreyas Bhatewara 
168909c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
169009c5088eSShreyas Bhatewara 		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
169109c5088eSShreyas Bhatewara }
169209c5088eSShreyas Bhatewara 
169309c5088eSShreyas Bhatewara 
1694280b74f7Sstephen hemminger static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1695d1a890faSShreyas Bhatewara 			       struct vmxnet3_adapter *adapter)
1696d1a890faSShreyas Bhatewara {
1697d1a890faSShreyas Bhatewara 	int i;
1698d1a890faSShreyas Bhatewara 	int j;
1699d1a890faSShreyas Bhatewara 
1700d1a890faSShreyas Bhatewara 	/* all rx buffers must have already been freed */
1701d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1702d1a890faSShreyas Bhatewara 		if (rq->buf_info[i]) {
1703d1a890faSShreyas Bhatewara 			for (j = 0; j < rq->rx_ring[i].size; j++)
1704d1a890faSShreyas Bhatewara 				BUG_ON(rq->buf_info[i][j].page != NULL);
1705d1a890faSShreyas Bhatewara 		}
1706d1a890faSShreyas Bhatewara 	}
1707d1a890faSShreyas Bhatewara 
1708d1a890faSShreyas Bhatewara 
1709d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1710d1a890faSShreyas Bhatewara 		if (rq->rx_ring[i].base) {
1711b0eb57cbSAndy King 			dma_free_coherent(&adapter->pdev->dev,
1712b0eb57cbSAndy King 					  rq->rx_ring[i].size
1713d1a890faSShreyas Bhatewara 					  * sizeof(struct Vmxnet3_RxDesc),
1714d1a890faSShreyas Bhatewara 					  rq->rx_ring[i].base,
1715d1a890faSShreyas Bhatewara 					  rq->rx_ring[i].basePA);
1716d1a890faSShreyas Bhatewara 			rq->rx_ring[i].base = NULL;
1717d1a890faSShreyas Bhatewara 		}
1718d1a890faSShreyas Bhatewara 	}
1719d1a890faSShreyas Bhatewara 
172050a5ce3eSShrikrishna Khare 	if (rq->data_ring.base) {
172150a5ce3eSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
172250a5ce3eSShrikrishna Khare 				  rq->rx_ring[0].size * rq->data_ring.desc_size,
172350a5ce3eSShrikrishna Khare 				  rq->data_ring.base, rq->data_ring.basePA);
172450a5ce3eSShrikrishna Khare 		rq->data_ring.base = NULL;
172550a5ce3eSShrikrishna Khare 	}
172650a5ce3eSShrikrishna Khare 
1727d1a890faSShreyas Bhatewara 	if (rq->comp_ring.base) {
1728b0eb57cbSAndy King 		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1729b0eb57cbSAndy King 				  * sizeof(struct Vmxnet3_RxCompDesc),
1730d1a890faSShreyas Bhatewara 				  rq->comp_ring.base, rq->comp_ring.basePA);
1731d1a890faSShreyas Bhatewara 		rq->comp_ring.base = NULL;
1732d1a890faSShreyas Bhatewara 	}
1733b0eb57cbSAndy King 
1734de1da8bcSRonak Doshi 	kfree(rq->buf_info[0]);
1735de1da8bcSRonak Doshi 	rq->buf_info[0] = NULL;
1736de1da8bcSRonak Doshi 	rq->buf_info[1] = NULL;
1737d1a890faSShreyas Bhatewara }
1738d1a890faSShreyas Bhatewara 
1739bb40aca7SWei Yongjun static void
174050a5ce3eSShrikrishna Khare vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
174150a5ce3eSShrikrishna Khare {
174250a5ce3eSShrikrishna Khare 	int i;
174350a5ce3eSShrikrishna Khare 
174450a5ce3eSShrikrishna Khare 	for (i = 0; i < adapter->num_rx_queues; i++) {
174550a5ce3eSShrikrishna Khare 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
174650a5ce3eSShrikrishna Khare 
174750a5ce3eSShrikrishna Khare 		if (rq->data_ring.base) {
174850a5ce3eSShrikrishna Khare 			dma_free_coherent(&adapter->pdev->dev,
174950a5ce3eSShrikrishna Khare 					  (rq->rx_ring[0].size *
175050a5ce3eSShrikrishna Khare 					  rq->data_ring.desc_size),
175150a5ce3eSShrikrishna Khare 					  rq->data_ring.base,
175250a5ce3eSShrikrishna Khare 					  rq->data_ring.basePA);
175350a5ce3eSShrikrishna Khare 			rq->data_ring.base = NULL;
175450a5ce3eSShrikrishna Khare 			rq->data_ring.desc_size = 0;
175550a5ce3eSShrikrishna Khare 		}
175650a5ce3eSShrikrishna Khare 	}
175750a5ce3eSShrikrishna Khare }
1758d1a890faSShreyas Bhatewara 
1759d1a890faSShreyas Bhatewara static int
1760d1a890faSShreyas Bhatewara vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1761d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter  *adapter)
1762d1a890faSShreyas Bhatewara {
1763d1a890faSShreyas Bhatewara 	int i;
1764d1a890faSShreyas Bhatewara 
1765d1a890faSShreyas Bhatewara 	/* initialize buf_info */
1766d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[0].size; i++) {
1767d1a890faSShreyas Bhatewara 
1768d1a890faSShreyas Bhatewara 		/* 1st buf for a pkt is skbuff */
1769d1a890faSShreyas Bhatewara 		if (i % adapter->rx_buf_per_pkt == 0) {
1770d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1771d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = adapter->skb_buf_size;
1772d1a890faSShreyas Bhatewara 		} else { /* subsequent bufs for a pkt are frags */
1773d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1774d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = PAGE_SIZE;
1775d1a890faSShreyas Bhatewara 		}
1776d1a890faSShreyas Bhatewara 	}
1777d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[1].size; i++) {
1778d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1779d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].len = PAGE_SIZE;
1780d1a890faSShreyas Bhatewara 	}
1781d1a890faSShreyas Bhatewara 
1782d1a890faSShreyas Bhatewara 	/* reset internal state and allocate buffers for both rings */
1783d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1784d1a890faSShreyas Bhatewara 		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1785d1a890faSShreyas Bhatewara 
1786d1a890faSShreyas Bhatewara 		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1787d1a890faSShreyas Bhatewara 		       sizeof(struct Vmxnet3_RxDesc));
1788d1a890faSShreyas Bhatewara 		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1789d1a890faSShreyas Bhatewara 	}
1790d1a890faSShreyas Bhatewara 	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1791d1a890faSShreyas Bhatewara 				    adapter) == 0) {
1792d1a890faSShreyas Bhatewara 		/* need at least 1 rx buffer for the 1st ring */
1793d1a890faSShreyas Bhatewara 		return -ENOMEM;
1794d1a890faSShreyas Bhatewara 	}
1795d1a890faSShreyas Bhatewara 	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1796d1a890faSShreyas Bhatewara 
1797d1a890faSShreyas Bhatewara 	/* reset the comp ring */
1798d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
1799d1a890faSShreyas Bhatewara 	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1800d1a890faSShreyas Bhatewara 	       sizeof(struct Vmxnet3_RxCompDesc));
1801d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1802d1a890faSShreyas Bhatewara 
1803d1a890faSShreyas Bhatewara 	/* reset rxctx */
1804d1a890faSShreyas Bhatewara 	rq->rx_ctx.skb = NULL;
1805d1a890faSShreyas Bhatewara 
1806d1a890faSShreyas Bhatewara 	/* stats are not reset */
1807d1a890faSShreyas Bhatewara 	return 0;
1808d1a890faSShreyas Bhatewara }
1809d1a890faSShreyas Bhatewara 
1810d1a890faSShreyas Bhatewara 
1811d1a890faSShreyas Bhatewara static int
181209c5088eSShreyas Bhatewara vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
181309c5088eSShreyas Bhatewara {
181409c5088eSShreyas Bhatewara 	int i, err = 0;
181509c5088eSShreyas Bhatewara 
181609c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
181709c5088eSShreyas Bhatewara 		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
181809c5088eSShreyas Bhatewara 		if (unlikely(err)) {
181909c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev, "%s: failed to "
182009c5088eSShreyas Bhatewara 				"initialize rx queue%i\n",
182109c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
182209c5088eSShreyas Bhatewara 			break;
182309c5088eSShreyas Bhatewara 		}
182409c5088eSShreyas Bhatewara 	}
182509c5088eSShreyas Bhatewara 	return err;
182609c5088eSShreyas Bhatewara 
182709c5088eSShreyas Bhatewara }
182809c5088eSShreyas Bhatewara 
182909c5088eSShreyas Bhatewara 
183009c5088eSShreyas Bhatewara static int
1831d1a890faSShreyas Bhatewara vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1832d1a890faSShreyas Bhatewara {
1833d1a890faSShreyas Bhatewara 	int i;
1834d1a890faSShreyas Bhatewara 	size_t sz;
1835d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_buf_info *bi;
1836d1a890faSShreyas Bhatewara 
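	/* Each rx queue uses two command rings (ring 0 for head/skb buffers,
	 * ring 1 for body/page buffers), an optional rx data ring for
	 * small-packet copies, and a completion ring.
	 */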
1837d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1838d1a890faSShreyas Bhatewara 
1839d1a890faSShreyas Bhatewara 		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1840b0eb57cbSAndy King 		rq->rx_ring[i].base = dma_alloc_coherent(
1841b0eb57cbSAndy King 						&adapter->pdev->dev, sz,
1842b0eb57cbSAndy King 						&rq->rx_ring[i].basePA,
1843b0eb57cbSAndy King 						GFP_KERNEL);
1844d1a890faSShreyas Bhatewara 		if (!rq->rx_ring[i].base) {
1845204a6e65SStephen Hemminger 			netdev_err(adapter->netdev,
1846204a6e65SStephen Hemminger 				   "failed to allocate rx ring %d\n", i);
1847d1a890faSShreyas Bhatewara 			goto err;
1848d1a890faSShreyas Bhatewara 		}
1849d1a890faSShreyas Bhatewara 	}
1850d1a890faSShreyas Bhatewara 
185150a5ce3eSShrikrishna Khare 	if (adapter->rxdataring_enabled && rq->data_ring.desc_size != 0) {
185250a5ce3eSShrikrishna Khare 		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
185350a5ce3eSShrikrishna Khare 		rq->data_ring.base =
185450a5ce3eSShrikrishna Khare 			dma_alloc_coherent(&adapter->pdev->dev, sz,
185550a5ce3eSShrikrishna Khare 					   &rq->data_ring.basePA,
185650a5ce3eSShrikrishna Khare 					   GFP_KERNEL);
185750a5ce3eSShrikrishna Khare 		if (!rq->data_ring.base) {
185850a5ce3eSShrikrishna Khare 			netdev_err(adapter->netdev,
185950a5ce3eSShrikrishna Khare 				   "rx data ring will be disabled\n");
186050a5ce3eSShrikrishna Khare 			adapter->rxdataring_enabled = false;
186150a5ce3eSShrikrishna Khare 		}
186250a5ce3eSShrikrishna Khare 	} else {
186350a5ce3eSShrikrishna Khare 		rq->data_ring.base = NULL;
186450a5ce3eSShrikrishna Khare 		rq->data_ring.desc_size = 0;
186550a5ce3eSShrikrishna Khare 	}
186650a5ce3eSShrikrishna Khare 
1867d1a890faSShreyas Bhatewara 	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1868b0eb57cbSAndy King 	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1869b0eb57cbSAndy King 						&rq->comp_ring.basePA,
1870b0eb57cbSAndy King 						GFP_KERNEL);
1871d1a890faSShreyas Bhatewara 	if (!rq->comp_ring.base) {
1872204a6e65SStephen Hemminger 		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1873d1a890faSShreyas Bhatewara 		goto err;
1874d1a890faSShreyas Bhatewara 	}
1875d1a890faSShreyas Bhatewara 
1876de1da8bcSRonak Doshi 	bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
1877de1da8bcSRonak Doshi 			  sizeof(rq->buf_info[0][0]), GFP_KERNEL,
1878de1da8bcSRonak Doshi 			  dev_to_node(&adapter->pdev->dev));
1879e404decbSJoe Perches 	if (!bi)
1880d1a890faSShreyas Bhatewara 		goto err;
1881e404decbSJoe Perches 
1882d1a890faSShreyas Bhatewara 	rq->buf_info[0] = bi;
1883d1a890faSShreyas Bhatewara 	rq->buf_info[1] = bi + rq->rx_ring[0].size;
1884d1a890faSShreyas Bhatewara 
1885d1a890faSShreyas Bhatewara 	return 0;
1886d1a890faSShreyas Bhatewara 
1887d1a890faSShreyas Bhatewara err:
1888d1a890faSShreyas Bhatewara 	vmxnet3_rq_destroy(rq, adapter);
1889d1a890faSShreyas Bhatewara 	return -ENOMEM;
1890d1a890faSShreyas Bhatewara }
1891d1a890faSShreyas Bhatewara 
1892d1a890faSShreyas Bhatewara 
1893d1a890faSShreyas Bhatewara static int
189409c5088eSShreyas Bhatewara vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
189509c5088eSShreyas Bhatewara {
189609c5088eSShreyas Bhatewara 	int i, err = 0;
189709c5088eSShreyas Bhatewara 
189850a5ce3eSShrikrishna Khare 	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
189950a5ce3eSShrikrishna Khare 
190009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
190109c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
190209c5088eSShreyas Bhatewara 		if (unlikely(err)) {
190309c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev,
190409c5088eSShreyas Bhatewara 				"%s: failed to create rx queue%i\n",
190509c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
190609c5088eSShreyas Bhatewara 			goto err_out;
190709c5088eSShreyas Bhatewara 		}
190809c5088eSShreyas Bhatewara 	}
190950a5ce3eSShrikrishna Khare 
191050a5ce3eSShrikrishna Khare 	if (!adapter->rxdataring_enabled)
191150a5ce3eSShrikrishna Khare 		vmxnet3_rq_destroy_all_rxdataring(adapter);
191250a5ce3eSShrikrishna Khare 
191309c5088eSShreyas Bhatewara 	return err;
191409c5088eSShreyas Bhatewara err_out:
191509c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
191609c5088eSShreyas Bhatewara 	return err;
191709c5088eSShreyas Bhatewara 
191809c5088eSShreyas Bhatewara }
191909c5088eSShreyas Bhatewara 
192009c5088eSShreyas Bhatewara /* Multiple queue aware polling function for tx and rx */
192109c5088eSShreyas Bhatewara 
192209c5088eSShreyas Bhatewara static int
1923d1a890faSShreyas Bhatewara vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1924d1a890faSShreyas Bhatewara {
192509c5088eSShreyas Bhatewara 	int rcd_done = 0, i;
1926d1a890faSShreyas Bhatewara 	if (unlikely(adapter->shared->ecr))
1927d1a890faSShreyas Bhatewara 		vmxnet3_process_events(adapter);
192809c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
192909c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1930d1a890faSShreyas Bhatewara 
193109c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
193209c5088eSShreyas Bhatewara 		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
193309c5088eSShreyas Bhatewara 						   adapter, budget);
193409c5088eSShreyas Bhatewara 	return rcd_done;
1935d1a890faSShreyas Bhatewara }
1936d1a890faSShreyas Bhatewara 
1937d1a890faSShreyas Bhatewara 
1938d1a890faSShreyas Bhatewara static int
1939d1a890faSShreyas Bhatewara vmxnet3_poll(struct napi_struct *napi, int budget)
1940d1a890faSShreyas Bhatewara {
194109c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
194209c5088eSShreyas Bhatewara 					  struct vmxnet3_rx_queue, napi);
1943d1a890faSShreyas Bhatewara 	int rxd_done;
1944d1a890faSShreyas Bhatewara 
194509c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1946d1a890faSShreyas Bhatewara 
1947d1a890faSShreyas Bhatewara 	if (rxd_done < budget) {
19486ad20165SEric Dumazet 		napi_complete_done(napi, rxd_done);
194909c5088eSShreyas Bhatewara 		vmxnet3_enable_all_intrs(rx_queue->adapter);
1950d1a890faSShreyas Bhatewara 	}
1951d1a890faSShreyas Bhatewara 	return rxd_done;
1952d1a890faSShreyas Bhatewara }
1953d1a890faSShreyas Bhatewara 
195409c5088eSShreyas Bhatewara /*
195509c5088eSShreyas Bhatewara  * NAPI polling function for MSI-X mode with multiple Rx queues
195609c5088eSShreyas Bhatewara  * Returns the # of NAPI credits consumed (# of rx descriptors processed)
195709c5088eSShreyas Bhatewara  */
195809c5088eSShreyas Bhatewara 
195909c5088eSShreyas Bhatewara static int
196009c5088eSShreyas Bhatewara vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
196109c5088eSShreyas Bhatewara {
196209c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = container_of(napi,
196309c5088eSShreyas Bhatewara 						struct vmxnet3_rx_queue, napi);
196409c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
196509c5088eSShreyas Bhatewara 	int rxd_done;
196609c5088eSShreyas Bhatewara 
196709c5088eSShreyas Bhatewara 	/* When sharing interrupt with corresponding tx queue, process
196809c5088eSShreyas Bhatewara 	 * tx completions in that queue as well
196909c5088eSShreyas Bhatewara 	 */
197009c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
197109c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue *tq =
197209c5088eSShreyas Bhatewara 				&adapter->tx_queue[rq - adapter->rx_queue];
197309c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
197409c5088eSShreyas Bhatewara 	}
197509c5088eSShreyas Bhatewara 
197609c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
197709c5088eSShreyas Bhatewara 
197809c5088eSShreyas Bhatewara 	if (rxd_done < budget) {
19796ad20165SEric Dumazet 		napi_complete_done(napi, rxd_done);
198009c5088eSShreyas Bhatewara 		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
198109c5088eSShreyas Bhatewara 	}
198209c5088eSShreyas Bhatewara 	return rxd_done;
198309c5088eSShreyas Bhatewara }
198409c5088eSShreyas Bhatewara 
198509c5088eSShreyas Bhatewara 
198609c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
198709c5088eSShreyas Bhatewara 
198809c5088eSShreyas Bhatewara /*
198909c5088eSShreyas Bhatewara  * Handle completion interrupts on tx queues
199009c5088eSShreyas Bhatewara  * Returns whether or not the intr is handled
199109c5088eSShreyas Bhatewara  */
199209c5088eSShreyas Bhatewara 
199309c5088eSShreyas Bhatewara static irqreturn_t
199409c5088eSShreyas Bhatewara vmxnet3_msix_tx(int irq, void *data)
199509c5088eSShreyas Bhatewara {
199609c5088eSShreyas Bhatewara 	struct vmxnet3_tx_queue *tq = data;
199709c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = tq->adapter;
199809c5088eSShreyas Bhatewara 
199909c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
200009c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
200109c5088eSShreyas Bhatewara 
200209c5088eSShreyas Bhatewara 	/* Handle the case where only one irq is allocated for all tx queues */
200309c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
200409c5088eSShreyas Bhatewara 		int i;
200509c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
200609c5088eSShreyas Bhatewara 			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
200709c5088eSShreyas Bhatewara 			vmxnet3_tq_tx_complete(txq, adapter);
200809c5088eSShreyas Bhatewara 		}
200909c5088eSShreyas Bhatewara 	} else {
201009c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
201109c5088eSShreyas Bhatewara 	}
201209c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
201309c5088eSShreyas Bhatewara 
201409c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
201509c5088eSShreyas Bhatewara }
201609c5088eSShreyas Bhatewara 
201709c5088eSShreyas Bhatewara 
201809c5088eSShreyas Bhatewara /*
201909c5088eSShreyas Bhatewara  * Handle completion interrupts on rx queues. Returns whether or not the
202009c5088eSShreyas Bhatewara  * intr is handled
202109c5088eSShreyas Bhatewara  */
202209c5088eSShreyas Bhatewara 
202309c5088eSShreyas Bhatewara static irqreturn_t
202409c5088eSShreyas Bhatewara vmxnet3_msix_rx(int irq, void *data)
202509c5088eSShreyas Bhatewara {
202609c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = data;
202709c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
202809c5088eSShreyas Bhatewara 
202909c5088eSShreyas Bhatewara 	/* disable intr if needed */
203009c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
203109c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
203209c5088eSShreyas Bhatewara 	napi_schedule(&rq->napi);
203309c5088eSShreyas Bhatewara 
203409c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
203509c5088eSShreyas Bhatewara }
203609c5088eSShreyas Bhatewara 
203709c5088eSShreyas Bhatewara /*
203809c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
203909c5088eSShreyas Bhatewara  *
204009c5088eSShreyas Bhatewara  * vmxnet3_msix_event --
204109c5088eSShreyas Bhatewara  *
204209c5088eSShreyas Bhatewara  *    vmxnet3 msix event intr handler
204309c5088eSShreyas Bhatewara  *
204409c5088eSShreyas Bhatewara  * Result:
204509c5088eSShreyas Bhatewara  *    whether or not the intr is handled
204609c5088eSShreyas Bhatewara  *
204709c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
204809c5088eSShreyas Bhatewara  */
204909c5088eSShreyas Bhatewara 
205009c5088eSShreyas Bhatewara static irqreturn_t
205109c5088eSShreyas Bhatewara vmxnet3_msix_event(int irq, void *data)
205209c5088eSShreyas Bhatewara {
205309c5088eSShreyas Bhatewara 	struct net_device *dev = data;
205409c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
205509c5088eSShreyas Bhatewara 
205609c5088eSShreyas Bhatewara 	/* disable intr if needed */
205709c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
205809c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
205909c5088eSShreyas Bhatewara 
206009c5088eSShreyas Bhatewara 	if (adapter->shared->ecr)
206109c5088eSShreyas Bhatewara 		vmxnet3_process_events(adapter);
206209c5088eSShreyas Bhatewara 
206309c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
206409c5088eSShreyas Bhatewara 
206509c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
206609c5088eSShreyas Bhatewara }
206709c5088eSShreyas Bhatewara 
206809c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI  */
206909c5088eSShreyas Bhatewara 
2070d1a890faSShreyas Bhatewara 
2071d1a890faSShreyas Bhatewara /* Interrupt handler for vmxnet3  */
2072d1a890faSShreyas Bhatewara static irqreturn_t
2073d1a890faSShreyas Bhatewara vmxnet3_intr(int irq, void *dev_id)
2074d1a890faSShreyas Bhatewara {
2075d1a890faSShreyas Bhatewara 	struct net_device *dev = dev_id;
2076d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
2077d1a890faSShreyas Bhatewara 
207809c5088eSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_INTX) {
2079d1a890faSShreyas Bhatewara 		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2080d1a890faSShreyas Bhatewara 		if (unlikely(icr == 0))
2081d1a890faSShreyas Bhatewara 			/* not ours */
2082d1a890faSShreyas Bhatewara 			return IRQ_NONE;
2083d1a890faSShreyas Bhatewara 	}
2084d1a890faSShreyas Bhatewara 
2085d1a890faSShreyas Bhatewara 
2086d1a890faSShreyas Bhatewara 	/* disable intr if needed */
2087d1a890faSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
208809c5088eSShreyas Bhatewara 		vmxnet3_disable_all_intrs(adapter);
2089d1a890faSShreyas Bhatewara 
209009c5088eSShreyas Bhatewara 	napi_schedule(&adapter->rx_queue[0].napi);
2091d1a890faSShreyas Bhatewara 
2092d1a890faSShreyas Bhatewara 	return IRQ_HANDLED;
2093d1a890faSShreyas Bhatewara }
2094d1a890faSShreyas Bhatewara 
2095d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
2096d1a890faSShreyas Bhatewara 
2097d1a890faSShreyas Bhatewara /* netpoll callback. */
2098d1a890faSShreyas Bhatewara static void
2099d1a890faSShreyas Bhatewara vmxnet3_netpoll(struct net_device *netdev)
2100d1a890faSShreyas Bhatewara {
2101d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2102d1a890faSShreyas Bhatewara 
2103d25f06eaSNeil Horman 	switch (adapter->intr.type) {
21040a8d8c44SArnd Bergmann #ifdef CONFIG_PCI_MSI
21050a8d8c44SArnd Bergmann 	case VMXNET3_IT_MSIX: {
21060a8d8c44SArnd Bergmann 		int i;
2107d25f06eaSNeil Horman 		for (i = 0; i < adapter->num_rx_queues; i++)
2108d25f06eaSNeil Horman 			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2109d25f06eaSNeil Horman 		break;
21100a8d8c44SArnd Bergmann 	}
21110a8d8c44SArnd Bergmann #endif
2112d25f06eaSNeil Horman 	case VMXNET3_IT_MSI:
2113d25f06eaSNeil Horman 	default:
2114d25f06eaSNeil Horman 		vmxnet3_intr(0, adapter->netdev);
2115d25f06eaSNeil Horman 		break;
2116d25f06eaSNeil Horman 	}
211709c5088eSShreyas Bhatewara 
2118d1a890faSShreyas Bhatewara }
211909c5088eSShreyas Bhatewara #endif	/* CONFIG_NET_POLL_CONTROLLER */
2120d1a890faSShreyas Bhatewara 
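/*
 * Request IRQs according to the interrupt mode.  With MSI-X, one vector is
 * requested per tx queue (or a single shared vector under TXSHARE), one per
 * rx queue (which also services its buddy tx queue under BUDDYSHARE), plus
 * one vector for events.  Otherwise a single MSI or shared INTx line is
 * requested.
 */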
2121d1a890faSShreyas Bhatewara static int
2122d1a890faSShreyas Bhatewara vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2123d1a890faSShreyas Bhatewara {
212409c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
212509c5088eSShreyas Bhatewara 	int err = 0, i;
212609c5088eSShreyas Bhatewara 	int vector = 0;
2127d1a890faSShreyas Bhatewara 
21288f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
2129d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
213009c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
213109c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
213209c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
213309c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
213409c5088eSShreyas Bhatewara 				err = request_irq(
213509c5088eSShreyas Bhatewara 					      intr->msix_entries[vector].vector,
213609c5088eSShreyas Bhatewara 					      vmxnet3_msix_tx, 0,
213709c5088eSShreyas Bhatewara 					      adapter->tx_queue[i].name,
213809c5088eSShreyas Bhatewara 					      &adapter->tx_queue[i]);
213909c5088eSShreyas Bhatewara 			} else {
214009c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
214109c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
214209c5088eSShreyas Bhatewara 			}
214309c5088eSShreyas Bhatewara 			if (err) {
214409c5088eSShreyas Bhatewara 				dev_err(&adapter->netdev->dev,
214509c5088eSShreyas Bhatewara 					"Failed to request irq for MSIX, %s, "
214609c5088eSShreyas Bhatewara 					"error %d\n",
214709c5088eSShreyas Bhatewara 					adapter->tx_queue[i].name, err);
214809c5088eSShreyas Bhatewara 				return err;
214909c5088eSShreyas Bhatewara 			}
215009c5088eSShreyas Bhatewara 
215109c5088eSShreyas Bhatewara 			/* Handle the case where only 1 MSIx was allocated for
215209c5088eSShreyas Bhatewara 			 * all tx queues */
215309c5088eSShreyas Bhatewara 			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
215409c5088eSShreyas Bhatewara 				for (; i < adapter->num_tx_queues; i++)
215509c5088eSShreyas Bhatewara 					adapter->tx_queue[i].comp_ring.intr_idx
215609c5088eSShreyas Bhatewara 								= vector;
215709c5088eSShreyas Bhatewara 				vector++;
215809c5088eSShreyas Bhatewara 				break;
215909c5088eSShreyas Bhatewara 			} else {
216009c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx
216109c5088eSShreyas Bhatewara 								= vector++;
216209c5088eSShreyas Bhatewara 			}
216309c5088eSShreyas Bhatewara 		}
216409c5088eSShreyas Bhatewara 		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
216509c5088eSShreyas Bhatewara 			vector = 0;
216609c5088eSShreyas Bhatewara 
216709c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
216809c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
216909c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
217009c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
217109c5088eSShreyas Bhatewara 			else
217209c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
217309c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
217409c5088eSShreyas Bhatewara 			err = request_irq(intr->msix_entries[vector].vector,
217509c5088eSShreyas Bhatewara 					  vmxnet3_msix_rx, 0,
217609c5088eSShreyas Bhatewara 					  adapter->rx_queue[i].name,
217709c5088eSShreyas Bhatewara 					  &(adapter->rx_queue[i]));
217809c5088eSShreyas Bhatewara 			if (err) {
2179204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
2180204a6e65SStephen Hemminger 					   "Failed to request irq for MSIX, "
2181204a6e65SStephen Hemminger 					   "%s, error %d\n",
218209c5088eSShreyas Bhatewara 					   adapter->rx_queue[i].name, err);
218309c5088eSShreyas Bhatewara 				return err;
218409c5088eSShreyas Bhatewara 			}
218509c5088eSShreyas Bhatewara 
218609c5088eSShreyas Bhatewara 			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
218709c5088eSShreyas Bhatewara 		}
218809c5088eSShreyas Bhatewara 
218909c5088eSShreyas Bhatewara 		sprintf(intr->event_msi_vector_name, "%s-event-%d",
219009c5088eSShreyas Bhatewara 			adapter->netdev->name, vector);
219109c5088eSShreyas Bhatewara 		err = request_irq(intr->msix_entries[vector].vector,
219209c5088eSShreyas Bhatewara 				  vmxnet3_msix_event, 0,
219309c5088eSShreyas Bhatewara 				  intr->event_msi_vector_name, adapter->netdev);
219409c5088eSShreyas Bhatewara 		intr->event_intr_idx = vector;
219509c5088eSShreyas Bhatewara 
219609c5088eSShreyas Bhatewara 	} else if (intr->type == VMXNET3_IT_MSI) {
219709c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
2198d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2199d1a890faSShreyas Bhatewara 				  adapter->netdev->name, adapter->netdev);
220009c5088eSShreyas Bhatewara 	} else {
2201115924b6SShreyas Bhatewara #endif
220209c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
2203d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2204d1a890faSShreyas Bhatewara 				  IRQF_SHARED, adapter->netdev->name,
2205d1a890faSShreyas Bhatewara 				  adapter->netdev);
220609c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
220709c5088eSShreyas Bhatewara 	}
220809c5088eSShreyas Bhatewara #endif
220909c5088eSShreyas Bhatewara 	intr->num_intrs = vector + 1;
221009c5088eSShreyas Bhatewara 	if (err) {
2211204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2212204a6e65SStephen Hemminger 			   "Failed to request irq (intr type:%d), error %d\n",
2213204a6e65SStephen Hemminger 			   intr->type, err);
221409c5088eSShreyas Bhatewara 	} else {
221509c5088eSShreyas Bhatewara 		/* Number of rx queues will not change after this */
221609c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
221709c5088eSShreyas Bhatewara 			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
221809c5088eSShreyas Bhatewara 			rq->qid = i;
221909c5088eSShreyas Bhatewara 			rq->qid2 = i + adapter->num_rx_queues;
222050a5ce3eSShrikrishna Khare 			rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2221d1a890faSShreyas Bhatewara 		}
2222d1a890faSShreyas Bhatewara 
2223d1a890faSShreyas Bhatewara 		/* init our intr settings */
222409c5088eSShreyas Bhatewara 		for (i = 0; i < intr->num_intrs; i++)
222509c5088eSShreyas Bhatewara 			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
222609c5088eSShreyas Bhatewara 		if (adapter->intr.type != VMXNET3_IT_MSIX) {
2227d1a890faSShreyas Bhatewara 			adapter->intr.event_intr_idx = 0;
222809c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++)
222909c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx = 0;
223009c5088eSShreyas Bhatewara 			adapter->rx_queue[0].comp_ring.intr_idx = 0;
223109c5088eSShreyas Bhatewara 		}
2232d1a890faSShreyas Bhatewara 
2233204a6e65SStephen Hemminger 		netdev_info(adapter->netdev,
2234204a6e65SStephen Hemminger 			    "intr type %u, mode %u, %u vectors allocated\n",
2235204a6e65SStephen Hemminger 			    intr->type, intr->mask_mode, intr->num_intrs);
2236d1a890faSShreyas Bhatewara 	}
2237d1a890faSShreyas Bhatewara 
2238d1a890faSShreyas Bhatewara 	return err;
2239d1a890faSShreyas Bhatewara }
2240d1a890faSShreyas Bhatewara 
2241d1a890faSShreyas Bhatewara 
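/*
 * Release the IRQs requested by vmxnet3_request_irqs(), walking the MSI-X
 * vectors in the same tx/rx/event order in which they were assigned.
 */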
2242d1a890faSShreyas Bhatewara static void
2243d1a890faSShreyas Bhatewara vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2244d1a890faSShreyas Bhatewara {
224509c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
224609c5088eSShreyas Bhatewara 	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2247d1a890faSShreyas Bhatewara 
224809c5088eSShreyas Bhatewara 	switch (intr->type) {
22498f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
2250d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSIX:
2251d1a890faSShreyas Bhatewara 	{
225209c5088eSShreyas Bhatewara 		int i, vector = 0;
2253d1a890faSShreyas Bhatewara 
225409c5088eSShreyas Bhatewara 		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
225509c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++) {
225609c5088eSShreyas Bhatewara 				free_irq(intr->msix_entries[vector++].vector,
225709c5088eSShreyas Bhatewara 					 &(adapter->tx_queue[i]));
225809c5088eSShreyas Bhatewara 				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
225909c5088eSShreyas Bhatewara 					break;
226009c5088eSShreyas Bhatewara 			}
226109c5088eSShreyas Bhatewara 		}
226209c5088eSShreyas Bhatewara 
226309c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
226409c5088eSShreyas Bhatewara 			free_irq(intr->msix_entries[vector++].vector,
226509c5088eSShreyas Bhatewara 				 &(adapter->rx_queue[i]));
226609c5088eSShreyas Bhatewara 		}
226709c5088eSShreyas Bhatewara 
226809c5088eSShreyas Bhatewara 		free_irq(intr->msix_entries[vector].vector,
2269d1a890faSShreyas Bhatewara 			 adapter->netdev);
227009c5088eSShreyas Bhatewara 		BUG_ON(vector >= intr->num_intrs);
2271d1a890faSShreyas Bhatewara 		break;
2272d1a890faSShreyas Bhatewara 	}
22738f7e524cSRandy Dunlap #endif
2274d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSI:
2275d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
2276d1a890faSShreyas Bhatewara 		break;
2277d1a890faSShreyas Bhatewara 	case VMXNET3_IT_INTX:
2278d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
2279d1a890faSShreyas Bhatewara 		break;
2280d1a890faSShreyas Bhatewara 	default:
2281c068e777SSasha Levin 		BUG();
2282d1a890faSShreyas Bhatewara 	}
2283d1a890faSShreyas Bhatewara }
2284d1a890faSShreyas Bhatewara 
2285d1a890faSShreyas Bhatewara 
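/*
 * Rebuild the device VLAN filter table from the driver's active_vlans
 * bitmap.  Entry 0 is always set so that untagged packets are accepted.
 */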
2286d1a890faSShreyas Bhatewara static void
2287d1a890faSShreyas Bhatewara vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2288d1a890faSShreyas Bhatewara {
2289d1a890faSShreyas Bhatewara 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
229072e85c45SJesse Gross 	u16 vid;
2291d1a890faSShreyas Bhatewara 
229272e85c45SJesse Gross 	/* allow untagged pkts */
2293d1a890faSShreyas Bhatewara 	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
229472e85c45SJesse Gross 
229572e85c45SJesse Gross 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
229672e85c45SJesse Gross 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2297d1a890faSShreyas Bhatewara }
2298d1a890faSShreyas Bhatewara 
2299d1a890faSShreyas Bhatewara 
23008e586137SJiri Pirko static int
230180d5c368SPatrick McHardy vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2302d1a890faSShreyas Bhatewara {
2303d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2304f6957f88SJesse Gross 
2305f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
2306d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
230783d0feffSShreyas Bhatewara 		unsigned long flags;
2308d1a890faSShreyas Bhatewara 
2309d1a890faSShreyas Bhatewara 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
231083d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
2311d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2312d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
231383d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2314f6957f88SJesse Gross 	}
231572e85c45SJesse Gross 
231672e85c45SJesse Gross 	set_bit(vid, adapter->active_vlans);
23178e586137SJiri Pirko 
23188e586137SJiri Pirko 	return 0;
2319d1a890faSShreyas Bhatewara }
2320d1a890faSShreyas Bhatewara 
2321d1a890faSShreyas Bhatewara 
23228e586137SJiri Pirko static int
232380d5c368SPatrick McHardy vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2324d1a890faSShreyas Bhatewara {
2325d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2326f6957f88SJesse Gross 
2327f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
2328d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
232983d0feffSShreyas Bhatewara 		unsigned long flags;
2330d1a890faSShreyas Bhatewara 
2331d1a890faSShreyas Bhatewara 		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
233283d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
2333d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2334d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
233583d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2336f6957f88SJesse Gross 	}
233772e85c45SJesse Gross 
233872e85c45SJesse Gross 	clear_bit(vid, adapter->active_vlans);
23398e586137SJiri Pirko 
23408e586137SJiri Pirko 	return 0;
2341d1a890faSShreyas Bhatewara }
2342d1a890faSShreyas Bhatewara 
2343d1a890faSShreyas Bhatewara 
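/*
 * Copy the netdev multicast list into a flat array of MAC addresses for the
 * device.  Returns NULL if the list would not fit in the 16-bit mfTableLen
 * field or if the (atomic) allocation fails.
 */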
2344d1a890faSShreyas Bhatewara static u8 *
2345d1a890faSShreyas Bhatewara vmxnet3_copy_mc(struct net_device *netdev)
2346d1a890faSShreyas Bhatewara {
2347d1a890faSShreyas Bhatewara 	u8 *buf = NULL;
23484cd24eafSJiri Pirko 	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2349d1a890faSShreyas Bhatewara 
2350d1a890faSShreyas Bhatewara 	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2351d1a890faSShreyas Bhatewara 	if (sz <= 0xffff) {
2352d1a890faSShreyas Bhatewara 		/* We may be called with BH disabled */
2353d1a890faSShreyas Bhatewara 		buf = kmalloc(sz, GFP_ATOMIC);
2354d1a890faSShreyas Bhatewara 		if (buf) {
235522bedad3SJiri Pirko 			struct netdev_hw_addr *ha;
2356567ec874SJiri Pirko 			int i = 0;
2357d1a890faSShreyas Bhatewara 
235822bedad3SJiri Pirko 			netdev_for_each_mc_addr(ha, netdev)
235922bedad3SJiri Pirko 				memcpy(buf + i++ * ETH_ALEN, ha->addr,
2360d1a890faSShreyas Bhatewara 				       ETH_ALEN);
2361d1a890faSShreyas Bhatewara 		}
2362d1a890faSShreyas Bhatewara 	}
2363d1a890faSShreyas Bhatewara 	return buf;
2364d1a890faSShreyas Bhatewara }
2365d1a890faSShreyas Bhatewara 
2366d1a890faSShreyas Bhatewara 
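/*
 * Program the device rx filter mode (unicast, broadcast, multicast,
 * all-multi, promiscuous) from the netdev flags.  The multicast list, if
 * any, is DMA-mapped for the device; if copying or mapping it fails, the
 * driver falls back to ALL_MULTI.
 */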
2367d1a890faSShreyas Bhatewara static void
2368d1a890faSShreyas Bhatewara vmxnet3_set_mc(struct net_device *netdev)
2369d1a890faSShreyas Bhatewara {
2370d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
237183d0feffSShreyas Bhatewara 	unsigned long flags;
2372d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxFilterConf *rxConf =
2373d1a890faSShreyas Bhatewara 					&adapter->shared->devRead.rxFilterConf;
2374d1a890faSShreyas Bhatewara 	u8 *new_table = NULL;
2375b0eb57cbSAndy King 	dma_addr_t new_table_pa = 0;
2376fb5c6cfaSAlexey Khoroshilov 	bool new_table_pa_valid = false;
2377d1a890faSShreyas Bhatewara 	u32 new_mode = VMXNET3_RXM_UCAST;
2378d1a890faSShreyas Bhatewara 
237972e85c45SJesse Gross 	if (netdev->flags & IFF_PROMISC) {
238072e85c45SJesse Gross 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
238172e85c45SJesse Gross 		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
238272e85c45SJesse Gross 
2383d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_PROMISC;
238472e85c45SJesse Gross 	} else {
238572e85c45SJesse Gross 		vmxnet3_restore_vlan(adapter);
238672e85c45SJesse Gross 	}
2387d1a890faSShreyas Bhatewara 
2388d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_BROADCAST)
2389d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_BCAST;
2390d1a890faSShreyas Bhatewara 
2391d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_ALLMULTI)
2392d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_ALL_MULTI;
2393d1a890faSShreyas Bhatewara 	else
23944cd24eafSJiri Pirko 		if (!netdev_mc_empty(netdev)) {
2395d1a890faSShreyas Bhatewara 			new_table = vmxnet3_copy_mc(netdev);
2396d1a890faSShreyas Bhatewara 			if (new_table) {
2397d37d5ec8SShrikrishna Khare 				size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2398d37d5ec8SShrikrishna Khare 
2399d37d5ec8SShrikrishna Khare 				rxConf->mfTableLen = cpu_to_le16(sz);
2400b0eb57cbSAndy King 				new_table_pa = dma_map_single(
2401b0eb57cbSAndy King 							&adapter->pdev->dev,
2402b0eb57cbSAndy King 							new_table,
2403d37d5ec8SShrikrishna Khare 							sz,
2404b0eb57cbSAndy King 							PCI_DMA_TODEVICE);
24055738a09dSAlexey Khoroshilov 				if (!dma_mapping_error(&adapter->pdev->dev,
24065738a09dSAlexey Khoroshilov 						       new_table_pa)) {
24074ad9a64fSAndy King 					new_mode |= VMXNET3_RXM_MCAST;
2408fb5c6cfaSAlexey Khoroshilov 					new_table_pa_valid = true;
2409fb5c6cfaSAlexey Khoroshilov 					rxConf->mfTablePA = cpu_to_le64(
2410fb5c6cfaSAlexey Khoroshilov 								new_table_pa);
2411fb5c6cfaSAlexey Khoroshilov 				}
2412fb5c6cfaSAlexey Khoroshilov 			}
2413fb5c6cfaSAlexey Khoroshilov 			if (!new_table_pa_valid) {
24144ad9a64fSAndy King 				netdev_info(netdev,
24154ad9a64fSAndy King 					    "failed to copy mcast list, setting ALL_MULTI\n");
2416d1a890faSShreyas Bhatewara 				new_mode |= VMXNET3_RXM_ALL_MULTI;
2417d1a890faSShreyas Bhatewara 			}
2418d1a890faSShreyas Bhatewara 		}
2419d1a890faSShreyas Bhatewara 
2420d1a890faSShreyas Bhatewara 	if (!(new_mode & VMXNET3_RXM_MCAST)) {
2421d1a890faSShreyas Bhatewara 		rxConf->mfTableLen = 0;
2422d1a890faSShreyas Bhatewara 		rxConf->mfTablePA = 0;
2423d1a890faSShreyas Bhatewara 	}
2424d1a890faSShreyas Bhatewara 
242583d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2426d1a890faSShreyas Bhatewara 	if (new_mode != rxConf->rxMode) {
2427115924b6SShreyas Bhatewara 		rxConf->rxMode = cpu_to_le32(new_mode);
2428d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2429d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_RX_MODE);
243072e85c45SJesse Gross 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
243172e85c45SJesse Gross 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2432d1a890faSShreyas Bhatewara 	}
2433d1a890faSShreyas Bhatewara 
2434d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2435d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
243683d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2437d1a890faSShreyas Bhatewara 
2438fb5c6cfaSAlexey Khoroshilov 	if (new_table_pa_valid)
2439b0eb57cbSAndy King 		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2440b0eb57cbSAndy King 				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2441d1a890faSShreyas Bhatewara 	kfree(new_table);
2442d1a890faSShreyas Bhatewara }
2443d1a890faSShreyas Bhatewara 
244409c5088eSShreyas Bhatewara void
244509c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
244609c5088eSShreyas Bhatewara {
244709c5088eSShreyas Bhatewara 	int i;
244809c5088eSShreyas Bhatewara 
244909c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
245009c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
245109c5088eSShreyas Bhatewara }
245209c5088eSShreyas Bhatewara 
2453d1a890faSShreyas Bhatewara 
2454d1a890faSShreyas Bhatewara /*
2455d1a890faSShreyas Bhatewara  *   Set up driver_shared based on settings in adapter.
2456d1a890faSShreyas Bhatewara  */
2457d1a890faSShreyas Bhatewara 
2458d1a890faSShreyas Bhatewara static void
2459d1a890faSShreyas Bhatewara vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2460d1a890faSShreyas Bhatewara {
2461d1a890faSShreyas Bhatewara 	struct Vmxnet3_DriverShared *shared = adapter->shared;
2462d1a890faSShreyas Bhatewara 	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2463*39f9895aSRonak Doshi 	struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2464d1a890faSShreyas Bhatewara 	struct Vmxnet3_TxQueueConf *tqc;
2465d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxQueueConf *rqc;
2466d1a890faSShreyas Bhatewara 	int i;
2467d1a890faSShreyas Bhatewara 
2468d1a890faSShreyas Bhatewara 	memset(shared, 0, sizeof(*shared));
2469d1a890faSShreyas Bhatewara 
2470d1a890faSShreyas Bhatewara 	/* driver settings */
2471115924b6SShreyas Bhatewara 	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2472115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.version = cpu_to_le32(
2473115924b6SShreyas Bhatewara 						VMXNET3_DRIVER_VERSION_NUM);
2474d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2475d1a890faSShreyas Bhatewara 				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2476d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2477115924b6SShreyas Bhatewara 	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2478115924b6SShreyas Bhatewara 				*((u32 *)&devRead->misc.driverInfo.gos));
2479115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2480115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2481d1a890faSShreyas Bhatewara 
2482b0eb57cbSAndy King 	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2483115924b6SShreyas Bhatewara 	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2484d1a890faSShreyas Bhatewara 
2485d1a890faSShreyas Bhatewara 	/* set up feature flags */
2486a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_RXCSUM)
24873843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2488d1a890faSShreyas Bhatewara 
2489a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_LRO) {
24903843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_LRO;
2491115924b6SShreyas Bhatewara 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2492d1a890faSShreyas Bhatewara 	}
2493f646968fSPatrick McHardy 	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
24943843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2495d1a890faSShreyas Bhatewara 
2496dacce2beSRonak Doshi 	if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2497dacce2beSRonak Doshi 					 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2498dacce2beSRonak Doshi 		devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2499dacce2beSRonak Doshi 
2500115924b6SShreyas Bhatewara 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2501115924b6SShreyas Bhatewara 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2502115924b6SShreyas Bhatewara 	devRead->misc.queueDescLen = cpu_to_le32(
250309c5088eSShreyas Bhatewara 		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
250409c5088eSShreyas Bhatewara 		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2505d1a890faSShreyas Bhatewara 
2506d1a890faSShreyas Bhatewara 	/* tx queue settings */
250709c5088eSShreyas Bhatewara 	devRead->misc.numTxQueues =  adapter->num_tx_queues;
250809c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
250909c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
251009c5088eSShreyas Bhatewara 		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
251109c5088eSShreyas Bhatewara 		tqc = &adapter->tqd_start[i].conf;
251209c5088eSShreyas Bhatewara 		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
251309c5088eSShreyas Bhatewara 		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
251409c5088eSShreyas Bhatewara 		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2515de1da8bcSRonak Doshi 		tqc->ddPA           = cpu_to_le64(~0ULL);
251609c5088eSShreyas Bhatewara 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
251709c5088eSShreyas Bhatewara 		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
25183c8b3efcSShrikrishna Khare 		tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
251909c5088eSShreyas Bhatewara 		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
2520de1da8bcSRonak Doshi 		tqc->ddLen          = cpu_to_le32(0);
252109c5088eSShreyas Bhatewara 		tqc->intrIdx        = tq->comp_ring.intr_idx;
252209c5088eSShreyas Bhatewara 	}
2523d1a890faSShreyas Bhatewara 
2524d1a890faSShreyas Bhatewara 	/* rx queue settings */
252509c5088eSShreyas Bhatewara 	devRead->misc.numRxQueues = adapter->num_rx_queues;
252609c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
252709c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
252809c5088eSShreyas Bhatewara 		rqc = &adapter->rqd_start[i].conf;
252909c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
253009c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
253109c5088eSShreyas Bhatewara 		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
2532de1da8bcSRonak Doshi 		rqc->ddPA            = cpu_to_le64(~0ULL);
253309c5088eSShreyas Bhatewara 		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
253409c5088eSShreyas Bhatewara 		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
253509c5088eSShreyas Bhatewara 		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
2536de1da8bcSRonak Doshi 		rqc->ddLen           = cpu_to_le32(0);
253709c5088eSShreyas Bhatewara 		rqc->intrIdx         = rq->comp_ring.intr_idx;
253850a5ce3eSShrikrishna Khare 		if (VMXNET3_VERSION_GE_3(adapter)) {
253950a5ce3eSShrikrishna Khare 			rqc->rxDataRingBasePA =
254050a5ce3eSShrikrishna Khare 				cpu_to_le64(rq->data_ring.basePA);
254150a5ce3eSShrikrishna Khare 			rqc->rxDataRingDescSize =
254250a5ce3eSShrikrishna Khare 				cpu_to_le16(rq->data_ring.desc_size);
254350a5ce3eSShrikrishna Khare 		}
254409c5088eSShreyas Bhatewara 	}
254509c5088eSShreyas Bhatewara 
254609c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
254709c5088eSShreyas Bhatewara 	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
254809c5088eSShreyas Bhatewara 
254909c5088eSShreyas Bhatewara 	if (adapter->rss) {
255009c5088eSShreyas Bhatewara 		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
255166d35910SStephen Hemminger 
255209c5088eSShreyas Bhatewara 		devRead->misc.uptFeatures |= UPT1_F_RSS;
255309c5088eSShreyas Bhatewara 		devRead->misc.numRxQueues = adapter->num_rx_queues;
255409c5088eSShreyas Bhatewara 		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
255509c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV4 |
255609c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
255709c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV6;
255809c5088eSShreyas Bhatewara 		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
255909c5088eSShreyas Bhatewara 		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
256009c5088eSShreyas Bhatewara 		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
25616bf79cddSEric Dumazet 		netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
256266d35910SStephen Hemminger 
256309c5088eSShreyas Bhatewara 		for (i = 0; i < rssConf->indTableSize; i++)
2564278bc429SBen Hutchings 			rssConf->indTable[i] = ethtool_rxfh_indir_default(
2565278bc429SBen Hutchings 				i, adapter->num_rx_queues);
256609c5088eSShreyas Bhatewara 
256709c5088eSShreyas Bhatewara 		devRead->rssConfDesc.confVer = 1;
2568b0eb57cbSAndy King 		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2569b0eb57cbSAndy King 		devRead->rssConfDesc.confPA =
2570b0eb57cbSAndy King 			cpu_to_le64(adapter->rss_conf_pa);
257109c5088eSShreyas Bhatewara 	}
257209c5088eSShreyas Bhatewara 
257309c5088eSShreyas Bhatewara #endif /* VMXNET3_RSS */
2574d1a890faSShreyas Bhatewara 
2575d1a890faSShreyas Bhatewara 	/* intr settings */
2576*39f9895aSRonak Doshi 	if (!VMXNET3_VERSION_GE_6(adapter) ||
2577*39f9895aSRonak Doshi 	    !adapter->queuesExtEnabled) {
2578d1a890faSShreyas Bhatewara 		devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2579d1a890faSShreyas Bhatewara 					     VMXNET3_IMM_AUTO;
2580d1a890faSShreyas Bhatewara 		devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2581d1a890faSShreyas Bhatewara 		for (i = 0; i < adapter->intr.num_intrs; i++)
2582d1a890faSShreyas Bhatewara 			devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2583d1a890faSShreyas Bhatewara 
2584d1a890faSShreyas Bhatewara 		devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
25856929fe8aSRonghua Zang 		devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2586*39f9895aSRonak Doshi 	} else {
2587*39f9895aSRonak Doshi 		devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
2588*39f9895aSRonak Doshi 						   VMXNET3_IMM_AUTO;
2589*39f9895aSRonak Doshi 		devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
2590*39f9895aSRonak Doshi 		for (i = 0; i < adapter->intr.num_intrs; i++)
2591*39f9895aSRonak Doshi 			devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
2592*39f9895aSRonak Doshi 
2593*39f9895aSRonak Doshi 		devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
2594*39f9895aSRonak Doshi 		devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2595*39f9895aSRonak Doshi 	}
2596d1a890faSShreyas Bhatewara 
2597d1a890faSShreyas Bhatewara 	/* rx filter settings */
2598d1a890faSShreyas Bhatewara 	devRead->rxFilterConf.rxMode = 0;
2599d1a890faSShreyas Bhatewara 	vmxnet3_restore_vlan(adapter);
2600f9f25026SShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2601f9f25026SShreyas Bhatewara 
2602d1a890faSShreyas Bhatewara 	/* the rest are already zeroed */
2603d1a890faSShreyas Bhatewara }
2604d1a890faSShreyas Bhatewara 
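/*
 * For version 3 and later devices, point the shared command area at the
 * coalescing configuration and either query the device default or apply
 * the driver's cached coalescing settings.
 */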
26054edef40eSShrikrishna Khare static void
26064edef40eSShrikrishna Khare vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
26074edef40eSShrikrishna Khare {
26084edef40eSShrikrishna Khare 	struct Vmxnet3_DriverShared *shared = adapter->shared;
26094edef40eSShrikrishna Khare 	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
26104edef40eSShrikrishna Khare 	unsigned long flags;
26114edef40eSShrikrishna Khare 
26124edef40eSShrikrishna Khare 	if (!VMXNET3_VERSION_GE_3(adapter))
26134edef40eSShrikrishna Khare 		return;
26144edef40eSShrikrishna Khare 
26154edef40eSShrikrishna Khare 	spin_lock_irqsave(&adapter->cmd_lock, flags);
26164edef40eSShrikrishna Khare 	cmdInfo->varConf.confVer = 1;
26174edef40eSShrikrishna Khare 	cmdInfo->varConf.confLen =
26184edef40eSShrikrishna Khare 		cpu_to_le32(sizeof(*adapter->coal_conf));
26194edef40eSShrikrishna Khare 	cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);
26204edef40eSShrikrishna Khare 
26214edef40eSShrikrishna Khare 	if (adapter->default_coal_mode) {
26224edef40eSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
26234edef40eSShrikrishna Khare 				       VMXNET3_CMD_GET_COALESCE);
26244edef40eSShrikrishna Khare 	} else {
26254edef40eSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
26264edef40eSShrikrishna Khare 				       VMXNET3_CMD_SET_COALESCE);
26274edef40eSShrikrishna Khare 	}
26284edef40eSShrikrishna Khare 
26294edef40eSShrikrishna Khare 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
26304edef40eSShrikrishna Khare }
2631d1a890faSShreyas Bhatewara 
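/*
 * For version 4 and later devices, either query the default RSS hash
 * fields or request the configured ones, then cache what the device
 * actually applied.
 */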
2632d3a8a9e5SRonak Doshi static void
2633d3a8a9e5SRonak Doshi vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2634d3a8a9e5SRonak Doshi {
2635d3a8a9e5SRonak Doshi 	struct Vmxnet3_DriverShared *shared = adapter->shared;
2636d3a8a9e5SRonak Doshi 	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2637d3a8a9e5SRonak Doshi 	unsigned long flags;
2638d3a8a9e5SRonak Doshi 
2639d3a8a9e5SRonak Doshi 	if (!VMXNET3_VERSION_GE_4(adapter))
2640d3a8a9e5SRonak Doshi 		return;
2641d3a8a9e5SRonak Doshi 
2642d3a8a9e5SRonak Doshi 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2643d3a8a9e5SRonak Doshi 
2644d3a8a9e5SRonak Doshi 	if (adapter->default_rss_fields) {
2645d3a8a9e5SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2646d3a8a9e5SRonak Doshi 				       VMXNET3_CMD_GET_RSS_FIELDS);
2647d3a8a9e5SRonak Doshi 		adapter->rss_fields =
2648d3a8a9e5SRonak Doshi 			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2649d3a8a9e5SRonak Doshi 	} else {
2650d3a8a9e5SRonak Doshi 		cmdInfo->setRssFields = adapter->rss_fields;
2651d3a8a9e5SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2652d3a8a9e5SRonak Doshi 				       VMXNET3_CMD_SET_RSS_FIELDS);
2653d3a8a9e5SRonak Doshi 		/* Not all requested RSS may get applied, so get and
2654d3a8a9e5SRonak Doshi 		 * cache what was actually applied.
2655d3a8a9e5SRonak Doshi 		 */
2656d3a8a9e5SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2657d3a8a9e5SRonak Doshi 				       VMXNET3_CMD_GET_RSS_FIELDS);
2658d3a8a9e5SRonak Doshi 		adapter->rss_fields =
2659d3a8a9e5SRonak Doshi 			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2660d3a8a9e5SRonak Doshi 	}
2661d3a8a9e5SRonak Doshi 
2662d3a8a9e5SRonak Doshi 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2663d3a8a9e5SRonak Doshi }
2664d3a8a9e5SRonak Doshi 
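/*
 * Bring the device up: initialize the tx/rx queues, request IRQs, fill in
 * the shared area, issue ACTIVATE_DEV, prime the rx producer registers,
 * apply the rx filters, and finally enable NAPI and interrupts.
 */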
2665d1a890faSShreyas Bhatewara int
2666d1a890faSShreyas Bhatewara vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2667d1a890faSShreyas Bhatewara {
266809c5088eSShreyas Bhatewara 	int err, i;
2669d1a890faSShreyas Bhatewara 	u32 ret;
267083d0feffSShreyas Bhatewara 	unsigned long flags;
2671d1a890faSShreyas Bhatewara 
2672fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
267309c5088eSShreyas Bhatewara 		" ring sizes %u %u %u\n", adapter->netdev->name,
267409c5088eSShreyas Bhatewara 		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
267509c5088eSShreyas Bhatewara 		adapter->tx_queue[0].tx_ring.size,
267609c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[0].size,
267709c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[1].size);
2678d1a890faSShreyas Bhatewara 
267909c5088eSShreyas Bhatewara 	vmxnet3_tq_init_all(adapter);
268009c5088eSShreyas Bhatewara 	err = vmxnet3_rq_init_all(adapter);
2681d1a890faSShreyas Bhatewara 	if (err) {
2682204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2683204a6e65SStephen Hemminger 			   "Failed to init rx queue error %d\n", err);
2684d1a890faSShreyas Bhatewara 		goto rq_err;
2685d1a890faSShreyas Bhatewara 	}
2686d1a890faSShreyas Bhatewara 
2687d1a890faSShreyas Bhatewara 	err = vmxnet3_request_irqs(adapter);
2688d1a890faSShreyas Bhatewara 	if (err) {
2689204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2690204a6e65SStephen Hemminger 			   "Failed to setup irq for error %d\n", err);
2691d1a890faSShreyas Bhatewara 		goto irq_err;
2692d1a890faSShreyas Bhatewara 	}
2693d1a890faSShreyas Bhatewara 
2694d1a890faSShreyas Bhatewara 	vmxnet3_setup_driver_shared(adapter);
2695d1a890faSShreyas Bhatewara 
2696115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2697115924b6SShreyas Bhatewara 			       adapter->shared_pa));
2698115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2699115924b6SShreyas Bhatewara 			       adapter->shared_pa));
270083d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2701d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2702d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_ACTIVATE_DEV);
2703d1a890faSShreyas Bhatewara 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
270483d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2705d1a890faSShreyas Bhatewara 
2706d1a890faSShreyas Bhatewara 	if (ret != 0) {
2707204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2708204a6e65SStephen Hemminger 			   "Failed to activate dev: error %u\n", ret);
2709d1a890faSShreyas Bhatewara 		err = -EINVAL;
2710d1a890faSShreyas Bhatewara 		goto activate_err;
2711d1a890faSShreyas Bhatewara 	}
271209c5088eSShreyas Bhatewara 
27134edef40eSShrikrishna Khare 	vmxnet3_init_coalesce(adapter);
2714d3a8a9e5SRonak Doshi 	vmxnet3_init_rssfields(adapter);
27154edef40eSShrikrishna Khare 
271609c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
271709c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
271809c5088eSShreyas Bhatewara 				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
271909c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[0].next2fill);
272009c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
272109c5088eSShreyas Bhatewara 				(i * VMXNET3_REG_ALIGN)),
272209c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[1].next2fill);
272309c5088eSShreyas Bhatewara 	}
2724d1a890faSShreyas Bhatewara 
2725d1a890faSShreyas Bhatewara 	/* Apply the rx filter settings last. */
2726d1a890faSShreyas Bhatewara 	vmxnet3_set_mc(adapter->netdev);
2727d1a890faSShreyas Bhatewara 
2728d1a890faSShreyas Bhatewara 	/*
2729d1a890faSShreyas Bhatewara 	 * Check link state when first activating device. It will start the
2730d1a890faSShreyas Bhatewara 	 * tx queue if the link is up.
2731d1a890faSShreyas Bhatewara 	 */
27324a1745fcSShreyas Bhatewara 	vmxnet3_check_link(adapter, true);
2733*39f9895aSRonak Doshi 	netif_tx_wake_all_queues(adapter->netdev);
273409c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
273509c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
2736d1a890faSShreyas Bhatewara 	vmxnet3_enable_all_intrs(adapter);
2737d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2738d1a890faSShreyas Bhatewara 	return 0;
2739d1a890faSShreyas Bhatewara 
2740d1a890faSShreyas Bhatewara activate_err:
2741d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2742d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2743d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
2744d1a890faSShreyas Bhatewara irq_err:
2745d1a890faSShreyas Bhatewara rq_err:
2746d1a890faSShreyas Bhatewara 	/* free up buffers we allocated */
274709c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
2748d1a890faSShreyas Bhatewara 	return err;
2749d1a890faSShreyas Bhatewara }
2750d1a890faSShreyas Bhatewara 
2751d1a890faSShreyas Bhatewara 
2752d1a890faSShreyas Bhatewara void
2753d1a890faSShreyas Bhatewara vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2754d1a890faSShreyas Bhatewara {
275583d0feffSShreyas Bhatewara 	unsigned long flags;
275683d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2757d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
275883d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2759d1a890faSShreyas Bhatewara }
2760d1a890faSShreyas Bhatewara 
2761d1a890faSShreyas Bhatewara 
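/*
 * Quiesce the device if it is not already quiesced: issue QUIESCE_DEV,
 * disable interrupts and NAPI, stop the tx queues, and release queue
 * buffers and IRQs.
 */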
2762d1a890faSShreyas Bhatewara int
2763d1a890faSShreyas Bhatewara vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2764d1a890faSShreyas Bhatewara {
276509c5088eSShreyas Bhatewara 	int i;
276683d0feffSShreyas Bhatewara 	unsigned long flags;
2767d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2768d1a890faSShreyas Bhatewara 		return 0;
2769d1a890faSShreyas Bhatewara 
2770d1a890faSShreyas Bhatewara 
277183d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2772d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2773d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_QUIESCE_DEV);
277483d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2775d1a890faSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
2776d1a890faSShreyas Bhatewara 
277709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
277809c5088eSShreyas Bhatewara 		napi_disable(&adapter->rx_queue[i].napi);
2779d1a890faSShreyas Bhatewara 	netif_tx_disable(adapter->netdev);
2780d1a890faSShreyas Bhatewara 	adapter->link_speed = 0;
2781d1a890faSShreyas Bhatewara 	netif_carrier_off(adapter->netdev);
2782d1a890faSShreyas Bhatewara 
278309c5088eSShreyas Bhatewara 	vmxnet3_tq_cleanup_all(adapter);
278409c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
2785d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
2786d1a890faSShreyas Bhatewara 	return 0;
2787d1a890faSShreyas Bhatewara }
2788d1a890faSShreyas Bhatewara 
2789d1a890faSShreyas Bhatewara 
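/*
 * Write the MAC address to the device: the low four bytes go to the MACL
 * register and the remaining two bytes to MACH.
 */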
2790d1a890faSShreyas Bhatewara static void
2791d1a890faSShreyas Bhatewara vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2792d1a890faSShreyas Bhatewara {
2793d1a890faSShreyas Bhatewara 	u32 tmp;
2794d1a890faSShreyas Bhatewara 
2795d1a890faSShreyas Bhatewara 	tmp = *(u32 *)mac;
2796d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2797d1a890faSShreyas Bhatewara 
2798d1a890faSShreyas Bhatewara 	tmp = (mac[5] << 8) | mac[4];
2799d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2800d1a890faSShreyas Bhatewara }
2801d1a890faSShreyas Bhatewara 
2802d1a890faSShreyas Bhatewara 
2803d1a890faSShreyas Bhatewara static int
2804d1a890faSShreyas Bhatewara vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2805d1a890faSShreyas Bhatewara {
2806d1a890faSShreyas Bhatewara 	struct sockaddr *addr = p;
2807d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2808d1a890faSShreyas Bhatewara 
2809d1a890faSShreyas Bhatewara 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2810d1a890faSShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, addr->sa_data);
2811d1a890faSShreyas Bhatewara 
2812d1a890faSShreyas Bhatewara 	return 0;
2813d1a890faSShreyas Bhatewara }
2814d1a890faSShreyas Bhatewara 
2815d1a890faSShreyas Bhatewara 
2816d1a890faSShreyas Bhatewara /* ==================== initialization and cleanup routines ============ */
2817d1a890faSShreyas Bhatewara 
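/*
 * Enable the PCI device, reserve its first two regions, and ioremap BAR0
 * and BAR1, which back the BAR0/BAR1 register access macros used throughout
 * the driver.
 */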
2818d1a890faSShreyas Bhatewara static int
281961aeeceaShpreg@vmware.com vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
2820d1a890faSShreyas Bhatewara {
2821d1a890faSShreyas Bhatewara 	int err;
2822d1a890faSShreyas Bhatewara 	unsigned long mmio_start, mmio_len;
2823d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = adapter->pdev;
2824d1a890faSShreyas Bhatewara 
2825d1a890faSShreyas Bhatewara 	err = pci_enable_device(pdev);
2826d1a890faSShreyas Bhatewara 	if (err) {
2827204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2828d1a890faSShreyas Bhatewara 		return err;
2829d1a890faSShreyas Bhatewara 	}
2830d1a890faSShreyas Bhatewara 
2831d1a890faSShreyas Bhatewara 	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2832d1a890faSShreyas Bhatewara 					   vmxnet3_driver_name);
2833d1a890faSShreyas Bhatewara 	if (err) {
2834204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
2835204a6e65SStephen Hemminger 			"Failed to request region for adapter: error %d\n", err);
283661aeeceaShpreg@vmware.com 		goto err_enable_device;
2837d1a890faSShreyas Bhatewara 	}
2838d1a890faSShreyas Bhatewara 
2839d1a890faSShreyas Bhatewara 	pci_set_master(pdev);
2840d1a890faSShreyas Bhatewara 
2841d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 0);
2842d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 0);
2843d1a890faSShreyas Bhatewara 	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2844d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr0) {
2845204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar0\n");
2846d1a890faSShreyas Bhatewara 		err = -EIO;
2847d1a890faSShreyas Bhatewara 		goto err_ioremap;
2848d1a890faSShreyas Bhatewara 	}
2849d1a890faSShreyas Bhatewara 
2850d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 1);
2851d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 1);
2852d1a890faSShreyas Bhatewara 	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2853d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr1) {
2854204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar1\n");
2855d1a890faSShreyas Bhatewara 		err = -EIO;
2856d1a890faSShreyas Bhatewara 		goto err_bar1;
2857d1a890faSShreyas Bhatewara 	}
2858d1a890faSShreyas Bhatewara 	return 0;
2859d1a890faSShreyas Bhatewara 
2860d1a890faSShreyas Bhatewara err_bar1:
2861d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
2862d1a890faSShreyas Bhatewara err_ioremap:
2863d1a890faSShreyas Bhatewara 	pci_release_selected_regions(pdev, (1 << 2) - 1);
286461aeeceaShpreg@vmware.com err_enable_device:
2865d1a890faSShreyas Bhatewara 	pci_disable_device(pdev);
2866d1a890faSShreyas Bhatewara 	return err;
2867d1a890faSShreyas Bhatewara }
2868d1a890faSShreyas Bhatewara 
2869d1a890faSShreyas Bhatewara 
2870d1a890faSShreyas Bhatewara static void
2871d1a890faSShreyas Bhatewara vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2872d1a890faSShreyas Bhatewara {
2873d1a890faSShreyas Bhatewara 	BUG_ON(!adapter->pdev);
2874d1a890faSShreyas Bhatewara 
2875d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
2876d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr1);
2877d1a890faSShreyas Bhatewara 	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2878d1a890faSShreyas Bhatewara 	pci_disable_device(adapter->pdev);
2879d1a890faSShreyas Bhatewara }
2880d1a890faSShreyas Bhatewara 
2881d1a890faSShreyas Bhatewara 
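/*
 * Derive skb_buf_size and rx_buf_per_pkt from the MTU, round the rx ring
 * sizes to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN, and size
 * the completion ring to cover both rx rings.
 */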
2882d1a890faSShreyas Bhatewara static void
2883d1a890faSShreyas Bhatewara vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2884d1a890faSShreyas Bhatewara {
288509c5088eSShreyas Bhatewara 	size_t sz, i, ring0_size, ring1_size, comp_size;
2886d1a890faSShreyas Bhatewara 	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2887d1a890faSShreyas Bhatewara 				    VMXNET3_MAX_ETH_HDR_SIZE) {
2888d1a890faSShreyas Bhatewara 		adapter->skb_buf_size = adapter->netdev->mtu +
2889d1a890faSShreyas Bhatewara 					VMXNET3_MAX_ETH_HDR_SIZE;
2890d1a890faSShreyas Bhatewara 		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2891d1a890faSShreyas Bhatewara 			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2892d1a890faSShreyas Bhatewara 
2893d1a890faSShreyas Bhatewara 		adapter->rx_buf_per_pkt = 1;
2894d1a890faSShreyas Bhatewara 	} else {
2895d1a890faSShreyas Bhatewara 		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2896d1a890faSShreyas Bhatewara 		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2897d1a890faSShreyas Bhatewara 					    VMXNET3_MAX_ETH_HDR_SIZE;
2898d1a890faSShreyas Bhatewara 		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2899d1a890faSShreyas Bhatewara 	}
2900d1a890faSShreyas Bhatewara 
2901d1a890faSShreyas Bhatewara 	/*
2902d1a890faSShreyas Bhatewara 	 * for simplicity, force the ring0 size to be a multiple of
2903d1a890faSShreyas Bhatewara 	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2904d1a890faSShreyas Bhatewara 	 */
2905d1a890faSShreyas Bhatewara 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
290609c5088eSShreyas Bhatewara 	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
290709c5088eSShreyas Bhatewara 	ring0_size = (ring0_size + sz - 1) / sz * sz;
2908a53255d3SShreyas Bhatewara 	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
290909c5088eSShreyas Bhatewara 			   sz * sz);
291009c5088eSShreyas Bhatewara 	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
291153831aa1SShrikrishna Khare 	ring1_size = (ring1_size + sz - 1) / sz * sz;
291253831aa1SShrikrishna Khare 	ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
291353831aa1SShrikrishna Khare 			   sz * sz);
291409c5088eSShreyas Bhatewara 	comp_size = ring0_size + ring1_size;
291509c5088eSShreyas Bhatewara 
291609c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
29175e264e2bSColin Ian King 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
29185e264e2bSColin Ian King 
291909c5088eSShreyas Bhatewara 		rq->rx_ring[0].size = ring0_size;
292009c5088eSShreyas Bhatewara 		rq->rx_ring[1].size = ring1_size;
292109c5088eSShreyas Bhatewara 		rq->comp_ring.size = comp_size;
292209c5088eSShreyas Bhatewara 	}
2923d1a890faSShreyas Bhatewara }
2924d1a890faSShreyas Bhatewara 
2925d1a890faSShreyas Bhatewara 
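/*
 * Allocate the tx and rx queue rings.  A tx queue allocation failure is
 * fatal, while rx queue failures past the first simply reduce
 * num_rx_queues.  The rx data ring is only used on version 3 and later
 * devices.
 */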
2926d1a890faSShreyas Bhatewara int
2927d1a890faSShreyas Bhatewara vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
29283c8b3efcSShrikrishna Khare 		      u32 rx_ring_size, u32 rx_ring2_size,
292950a5ce3eSShrikrishna Khare 		      u16 txdata_desc_size, u16 rxdata_desc_size)
2930d1a890faSShreyas Bhatewara {
293109c5088eSShreyas Bhatewara 	int err = 0, i;
2932d1a890faSShreyas Bhatewara 
293309c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
293409c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
293509c5088eSShreyas Bhatewara 		tq->tx_ring.size   = tx_ring_size;
293609c5088eSShreyas Bhatewara 		tq->data_ring.size = tx_ring_size;
293709c5088eSShreyas Bhatewara 		tq->comp_ring.size = tx_ring_size;
29383c8b3efcSShrikrishna Khare 		tq->txdata_desc_size = txdata_desc_size;
293909c5088eSShreyas Bhatewara 		tq->shared = &adapter->tqd_start[i].ctrl;
294009c5088eSShreyas Bhatewara 		tq->stopped = true;
294109c5088eSShreyas Bhatewara 		tq->adapter = adapter;
294209c5088eSShreyas Bhatewara 		tq->qid = i;
294309c5088eSShreyas Bhatewara 		err = vmxnet3_tq_create(tq, adapter);
294409c5088eSShreyas Bhatewara 		/*
294509c5088eSShreyas Bhatewara 		 * Too late to change num_tx_queues. We cannot make do with
294609c5088eSShreyas Bhatewara 		 * fewer queues than we asked for
294709c5088eSShreyas Bhatewara 		 */
2948d1a890faSShreyas Bhatewara 		if (err)
294909c5088eSShreyas Bhatewara 			goto queue_err;
295009c5088eSShreyas Bhatewara 	}
2951d1a890faSShreyas Bhatewara 
295209c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
295309c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2954d1a890faSShreyas Bhatewara 	vmxnet3_adjust_rx_ring_size(adapter);
295550a5ce3eSShrikrishna Khare 
295650a5ce3eSShrikrishna Khare 	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
295709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
295809c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
295909c5088eSShreyas Bhatewara 		/* qid and qid2 for rx queues will be assigned later when num
296009c5088eSShreyas Bhatewara 		 * of rx queues is finalized after allocating intrs */
296109c5088eSShreyas Bhatewara 		rq->shared = &adapter->rqd_start[i].ctrl;
296209c5088eSShreyas Bhatewara 		rq->adapter = adapter;
296350a5ce3eSShrikrishna Khare 		rq->data_ring.desc_size = rxdata_desc_size;
296409c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(rq, adapter);
296509c5088eSShreyas Bhatewara 		if (err) {
296609c5088eSShreyas Bhatewara 			if (i == 0) {
2967204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
2968204a6e65SStephen Hemminger 					   "Could not allocate any rx queues. "
2969204a6e65SStephen Hemminger 					   "Aborting.\n");
297009c5088eSShreyas Bhatewara 				goto queue_err;
297109c5088eSShreyas Bhatewara 			} else {
2972204a6e65SStephen Hemminger 				netdev_info(adapter->netdev,
2973204a6e65SStephen Hemminger 					    "Number of rx queues changed "
297409c5088eSShreyas Bhatewara 					    "to: %d.\n", i);
297509c5088eSShreyas Bhatewara 				adapter->num_rx_queues = i;
297609c5088eSShreyas Bhatewara 				err = 0;
297709c5088eSShreyas Bhatewara 				break;
297809c5088eSShreyas Bhatewara 			}
297909c5088eSShreyas Bhatewara 		}
298009c5088eSShreyas Bhatewara 	}
298150a5ce3eSShrikrishna Khare 
298250a5ce3eSShrikrishna Khare 	if (!adapter->rxdataring_enabled)
298350a5ce3eSShrikrishna Khare 		vmxnet3_rq_destroy_all_rxdataring(adapter);
298450a5ce3eSShrikrishna Khare 
298509c5088eSShreyas Bhatewara 	return err;
298609c5088eSShreyas Bhatewara queue_err:
298709c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2988d1a890faSShreyas Bhatewara 	return err;
2989d1a890faSShreyas Bhatewara }
2990d1a890faSShreyas Bhatewara 
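/*
 * ndo_open handler: query the tx data ring descriptor size on version 3
 * and later devices, create the queues, and activate the device.
 */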
2991d1a890faSShreyas Bhatewara static int
2992d1a890faSShreyas Bhatewara vmxnet3_open(struct net_device *netdev)
2993d1a890faSShreyas Bhatewara {
2994d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
299509c5088eSShreyas Bhatewara 	int err, i;
2996d1a890faSShreyas Bhatewara 
2997d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
2998d1a890faSShreyas Bhatewara 
299909c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
300009c5088eSShreyas Bhatewara 		spin_lock_init(&adapter->tx_queue[i].tx_lock);
3001d1a890faSShreyas Bhatewara 
30023c8b3efcSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
30033c8b3efcSShrikrishna Khare 		unsigned long flags;
30043c8b3efcSShrikrishna Khare 		u16 txdata_desc_size;
30053c8b3efcSShrikrishna Khare 
30063c8b3efcSShrikrishna Khare 		spin_lock_irqsave(&adapter->cmd_lock, flags);
30073c8b3efcSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
30083c8b3efcSShrikrishna Khare 				       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
30093c8b3efcSShrikrishna Khare 		txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
30103c8b3efcSShrikrishna Khare 							 VMXNET3_REG_CMD);
30113c8b3efcSShrikrishna Khare 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
30123c8b3efcSShrikrishna Khare 
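		/* Use the device-reported tx data descriptor size only when it is
		 * in range and aligned per VMXNET3_TXDATA_DESC_SIZE_MASK;
		 * otherwise fall back to the default Vmxnet3_TxDataDesc size.
		 */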
30133c8b3efcSShrikrishna Khare 		if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
30143c8b3efcSShrikrishna Khare 		    (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
30153c8b3efcSShrikrishna Khare 		    (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
30163c8b3efcSShrikrishna Khare 			adapter->txdata_desc_size =
30173c8b3efcSShrikrishna Khare 				sizeof(struct Vmxnet3_TxDataDesc);
30183c8b3efcSShrikrishna Khare 		} else {
30193c8b3efcSShrikrishna Khare 			adapter->txdata_desc_size = txdata_desc_size;
30203c8b3efcSShrikrishna Khare 		}
30213c8b3efcSShrikrishna Khare 	} else {
30223c8b3efcSShrikrishna Khare 		adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
30233c8b3efcSShrikrishna Khare 	}
30243c8b3efcSShrikrishna Khare 
30253c8b3efcSShrikrishna Khare 	err = vmxnet3_create_queues(adapter,
30263c8b3efcSShrikrishna Khare 				    adapter->tx_ring_size,
3027f00e2b0aSNeil Horman 				    adapter->rx_ring_size,
30283c8b3efcSShrikrishna Khare 				    adapter->rx_ring2_size,
302950a5ce3eSShrikrishna Khare 				    adapter->txdata_desc_size,
303050a5ce3eSShrikrishna Khare 				    adapter->rxdata_desc_size);
3031d1a890faSShreyas Bhatewara 	if (err)
3032d1a890faSShreyas Bhatewara 		goto queue_err;
3033d1a890faSShreyas Bhatewara 
3034d1a890faSShreyas Bhatewara 	err = vmxnet3_activate_dev(adapter);
3035d1a890faSShreyas Bhatewara 	if (err)
3036d1a890faSShreyas Bhatewara 		goto activate_err;
3037d1a890faSShreyas Bhatewara 
3038d1a890faSShreyas Bhatewara 	return 0;
3039d1a890faSShreyas Bhatewara 
3040d1a890faSShreyas Bhatewara activate_err:
304109c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
304209c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
3043d1a890faSShreyas Bhatewara queue_err:
3044d1a890faSShreyas Bhatewara 	return err;
3045d1a890faSShreyas Bhatewara }
3046d1a890faSShreyas Bhatewara 
3047d1a890faSShreyas Bhatewara 
3048d1a890faSShreyas Bhatewara static int
3049d1a890faSShreyas Bhatewara vmxnet3_close(struct net_device *netdev)
3050d1a890faSShreyas Bhatewara {
3051d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3052d1a890faSShreyas Bhatewara 
3053d1a890faSShreyas Bhatewara 	/*
3054d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device; wait for its
3055d1a890faSShreyas Bhatewara 	 * completion.
3056d1a890faSShreyas Bhatewara 	 */
3057d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
305893c65d13SYueHaibing 		usleep_range(1000, 2000);
3059d1a890faSShreyas Bhatewara 
3060d1a890faSShreyas Bhatewara 	vmxnet3_quiesce_dev(adapter);
3061d1a890faSShreyas Bhatewara 
306209c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
306309c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
3064d1a890faSShreyas Bhatewara 
3065d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3066d1a890faSShreyas Bhatewara 
3067d1a890faSShreyas Bhatewara 
3068d1a890faSShreyas Bhatewara 	return 0;
3069d1a890faSShreyas Bhatewara }
3070d1a890faSShreyas Bhatewara 
3071d1a890faSShreyas Bhatewara 
3072d1a890faSShreyas Bhatewara void
3073d1a890faSShreyas Bhatewara vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3074d1a890faSShreyas Bhatewara {
307509c5088eSShreyas Bhatewara 	int i;
307609c5088eSShreyas Bhatewara 
3077d1a890faSShreyas Bhatewara 	/*
3078d1a890faSShreyas Bhatewara 	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3079d1a890faSShreyas Bhatewara 	 * vmxnet3_close() will deadlock.
3080d1a890faSShreyas Bhatewara 	 */
3081d1a890faSShreyas Bhatewara 	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3082d1a890faSShreyas Bhatewara 
3083d1a890faSShreyas Bhatewara 	/* we need to enable NAPI, otherwise dev_close will deadlock */
308409c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
308509c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
30861c4d5f51SNeil Horman 	/*
30871c4d5f51SNeil Horman 	 * Need to clear the quiesce bit to ensure that vmxnet3_close
30881c4d5f51SNeil Horman 	 * can quiesce the device properly
30891c4d5f51SNeil Horman 	 */
30901c4d5f51SNeil Horman 	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3091d1a890faSShreyas Bhatewara 	dev_close(adapter->netdev);
3092d1a890faSShreyas Bhatewara }
3093d1a890faSShreyas Bhatewara 
3094d1a890faSShreyas Bhatewara 
3095d1a890faSShreyas Bhatewara static int
3096d1a890faSShreyas Bhatewara vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3097d1a890faSShreyas Bhatewara {
3098d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3099d1a890faSShreyas Bhatewara 	int err = 0;
3100d1a890faSShreyas Bhatewara 
3101d1a890faSShreyas Bhatewara 	netdev->mtu = new_mtu;
3102d1a890faSShreyas Bhatewara 
3103d1a890faSShreyas Bhatewara 	/*
3104d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device; wait for its
3105d1a890faSShreyas Bhatewara 	 * completion.
3106d1a890faSShreyas Bhatewara 	 */
3107d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
310893c65d13SYueHaibing 		usleep_range(1000, 2000);
3109d1a890faSShreyas Bhatewara 
3110d1a890faSShreyas Bhatewara 	if (netif_running(netdev)) {
3111d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
3112d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
3113d1a890faSShreyas Bhatewara 
3114d1a890faSShreyas Bhatewara 		/* we need to re-create the rx queue based on the new mtu */
311509c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy_all(adapter);
3116d1a890faSShreyas Bhatewara 		vmxnet3_adjust_rx_ring_size(adapter);
311709c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create_all(adapter);
3118d1a890faSShreyas Bhatewara 		if (err) {
3119204a6e65SStephen Hemminger 			netdev_err(netdev,
3120204a6e65SStephen Hemminger 				   "failed to re-create rx queues, "
3121204a6e65SStephen Hemminger 				   "error %d. Closing it.\n", err);
3122d1a890faSShreyas Bhatewara 			goto out;
3123d1a890faSShreyas Bhatewara 		}
3124d1a890faSShreyas Bhatewara 
3125d1a890faSShreyas Bhatewara 		err = vmxnet3_activate_dev(adapter);
3126d1a890faSShreyas Bhatewara 		if (err) {
3127204a6e65SStephen Hemminger 			netdev_err(netdev,
3128204a6e65SStephen Hemminger 				   "failed to re-activate, error %d. "
3129204a6e65SStephen Hemminger 				   "Closing it.\n", err);
3130d1a890faSShreyas Bhatewara 			goto out;
3131d1a890faSShreyas Bhatewara 		}
3132d1a890faSShreyas Bhatewara 	}
3133d1a890faSShreyas Bhatewara 
3134d1a890faSShreyas Bhatewara out:
3135d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3136d1a890faSShreyas Bhatewara 	if (err)
3137d1a890faSShreyas Bhatewara 		vmxnet3_force_close(adapter);
3138d1a890faSShreyas Bhatewara 
3139d1a890faSShreyas Bhatewara 	return err;
3140d1a890faSShreyas Bhatewara }
3141d1a890faSShreyas Bhatewara 
3142d1a890faSShreyas Bhatewara 
3143d1a890faSShreyas Bhatewara static void
3144d1a890faSShreyas Bhatewara vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
3145d1a890faSShreyas Bhatewara {
3146d1a890faSShreyas Bhatewara 	struct net_device *netdev = adapter->netdev;
3147d1a890faSShreyas Bhatewara 
3148a0d2730cSMichał Mirosław 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3149f646968fSPatrick McHardy 		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3150f646968fSPatrick McHardy 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
315172e85c45SJesse Gross 		NETIF_F_LRO;
3152dacce2beSRonak Doshi 
3153dacce2beSRonak Doshi 	if (VMXNET3_VERSION_GE_4(adapter)) {
3154dacce2beSRonak Doshi 		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3155dacce2beSRonak Doshi 				NETIF_F_GSO_UDP_TUNNEL_CSUM;
3156dacce2beSRonak Doshi 
3157dacce2beSRonak Doshi 		netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3158dacce2beSRonak Doshi 			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3159dacce2beSRonak Doshi 			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3160dacce2beSRonak Doshi 			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3161dacce2beSRonak Doshi 			NETIF_F_GSO_UDP_TUNNEL_CSUM;
3162dacce2beSRonak Doshi 	}
3163dacce2beSRonak Doshi 
3164a0d2730cSMichał Mirosław 	if (dma64)
3165ebbf9295SShreyas Bhatewara 		netdev->hw_features |= NETIF_F_HIGHDMA;
316672e85c45SJesse Gross 	netdev->vlan_features = netdev->hw_features &
3167f646968fSPatrick McHardy 				~(NETIF_F_HW_VLAN_CTAG_TX |
3168f646968fSPatrick McHardy 				  NETIF_F_HW_VLAN_CTAG_RX);
3169f646968fSPatrick McHardy 	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3170d1a890faSShreyas Bhatewara }
3171d1a890faSShreyas Bhatewara 
3172d1a890faSShreyas Bhatewara 
3173d1a890faSShreyas Bhatewara static void
3174d1a890faSShreyas Bhatewara vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3175d1a890faSShreyas Bhatewara {
3176d1a890faSShreyas Bhatewara 	u32 tmp;
3177d1a890faSShreyas Bhatewara 
3178d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3179d1a890faSShreyas Bhatewara 	*(u32 *)mac = tmp;
3180d1a890faSShreyas Bhatewara 
3181d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3182d1a890faSShreyas Bhatewara 	mac[4] = tmp & 0xff;
3183d1a890faSShreyas Bhatewara 	mac[5] = (tmp >> 8) & 0xff;
3184d1a890faSShreyas Bhatewara }
3185d1a890faSShreyas Bhatewara 
318609c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
318709c5088eSShreyas Bhatewara 
318809c5088eSShreyas Bhatewara /*
318909c5088eSShreyas Bhatewara  * Enable MSIx vectors.
319009c5088eSShreyas Bhatewara  * Returns:
319125985edcSLucas De Marchi  *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of
3192b60b869dSAlexander Gordeev  *	 required vectors could be enabled,
3193b60b869dSAlexander Gordeev  *	otherwise the number of vectors that were enabled (which is
319409c5088eSShreyas Bhatewara  *	 greater than VMXNET3_LINUX_MIN_MSIX_VECT)
319509c5088eSShreyas Bhatewara  */
319609c5088eSShreyas Bhatewara 
319709c5088eSShreyas Bhatewara static int
3198b60b869dSAlexander Gordeev vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
319909c5088eSShreyas Bhatewara {
3200c0a1be38SAlexander Gordeev 	int ret = pci_enable_msix_range(adapter->pdev,
3201c0a1be38SAlexander Gordeev 					adapter->intr.msix_entries, nvec, nvec);
3202c0a1be38SAlexander Gordeev 
3203c0a1be38SAlexander Gordeev 	if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
32044bad25faSStephen Hemminger 		dev_err(&adapter->netdev->dev,
3205b60b869dSAlexander Gordeev 			"Failed to enable %d MSI-X, trying %d\n",
3206b60b869dSAlexander Gordeev 			nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
320709c5088eSShreyas Bhatewara 
3208c0a1be38SAlexander Gordeev 		ret = pci_enable_msix_range(adapter->pdev,
3209c0a1be38SAlexander Gordeev 					    adapter->intr.msix_entries,
3210c0a1be38SAlexander Gordeev 					    VMXNET3_LINUX_MIN_MSIX_VECT,
3211c0a1be38SAlexander Gordeev 					    VMXNET3_LINUX_MIN_MSIX_VECT);
3212c0a1be38SAlexander Gordeev 	}
3213c0a1be38SAlexander Gordeev 
3214c0a1be38SAlexander Gordeev 	if (ret < 0) {
3215c0a1be38SAlexander Gordeev 		dev_err(&adapter->netdev->dev,
3216c0a1be38SAlexander Gordeev 			"Failed to enable MSI-X, error: %d\n", ret);
3217c0a1be38SAlexander Gordeev 	}
3218c0a1be38SAlexander Gordeev 
3219c0a1be38SAlexander Gordeev 	return ret;
322009c5088eSShreyas Bhatewara }
322109c5088eSShreyas Bhatewara 
322209c5088eSShreyas Bhatewara 
322309c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
3224d1a890faSShreyas Bhatewara 
3225d1a890faSShreyas Bhatewara static void
3226d1a890faSShreyas Bhatewara vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3227d1a890faSShreyas Bhatewara {
3228d1a890faSShreyas Bhatewara 	u32 cfg;
3229e328d410SRoland Dreier 	unsigned long flags;
3230d1a890faSShreyas Bhatewara 
3231d1a890faSShreyas Bhatewara 	/* intr settings */
3232e328d410SRoland Dreier 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3233d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3234d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_GET_CONF_INTR);
3235d1a890faSShreyas Bhatewara 	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3236e328d410SRoland Dreier 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3237d1a890faSShreyas Bhatewara 	adapter->intr.type = cfg & 0x3;
3238d1a890faSShreyas Bhatewara 	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3239d1a890faSShreyas Bhatewara 
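	/* VMXNET3_IT_AUTO leaves the choice to the driver; prefer MSI-X. */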
3240d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_AUTO) {
32410bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSIX;
32420bdc0d70SShreyas Bhatewara 	}
3243d1a890faSShreyas Bhatewara 
32448f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
32450bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
3246b60b869dSAlexander Gordeev 		int i, nvec;
32470bdc0d70SShreyas Bhatewara 
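		/* One vector per tx queue (or a single shared one), plus one per
		 * rx queue unless rx interrupts are buddy-shared with tx, plus
		 * one for link events; never request fewer than the driver
		 * minimum.
		 */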
3248b60b869dSAlexander Gordeev 		nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3249b60b869dSAlexander Gordeev 			1 : adapter->num_tx_queues;
3250b60b869dSAlexander Gordeev 		nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3251b60b869dSAlexander Gordeev 			0 : adapter->num_rx_queues;
3252b60b869dSAlexander Gordeev 		nvec += 1;	/* for link event */
3253b60b869dSAlexander Gordeev 		nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3254b60b869dSAlexander Gordeev 		       nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
325509c5088eSShreyas Bhatewara 
3256b60b869dSAlexander Gordeev 		for (i = 0; i < nvec; i++)
3257b60b869dSAlexander Gordeev 			adapter->intr.msix_entries[i].entry = i;
325809c5088eSShreyas Bhatewara 
3259b60b869dSAlexander Gordeev 		nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
3260b60b869dSAlexander Gordeev 		if (nvec < 0)
3261b60b869dSAlexander Gordeev 			goto msix_err;
326209c5088eSShreyas Bhatewara 
326309c5088eSShreyas Bhatewara 		/* If we cannot allocate one MSIx vector per queue
326409c5088eSShreyas Bhatewara 		 * then limit the number of rx queues to 1
326509c5088eSShreyas Bhatewara 		 */
3266b60b869dSAlexander Gordeev 		if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
326709c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
32687e96fbf2SShreyas Bhatewara 			    || adapter->num_rx_queues != 1) {
326909c5088eSShreyas Bhatewara 				adapter->share_intr = VMXNET3_INTR_TXSHARE;
3270204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
3271204a6e65SStephen Hemminger 					   "Number of rx queues: 1\n");
327209c5088eSShreyas Bhatewara 				adapter->num_rx_queues = 1;
327309c5088eSShreyas Bhatewara 			}
3274d1a890faSShreyas Bhatewara 		}
3275b60b869dSAlexander Gordeev 
3276b60b869dSAlexander Gordeev 		adapter->intr.num_intrs = nvec;
327709c5088eSShreyas Bhatewara 		return;
327809c5088eSShreyas Bhatewara 
3279b60b869dSAlexander Gordeev msix_err:
328009c5088eSShreyas Bhatewara 		/* If we cannot allocate MSIx vectors, use only one rx queue */
32814bad25faSStephen Hemminger 		dev_info(&adapter->pdev->dev,
32824bad25faSStephen Hemminger 			 "Failed to enable MSI-X, error %d. "
3283b60b869dSAlexander Gordeev 			 "Limiting #rx queues to 1, trying MSI.\n", nvec);
328409c5088eSShreyas Bhatewara 
32850bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSI;
32860bdc0d70SShreyas Bhatewara 	}
3287d1a890faSShreyas Bhatewara 
32880bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSI) {
3289b60b869dSAlexander Gordeev 		if (!pci_enable_msi(adapter->pdev)) {
329009c5088eSShreyas Bhatewara 			adapter->num_rx_queues = 1;
3291d1a890faSShreyas Bhatewara 			adapter->intr.num_intrs = 1;
3292d1a890faSShreyas Bhatewara 			return;
3293d1a890faSShreyas Bhatewara 		}
3294d1a890faSShreyas Bhatewara 	}
32950bdc0d70SShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
3296d1a890faSShreyas Bhatewara 
329709c5088eSShreyas Bhatewara 	adapter->num_rx_queues = 1;
3298204a6e65SStephen Hemminger 	dev_info(&adapter->netdev->dev,
3299204a6e65SStephen Hemminger 		 "Using INTx interrupt, #Rx queues: 1.\n");
3300d1a890faSShreyas Bhatewara 	adapter->intr.type = VMXNET3_IT_INTX;
3301d1a890faSShreyas Bhatewara 
3302d1a890faSShreyas Bhatewara 	/* INT-X related setting */
3303d1a890faSShreyas Bhatewara 	adapter->intr.num_intrs = 1;
3304d1a890faSShreyas Bhatewara }
3305d1a890faSShreyas Bhatewara 
3306d1a890faSShreyas Bhatewara 
3307d1a890faSShreyas Bhatewara static void
3308d1a890faSShreyas Bhatewara vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3309d1a890faSShreyas Bhatewara {
3310d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX)
3311d1a890faSShreyas Bhatewara 		pci_disable_msix(adapter->pdev);
3312d1a890faSShreyas Bhatewara 	else if (adapter->intr.type == VMXNET3_IT_MSI)
3313d1a890faSShreyas Bhatewara 		pci_disable_msi(adapter->pdev);
3314d1a890faSShreyas Bhatewara 	else
3315d1a890faSShreyas Bhatewara 		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3316d1a890faSShreyas Bhatewara }
3317d1a890faSShreyas Bhatewara 
3318d1a890faSShreyas Bhatewara 
3319d1a890faSShreyas Bhatewara static void
33200290bd29SMichael S. Tsirkin vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3321d1a890faSShreyas Bhatewara {
3322d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3323d1a890faSShreyas Bhatewara 	adapter->tx_timeout_count++;
3324d1a890faSShreyas Bhatewara 
3325204a6e65SStephen Hemminger 	netdev_err(adapter->netdev, "tx hang\n");
3326d1a890faSShreyas Bhatewara 	schedule_work(&adapter->work);
3327d1a890faSShreyas Bhatewara }
3328d1a890faSShreyas Bhatewara 
3329d1a890faSShreyas Bhatewara 
3330d1a890faSShreyas Bhatewara static void
3331d1a890faSShreyas Bhatewara vmxnet3_reset_work(struct work_struct *data)
3332d1a890faSShreyas Bhatewara {
3333d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
3334d1a890faSShreyas Bhatewara 
3335d1a890faSShreyas Bhatewara 	adapter = container_of(data, struct vmxnet3_adapter, work);
3336d1a890faSShreyas Bhatewara 
3337d1a890faSShreyas Bhatewara 	/* if another thread is resetting the device, no need to proceed */
3338d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3339d1a890faSShreyas Bhatewara 		return;
3340d1a890faSShreyas Bhatewara 
3341d1a890faSShreyas Bhatewara 	/* if the device is closed, we must leave it alone */
3342d9a5f210SShreyas Bhatewara 	rtnl_lock();
3343d1a890faSShreyas Bhatewara 	if (netif_running(adapter->netdev)) {
3344204a6e65SStephen Hemminger 		netdev_notice(adapter->netdev, "resetting\n");
3345d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
3346d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
3347d1a890faSShreyas Bhatewara 		vmxnet3_activate_dev(adapter);
3348d1a890faSShreyas Bhatewara 	} else {
3349204a6e65SStephen Hemminger 		netdev_info(adapter->netdev, "already closed\n");
3350d1a890faSShreyas Bhatewara 	}
3351d9a5f210SShreyas Bhatewara 	rtnl_unlock();
3352d1a890faSShreyas Bhatewara 
3353277964e1SBenjamin Poirier 	netif_wake_queue(adapter->netdev);
3354d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3355d1a890faSShreyas Bhatewara }
3356d1a890faSShreyas Bhatewara 
3357d1a890faSShreyas Bhatewara 
33583a4751a3SBill Pemberton static int
3359d1a890faSShreyas Bhatewara vmxnet3_probe_device(struct pci_dev *pdev,
3360d1a890faSShreyas Bhatewara 		     const struct pci_device_id *id)
3361d1a890faSShreyas Bhatewara {
3362d1a890faSShreyas Bhatewara 	static const struct net_device_ops vmxnet3_netdev_ops = {
3363d1a890faSShreyas Bhatewara 		.ndo_open = vmxnet3_open,
3364d1a890faSShreyas Bhatewara 		.ndo_stop = vmxnet3_close,
3365d1a890faSShreyas Bhatewara 		.ndo_start_xmit = vmxnet3_xmit_frame,
3366d1a890faSShreyas Bhatewara 		.ndo_set_mac_address = vmxnet3_set_mac_addr,
3367d1a890faSShreyas Bhatewara 		.ndo_change_mtu = vmxnet3_change_mtu,
33683dd7400bSRonak Doshi 		.ndo_fix_features = vmxnet3_fix_features,
3369a0d2730cSMichał Mirosław 		.ndo_set_features = vmxnet3_set_features,
33701dac3b1bSRonak Doshi 		.ndo_features_check = vmxnet3_features_check,
337195305f6cSstephen hemminger 		.ndo_get_stats64 = vmxnet3_get_stats64,
3372d1a890faSShreyas Bhatewara 		.ndo_tx_timeout = vmxnet3_tx_timeout,
3373afc4b13dSJiri Pirko 		.ndo_set_rx_mode = vmxnet3_set_mc,
3374d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3375d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3376d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
3377d1a890faSShreyas Bhatewara 		.ndo_poll_controller = vmxnet3_netpoll,
3378d1a890faSShreyas Bhatewara #endif
3379d1a890faSShreyas Bhatewara 	};
3380d1a890faSShreyas Bhatewara 	int err;
338161aeeceaShpreg@vmware.com 	bool dma64;
3382d1a890faSShreyas Bhatewara 	u32 ver;
3383d1a890faSShreyas Bhatewara 	struct net_device *netdev;
3384d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
3385d1a890faSShreyas Bhatewara 	u8 mac[ETH_ALEN];
338609c5088eSShreyas Bhatewara 	int size;
338709c5088eSShreyas Bhatewara 	int num_tx_queues;
338809c5088eSShreyas Bhatewara 	int num_rx_queues;
3389*39f9895aSRonak Doshi 	int queues;
3390*39f9895aSRonak Doshi 	unsigned long flags;
3391d1a890faSShreyas Bhatewara 
3392e154b639SShreyas Bhatewara 	if (!pci_msi_enabled())
3393e154b639SShreyas Bhatewara 		enable_mq = 0;
3394e154b639SShreyas Bhatewara 
339509c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
339609c5088eSShreyas Bhatewara 	if (enable_mq)
339709c5088eSShreyas Bhatewara 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
339809c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
339909c5088eSShreyas Bhatewara 	else
340009c5088eSShreyas Bhatewara #endif
340109c5088eSShreyas Bhatewara 		num_rx_queues = 1;
3402eebb02b1SShreyas Bhatewara 	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
340309c5088eSShreyas Bhatewara 
340409c5088eSShreyas Bhatewara 	if (enable_mq)
340509c5088eSShreyas Bhatewara 		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
340609c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
340709c5088eSShreyas Bhatewara 	else
340809c5088eSShreyas Bhatewara 		num_tx_queues = 1;
340909c5088eSShreyas Bhatewara 
3410eebb02b1SShreyas Bhatewara 	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
341109c5088eSShreyas Bhatewara 	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
341209c5088eSShreyas Bhatewara 				   max(num_tx_queues, num_rx_queues));
341341de8d4cSJoe Perches 	if (!netdev)
3414d1a890faSShreyas Bhatewara 		return -ENOMEM;
3415d1a890faSShreyas Bhatewara 
3416d1a890faSShreyas Bhatewara 	pci_set_drvdata(pdev, netdev);
3417d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
3418d1a890faSShreyas Bhatewara 	adapter->netdev = netdev;
3419d1a890faSShreyas Bhatewara 	adapter->pdev = pdev;
3420d1a890faSShreyas Bhatewara 
3421f00e2b0aSNeil Horman 	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3422f00e2b0aSNeil Horman 	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
342353831aa1SShrikrishna Khare 	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3424f00e2b0aSNeil Horman 
342561aeeceaShpreg@vmware.com 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
342661aeeceaShpreg@vmware.com 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
342761aeeceaShpreg@vmware.com 			dev_err(&pdev->dev,
342861aeeceaShpreg@vmware.com 				"pci_set_consistent_dma_mask failed\n");
342961aeeceaShpreg@vmware.com 			err = -EIO;
343061aeeceaShpreg@vmware.com 			goto err_set_mask;
343161aeeceaShpreg@vmware.com 		}
343261aeeceaShpreg@vmware.com 		dma64 = true;
343361aeeceaShpreg@vmware.com 	} else {
343461aeeceaShpreg@vmware.com 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
343561aeeceaShpreg@vmware.com 			dev_err(&pdev->dev,
343661aeeceaShpreg@vmware.com 				"pci_set_dma_mask failed\n");
343761aeeceaShpreg@vmware.com 			err = -EIO;
343861aeeceaShpreg@vmware.com 			goto err_set_mask;
343961aeeceaShpreg@vmware.com 		}
344061aeeceaShpreg@vmware.com 		dma64 = false;
344161aeeceaShpreg@vmware.com 	}
344261aeeceaShpreg@vmware.com 
344383d0feffSShreyas Bhatewara 	spin_lock_init(&adapter->cmd_lock);
3444b0eb57cbSAndy King 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3445b0eb57cbSAndy King 					     sizeof(struct vmxnet3_adapter),
3446b0eb57cbSAndy King 					     PCI_DMA_TODEVICE);
34475738a09dSAlexey Khoroshilov 	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
34485738a09dSAlexey Khoroshilov 		dev_err(&pdev->dev, "Failed to map dma\n");
34495738a09dSAlexey Khoroshilov 		err = -EFAULT;
345061aeeceaShpreg@vmware.com 		goto err_set_mask;
34515738a09dSAlexey Khoroshilov 	}
3452b0eb57cbSAndy King 	adapter->shared = dma_alloc_coherent(
3453b0eb57cbSAndy King 				&adapter->pdev->dev,
3454d1a890faSShreyas Bhatewara 				sizeof(struct Vmxnet3_DriverShared),
3455b0eb57cbSAndy King 				&adapter->shared_pa, GFP_KERNEL);
3456d1a890faSShreyas Bhatewara 	if (!adapter->shared) {
3457204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to allocate memory\n");
3458d1a890faSShreyas Bhatewara 		err = -ENOMEM;
3459d1a890faSShreyas Bhatewara 		goto err_alloc_shared;
3460d1a890faSShreyas Bhatewara 	}
3461d1a890faSShreyas Bhatewara 
346261aeeceaShpreg@vmware.com 	err = vmxnet3_alloc_pci_resources(adapter);
3463d1a890faSShreyas Bhatewara 	if (err < 0)
3464d1a890faSShreyas Bhatewara 		goto err_alloc_pci;
3465d1a890faSShreyas Bhatewara 
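	/* Negotiate the device version: pick the highest revision advertised
	 * in VRRS that this driver supports and acknowledge it back to the
	 * device.
	 */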
3466d1a890faSShreyas Bhatewara 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3467a31135e3SRonak Doshi 	if (ver & (1 << VMXNET3_REV_4)) {
3468a31135e3SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter,
3469a31135e3SRonak Doshi 				       VMXNET3_REG_VRRS,
3470a31135e3SRonak Doshi 				       1 << VMXNET3_REV_4);
3471a31135e3SRonak Doshi 		adapter->version = VMXNET3_REV_4 + 1;
3472a31135e3SRonak Doshi 	} else if (ver & (1 << VMXNET3_REV_3)) {
34736af9d787SShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter,
34746af9d787SShrikrishna Khare 				       VMXNET3_REG_VRRS,
34756af9d787SShrikrishna Khare 				       1 << VMXNET3_REV_3);
34766af9d787SShrikrishna Khare 		adapter->version = VMXNET3_REV_3 + 1;
34776af9d787SShrikrishna Khare 	} else if (ver & (1 << VMXNET3_REV_2)) {
3478190af10fSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter,
3479190af10fSShrikrishna Khare 				       VMXNET3_REG_VRRS,
3480190af10fSShrikrishna Khare 				       1 << VMXNET3_REV_2);
3481190af10fSShrikrishna Khare 		adapter->version = VMXNET3_REV_2 + 1;
3482190af10fSShrikrishna Khare 	} else if (ver & (1 << VMXNET3_REV_1)) {
3483190af10fSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter,
3484190af10fSShrikrishna Khare 				       VMXNET3_REG_VRRS,
3485190af10fSShrikrishna Khare 				       1 << VMXNET3_REV_1);
3486190af10fSShrikrishna Khare 		adapter->version = VMXNET3_REV_1 + 1;
3487d1a890faSShreyas Bhatewara 	} else {
3488204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
3489204a6e65SStephen Hemminger 			"Incompatible h/w version (0x%x) for adapter\n", ver);
3490d1a890faSShreyas Bhatewara 		err = -EBUSY;
3491d1a890faSShreyas Bhatewara 		goto err_ver;
3492d1a890faSShreyas Bhatewara 	}
349345dac1d6SShreyas Bhatewara 	dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3494d1a890faSShreyas Bhatewara 
3495d1a890faSShreyas Bhatewara 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3496d1a890faSShreyas Bhatewara 	if (ver & 1) {
3497d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3498d1a890faSShreyas Bhatewara 	} else {
3499204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
3500204a6e65SStephen Hemminger 			"Incompatible upt version (0x%x) for adapter\n", ver);
3501d1a890faSShreyas Bhatewara 		err = -EBUSY;
3502d1a890faSShreyas Bhatewara 		goto err_ver;
3503d1a890faSShreyas Bhatewara 	}
3504d1a890faSShreyas Bhatewara 
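	/* Version 6 and newer devices report their maximum queue configuration:
	 * the low byte of the response encodes tx queues and the next byte
	 * encodes rx queues; older devices use the driver defaults.
	 */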
3505*39f9895aSRonak Doshi 	if (VMXNET3_VERSION_GE_6(adapter)) {
3506*39f9895aSRonak Doshi 		spin_lock_irqsave(&adapter->cmd_lock, flags);
3507*39f9895aSRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3508*39f9895aSRonak Doshi 				       VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3509*39f9895aSRonak Doshi 		queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3510*39f9895aSRonak Doshi 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3511*39f9895aSRonak Doshi 		if (queues > 0) {
3512*39f9895aSRonak Doshi 			adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
3513*39f9895aSRonak Doshi 			adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
3514*39f9895aSRonak Doshi 		} else {
3515*39f9895aSRonak Doshi 			adapter->num_rx_queues = min(num_rx_queues,
3516*39f9895aSRonak Doshi 						     VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3517*39f9895aSRonak Doshi 			adapter->num_tx_queues = min(num_tx_queues,
3518*39f9895aSRonak Doshi 						     VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3519*39f9895aSRonak Doshi 		}
3520*39f9895aSRonak Doshi 		if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
3521*39f9895aSRonak Doshi 		    adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
3522*39f9895aSRonak Doshi 			adapter->queuesExtEnabled = true;
3523*39f9895aSRonak Doshi 		} else {
3524*39f9895aSRonak Doshi 			adapter->queuesExtEnabled = false;
3525*39f9895aSRonak Doshi 		}
3526*39f9895aSRonak Doshi 	} else {
3527*39f9895aSRonak Doshi 		adapter->queuesExtEnabled = false;
3528*39f9895aSRonak Doshi 		adapter->num_rx_queues = min(num_rx_queues,
3529*39f9895aSRonak Doshi 					     VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3530*39f9895aSRonak Doshi 		adapter->num_tx_queues = min(num_tx_queues,
3531*39f9895aSRonak Doshi 					     VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3532*39f9895aSRonak Doshi 	}
3533*39f9895aSRonak Doshi 	dev_info(&pdev->dev,
3534*39f9895aSRonak Doshi 		 "# of Tx queues : %d, # of Rx queues : %d\n",
3535*39f9895aSRonak Doshi 		 adapter->num_tx_queues, adapter->num_rx_queues);
3536*39f9895aSRonak Doshi 
3537*39f9895aSRonak Doshi 	adapter->rx_buf_per_pkt = 1;
3538*39f9895aSRonak Doshi 
3539*39f9895aSRonak Doshi 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3540*39f9895aSRonak Doshi 	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3541*39f9895aSRonak Doshi 	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3542*39f9895aSRonak Doshi 						&adapter->queue_desc_pa,
3543*39f9895aSRonak Doshi 						GFP_KERNEL);
3544*39f9895aSRonak Doshi 
3545*39f9895aSRonak Doshi 	if (!adapter->tqd_start) {
3546*39f9895aSRonak Doshi 		dev_err(&pdev->dev, "Failed to allocate memory\n");
3547*39f9895aSRonak Doshi 		err = -ENOMEM;
3548*39f9895aSRonak Doshi 		goto err_ver;
3549*39f9895aSRonak Doshi 	}
3550*39f9895aSRonak Doshi 	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3551*39f9895aSRonak Doshi 							    adapter->num_tx_queues);
3552*39f9895aSRonak Doshi 
3553*39f9895aSRonak Doshi 	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3554*39f9895aSRonak Doshi 					      sizeof(struct Vmxnet3_PMConf),
3555*39f9895aSRonak Doshi 					      &adapter->pm_conf_pa,
3556*39f9895aSRonak Doshi 					      GFP_KERNEL);
3557*39f9895aSRonak Doshi 	if (adapter->pm_conf == NULL) {
3558*39f9895aSRonak Doshi 		err = -ENOMEM;
3559*39f9895aSRonak Doshi 		goto err_alloc_pm;
3560*39f9895aSRonak Doshi 	}
3561*39f9895aSRonak Doshi 
3562*39f9895aSRonak Doshi #ifdef VMXNET3_RSS
3563*39f9895aSRonak Doshi 
3564*39f9895aSRonak Doshi 	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3565*39f9895aSRonak Doshi 					       sizeof(struct UPT1_RSSConf),
3566*39f9895aSRonak Doshi 					       &adapter->rss_conf_pa,
3567*39f9895aSRonak Doshi 					       GFP_KERNEL);
3568*39f9895aSRonak Doshi 	if (adapter->rss_conf == NULL) {
3569*39f9895aSRonak Doshi 		err = -ENOMEM;
3570*39f9895aSRonak Doshi 		goto err_alloc_rss;
3571*39f9895aSRonak Doshi 	}
3572*39f9895aSRonak Doshi #endif /* VMXNET3_RSS */
3573*39f9895aSRonak Doshi 
35744edef40eSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
35754edef40eSShrikrishna Khare 		adapter->coal_conf =
35764edef40eSShrikrishna Khare 			dma_alloc_coherent(&adapter->pdev->dev,
35774edef40eSShrikrishna Khare 					   sizeof(struct Vmxnet3_CoalesceScheme),
35794edef40eSShrikrishna Khare 					   &adapter->coal_conf_pa,
35804edef40eSShrikrishna Khare 					   GFP_KERNEL);
35814edef40eSShrikrishna Khare 		if (!adapter->coal_conf) {
35824edef40eSShrikrishna Khare 			err = -ENOMEM;
3583*39f9895aSRonak Doshi 			goto err_coal_conf;
35844edef40eSShrikrishna Khare 		}
35854edef40eSShrikrishna Khare 		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
35864edef40eSShrikrishna Khare 		adapter->default_coal_mode = true;
35874edef40eSShrikrishna Khare 	}
35884edef40eSShrikrishna Khare 
3589d3a8a9e5SRonak Doshi 	if (VMXNET3_VERSION_GE_4(adapter)) {
3590d3a8a9e5SRonak Doshi 		adapter->default_rss_fields = true;
3591d3a8a9e5SRonak Doshi 		adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
3592d3a8a9e5SRonak Doshi 	}
3593d3a8a9e5SRonak Doshi 
3594e101e7ddSShreyas Bhatewara 	SET_NETDEV_DEV(netdev, &pdev->dev);
3595d1a890faSShreyas Bhatewara 	vmxnet3_declare_features(adapter, dma64);
3596d1a890faSShreyas Bhatewara 
359750a5ce3eSShrikrishna Khare 	adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
359850a5ce3eSShrikrishna Khare 		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
359950a5ce3eSShrikrishna Khare 
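	/* Pair each tx queue with an rx queue on one vector when the counts
	 * match; otherwise give every queue its own interrupt.
	 */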
36004db37a78SStephen Hemminger 	if (adapter->num_tx_queues == adapter->num_rx_queues)
36014db37a78SStephen Hemminger 		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
36024db37a78SStephen Hemminger 	else
360309c5088eSShreyas Bhatewara 		adapter->share_intr = VMXNET3_INTR_DONTSHARE;
360409c5088eSShreyas Bhatewara 
3605d1a890faSShreyas Bhatewara 	vmxnet3_alloc_intr_resources(adapter);
3606d1a890faSShreyas Bhatewara 
360709c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
360809c5088eSShreyas Bhatewara 	if (adapter->num_rx_queues > 1 &&
360909c5088eSShreyas Bhatewara 	    adapter->intr.type == VMXNET3_IT_MSIX) {
361009c5088eSShreyas Bhatewara 		adapter->rss = true;
36117db11f75SStephen Hemminger 		netdev->hw_features |= NETIF_F_RXHASH;
36127db11f75SStephen Hemminger 		netdev->features |= NETIF_F_RXHASH;
3613204a6e65SStephen Hemminger 		dev_dbg(&pdev->dev, "RSS is enabled.\n");
361409c5088eSShreyas Bhatewara 	} else {
361509c5088eSShreyas Bhatewara 		adapter->rss = false;
361609c5088eSShreyas Bhatewara 	}
361709c5088eSShreyas Bhatewara #endif
361809c5088eSShreyas Bhatewara 
3619d1a890faSShreyas Bhatewara 	vmxnet3_read_mac_addr(adapter, mac);
3620d1a890faSShreyas Bhatewara 	memcpy(netdev->dev_addr,  mac, netdev->addr_len);
3621d1a890faSShreyas Bhatewara 
3622d1a890faSShreyas Bhatewara 	netdev->netdev_ops = &vmxnet3_netdev_ops;
3623d1a890faSShreyas Bhatewara 	vmxnet3_set_ethtool_ops(netdev);
362409c5088eSShreyas Bhatewara 	netdev->watchdog_timeo = 5 * HZ;
3625d1a890faSShreyas Bhatewara 
3626d0c2c997SJarod Wilson 	/* MTU range: 60 - 9000 */
3627d0c2c997SJarod Wilson 	netdev->min_mtu = VMXNET3_MIN_MTU;
3628d0c2c997SJarod Wilson 	netdev->max_mtu = VMXNET3_MAX_MTU;
3629d0c2c997SJarod Wilson 
3630d1a890faSShreyas Bhatewara 	INIT_WORK(&adapter->work, vmxnet3_reset_work);
3631e3bc4ffbSSteve Hodgson 	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3632d1a890faSShreyas Bhatewara 
363309c5088eSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
363409c5088eSShreyas Bhatewara 		int i;
363509c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
363609c5088eSShreyas Bhatewara 			netif_napi_add(adapter->netdev,
363709c5088eSShreyas Bhatewara 				       &adapter->rx_queue[i].napi,
363809c5088eSShreyas Bhatewara 				       vmxnet3_poll_rx_only, 64);
363909c5088eSShreyas Bhatewara 		}
364009c5088eSShreyas Bhatewara 	} else {
364109c5088eSShreyas Bhatewara 		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
364209c5088eSShreyas Bhatewara 			       vmxnet3_poll, 64);
364309c5088eSShreyas Bhatewara 	}
364409c5088eSShreyas Bhatewara 
364509c5088eSShreyas Bhatewara 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
364609c5088eSShreyas Bhatewara 	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
364709c5088eSShreyas Bhatewara 
36486cdd20c3SNeil Horman 	netif_carrier_off(netdev);
3649d1a890faSShreyas Bhatewara 	err = register_netdev(netdev);
3650d1a890faSShreyas Bhatewara 
3651d1a890faSShreyas Bhatewara 	if (err) {
3652204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to register adapter\n");
3653d1a890faSShreyas Bhatewara 		goto err_register;
3654d1a890faSShreyas Bhatewara 	}
3655d1a890faSShreyas Bhatewara 
36564a1745fcSShreyas Bhatewara 	vmxnet3_check_link(adapter, false);
3657d1a890faSShreyas Bhatewara 	return 0;
3658d1a890faSShreyas Bhatewara 
3659d1a890faSShreyas Bhatewara err_register:
36604edef40eSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
36614edef40eSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
36624edef40eSShrikrishna Khare 				  sizeof(struct Vmxnet3_CoalesceScheme),
36634edef40eSShrikrishna Khare 				  adapter->coal_conf, adapter->coal_conf_pa);
36644edef40eSShrikrishna Khare 	}
3665d1a890faSShreyas Bhatewara 	vmxnet3_free_intr_resources(adapter);
3666*39f9895aSRonak Doshi err_coal_conf:
366709c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
3668b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3669b0eb57cbSAndy King 			  adapter->rss_conf, adapter->rss_conf_pa);
367009c5088eSShreyas Bhatewara err_alloc_rss:
367109c5088eSShreyas Bhatewara #endif
3672b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3673b0eb57cbSAndy King 			  adapter->pm_conf, adapter->pm_conf_pa);
3674d1a890faSShreyas Bhatewara err_alloc_pm:
3675b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
367609c5088eSShreyas Bhatewara 			  adapter->queue_desc_pa);
3677*39f9895aSRonak Doshi err_ver:
3678*39f9895aSRonak Doshi 	vmxnet3_free_pci_resources(adapter);
3679*39f9895aSRonak Doshi err_alloc_pci:
3680b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev,
3681b0eb57cbSAndy King 			  sizeof(struct Vmxnet3_DriverShared),
3682d1a890faSShreyas Bhatewara 			  adapter->shared, adapter->shared_pa);
3683d1a890faSShreyas Bhatewara err_alloc_shared:
3684b0eb57cbSAndy King 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3685b0eb57cbSAndy King 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
368661aeeceaShpreg@vmware.com err_set_mask:
3687d1a890faSShreyas Bhatewara 	free_netdev(netdev);
3688d1a890faSShreyas Bhatewara 	return err;
3689d1a890faSShreyas Bhatewara }
3690d1a890faSShreyas Bhatewara 
3691d1a890faSShreyas Bhatewara 
36923a4751a3SBill Pemberton static void
3693d1a890faSShreyas Bhatewara vmxnet3_remove_device(struct pci_dev *pdev)
3694d1a890faSShreyas Bhatewara {
3695d1a890faSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
3696d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
369709c5088eSShreyas Bhatewara 	int size = 0;
3698*39f9895aSRonak Doshi 	int num_rx_queues, rx_queues;
3699*39f9895aSRonak Doshi 	unsigned long flags;
370009c5088eSShreyas Bhatewara 
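	/* Recompute the rx queue count that was used to size the queue
	 * descriptor area at probe time, so it is freed with a matching size.
	 */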
370109c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
370209c5088eSShreyas Bhatewara 	if (enable_mq)
370309c5088eSShreyas Bhatewara 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
370409c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
370509c5088eSShreyas Bhatewara 	else
370609c5088eSShreyas Bhatewara #endif
370709c5088eSShreyas Bhatewara 		num_rx_queues = 1;
3708eebb02b1SShreyas Bhatewara 	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3709*39f9895aSRonak Doshi 	if (VMXNET3_VERSION_GE_6(adapter)) {
3710*39f9895aSRonak Doshi 		spin_lock_irqsave(&adapter->cmd_lock, flags);
3711*39f9895aSRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3712*39f9895aSRonak Doshi 				       VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3713*39f9895aSRonak Doshi 		rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3714*39f9895aSRonak Doshi 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3715*39f9895aSRonak Doshi 		if (rx_queues > 0)
3716*39f9895aSRonak Doshi 			rx_queues = (rx_queues >> 8) & 0xff;
3717*39f9895aSRonak Doshi 		else
3718*39f9895aSRonak Doshi 			rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3719*39f9895aSRonak Doshi 		num_rx_queues = min(num_rx_queues, rx_queues);
3720*39f9895aSRonak Doshi 	} else {
3721*39f9895aSRonak Doshi 		num_rx_queues = min(num_rx_queues,
3722*39f9895aSRonak Doshi 				    VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3723*39f9895aSRonak Doshi 	}
3724d1a890faSShreyas Bhatewara 
372523f333a2STejun Heo 	cancel_work_sync(&adapter->work);
3726d1a890faSShreyas Bhatewara 
3727d1a890faSShreyas Bhatewara 	unregister_netdev(netdev);
3728d1a890faSShreyas Bhatewara 
3729d1a890faSShreyas Bhatewara 	vmxnet3_free_intr_resources(adapter);
3730d1a890faSShreyas Bhatewara 	vmxnet3_free_pci_resources(adapter);
37314edef40eSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
37324edef40eSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
37334edef40eSShrikrishna Khare 				  sizeof(struct Vmxnet3_CoalesceScheme),
37344edef40eSShrikrishna Khare 				  adapter->coal_conf, adapter->coal_conf_pa);
37354edef40eSShrikrishna Khare 	}
373609c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
3737b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3738b0eb57cbSAndy King 			  adapter->rss_conf, adapter->rss_conf_pa);
373909c5088eSShreyas Bhatewara #endif
3740b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3741b0eb57cbSAndy King 			  adapter->pm_conf, adapter->pm_conf_pa);
374209c5088eSShreyas Bhatewara 
374309c5088eSShreyas Bhatewara 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
374409c5088eSShreyas Bhatewara 	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3745b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
374609c5088eSShreyas Bhatewara 			  adapter->queue_desc_pa);
3747b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev,
3748b0eb57cbSAndy King 			  sizeof(struct Vmxnet3_DriverShared),
3749d1a890faSShreyas Bhatewara 			  adapter->shared, adapter->shared_pa);
3750b0eb57cbSAndy King 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3751b0eb57cbSAndy King 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3752d1a890faSShreyas Bhatewara 	free_netdev(netdev);
3753d1a890faSShreyas Bhatewara }
3754d1a890faSShreyas Bhatewara 
3755e9ba47bfSShreyas Bhatewara static void vmxnet3_shutdown_device(struct pci_dev *pdev)
3756e9ba47bfSShreyas Bhatewara {
3757e9ba47bfSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
3758e9ba47bfSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3759e9ba47bfSShreyas Bhatewara 	unsigned long flags;
3760e9ba47bfSShreyas Bhatewara 
3761e9ba47bfSShreyas Bhatewara 	/* Reset_work may be in the middle of resetting the device; wait for its
3762e9ba47bfSShreyas Bhatewara 	 * completion.
3763e9ba47bfSShreyas Bhatewara 	 */
3764e9ba47bfSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
376593c65d13SYueHaibing 		usleep_range(1000, 2000);
3766e9ba47bfSShreyas Bhatewara 
3767e9ba47bfSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
3768e9ba47bfSShreyas Bhatewara 			     &adapter->state)) {
3769e9ba47bfSShreyas Bhatewara 		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3770e9ba47bfSShreyas Bhatewara 		return;
3771e9ba47bfSShreyas Bhatewara 	}
3772e9ba47bfSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3773e9ba47bfSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3774e9ba47bfSShreyas Bhatewara 			       VMXNET3_CMD_QUIESCE_DEV);
3775e9ba47bfSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3776e9ba47bfSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
3777e9ba47bfSShreyas Bhatewara 
3778e9ba47bfSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3779e9ba47bfSShreyas Bhatewara }
3780e9ba47bfSShreyas Bhatewara 
3781d1a890faSShreyas Bhatewara 
3782d1a890faSShreyas Bhatewara #ifdef CONFIG_PM
3783d1a890faSShreyas Bhatewara 
3784d1a890faSShreyas Bhatewara static int
3785d1a890faSShreyas Bhatewara vmxnet3_suspend(struct device *device)
3786d1a890faSShreyas Bhatewara {
3787d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = to_pci_dev(device);
3788d1a890faSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
3789d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3790d1a890faSShreyas Bhatewara 	struct Vmxnet3_PMConf *pmConf;
3791d1a890faSShreyas Bhatewara 	struct ethhdr *ehdr;
3792d1a890faSShreyas Bhatewara 	struct arphdr *ahdr;
3793d1a890faSShreyas Bhatewara 	u8 *arpreq;
3794d1a890faSShreyas Bhatewara 	struct in_device *in_dev;
3795d1a890faSShreyas Bhatewara 	struct in_ifaddr *ifa;
379683d0feffSShreyas Bhatewara 	unsigned long flags;
3797d1a890faSShreyas Bhatewara 	int i = 0;
3798d1a890faSShreyas Bhatewara 
3799d1a890faSShreyas Bhatewara 	if (!netif_running(netdev))
3800d1a890faSShreyas Bhatewara 		return 0;
3801d1a890faSShreyas Bhatewara 
380251956cd6SShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
380351956cd6SShreyas Bhatewara 		napi_disable(&adapter->rx_queue[i].napi);
380451956cd6SShreyas Bhatewara 
3805d1a890faSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
3806d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
3807d1a890faSShreyas Bhatewara 	vmxnet3_free_intr_resources(adapter);
3808d1a890faSShreyas Bhatewara 
3809d1a890faSShreyas Bhatewara 	netif_device_detach(netdev);
381009c5088eSShreyas Bhatewara 	netif_tx_stop_all_queues(netdev);
3811d1a890faSShreyas Bhatewara 
3812d1a890faSShreyas Bhatewara 	/* Create wake-up filters. */
3813d1a890faSShreyas Bhatewara 	pmConf = adapter->pm_conf;
3814d1a890faSShreyas Bhatewara 	memset(pmConf, 0, sizeof(*pmConf));
3815d1a890faSShreyas Bhatewara 
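	/* Each bit in a filter mask enables comparison of one byte of the
	 * corresponding pattern.
	 */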
3816d1a890faSShreyas Bhatewara 	if (adapter->wol & WAKE_UCAST) {
3817d1a890faSShreyas Bhatewara 		pmConf->filters[i].patternSize = ETH_ALEN;
3818d1a890faSShreyas Bhatewara 		pmConf->filters[i].maskSize = 1;
3819d1a890faSShreyas Bhatewara 		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3820d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3821d1a890faSShreyas Bhatewara 
38223843e515SHarvey Harrison 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3823d1a890faSShreyas Bhatewara 		i++;
3824d1a890faSShreyas Bhatewara 	}
3825d1a890faSShreyas Bhatewara 
3826d1a890faSShreyas Bhatewara 	if (adapter->wol & WAKE_ARP) {
38272638eb8bSFlorian Westphal 		rcu_read_lock();
3828d1a890faSShreyas Bhatewara 
38292638eb8bSFlorian Westphal 		in_dev = __in_dev_get_rcu(netdev);
38302638eb8bSFlorian Westphal 		if (!in_dev) {
38312638eb8bSFlorian Westphal 			rcu_read_unlock();
3832d1a890faSShreyas Bhatewara 			goto skip_arp;
38332638eb8bSFlorian Westphal 		}
38342638eb8bSFlorian Westphal 
38352638eb8bSFlorian Westphal 		ifa = rcu_dereference(in_dev->ifa_list);
38362638eb8bSFlorian Westphal 		if (!ifa) {
38372638eb8bSFlorian Westphal 			rcu_read_unlock();
38382638eb8bSFlorian Westphal 			goto skip_arp;
38392638eb8bSFlorian Westphal 		}
3840d1a890faSShreyas Bhatewara 
3841d1a890faSShreyas Bhatewara 		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3842d1a890faSShreyas Bhatewara 			sizeof(struct arphdr) +		/* ARP header */
3843d1a890faSShreyas Bhatewara 			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
3844d1a890faSShreyas Bhatewara 			2 * sizeof(u32);	/*2 IPv4 addresses */
3845d1a890faSShreyas Bhatewara 		pmConf->filters[i].maskSize =
3846d1a890faSShreyas Bhatewara 			(pmConf->filters[i].patternSize - 1) / 8 + 1;
3847d1a890faSShreyas Bhatewara 
3848d1a890faSShreyas Bhatewara 		/* ETH_P_ARP in Ethernet header. */
3849d1a890faSShreyas Bhatewara 		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3850d1a890faSShreyas Bhatewara 		ehdr->h_proto = htons(ETH_P_ARP);
3851d1a890faSShreyas Bhatewara 
3852d1a890faSShreyas Bhatewara 		/* ARPOP_REQUEST in ARP header. */
3853d1a890faSShreyas Bhatewara 		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3854d1a890faSShreyas Bhatewara 		ahdr->ar_op = htons(ARPOP_REQUEST);
3855d1a890faSShreyas Bhatewara 		arpreq = (u8 *)(ahdr + 1);
3856d1a890faSShreyas Bhatewara 
3857d1a890faSShreyas Bhatewara 		/* The Unicast IPv4 address in 'tip' field. */
3858d1a890faSShreyas Bhatewara 		arpreq += 2 * ETH_ALEN + sizeof(u32);
38592638eb8bSFlorian Westphal 		*(__be32 *)arpreq = ifa->ifa_address;
38602638eb8bSFlorian Westphal 
38612638eb8bSFlorian Westphal 		rcu_read_unlock();
3862d1a890faSShreyas Bhatewara 
3863d1a890faSShreyas Bhatewara 		/* The mask for the relevant bits. */
3864d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[0] = 0x00;
3865d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3866d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3867d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[3] = 0x00;
3868d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3869d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3870d1a890faSShreyas Bhatewara 
38713843e515SHarvey Harrison 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3872d1a890faSShreyas Bhatewara 		i++;
3873d1a890faSShreyas Bhatewara 	}
3874d1a890faSShreyas Bhatewara 
3875d1a890faSShreyas Bhatewara skip_arp:
3876d1a890faSShreyas Bhatewara 	if (adapter->wol & WAKE_MAGIC)
38773843e515SHarvey Harrison 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3878d1a890faSShreyas Bhatewara 
3879d1a890faSShreyas Bhatewara 	pmConf->numFilters = i;
3880d1a890faSShreyas Bhatewara 
3881115924b6SShreyas Bhatewara 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3882115924b6SShreyas Bhatewara 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3883115924b6SShreyas Bhatewara 								  *pmConf));
3884b0eb57cbSAndy King 	adapter->shared->devRead.pmConfDesc.confPA =
3885b0eb57cbSAndy King 		cpu_to_le64(adapter->pm_conf_pa);
3886d1a890faSShreyas Bhatewara 
388783d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3888d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3889d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_UPDATE_PMCFG);
389083d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3891d1a890faSShreyas Bhatewara 
3892d1a890faSShreyas Bhatewara 	pci_save_state(pdev);
3893d1a890faSShreyas Bhatewara 	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3894d1a890faSShreyas Bhatewara 			adapter->wol);
3895d1a890faSShreyas Bhatewara 	pci_disable_device(pdev);
3896d1a890faSShreyas Bhatewara 	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3897d1a890faSShreyas Bhatewara 
3898d1a890faSShreyas Bhatewara 	return 0;
3899d1a890faSShreyas Bhatewara }
3900d1a890faSShreyas Bhatewara 
3901d1a890faSShreyas Bhatewara 
3902d1a890faSShreyas Bhatewara static int
3903d1a890faSShreyas Bhatewara vmxnet3_resume(struct device *device)
3904d1a890faSShreyas Bhatewara {
39055ec82c1eSShrikrishna Khare 	int err;
390683d0feffSShreyas Bhatewara 	unsigned long flags;
3907d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = to_pci_dev(device);
3908d1a890faSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
3909d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3910d1a890faSShreyas Bhatewara 
3911d1a890faSShreyas Bhatewara 	if (!netif_running(netdev))
3912d1a890faSShreyas Bhatewara 		return 0;
3913d1a890faSShreyas Bhatewara 
3914d1a890faSShreyas Bhatewara 	pci_set_power_state(pdev, PCI_D0);
3915d1a890faSShreyas Bhatewara 	pci_restore_state(pdev);
3916d1a890faSShreyas Bhatewara 	err = pci_enable_device_mem(pdev);
3917d1a890faSShreyas Bhatewara 	if (err != 0)
3918d1a890faSShreyas Bhatewara 		return err;
3919d1a890faSShreyas Bhatewara 
3920d1a890faSShreyas Bhatewara 	pci_enable_wake(pdev, PCI_D0, 0);
3921d1a890faSShreyas Bhatewara 
39225ec82c1eSShrikrishna Khare 	vmxnet3_alloc_intr_resources(adapter);
39235ec82c1eSShrikrishna Khare 
39245ec82c1eSShrikrishna Khare 	/* During hibernate and suspend, device has to be reinitialized as the
39255ec82c1eSShrikrishna Khare 	 * device state is not guaranteed to be preserved.
39265ec82c1eSShrikrishna Khare 	 */
39275ec82c1eSShrikrishna Khare 
39285ec82c1eSShrikrishna Khare 	/* Need not check adapter state as other reset tasks cannot run during
39295ec82c1eSShrikrishna Khare 	 * device resume.
39305ec82c1eSShrikrishna Khare 	 */
393183d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3932d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
39335ec82c1eSShrikrishna Khare 			       VMXNET3_CMD_QUIESCE_DEV);
393483d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
39355ec82c1eSShrikrishna Khare 	vmxnet3_tq_cleanup_all(adapter);
39365ec82c1eSShrikrishna Khare 	vmxnet3_rq_cleanup_all(adapter);
39375ec82c1eSShrikrishna Khare 
39385ec82c1eSShrikrishna Khare 	vmxnet3_reset_dev(adapter);
39395ec82c1eSShrikrishna Khare 	err = vmxnet3_activate_dev(adapter);
39405ec82c1eSShrikrishna Khare 	if (err != 0) {
39415ec82c1eSShrikrishna Khare 		netdev_err(netdev,
39425ec82c1eSShrikrishna Khare 			   "failed to re-activate on resume, error: %d", err);
39435ec82c1eSShrikrishna Khare 		vmxnet3_force_close(adapter);
39445ec82c1eSShrikrishna Khare 		return err;
39455ec82c1eSShrikrishna Khare 	}
39465ec82c1eSShrikrishna Khare 	netif_device_attach(netdev);
3947d1a890faSShreyas Bhatewara 
3948d1a890faSShreyas Bhatewara 	return 0;
3949d1a890faSShreyas Bhatewara }
3950d1a890faSShreyas Bhatewara 
395147145210SAlexey Dobriyan static const struct dev_pm_ops vmxnet3_pm_ops = {
3952d1a890faSShreyas Bhatewara 	.suspend = vmxnet3_suspend,
3953d1a890faSShreyas Bhatewara 	.resume = vmxnet3_resume,
39545ec82c1eSShrikrishna Khare 	.freeze = vmxnet3_suspend,
39555ec82c1eSShrikrishna Khare 	.restore = vmxnet3_resume,
3956d1a890faSShreyas Bhatewara };
3957d1a890faSShreyas Bhatewara #endif
3958d1a890faSShreyas Bhatewara 
3959d1a890faSShreyas Bhatewara static struct pci_driver vmxnet3_driver = {
3960d1a890faSShreyas Bhatewara 	.name		= vmxnet3_driver_name,
3961d1a890faSShreyas Bhatewara 	.id_table	= vmxnet3_pciid_table,
3962d1a890faSShreyas Bhatewara 	.probe		= vmxnet3_probe_device,
39633a4751a3SBill Pemberton 	.remove		= vmxnet3_remove_device,
3964e9ba47bfSShreyas Bhatewara 	.shutdown	= vmxnet3_shutdown_device,
3965d1a890faSShreyas Bhatewara #ifdef CONFIG_PM
3966d1a890faSShreyas Bhatewara 	.driver.pm	= &vmxnet3_pm_ops,
3967d1a890faSShreyas Bhatewara #endif
3968d1a890faSShreyas Bhatewara };
3969d1a890faSShreyas Bhatewara 
3970d1a890faSShreyas Bhatewara 
3971d1a890faSShreyas Bhatewara static int __init
3972d1a890faSShreyas Bhatewara vmxnet3_init_module(void)
3973d1a890faSShreyas Bhatewara {
3974204a6e65SStephen Hemminger 	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
3975d1a890faSShreyas Bhatewara 		VMXNET3_DRIVER_VERSION_REPORT);
3976d1a890faSShreyas Bhatewara 	return pci_register_driver(&vmxnet3_driver);
3977d1a890faSShreyas Bhatewara }
3978d1a890faSShreyas Bhatewara 
3979d1a890faSShreyas Bhatewara module_init(vmxnet3_init_module);
3980d1a890faSShreyas Bhatewara 
3981d1a890faSShreyas Bhatewara 
3982d1a890faSShreyas Bhatewara static void
3983d1a890faSShreyas Bhatewara vmxnet3_exit_module(void)
3984d1a890faSShreyas Bhatewara {
3985d1a890faSShreyas Bhatewara 	pci_unregister_driver(&vmxnet3_driver);
3986d1a890faSShreyas Bhatewara }
3987d1a890faSShreyas Bhatewara 
3988d1a890faSShreyas Bhatewara module_exit(vmxnet3_exit_module);
3989d1a890faSShreyas Bhatewara 
3990d1a890faSShreyas Bhatewara MODULE_AUTHOR("VMware, Inc.");
3991d1a890faSShreyas Bhatewara MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3992d1a890faSShreyas Bhatewara MODULE_LICENSE("GPL v2");
3993d1a890faSShreyas Bhatewara MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
3994