/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 *    Enable/Disable the given intr. Writing 0 to the interrupt mask
 *    register (IMR) entry unmasks the interrupt; writing 1 masks it.
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}
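
/*
 * Note the ordering above: vmxnet3_enable_all_intrs() clears the shared
 * intrCtrl "disable all" flag only after every vector has been unmasked,
 * while vmxnet3_disable_all_intrs() sets that flag before masking any
 * vector.
 */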


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little-endian format. When the CPU is big endian, the following routines
 * are used to correctly read from and write to the device's ABI.
 * The general technique is: double-word bitfields are defined in the
 * opposite order for big-endian architectures. Before the driver reads
 * them, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is
 * used to translate the double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}
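
/*
 * Worked example (illustrative): for a double word whose CPU value is
 * 0x00004000, get_bitfield32(&dword, 14, 1) builds the mask
 * ((1 << 1) - 1) << 14 == 0x4000, ands it with the value and shifts
 * right by 14, returning 1 -- i.e. it extracts the single bit at
 * position 14.
 */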



#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */
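
/*
 * Either way, callers use the accessors uniformly; e.g.
 * VMXNET3_TCD_GET_GEN(&gdesc->tcd) in vmxnet3_tq_tx_complete() reads a
 * completion descriptor's gen bit regardless of host endianness.
 */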


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * rather than fewer, tx ring entries as available, the worst
		 * case is that the tx routine incorrectly re-queues a pkt
		 * due to insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


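/*
 * Reclaim completed tx descriptors: walk the completion ring while the
 * descriptor's gen bit matches the ring's current generation, unmap and
 * free each completed packet, and wake the (stopped) queue once enough
 * ring entries are available again.
 */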
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
			struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate data ring\n");
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len  != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}
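
/*
 * Note that skb->truesize above grows by a full PAGE_SIZE rather than by
 * rcd->len: the entire rx page is handed to the skb, so the whole page is
 * accounted against the socket regardless of how much of it the device
 * actually filled.
 */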


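/*
 * Map the packet for DMA and fill tx descriptors for it: an optional SOP
 * descriptor pointing at headers already copied into the data ring, then
 * one or more descriptors for the linear part, then one or more per page
 * fragment. The SOP descriptor is written with the previous generation
 * bit, so the device ignores the chain until the caller flips that bit.
 */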
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				"txd[%u]: 0x%llx %u %u\n",
				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 *    parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  an error occurred while parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is the # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					ctx->l4_hdr_size = sizeof(struct udphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
					     skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
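
/*
 * For TSO the device computes the TCP checksum of each segment itself;
 * the driver only seeds tcph->check with the pseudo-header checksum
 * (computed over a zero payload length above) and, for IPv4, zeroes
 * iph->check so the device can fill in per-segment values.
 */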

static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}
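
/*
 * Worked example (assuming VMXNET3_TXD_NEEDED(size) is the round-up of
 * size / VMXNET3_MAX_TX_BUF_SIZE, i.e. size / 16384): an skb with a
 * 100-byte linear part and one 20000-byte frag estimates
 * 1 + 1 + 2 = 4 descriptors. The extra "+ 1" reserves room for the SOP
 * descriptor used when headers are copied into the data ring.
 */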

/*
 * Transmits a pkt through a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
 *    NETDEV_TX_OK:      an error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */
916d1a890faSShreyas Bhatewara 
917d1a890faSShreyas Bhatewara static int
918d1a890faSShreyas Bhatewara vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
919d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter *adapter, struct net_device *netdev)
920d1a890faSShreyas Bhatewara {
921d1a890faSShreyas Bhatewara 	int ret;
922d1a890faSShreyas Bhatewara 	u32 count;
923d1a890faSShreyas Bhatewara 	unsigned long flags;
924d1a890faSShreyas Bhatewara 	struct vmxnet3_tx_ctx ctx;
925d1a890faSShreyas Bhatewara 	union Vmxnet3_GenericDesc *gdesc;
926115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
927115924b6SShreyas Bhatewara 	/* Use temporary descriptor to avoid touching bits multiple times */
928115924b6SShreyas Bhatewara 	union Vmxnet3_GenericDesc tempTxDesc;
929115924b6SShreyas Bhatewara #endif
930d1a890faSShreyas Bhatewara 
931a4d7e485SEric Dumazet 	count = txd_estimate(skb);
932d1a890faSShreyas Bhatewara 
93372e85c45SJesse Gross 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
934d1a890faSShreyas Bhatewara 
935d1a890faSShreyas Bhatewara 	ctx.mss = skb_shinfo(skb)->gso_size;
936d1a890faSShreyas Bhatewara 	if (ctx.mss) {
937d1a890faSShreyas Bhatewara 		if (skb_header_cloned(skb)) {
938d1a890faSShreyas Bhatewara 			if (unlikely(pskb_expand_head(skb, 0, 0,
939d1a890faSShreyas Bhatewara 						      GFP_ATOMIC) != 0)) {
940d1a890faSShreyas Bhatewara 				tq->stats.drop_tso++;
941d1a890faSShreyas Bhatewara 				goto drop_pkt;
942d1a890faSShreyas Bhatewara 			}
943d1a890faSShreyas Bhatewara 			tq->stats.copy_skb_header++;
944d1a890faSShreyas Bhatewara 		}
945d1a890faSShreyas Bhatewara 		vmxnet3_prepare_tso(skb, &ctx);
946d1a890faSShreyas Bhatewara 	} else {
947d1a890faSShreyas Bhatewara 		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
948d1a890faSShreyas Bhatewara 
949d1a890faSShreyas Bhatewara 			/* non-tso pkts must not use more than
950d1a890faSShreyas Bhatewara 			 * VMXNET3_MAX_TXD_PER_PKT entries
951d1a890faSShreyas Bhatewara 			 */
952d1a890faSShreyas Bhatewara 			if (skb_linearize(skb) != 0) {
953d1a890faSShreyas Bhatewara 				tq->stats.drop_too_many_frags++;
954d1a890faSShreyas Bhatewara 				goto drop_pkt;
955d1a890faSShreyas Bhatewara 			}
956d1a890faSShreyas Bhatewara 			tq->stats.linearized++;
957d1a890faSShreyas Bhatewara 
958d1a890faSShreyas Bhatewara 			/* recalculate the # of descriptors to use */
959d1a890faSShreyas Bhatewara 			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
960d1a890faSShreyas Bhatewara 		}
961d1a890faSShreyas Bhatewara 	}
962d1a890faSShreyas Bhatewara 
96309c5088eSShreyas Bhatewara 	spin_lock_irqsave(&tq->tx_lock, flags);
96409c5088eSShreyas Bhatewara 
96509c5088eSShreyas Bhatewara 	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
96609c5088eSShreyas Bhatewara 		tq->stats.tx_ring_full++;
967fdcd79b9SStephen Hemminger 		netdev_dbg(adapter->netdev,
96809c5088eSShreyas Bhatewara 			"tx queue stopped on %s, next2comp %u"
96909c5088eSShreyas Bhatewara 			" next2fill %u\n", adapter->netdev->name,
97009c5088eSShreyas Bhatewara 			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
97109c5088eSShreyas Bhatewara 
97209c5088eSShreyas Bhatewara 		vmxnet3_tq_stop(tq, adapter);
97309c5088eSShreyas Bhatewara 		spin_unlock_irqrestore(&tq->tx_lock, flags);
97409c5088eSShreyas Bhatewara 		return NETDEV_TX_BUSY;
97509c5088eSShreyas Bhatewara 	}
97609c5088eSShreyas Bhatewara 
97709c5088eSShreyas Bhatewara 
978d1a890faSShreyas Bhatewara 	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
979d1a890faSShreyas Bhatewara 	if (ret >= 0) {
980d1a890faSShreyas Bhatewara 		BUG_ON(ret <= 0 && ctx.copy_size != 0);
981d1a890faSShreyas Bhatewara 		/* hdrs parsed, check against other limits */
982d1a890faSShreyas Bhatewara 		if (ctx.mss) {
983d1a890faSShreyas Bhatewara 			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
984d1a890faSShreyas Bhatewara 				     VMXNET3_MAX_TX_BUF_SIZE)) {
985d1a890faSShreyas Bhatewara 				goto hdr_too_big;
986d1a890faSShreyas Bhatewara 			}
987d1a890faSShreyas Bhatewara 		} else {
988d1a890faSShreyas Bhatewara 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
989d1a890faSShreyas Bhatewara 				if (unlikely(ctx.eth_ip_hdr_size +
990d1a890faSShreyas Bhatewara 					     skb->csum_offset >
991d1a890faSShreyas Bhatewara 					     VMXNET3_MAX_CSUM_OFFSET)) {
992d1a890faSShreyas Bhatewara 					goto hdr_too_big;
993d1a890faSShreyas Bhatewara 				}
994d1a890faSShreyas Bhatewara 			}
995d1a890faSShreyas Bhatewara 		}
996d1a890faSShreyas Bhatewara 	} else {
997d1a890faSShreyas Bhatewara 		tq->stats.drop_hdr_inspect_err++;
998f955e141SDan Carpenter 		goto unlock_drop_pkt;
999d1a890faSShreyas Bhatewara 	}
1000d1a890faSShreyas Bhatewara 
1001d1a890faSShreyas Bhatewara 	/* fill tx descs related to addr & len */
1002d1a890faSShreyas Bhatewara 	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
1003d1a890faSShreyas Bhatewara 
1004d1a890faSShreyas Bhatewara 	/* setup the EOP desc */
1005115924b6SShreyas Bhatewara 	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1006d1a890faSShreyas Bhatewara 
1007d1a890faSShreyas Bhatewara 	/* setup the SOP desc */
1008115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1009115924b6SShreyas Bhatewara 	gdesc = &tempTxDesc;
1010115924b6SShreyas Bhatewara 	gdesc->dword[2] = ctx.sop_txd->dword[2];
1011115924b6SShreyas Bhatewara 	gdesc->dword[3] = ctx.sop_txd->dword[3];
1012115924b6SShreyas Bhatewara #else
1013d1a890faSShreyas Bhatewara 	gdesc = ctx.sop_txd;
1014115924b6SShreyas Bhatewara #endif
1015d1a890faSShreyas Bhatewara 	if (ctx.mss) {
1016d1a890faSShreyas Bhatewara 		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1017d1a890faSShreyas Bhatewara 		gdesc->txd.om = VMXNET3_OM_TSO;
1018d1a890faSShreyas Bhatewara 		gdesc->txd.msscof = ctx.mss;
1019115924b6SShreyas Bhatewara 		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
1020115924b6SShreyas Bhatewara 			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
1021d1a890faSShreyas Bhatewara 	} else {
1022d1a890faSShreyas Bhatewara 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1023d1a890faSShreyas Bhatewara 			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
1024d1a890faSShreyas Bhatewara 			gdesc->txd.om = VMXNET3_OM_CSUM;
1025d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
1026d1a890faSShreyas Bhatewara 					    skb->csum_offset;
1027d1a890faSShreyas Bhatewara 		} else {
1028d1a890faSShreyas Bhatewara 			gdesc->txd.om = 0;
1029d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = 0;
1030d1a890faSShreyas Bhatewara 		}
1031115924b6SShreyas Bhatewara 		le32_add_cpu(&tq->shared->txNumDeferred, 1);
1032d1a890faSShreyas Bhatewara 	}
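	/* txNumDeferred now counts the pkts queued to the device but not yet
	 * announced via the doorbell: one per TSO segment (hence the round-up
	 * by mss above), or one for a plain pkt.
	 */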
1033d1a890faSShreyas Bhatewara 
1034d1a890faSShreyas Bhatewara 	if (vlan_tx_tag_present(skb)) {
1035d1a890faSShreyas Bhatewara 		gdesc->txd.ti = 1;
1036d1a890faSShreyas Bhatewara 		gdesc->txd.tci = vlan_tx_tag_get(skb);
1037d1a890faSShreyas Bhatewara 	}
1038d1a890faSShreyas Bhatewara 
1039115924b6SShreyas Bhatewara 	/* finally flip the GEN bit of the SOP desc. to hand the pkt to the device */
1040115924b6SShreyas Bhatewara 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1041115924b6SShreyas Bhatewara 						  VMXNET3_TXD_GEN);
1042115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1043115924b6SShreyas Bhatewara 	/* Finished updating the bitfields of the Tx desc, so write them back to
1044115924b6SShreyas Bhatewara 	 * their original place.
1045115924b6SShreyas Bhatewara 	 */
1046115924b6SShreyas Bhatewara 	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1047115924b6SShreyas Bhatewara 			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1048115924b6SShreyas Bhatewara 	gdesc = ctx.sop_txd;
1049115924b6SShreyas Bhatewara #endif
1050fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev,
1051f6965582SRandy Dunlap 		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1052c2fd03a0SJoe Perches 		(u32)(ctx.sop_txd -
1053115924b6SShreyas Bhatewara 		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1054115924b6SShreyas Bhatewara 		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1055d1a890faSShreyas Bhatewara 
1056d1a890faSShreyas Bhatewara 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1057d1a890faSShreyas Bhatewara 
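	/* Batch doorbell writes: only touch the TXPROD register once the
	 * number of deferred pkts reaches the txThreshold published in the
	 * queue's shared area.
	 */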
1058115924b6SShreyas Bhatewara 	if (le32_to_cpu(tq->shared->txNumDeferred) >=
1059115924b6SShreyas Bhatewara 					le32_to_cpu(tq->shared->txThreshold)) {
1060d1a890faSShreyas Bhatewara 		tq->shared->txNumDeferred = 0;
106109c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
106209c5088eSShreyas Bhatewara 				       VMXNET3_REG_TXPROD + tq->qid * 8,
1063d1a890faSShreyas Bhatewara 				       tq->tx_ring.next2fill);
1064d1a890faSShreyas Bhatewara 	}
1065d1a890faSShreyas Bhatewara 
1066d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1067d1a890faSShreyas Bhatewara 
1068d1a890faSShreyas Bhatewara hdr_too_big:
1069d1a890faSShreyas Bhatewara 	tq->stats.drop_oversized_hdr++;
1070f955e141SDan Carpenter unlock_drop_pkt:
1071f955e141SDan Carpenter 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1072d1a890faSShreyas Bhatewara drop_pkt:
1073d1a890faSShreyas Bhatewara 	tq->stats.drop_total++;
1074d1a890faSShreyas Bhatewara 	dev_kfree_skb(skb);
1075d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1076d1a890faSShreyas Bhatewara }
1077d1a890faSShreyas Bhatewara 
1078d1a890faSShreyas Bhatewara 
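/*
 * ndo_start_xmit entry point. A minimal sketch of how it is wired up (the
 * actual registration lives in this file's net_device_ops table):
 *
 *	static const struct net_device_ops vmxnet3_netdev_ops = {
 *		.ndo_start_xmit = vmxnet3_xmit_frame,
 *		...
 *	};
 */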
1079d1a890faSShreyas Bhatewara static netdev_tx_t
1080d1a890faSShreyas Bhatewara vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1081d1a890faSShreyas Bhatewara {
1082d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1083d1a890faSShreyas Bhatewara 
108409c5088eSShreyas Bhatewara 	BUG_ON(skb->queue_mapping >= adapter->num_tx_queues);
108509c5088eSShreyas Bhatewara 	return vmxnet3_tq_xmit(skb,
108609c5088eSShreyas Bhatewara 			       &adapter->tx_queue[skb->queue_mapping],
108709c5088eSShreyas Bhatewara 			       adapter, netdev);
1088d1a890faSShreyas Bhatewara }
1089d1a890faSShreyas Bhatewara 
1090d1a890faSShreyas Bhatewara 
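/*
 * Set the checksum state of the skb based on the rx completion descriptor:
 * rcd.cnc set means the device did not compute any checksum for this pkt.
 */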
1091d1a890faSShreyas Bhatewara static void
1092d1a890faSShreyas Bhatewara vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1093d1a890faSShreyas Bhatewara 		struct sk_buff *skb,
1094d1a890faSShreyas Bhatewara 		union Vmxnet3_GenericDesc *gdesc)
1095d1a890faSShreyas Bhatewara {
1096a0d2730cSMichał Mirosław 	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1097d1a890faSShreyas Bhatewara 		/* typical case: TCP/UDP over IP and both csums are correct */
1098115924b6SShreyas Bhatewara 		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
1099d1a890faSShreyas Bhatewara 							VMXNET3_RCD_CSUM_OK) {
1100d1a890faSShreyas Bhatewara 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1101d1a890faSShreyas Bhatewara 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1102d1a890faSShreyas Bhatewara 			BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
1103d1a890faSShreyas Bhatewara 			BUG_ON(gdesc->rcd.frg);
1104d1a890faSShreyas Bhatewara 		} else {
1105d1a890faSShreyas Bhatewara 			if (gdesc->rcd.csum) {
1106d1a890faSShreyas Bhatewara 				skb->csum = htons(gdesc->rcd.csum);
1107d1a890faSShreyas Bhatewara 				skb->ip_summed = CHECKSUM_PARTIAL;
1108d1a890faSShreyas Bhatewara 			} else {
1109bc8acf2cSEric Dumazet 				skb_checksum_none_assert(skb);
1110d1a890faSShreyas Bhatewara 			}
1111d1a890faSShreyas Bhatewara 		}
1112d1a890faSShreyas Bhatewara 	} else {
1113bc8acf2cSEric Dumazet 		skb_checksum_none_assert(skb);
1114d1a890faSShreyas Bhatewara 	}
1115d1a890faSShreyas Bhatewara }
1116d1a890faSShreyas Bhatewara 
1117d1a890faSShreyas Bhatewara 
1118d1a890faSShreyas Bhatewara static void
1119d1a890faSShreyas Bhatewara vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1120d1a890faSShreyas Bhatewara 		 struct vmxnet3_rx_ctx *ctx,  struct vmxnet3_adapter *adapter)
1121d1a890faSShreyas Bhatewara {
1122d1a890faSShreyas Bhatewara 	rq->stats.drop_err++;
1123d1a890faSShreyas Bhatewara 	if (!rcd->fcs)
1124d1a890faSShreyas Bhatewara 		rq->stats.drop_fcs++;
1125d1a890faSShreyas Bhatewara 
1126d1a890faSShreyas Bhatewara 	rq->stats.drop_total++;
1127d1a890faSShreyas Bhatewara 
1128d1a890faSShreyas Bhatewara 	/*
1129d1a890faSShreyas Bhatewara 	 * We do not unmap and chain the rx buffer to the skb.
1130d1a890faSShreyas Bhatewara 	 * We basically pretend this buffer is not used and will be recycled
1131d1a890faSShreyas Bhatewara 	 * by vmxnet3_rq_alloc_rx_buf()
1132d1a890faSShreyas Bhatewara 	 */
1133d1a890faSShreyas Bhatewara 
1134d1a890faSShreyas Bhatewara 	/*
1135d1a890faSShreyas Bhatewara 	 * ctx->skb may be NULL if this is the first and the only one
1136d1a890faSShreyas Bhatewara 	 * desc for the pkt
1137d1a890faSShreyas Bhatewara 	 */
1138d1a890faSShreyas Bhatewara 	if (ctx->skb)
1139d1a890faSShreyas Bhatewara 		dev_kfree_skb_irq(ctx->skb);
1140d1a890faSShreyas Bhatewara 
1141d1a890faSShreyas Bhatewara 	ctx->skb = NULL;
1142d1a890faSShreyas Bhatewara }
1143d1a890faSShreyas Bhatewara 
1144d1a890faSShreyas Bhatewara 
1145d1a890faSShreyas Bhatewara static int
1146d1a890faSShreyas Bhatewara vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1147d1a890faSShreyas Bhatewara 		       struct vmxnet3_adapter *adapter, int quota)
1148d1a890faSShreyas Bhatewara {
1149215faf9cSJoe Perches 	static const u32 rxprod_reg[2] = {
1150215faf9cSJoe Perches 		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1151215faf9cSJoe Perches 	};
1152d1a890faSShreyas Bhatewara 	u32 num_rxd = 0;
11535318d809SShreyas Bhatewara 	bool skip_page_frags = false;
1154d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxCompDesc *rcd;
1155d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1156115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1157115924b6SShreyas Bhatewara 	struct Vmxnet3_RxDesc rxCmdDesc;
1158115924b6SShreyas Bhatewara 	struct Vmxnet3_RxCompDesc rxComp;
1159115924b6SShreyas Bhatewara #endif
1160115924b6SShreyas Bhatewara 	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1161115924b6SShreyas Bhatewara 			  &rxComp);
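	/* A completion desc is owned by the driver only while its gen bit
	 * matches the ring's current gen; the expected value flips on every
	 * wrap-around of the ring.
	 */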
1162d1a890faSShreyas Bhatewara 	while (rcd->gen == rq->comp_ring.gen) {
1163d1a890faSShreyas Bhatewara 		struct vmxnet3_rx_buf_info *rbi;
11645318d809SShreyas Bhatewara 		struct sk_buff *skb, *new_skb = NULL;
11655318d809SShreyas Bhatewara 		struct page *new_page = NULL;
1166d1a890faSShreyas Bhatewara 		int num_to_alloc;
1167d1a890faSShreyas Bhatewara 		struct Vmxnet3_RxDesc *rxd;
1168d1a890faSShreyas Bhatewara 		u32 idx, ring_idx;
11695318d809SShreyas Bhatewara 		struct vmxnet3_cmd_ring	*ring = NULL;
1170d1a890faSShreyas Bhatewara 		if (num_rxd >= quota) {
1171d1a890faSShreyas Bhatewara 			/* we may stop even before we see the EOP desc of
1172d1a890faSShreyas Bhatewara 			 * the current pkt
1173d1a890faSShreyas Bhatewara 			 */
1174d1a890faSShreyas Bhatewara 			break;
1175d1a890faSShreyas Bhatewara 		}
1176d1a890faSShreyas Bhatewara 		num_rxd++;
117709c5088eSShreyas Bhatewara 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1178d1a890faSShreyas Bhatewara 		idx = rcd->rxdIdx;
117909c5088eSShreyas Bhatewara 		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
11805318d809SShreyas Bhatewara 		ring = rq->rx_ring + ring_idx;
1181115924b6SShreyas Bhatewara 		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1182115924b6SShreyas Bhatewara 				  &rxCmdDesc);
1183d1a890faSShreyas Bhatewara 		rbi = rq->buf_info[ring_idx] + idx;
1184d1a890faSShreyas Bhatewara 
1185115924b6SShreyas Bhatewara 		BUG_ON(rxd->addr != rbi->dma_addr ||
1186115924b6SShreyas Bhatewara 		       rxd->len != rbi->len);
1187d1a890faSShreyas Bhatewara 
1188d1a890faSShreyas Bhatewara 		if (unlikely(rcd->eop && rcd->err)) {
1189d1a890faSShreyas Bhatewara 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
1190d1a890faSShreyas Bhatewara 			goto rcd_done;
1191d1a890faSShreyas Bhatewara 		}
1192d1a890faSShreyas Bhatewara 
1193d1a890faSShreyas Bhatewara 		if (rcd->sop) { /* first buf of the pkt */
1194d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1195d1a890faSShreyas Bhatewara 			       rcd->rqID != rq->qid);
1196d1a890faSShreyas Bhatewara 
1197d1a890faSShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1198d1a890faSShreyas Bhatewara 			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1199d1a890faSShreyas Bhatewara 
1200d1a890faSShreyas Bhatewara 			if (unlikely(rcd->len == 0)) {
1201d1a890faSShreyas Bhatewara 				/* Pretend the rx buffer is skipped. */
1202d1a890faSShreyas Bhatewara 				BUG_ON(!(rcd->sop && rcd->eop));
1203fdcd79b9SStephen Hemminger 				netdev_dbg(adapter->netdev,
1204f6965582SRandy Dunlap 					"rxRing[%u][%u] 0 length\n",
1205d1a890faSShreyas Bhatewara 					ring_idx, idx);
1206d1a890faSShreyas Bhatewara 				goto rcd_done;
1207d1a890faSShreyas Bhatewara 			}
1208d1a890faSShreyas Bhatewara 
12095318d809SShreyas Bhatewara 			skip_page_frags = false;
1210d1a890faSShreyas Bhatewara 			ctx->skb = rbi->skb;
12110d735f13SStephen Hemminger 			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
12120d735f13SStephen Hemminger 							    rbi->len);
12135318d809SShreyas Bhatewara 			if (new_skb == NULL) {
12145318d809SShreyas Bhatewara 				/* Skb allocation failed, do not hand this
12155318d809SShreyas Bhatewara 				 * skb over to the stack. Reuse it. Drop the existing pkt.
12165318d809SShreyas Bhatewara 				 */
12175318d809SShreyas Bhatewara 				rq->stats.rx_buf_alloc_failure++;
12185318d809SShreyas Bhatewara 				ctx->skb = NULL;
12195318d809SShreyas Bhatewara 				rq->stats.drop_total++;
12205318d809SShreyas Bhatewara 				skip_page_frags = true;
12215318d809SShreyas Bhatewara 				goto rcd_done;
12225318d809SShreyas Bhatewara 			}
1223d1a890faSShreyas Bhatewara 
1224d1a890faSShreyas Bhatewara 			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
1225d1a890faSShreyas Bhatewara 					 PCI_DMA_FROMDEVICE);
1226d1a890faSShreyas Bhatewara 
12277db11f75SStephen Hemminger #ifdef VMXNET3_RSS
12287db11f75SStephen Hemminger 			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
12297db11f75SStephen Hemminger 			    (adapter->netdev->features & NETIF_F_RXHASH))
12307db11f75SStephen Hemminger 				ctx->skb->rxhash = le32_to_cpu(rcd->rssHash);
12317db11f75SStephen Hemminger #endif
1232d1a890faSShreyas Bhatewara 			skb_put(ctx->skb, rcd->len);
12335318d809SShreyas Bhatewara 
12345318d809SShreyas Bhatewara 			/* Immediate refill */
12355318d809SShreyas Bhatewara 			rbi->skb = new_skb;
12365318d809SShreyas Bhatewara 			rbi->dma_addr = pci_map_single(adapter->pdev,
12375318d809SShreyas Bhatewara 						       rbi->skb->data, rbi->len,
12385318d809SShreyas Bhatewara 						       PCI_DMA_FROMDEVICE);
12395318d809SShreyas Bhatewara 			rxd->addr = cpu_to_le64(rbi->dma_addr);
12405318d809SShreyas Bhatewara 			rxd->len = rbi->len;
12415318d809SShreyas Bhatewara 
1242d1a890faSShreyas Bhatewara 		} else {
12435318d809SShreyas Bhatewara 			BUG_ON(ctx->skb == NULL && !skip_page_frags);
12445318d809SShreyas Bhatewara 
1245d1a890faSShreyas Bhatewara 			/* non-SOP buffers must be body (page) buffers here */
12465318d809SShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1247d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1248d1a890faSShreyas Bhatewara 
12495318d809SShreyas Bhatewara 			/* If an SOP buffer was dropped, skip all
12505318d809SShreyas Bhatewara 			 * following non-SOP fragments. They will be reused.
12515318d809SShreyas Bhatewara 			 */
12525318d809SShreyas Bhatewara 			if (skip_page_frags)
12535318d809SShreyas Bhatewara 				goto rcd_done;
12545318d809SShreyas Bhatewara 
12555318d809SShreyas Bhatewara 			new_page = alloc_page(GFP_ATOMIC);
12565318d809SShreyas Bhatewara 			if (unlikely(new_page == NULL)) {
12575318d809SShreyas Bhatewara 				/* Replacement page frag could not be allocated.
12585318d809SShreyas Bhatewara 				 * Reuse this page. Drop the pkt and free the
12595318d809SShreyas Bhatewara 				 * skb which contained this page as a frag. Skip
12605318d809SShreyas Bhatewara 				 * processing all the following non-sop frags.
12615318d809SShreyas Bhatewara 				 */
12625318d809SShreyas Bhatewara 				rq->stats.rx_buf_alloc_failure++;
12635318d809SShreyas Bhatewara 				dev_kfree_skb(ctx->skb);
12645318d809SShreyas Bhatewara 				ctx->skb = NULL;
12655318d809SShreyas Bhatewara 				skip_page_frags = true;
12665318d809SShreyas Bhatewara 				goto rcd_done;
12675318d809SShreyas Bhatewara 			}
12685318d809SShreyas Bhatewara 
1269d1a890faSShreyas Bhatewara 			if (rcd->len) {
1270d1a890faSShreyas Bhatewara 				pci_unmap_page(adapter->pdev,
1271d1a890faSShreyas Bhatewara 					       rbi->dma_addr, rbi->len,
1272d1a890faSShreyas Bhatewara 					       PCI_DMA_FROMDEVICE);
1273d1a890faSShreyas Bhatewara 
1274d1a890faSShreyas Bhatewara 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
1275d1a890faSShreyas Bhatewara 			}
12765318d809SShreyas Bhatewara 
12775318d809SShreyas Bhatewara 			/* Immediate refill */
12785318d809SShreyas Bhatewara 			rbi->page = new_page;
12795318d809SShreyas Bhatewara 			rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
12805318d809SShreyas Bhatewara 						     0, PAGE_SIZE,
12815318d809SShreyas Bhatewara 						     PCI_DMA_FROMDEVICE);
12825318d809SShreyas Bhatewara 			rxd->addr = cpu_to_le64(rbi->dma_addr);
12835318d809SShreyas Bhatewara 			rxd->len = rbi->len;
1284d1a890faSShreyas Bhatewara 		}
12855318d809SShreyas Bhatewara 
1286d1a890faSShreyas Bhatewara 
1287d1a890faSShreyas Bhatewara 		skb = ctx->skb;
1288d1a890faSShreyas Bhatewara 		if (rcd->eop) {
1289d1a890faSShreyas Bhatewara 			skb->len += skb->data_len;
1290d1a890faSShreyas Bhatewara 
1291d1a890faSShreyas Bhatewara 			vmxnet3_rx_csum(adapter, skb,
1292d1a890faSShreyas Bhatewara 					(union Vmxnet3_GenericDesc *)rcd);
1293d1a890faSShreyas Bhatewara 			skb->protocol = eth_type_trans(skb, adapter->netdev);
1294d1a890faSShreyas Bhatewara 
129572e85c45SJesse Gross 			if (unlikely(rcd->ts))
129672e85c45SJesse Gross 				__vlan_hwaccel_put_tag(skb, rcd->tci);
129772e85c45SJesse Gross 
1298213ade8cSJesse Gross 			if (adapter->netdev->features & NETIF_F_LRO)
1299d1a890faSShreyas Bhatewara 				netif_receive_skb(skb);
1300213ade8cSJesse Gross 			else
1301213ade8cSJesse Gross 				napi_gro_receive(&rq->napi, skb);
1302d1a890faSShreyas Bhatewara 
1303d1a890faSShreyas Bhatewara 			ctx->skb = NULL;
1304d1a890faSShreyas Bhatewara 		}
1305d1a890faSShreyas Bhatewara 
1306d1a890faSShreyas Bhatewara rcd_done:
13075318d809SShreyas Bhatewara 		/* device may have skipped some rx descs */
13085318d809SShreyas Bhatewara 		ring->next2comp = idx;
13095318d809SShreyas Bhatewara 		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
13105318d809SShreyas Bhatewara 		ring = rq->rx_ring + ring_idx;
13115318d809SShreyas Bhatewara 		while (num_to_alloc) {
13125318d809SShreyas Bhatewara 			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
13135318d809SShreyas Bhatewara 					  &rxCmdDesc);
13145318d809SShreyas Bhatewara 			BUG_ON(!rxd->addr);
1315d1a890faSShreyas Bhatewara 
13165318d809SShreyas Bhatewara 			/* Recv desc is ready to be used by the device */
13175318d809SShreyas Bhatewara 			rxd->gen = ring->gen;
13185318d809SShreyas Bhatewara 			vmxnet3_cmd_ring_adv_next2fill(ring);
13195318d809SShreyas Bhatewara 			num_to_alloc--;
13205318d809SShreyas Bhatewara 		}
1321d1a890faSShreyas Bhatewara 
1322d1a890faSShreyas Bhatewara 		/* if needed, update the register */
1323d1a890faSShreyas Bhatewara 		if (unlikely(rq->shared->updateRxProd)) {
1324d1a890faSShreyas Bhatewara 			VMXNET3_WRITE_BAR0_REG(adapter,
1325d1a890faSShreyas Bhatewara 					       rxprod_reg[ring_idx] + rq->qid * 8,
13265318d809SShreyas Bhatewara 					       ring->next2fill);
1327d1a890faSShreyas Bhatewara 		}
1328d1a890faSShreyas Bhatewara 
1329d1a890faSShreyas Bhatewara 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1330115924b6SShreyas Bhatewara 		vmxnet3_getRxComp(rcd,
1331115924b6SShreyas Bhatewara 				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1332d1a890faSShreyas Bhatewara 	}
1333d1a890faSShreyas Bhatewara 
1334d1a890faSShreyas Bhatewara 	return num_rxd;
1335d1a890faSShreyas Bhatewara }
1336d1a890faSShreyas Bhatewara 
1337d1a890faSShreyas Bhatewara 
1338d1a890faSShreyas Bhatewara static void
1339d1a890faSShreyas Bhatewara vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1340d1a890faSShreyas Bhatewara 		   struct vmxnet3_adapter *adapter)
1341d1a890faSShreyas Bhatewara {
1342d1a890faSShreyas Bhatewara 	u32 i, ring_idx;
1343d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxDesc *rxd;
1344d1a890faSShreyas Bhatewara 
1345d1a890faSShreyas Bhatewara 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1346d1a890faSShreyas Bhatewara 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1347115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1348115924b6SShreyas Bhatewara 			struct Vmxnet3_RxDesc rxDesc;
1349115924b6SShreyas Bhatewara #endif
1350115924b6SShreyas Bhatewara 			vmxnet3_getRxDesc(rxd,
1351115924b6SShreyas Bhatewara 				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1352d1a890faSShreyas Bhatewara 
1353d1a890faSShreyas Bhatewara 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1354d1a890faSShreyas Bhatewara 					rq->buf_info[ring_idx][i].skb) {
1355d1a890faSShreyas Bhatewara 				pci_unmap_single(adapter->pdev, rxd->addr,
1356d1a890faSShreyas Bhatewara 						 rxd->len, PCI_DMA_FROMDEVICE);
1357d1a890faSShreyas Bhatewara 				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1358d1a890faSShreyas Bhatewara 				rq->buf_info[ring_idx][i].skb = NULL;
1359d1a890faSShreyas Bhatewara 			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1360d1a890faSShreyas Bhatewara 					rq->buf_info[ring_idx][i].page) {
1361d1a890faSShreyas Bhatewara 				pci_unmap_page(adapter->pdev, rxd->addr,
1362d1a890faSShreyas Bhatewara 					       rxd->len, PCI_DMA_FROMDEVICE);
1363d1a890faSShreyas Bhatewara 				put_page(rq->buf_info[ring_idx][i].page);
1364d1a890faSShreyas Bhatewara 				rq->buf_info[ring_idx][i].page = NULL;
1365d1a890faSShreyas Bhatewara 			}
1366d1a890faSShreyas Bhatewara 		}
1367d1a890faSShreyas Bhatewara 
1368d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1369d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].next2fill =
1370d1a890faSShreyas Bhatewara 					rq->rx_ring[ring_idx].next2comp = 0;
1371d1a890faSShreyas Bhatewara 	}
1372d1a890faSShreyas Bhatewara 
1373d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1374d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
1375d1a890faSShreyas Bhatewara }
1376d1a890faSShreyas Bhatewara 
1377d1a890faSShreyas Bhatewara 
137809c5088eSShreyas Bhatewara static void
137909c5088eSShreyas Bhatewara vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
138009c5088eSShreyas Bhatewara {
138109c5088eSShreyas Bhatewara 	int i;
138209c5088eSShreyas Bhatewara 
138309c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
138409c5088eSShreyas Bhatewara 		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
138509c5088eSShreyas Bhatewara }
138609c5088eSShreyas Bhatewara 
138709c5088eSShreyas Bhatewara 
1388280b74f7Sstephen hemminger static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1389d1a890faSShreyas Bhatewara 			       struct vmxnet3_adapter *adapter)
1390d1a890faSShreyas Bhatewara {
1391d1a890faSShreyas Bhatewara 	int i;
1392d1a890faSShreyas Bhatewara 	int j;
1393d1a890faSShreyas Bhatewara 
1394d1a890faSShreyas Bhatewara 	/* all rx buffers must have already been freed */
1395d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1396d1a890faSShreyas Bhatewara 		if (rq->buf_info[i]) {
1397d1a890faSShreyas Bhatewara 			for (j = 0; j < rq->rx_ring[i].size; j++)
1398d1a890faSShreyas Bhatewara 				BUG_ON(rq->buf_info[i][j].page != NULL);
1399d1a890faSShreyas Bhatewara 		}
1400d1a890faSShreyas Bhatewara 	}
1401d1a890faSShreyas Bhatewara 
1402d1a890faSShreyas Bhatewara 
1403d1a890faSShreyas Bhatewara 	kfree(rq->buf_info[0]);
1404d1a890faSShreyas Bhatewara 
1405d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1406d1a890faSShreyas Bhatewara 		if (rq->rx_ring[i].base) {
1407d1a890faSShreyas Bhatewara 			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
1408d1a890faSShreyas Bhatewara 					    * sizeof(struct Vmxnet3_RxDesc),
1409d1a890faSShreyas Bhatewara 					    rq->rx_ring[i].base,
1410d1a890faSShreyas Bhatewara 					    rq->rx_ring[i].basePA);
1411d1a890faSShreyas Bhatewara 			rq->rx_ring[i].base = NULL;
1412d1a890faSShreyas Bhatewara 		}
1413d1a890faSShreyas Bhatewara 		rq->buf_info[i] = NULL;
1414d1a890faSShreyas Bhatewara 	}
1415d1a890faSShreyas Bhatewara 
1416d1a890faSShreyas Bhatewara 	if (rq->comp_ring.base) {
1417d1a890faSShreyas Bhatewara 		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
1418d1a890faSShreyas Bhatewara 				    sizeof(struct Vmxnet3_RxCompDesc),
1419d1a890faSShreyas Bhatewara 				    rq->comp_ring.base, rq->comp_ring.basePA);
1420d1a890faSShreyas Bhatewara 		rq->comp_ring.base = NULL;
1421d1a890faSShreyas Bhatewara 	}
1422d1a890faSShreyas Bhatewara }
1423d1a890faSShreyas Bhatewara 
1424d1a890faSShreyas Bhatewara 
1425d1a890faSShreyas Bhatewara static int
1426d1a890faSShreyas Bhatewara vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1427d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter  *adapter)
1428d1a890faSShreyas Bhatewara {
1429d1a890faSShreyas Bhatewara 	int i;
1430d1a890faSShreyas Bhatewara 
1431d1a890faSShreyas Bhatewara 	/* initialize buf_info */
1432d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[0].size; i++) {
1433d1a890faSShreyas Bhatewara 
1434d1a890faSShreyas Bhatewara 		/* 1st buf for a pkt is skbuff */
1435d1a890faSShreyas Bhatewara 		if (i % adapter->rx_buf_per_pkt == 0) {
1436d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1437d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = adapter->skb_buf_size;
1438d1a890faSShreyas Bhatewara 		} else { /* subsequent bufs for a pkt are frags */
1439d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1440d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = PAGE_SIZE;
1441d1a890faSShreyas Bhatewara 		}
1442d1a890faSShreyas Bhatewara 	}
1443d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[1].size; i++) {
1444d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1445d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].len = PAGE_SIZE;
1446d1a890faSShreyas Bhatewara 	}
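	/* Thus ring 0 interleaves one skb (head) buffer with
	 * rx_buf_per_pkt - 1 page buffers per pkt, while ring 1 holds page
	 * buffers only.
	 */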
1447d1a890faSShreyas Bhatewara 
1448d1a890faSShreyas Bhatewara 	/* reset internal state and allocate buffers for both rings */
1449d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1450d1a890faSShreyas Bhatewara 		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1451d1a890faSShreyas Bhatewara 
1452d1a890faSShreyas Bhatewara 		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1453d1a890faSShreyas Bhatewara 		       sizeof(struct Vmxnet3_RxDesc));
1454d1a890faSShreyas Bhatewara 		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1455d1a890faSShreyas Bhatewara 	}
1456d1a890faSShreyas Bhatewara 	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1457d1a890faSShreyas Bhatewara 				    adapter) == 0) {
1458d1a890faSShreyas Bhatewara 		/* the 1st ring must have at least 1 rx buffer allocated */
1459d1a890faSShreyas Bhatewara 		return -ENOMEM;
1460d1a890faSShreyas Bhatewara 	}
1461d1a890faSShreyas Bhatewara 	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1462d1a890faSShreyas Bhatewara 
1463d1a890faSShreyas Bhatewara 	/* reset the comp ring */
1464d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
1465d1a890faSShreyas Bhatewara 	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1466d1a890faSShreyas Bhatewara 	       sizeof(struct Vmxnet3_RxCompDesc));
1467d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1468d1a890faSShreyas Bhatewara 
1469d1a890faSShreyas Bhatewara 	/* reset rxctx */
1470d1a890faSShreyas Bhatewara 	rq->rx_ctx.skb = NULL;
1471d1a890faSShreyas Bhatewara 
1472d1a890faSShreyas Bhatewara 	/* stats are not reset */
1473d1a890faSShreyas Bhatewara 	return 0;
1474d1a890faSShreyas Bhatewara }
1475d1a890faSShreyas Bhatewara 
1476d1a890faSShreyas Bhatewara 
1477d1a890faSShreyas Bhatewara static int
147809c5088eSShreyas Bhatewara vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
147909c5088eSShreyas Bhatewara {
148009c5088eSShreyas Bhatewara 	int i, err = 0;
148109c5088eSShreyas Bhatewara 
148209c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
148309c5088eSShreyas Bhatewara 		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
148409c5088eSShreyas Bhatewara 		if (unlikely(err)) {
148509c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev, "%s: failed to "
148609c5088eSShreyas Bhatewara 				"initialize rx queue%i\n",
148709c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
148809c5088eSShreyas Bhatewara 			break;
148909c5088eSShreyas Bhatewara 		}
149009c5088eSShreyas Bhatewara 	}
149109c5088eSShreyas Bhatewara 	return err;
149209c5088eSShreyas Bhatewara 
149309c5088eSShreyas Bhatewara }
149409c5088eSShreyas Bhatewara 
149509c5088eSShreyas Bhatewara 
149609c5088eSShreyas Bhatewara static int
1497d1a890faSShreyas Bhatewara vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1498d1a890faSShreyas Bhatewara {
1499d1a890faSShreyas Bhatewara 	int i;
1500d1a890faSShreyas Bhatewara 	size_t sz;
1501d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_buf_info *bi;
1502d1a890faSShreyas Bhatewara 
1503d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1504d1a890faSShreyas Bhatewara 
1505d1a890faSShreyas Bhatewara 		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1506d1a890faSShreyas Bhatewara 		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
1507d1a890faSShreyas Bhatewara 							&rq->rx_ring[i].basePA);
1508d1a890faSShreyas Bhatewara 		if (!rq->rx_ring[i].base) {
1509204a6e65SStephen Hemminger 			netdev_err(adapter->netdev,
1510204a6e65SStephen Hemminger 				   "failed to allocate rx ring %d\n", i);
1511d1a890faSShreyas Bhatewara 			goto err;
1512d1a890faSShreyas Bhatewara 		}
1513d1a890faSShreyas Bhatewara 	}
1514d1a890faSShreyas Bhatewara 
1515d1a890faSShreyas Bhatewara 	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1516d1a890faSShreyas Bhatewara 	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
1517d1a890faSShreyas Bhatewara 						  &rq->comp_ring.basePA);
1518d1a890faSShreyas Bhatewara 	if (!rq->comp_ring.base) {
1519204a6e65SStephen Hemminger 		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1520d1a890faSShreyas Bhatewara 		goto err;
1521d1a890faSShreyas Bhatewara 	}
1522d1a890faSShreyas Bhatewara 
1523d1a890faSShreyas Bhatewara 	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1524d1a890faSShreyas Bhatewara 						   rq->rx_ring[1].size);
1525476c609eSJulia Lawall 	bi = kzalloc(sz, GFP_KERNEL);
1526e404decbSJoe Perches 	if (!bi)
1527d1a890faSShreyas Bhatewara 		goto err;
1528e404decbSJoe Perches 
1529d1a890faSShreyas Bhatewara 	rq->buf_info[0] = bi;
1530d1a890faSShreyas Bhatewara 	rq->buf_info[1] = bi + rq->rx_ring[0].size;
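	/* Both rings' buf_info arrays come from the single allocation above;
	 * ring 1's array starts right after ring 0's entries.
	 */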
1531d1a890faSShreyas Bhatewara 
1532d1a890faSShreyas Bhatewara 	return 0;
1533d1a890faSShreyas Bhatewara 
1534d1a890faSShreyas Bhatewara err:
1535d1a890faSShreyas Bhatewara 	vmxnet3_rq_destroy(rq, adapter);
1536d1a890faSShreyas Bhatewara 	return -ENOMEM;
1537d1a890faSShreyas Bhatewara }
1538d1a890faSShreyas Bhatewara 
1539d1a890faSShreyas Bhatewara 
1540d1a890faSShreyas Bhatewara static int
154109c5088eSShreyas Bhatewara vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
154209c5088eSShreyas Bhatewara {
154309c5088eSShreyas Bhatewara 	int i, err = 0;
154409c5088eSShreyas Bhatewara 
154509c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
154609c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
154709c5088eSShreyas Bhatewara 		if (unlikely(err)) {
154809c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev,
154909c5088eSShreyas Bhatewara 				"%s: failed to create rx queue%i\n",
155009c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
155109c5088eSShreyas Bhatewara 			goto err_out;
155209c5088eSShreyas Bhatewara 		}
155309c5088eSShreyas Bhatewara 	}
155409c5088eSShreyas Bhatewara 	return err;
155509c5088eSShreyas Bhatewara err_out:
155609c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
155709c5088eSShreyas Bhatewara 	return err;
155809c5088eSShreyas Bhatewara 
155909c5088eSShreyas Bhatewara }
156009c5088eSShreyas Bhatewara 
156109c5088eSShreyas Bhatewara /* Multiple queue aware polling function for tx and rx */
156209c5088eSShreyas Bhatewara 
156309c5088eSShreyas Bhatewara static int
1564d1a890faSShreyas Bhatewara vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1565d1a890faSShreyas Bhatewara {
156609c5088eSShreyas Bhatewara 	int rcd_done = 0, i;
1567d1a890faSShreyas Bhatewara 	if (unlikely(adapter->shared->ecr))
1568d1a890faSShreyas Bhatewara 		vmxnet3_process_events(adapter);
156909c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
157009c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1571d1a890faSShreyas Bhatewara 
157209c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
157309c5088eSShreyas Bhatewara 		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
157409c5088eSShreyas Bhatewara 						   adapter, budget);
157509c5088eSShreyas Bhatewara 	return rcd_done;
1576d1a890faSShreyas Bhatewara }
1577d1a890faSShreyas Bhatewara 
1578d1a890faSShreyas Bhatewara 
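/*
 * NAPI polling function used in INTx/MSI mode, where one vector is shared by
 * all queues (see vmxnet3_request_irqs(); it is registered with
 * netif_napi_add() elsewhere in this file). Returns the # of rx descriptors
 * processed.
 */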
1579d1a890faSShreyas Bhatewara static int
1580d1a890faSShreyas Bhatewara vmxnet3_poll(struct napi_struct *napi, int budget)
1581d1a890faSShreyas Bhatewara {
158209c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
158309c5088eSShreyas Bhatewara 					  struct vmxnet3_rx_queue, napi);
1584d1a890faSShreyas Bhatewara 	int rxd_done;
1585d1a890faSShreyas Bhatewara 
158609c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1587d1a890faSShreyas Bhatewara 
1588d1a890faSShreyas Bhatewara 	if (rxd_done < budget) {
1589d1a890faSShreyas Bhatewara 		napi_complete(napi);
159009c5088eSShreyas Bhatewara 		vmxnet3_enable_all_intrs(rx_queue->adapter);
1591d1a890faSShreyas Bhatewara 	}
1592d1a890faSShreyas Bhatewara 	return rxd_done;
1593d1a890faSShreyas Bhatewara }
1594d1a890faSShreyas Bhatewara 
159509c5088eSShreyas Bhatewara /*
159609c5088eSShreyas Bhatewara  * NAPI polling function for MSI-X mode with multiple Rx queues
159709c5088eSShreyas Bhatewara  * Returns the number of NAPI credits consumed (# of rx descriptors processed)
159809c5088eSShreyas Bhatewara  */
159909c5088eSShreyas Bhatewara 
160009c5088eSShreyas Bhatewara static int
160109c5088eSShreyas Bhatewara vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
160209c5088eSShreyas Bhatewara {
160309c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = container_of(napi,
160409c5088eSShreyas Bhatewara 						struct vmxnet3_rx_queue, napi);
160509c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
160609c5088eSShreyas Bhatewara 	int rxd_done;
160709c5088eSShreyas Bhatewara 
160809c5088eSShreyas Bhatewara 	/* When sharing interrupt with corresponding tx queue, process
160909c5088eSShreyas Bhatewara 	 * tx completions in that queue as well
161009c5088eSShreyas Bhatewara 	 */
161109c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
161209c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue *tq =
161309c5088eSShreyas Bhatewara 				&adapter->tx_queue[rq - adapter->rx_queue];
161409c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
161509c5088eSShreyas Bhatewara 	}
161609c5088eSShreyas Bhatewara 
161709c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
161809c5088eSShreyas Bhatewara 
161909c5088eSShreyas Bhatewara 	if (rxd_done < budget) {
162009c5088eSShreyas Bhatewara 		napi_complete(napi);
162109c5088eSShreyas Bhatewara 		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
162209c5088eSShreyas Bhatewara 	}
162309c5088eSShreyas Bhatewara 	return rxd_done;
162409c5088eSShreyas Bhatewara }
162509c5088eSShreyas Bhatewara 
162609c5088eSShreyas Bhatewara 
162709c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
162809c5088eSShreyas Bhatewara 
162909c5088eSShreyas Bhatewara /*
163009c5088eSShreyas Bhatewara  * Handle completion interrupts on tx queues
163109c5088eSShreyas Bhatewara  * Returns whether or not the intr is handled
163209c5088eSShreyas Bhatewara  */
163309c5088eSShreyas Bhatewara 
163409c5088eSShreyas Bhatewara static irqreturn_t
163509c5088eSShreyas Bhatewara vmxnet3_msix_tx(int irq, void *data)
163609c5088eSShreyas Bhatewara {
163709c5088eSShreyas Bhatewara 	struct vmxnet3_tx_queue *tq = data;
163809c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = tq->adapter;
163909c5088eSShreyas Bhatewara 
164009c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
164109c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
164209c5088eSShreyas Bhatewara 
164309c5088eSShreyas Bhatewara 	/* Handle the case where only one irq is allocated for all tx queues */
164409c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
164509c5088eSShreyas Bhatewara 		int i;
164609c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
164709c5088eSShreyas Bhatewara 			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
164809c5088eSShreyas Bhatewara 			vmxnet3_tq_tx_complete(txq, adapter);
164909c5088eSShreyas Bhatewara 		}
165009c5088eSShreyas Bhatewara 	} else {
165109c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
165209c5088eSShreyas Bhatewara 	}
165309c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
165409c5088eSShreyas Bhatewara 
165509c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
165609c5088eSShreyas Bhatewara }
165709c5088eSShreyas Bhatewara 
165809c5088eSShreyas Bhatewara 
165909c5088eSShreyas Bhatewara /*
166009c5088eSShreyas Bhatewara  * Handle completion interrupts on rx queues. Returns whether or not the
166109c5088eSShreyas Bhatewara  * intr is handled
166209c5088eSShreyas Bhatewara  */
166309c5088eSShreyas Bhatewara 
166409c5088eSShreyas Bhatewara static irqreturn_t
166509c5088eSShreyas Bhatewara vmxnet3_msix_rx(int irq, void *data)
166609c5088eSShreyas Bhatewara {
166709c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = data;
166809c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
166909c5088eSShreyas Bhatewara 
167009c5088eSShreyas Bhatewara 	/* disable intr if needed */
167109c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
167209c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
167309c5088eSShreyas Bhatewara 	napi_schedule(&rq->napi);
167409c5088eSShreyas Bhatewara 
167509c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
167609c5088eSShreyas Bhatewara }
167709c5088eSShreyas Bhatewara 
167809c5088eSShreyas Bhatewara /*
167909c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
168009c5088eSShreyas Bhatewara  *
168109c5088eSShreyas Bhatewara  * vmxnet3_msix_event --
168209c5088eSShreyas Bhatewara  *
168309c5088eSShreyas Bhatewara  *    vmxnet3 msix event intr handler
168409c5088eSShreyas Bhatewara  *
168509c5088eSShreyas Bhatewara  * Result:
168609c5088eSShreyas Bhatewara  *    whether or not the intr is handled
168709c5088eSShreyas Bhatewara  *
168809c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
168909c5088eSShreyas Bhatewara  */
169009c5088eSShreyas Bhatewara 
169109c5088eSShreyas Bhatewara static irqreturn_t
169209c5088eSShreyas Bhatewara vmxnet3_msix_event(int irq, void *data)
169309c5088eSShreyas Bhatewara {
169409c5088eSShreyas Bhatewara 	struct net_device *dev = data;
169509c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
169609c5088eSShreyas Bhatewara 
169709c5088eSShreyas Bhatewara 	/* disable intr if needed */
169809c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
169909c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
170009c5088eSShreyas Bhatewara 
170109c5088eSShreyas Bhatewara 	if (adapter->shared->ecr)
170209c5088eSShreyas Bhatewara 		vmxnet3_process_events(adapter);
170309c5088eSShreyas Bhatewara 
170409c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
170509c5088eSShreyas Bhatewara 
170609c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
170709c5088eSShreyas Bhatewara }
170809c5088eSShreyas Bhatewara 
170909c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI  */
171009c5088eSShreyas Bhatewara 
1711d1a890faSShreyas Bhatewara 
1712d1a890faSShreyas Bhatewara /* Interrupt handler for vmxnet3  */
1713d1a890faSShreyas Bhatewara static irqreturn_t
1714d1a890faSShreyas Bhatewara vmxnet3_intr(int irq, void *dev_id)
1715d1a890faSShreyas Bhatewara {
1716d1a890faSShreyas Bhatewara 	struct net_device *dev = dev_id;
1717d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
1718d1a890faSShreyas Bhatewara 
171909c5088eSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_INTX) {
1720d1a890faSShreyas Bhatewara 		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1721d1a890faSShreyas Bhatewara 		if (unlikely(icr == 0))
1722d1a890faSShreyas Bhatewara 			/* not ours */
1723d1a890faSShreyas Bhatewara 			return IRQ_NONE;
1724d1a890faSShreyas Bhatewara 	}
1725d1a890faSShreyas Bhatewara 
1726d1a890faSShreyas Bhatewara 
1727d1a890faSShreyas Bhatewara 	/* disable intr if needed */
1728d1a890faSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
172909c5088eSShreyas Bhatewara 		vmxnet3_disable_all_intrs(adapter);
1730d1a890faSShreyas Bhatewara 
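	/* INTx/MSI implies a single rx queue (see vmxnet3_request_irqs), so
	 * queue 0's napi context handles rx for the whole device as well as
	 * tx completions for all tx queues, via vmxnet3_do_poll().
	 */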
173109c5088eSShreyas Bhatewara 	napi_schedule(&adapter->rx_queue[0].napi);
1732d1a890faSShreyas Bhatewara 
1733d1a890faSShreyas Bhatewara 	return IRQ_HANDLED;
1734d1a890faSShreyas Bhatewara }
1735d1a890faSShreyas Bhatewara 
1736d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
1737d1a890faSShreyas Bhatewara 
1738d1a890faSShreyas Bhatewara /* netpoll callback. */
1739d1a890faSShreyas Bhatewara static void
1740d1a890faSShreyas Bhatewara vmxnet3_netpoll(struct net_device *netdev)
1741d1a890faSShreyas Bhatewara {
1742d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1743d1a890faSShreyas Bhatewara 
174409c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
174509c5088eSShreyas Bhatewara 		vmxnet3_disable_all_intrs(adapter);
1746d1a890faSShreyas Bhatewara 
174709c5088eSShreyas Bhatewara 	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
174809c5088eSShreyas Bhatewara 	vmxnet3_enable_all_intrs(adapter);
174909c5088eSShreyas Bhatewara 
1750d1a890faSShreyas Bhatewara }
175109c5088eSShreyas Bhatewara #endif	/* CONFIG_NET_POLL_CONTROLLER */
1752d1a890faSShreyas Bhatewara 
1753d1a890faSShreyas Bhatewara static int
1754d1a890faSShreyas Bhatewara vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1755d1a890faSShreyas Bhatewara {
175609c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
175709c5088eSShreyas Bhatewara 	int err = 0, i;
175809c5088eSShreyas Bhatewara 	int vector = 0;
1759d1a890faSShreyas Bhatewara 
17608f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
1761d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
176209c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
176309c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
176409c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
176509c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
176609c5088eSShreyas Bhatewara 				err = request_irq(
176709c5088eSShreyas Bhatewara 					      intr->msix_entries[vector].vector,
176809c5088eSShreyas Bhatewara 					      vmxnet3_msix_tx, 0,
176909c5088eSShreyas Bhatewara 					      adapter->tx_queue[i].name,
177009c5088eSShreyas Bhatewara 					      &adapter->tx_queue[i]);
177109c5088eSShreyas Bhatewara 			} else {
177209c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
177309c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
177409c5088eSShreyas Bhatewara 			}
177509c5088eSShreyas Bhatewara 			if (err) {
177609c5088eSShreyas Bhatewara 				dev_err(&adapter->netdev->dev,
177709c5088eSShreyas Bhatewara 					"Failed to request irq for MSIX, %s, "
177809c5088eSShreyas Bhatewara 					"error %d\n",
177909c5088eSShreyas Bhatewara 					adapter->tx_queue[i].name, err);
178009c5088eSShreyas Bhatewara 				return err;
178109c5088eSShreyas Bhatewara 			}
178209c5088eSShreyas Bhatewara 
178309c5088eSShreyas Bhatewara 			/* Handle the case where only 1 MSI-X vector was allocated for
178409c5088eSShreyas Bhatewara 			 * all tx queues */
178509c5088eSShreyas Bhatewara 			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
178609c5088eSShreyas Bhatewara 				for (; i < adapter->num_tx_queues; i++)
178709c5088eSShreyas Bhatewara 					adapter->tx_queue[i].comp_ring.intr_idx
178809c5088eSShreyas Bhatewara 								= vector;
178909c5088eSShreyas Bhatewara 				vector++;
179009c5088eSShreyas Bhatewara 				break;
179109c5088eSShreyas Bhatewara 			} else {
179209c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx
179309c5088eSShreyas Bhatewara 								= vector++;
179409c5088eSShreyas Bhatewara 			}
179509c5088eSShreyas Bhatewara 		}
179609c5088eSShreyas Bhatewara 		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
179709c5088eSShreyas Bhatewara 			vector = 0;
179809c5088eSShreyas Bhatewara 
179909c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
180009c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
180109c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
180209c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
180309c5088eSShreyas Bhatewara 			else
180409c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
180509c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
180609c5088eSShreyas Bhatewara 			err = request_irq(intr->msix_entries[vector].vector,
180709c5088eSShreyas Bhatewara 					  vmxnet3_msix_rx, 0,
180809c5088eSShreyas Bhatewara 					  adapter->rx_queue[i].name,
180909c5088eSShreyas Bhatewara 					  &(adapter->rx_queue[i]));
181009c5088eSShreyas Bhatewara 			if (err) {
1811204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
1812204a6e65SStephen Hemminger 					   "Failed to request irq for MSIX, "
1813204a6e65SStephen Hemminger 					   "%s, error %d\n",
181409c5088eSShreyas Bhatewara 					   adapter->rx_queue[i].name, err);
181509c5088eSShreyas Bhatewara 				return err;
181609c5088eSShreyas Bhatewara 			}
181709c5088eSShreyas Bhatewara 
181809c5088eSShreyas Bhatewara 			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
181909c5088eSShreyas Bhatewara 		}
182009c5088eSShreyas Bhatewara 
182109c5088eSShreyas Bhatewara 		sprintf(intr->event_msi_vector_name, "%s-event-%d",
182209c5088eSShreyas Bhatewara 			adapter->netdev->name, vector);
182309c5088eSShreyas Bhatewara 		err = request_irq(intr->msix_entries[vector].vector,
182409c5088eSShreyas Bhatewara 				  vmxnet3_msix_event, 0,
182509c5088eSShreyas Bhatewara 				  intr->event_msi_vector_name, adapter->netdev);
182609c5088eSShreyas Bhatewara 		intr->event_intr_idx = vector;
182709c5088eSShreyas Bhatewara 
182809c5088eSShreyas Bhatewara 	} else if (intr->type == VMXNET3_IT_MSI) {
182909c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
1830d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1831d1a890faSShreyas Bhatewara 				  adapter->netdev->name, adapter->netdev);
183209c5088eSShreyas Bhatewara 	} else {
1833115924b6SShreyas Bhatewara #endif
183409c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
1835d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1836d1a890faSShreyas Bhatewara 				  IRQF_SHARED, adapter->netdev->name,
1837d1a890faSShreyas Bhatewara 				  adapter->netdev);
183809c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
183909c5088eSShreyas Bhatewara 	}
184009c5088eSShreyas Bhatewara #endif
184109c5088eSShreyas Bhatewara 	intr->num_intrs = vector + 1;
184209c5088eSShreyas Bhatewara 	if (err) {
1843204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
1844204a6e65SStephen Hemminger 			   "Failed to request irq (intr type:%d), error %d\n",
1845204a6e65SStephen Hemminger 			   intr->type, err);
184609c5088eSShreyas Bhatewara 	} else {
184709c5088eSShreyas Bhatewara 		/* Number of rx queues will not change after this */
184809c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
184909c5088eSShreyas Bhatewara 			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
185009c5088eSShreyas Bhatewara 			rq->qid = i;
185109c5088eSShreyas Bhatewara 			rq->qid2 = i + adapter->num_rx_queues;
1852d1a890faSShreyas Bhatewara 		}
1853d1a890faSShreyas Bhatewara 
1856d1a890faSShreyas Bhatewara 		/* init our intr settings */
185709c5088eSShreyas Bhatewara 		for (i = 0; i < intr->num_intrs; i++)
185809c5088eSShreyas Bhatewara 			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
185909c5088eSShreyas Bhatewara 		if (adapter->intr.type != VMXNET3_IT_MSIX) {
1860d1a890faSShreyas Bhatewara 			adapter->intr.event_intr_idx = 0;
186109c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++)
186209c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx = 0;
186309c5088eSShreyas Bhatewara 			adapter->rx_queue[0].comp_ring.intr_idx = 0;
186409c5088eSShreyas Bhatewara 		}
1865d1a890faSShreyas Bhatewara 
1866204a6e65SStephen Hemminger 		netdev_info(adapter->netdev,
1867204a6e65SStephen Hemminger 			    "intr type %u, mode %u, %u vectors allocated\n",
1868204a6e65SStephen Hemminger 			    intr->type, intr->mask_mode, intr->num_intrs);
1869d1a890faSShreyas Bhatewara 	}
1870d1a890faSShreyas Bhatewara 
1871d1a890faSShreyas Bhatewara 	return err;
1872d1a890faSShreyas Bhatewara }
1873d1a890faSShreyas Bhatewara 
1874d1a890faSShreyas Bhatewara 
1875d1a890faSShreyas Bhatewara static void
1876d1a890faSShreyas Bhatewara vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1877d1a890faSShreyas Bhatewara {
187809c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
187909c5088eSShreyas Bhatewara 	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
1880d1a890faSShreyas Bhatewara 
188109c5088eSShreyas Bhatewara 	switch (intr->type) {
18828f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
1883d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSIX:
1884d1a890faSShreyas Bhatewara 	{
188509c5088eSShreyas Bhatewara 		int i, vector = 0;
1886d1a890faSShreyas Bhatewara 
188709c5088eSShreyas Bhatewara 		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
188809c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++) {
188909c5088eSShreyas Bhatewara 				free_irq(intr->msix_entries[vector++].vector,
189009c5088eSShreyas Bhatewara 					 &(adapter->tx_queue[i]));
189109c5088eSShreyas Bhatewara 				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
189209c5088eSShreyas Bhatewara 					break;
189309c5088eSShreyas Bhatewara 			}
189409c5088eSShreyas Bhatewara 		}
189509c5088eSShreyas Bhatewara 
189609c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
189709c5088eSShreyas Bhatewara 			free_irq(intr->msix_entries[vector++].vector,
189809c5088eSShreyas Bhatewara 				 &(adapter->rx_queue[i]));
189909c5088eSShreyas Bhatewara 		}
190009c5088eSShreyas Bhatewara 
190109c5088eSShreyas Bhatewara 		free_irq(intr->msix_entries[vector].vector,
1902d1a890faSShreyas Bhatewara 			 adapter->netdev);
190309c5088eSShreyas Bhatewara 		BUG_ON(vector >= intr->num_intrs);
1904d1a890faSShreyas Bhatewara 		break;
1905d1a890faSShreyas Bhatewara 	}
19068f7e524cSRandy Dunlap #endif
1907d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSI:
1908d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
1909d1a890faSShreyas Bhatewara 		break;
1910d1a890faSShreyas Bhatewara 	case VMXNET3_IT_INTX:
1911d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
1912d1a890faSShreyas Bhatewara 		break;
1913d1a890faSShreyas Bhatewara 	default:
1914c068e777SSasha Levin 		BUG();
1915d1a890faSShreyas Bhatewara 	}
1916d1a890faSShreyas Bhatewara }
1917d1a890faSShreyas Bhatewara 
1918d1a890faSShreyas Bhatewara 
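/*
 * Re-program the device's VLAN filter table (one bit per possible VLAN id)
 * from the driver's active_vlans bitmap. Entry 0 lets untagged pkts through.
 */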
1919d1a890faSShreyas Bhatewara static void
1920d1a890faSShreyas Bhatewara vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1921d1a890faSShreyas Bhatewara {
1922d1a890faSShreyas Bhatewara 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
192372e85c45SJesse Gross 	u16 vid;
1924d1a890faSShreyas Bhatewara 
192572e85c45SJesse Gross 	/* allow untagged pkts */
1926d1a890faSShreyas Bhatewara 	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
192772e85c45SJesse Gross 
192872e85c45SJesse Gross 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
192972e85c45SJesse Gross 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1930d1a890faSShreyas Bhatewara }
1931d1a890faSShreyas Bhatewara 
1932d1a890faSShreyas Bhatewara 
19338e586137SJiri Pirko static int
1934d1a890faSShreyas Bhatewara vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1935d1a890faSShreyas Bhatewara {
1936d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1937f6957f88SJesse Gross 
1938f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
1939d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
194083d0feffSShreyas Bhatewara 		unsigned long flags;
1941d1a890faSShreyas Bhatewara 
1942d1a890faSShreyas Bhatewara 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
194383d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
1944d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1945d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
194683d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1947f6957f88SJesse Gross 	}
194872e85c45SJesse Gross 
194972e85c45SJesse Gross 	set_bit(vid, adapter->active_vlans);
19508e586137SJiri Pirko 
19518e586137SJiri Pirko 	return 0;
1952d1a890faSShreyas Bhatewara }
1953d1a890faSShreyas Bhatewara 
1954d1a890faSShreyas Bhatewara 
19558e586137SJiri Pirko static int
1956d1a890faSShreyas Bhatewara vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1957d1a890faSShreyas Bhatewara {
1958d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1959f6957f88SJesse Gross 
1960f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
1961d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
196283d0feffSShreyas Bhatewara 		unsigned long flags;
1963d1a890faSShreyas Bhatewara 
1964d1a890faSShreyas Bhatewara 		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
196583d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
1966d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1967d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
196883d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1969f6957f88SJesse Gross 	}
197072e85c45SJesse Gross 
197172e85c45SJesse Gross 	clear_bit(vid, adapter->active_vlans);
19728e586137SJiri Pirko 
19738e586137SJiri Pirko 	return 0;
1974d1a890faSShreyas Bhatewara }
1975d1a890faSShreyas Bhatewara 
1976d1a890faSShreyas Bhatewara 
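/*
 * Flatten the netdev's multicast list into a contiguous array suitable for
 * the device's mfTable. Returns NULL if the allocation fails or the table
 * would not fit in the 16-bit mfTableLen.
 */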
1977d1a890faSShreyas Bhatewara static u8 *
1978d1a890faSShreyas Bhatewara vmxnet3_copy_mc(struct net_device *netdev)
1979d1a890faSShreyas Bhatewara {
1980d1a890faSShreyas Bhatewara 	u8 *buf = NULL;
19814cd24eafSJiri Pirko 	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
1982d1a890faSShreyas Bhatewara 
1983d1a890faSShreyas Bhatewara 	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1984d1a890faSShreyas Bhatewara 	if (sz <= 0xffff) {
1985d1a890faSShreyas Bhatewara 		/* We may be called with BH disabled */
1986d1a890faSShreyas Bhatewara 		buf = kmalloc(sz, GFP_ATOMIC);
1987d1a890faSShreyas Bhatewara 		if (buf) {
198822bedad3SJiri Pirko 			struct netdev_hw_addr *ha;
1989567ec874SJiri Pirko 			int i = 0;
1990d1a890faSShreyas Bhatewara 
199122bedad3SJiri Pirko 			netdev_for_each_mc_addr(ha, netdev)
199222bedad3SJiri Pirko 				memcpy(buf + i++ * ETH_ALEN, ha->addr,
1993d1a890faSShreyas Bhatewara 				       ETH_ALEN);
1994d1a890faSShreyas Bhatewara 		}
1995d1a890faSShreyas Bhatewara 	}
1996d1a890faSShreyas Bhatewara 	return buf;
1997d1a890faSShreyas Bhatewara }
1998d1a890faSShreyas Bhatewara 
1999d1a890faSShreyas Bhatewara 
2000d1a890faSShreyas Bhatewara static void
2001d1a890faSShreyas Bhatewara vmxnet3_set_mc(struct net_device *netdev)
2002d1a890faSShreyas Bhatewara {
2003d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
200483d0feffSShreyas Bhatewara 	unsigned long flags;
2005d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxFilterConf *rxConf =
2006d1a890faSShreyas Bhatewara 					&adapter->shared->devRead.rxFilterConf;
2007d1a890faSShreyas Bhatewara 	u8 *new_table = NULL;
2008d1a890faSShreyas Bhatewara 	u32 new_mode = VMXNET3_RXM_UCAST;
2009d1a890faSShreyas Bhatewara 
201072e85c45SJesse Gross 	if (netdev->flags & IFF_PROMISC) {
201172e85c45SJesse Gross 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
201272e85c45SJesse Gross 		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
201372e85c45SJesse Gross 
2014d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_PROMISC;
201572e85c45SJesse Gross 	} else {
201672e85c45SJesse Gross 		vmxnet3_restore_vlan(adapter);
201772e85c45SJesse Gross 	}
2018d1a890faSShreyas Bhatewara 
2019d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_BROADCAST)
2020d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_BCAST;
2021d1a890faSShreyas Bhatewara 
2022d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_ALLMULTI)
2023d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_ALL_MULTI;
2024d1a890faSShreyas Bhatewara 	else
20254cd24eafSJiri Pirko 		if (!netdev_mc_empty(netdev)) {
2026d1a890faSShreyas Bhatewara 			new_table = vmxnet3_copy_mc(netdev);
2027d1a890faSShreyas Bhatewara 			if (new_table) {
2028d1a890faSShreyas Bhatewara 				new_mode |= VMXNET3_RXM_MCAST;
2029115924b6SShreyas Bhatewara 				rxConf->mfTableLen = cpu_to_le16(
20304cd24eafSJiri Pirko 					netdev_mc_count(netdev) * ETH_ALEN);
2031115924b6SShreyas Bhatewara 				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
2032115924b6SShreyas Bhatewara 						    new_table));
2033d1a890faSShreyas Bhatewara 			} else {
2034204a6e65SStephen Hemminger 				netdev_info(netdev, "failed to copy mcast list"
2035204a6e65SStephen Hemminger 					    ", setting ALL_MULTI\n");
2036d1a890faSShreyas Bhatewara 				new_mode |= VMXNET3_RXM_ALL_MULTI;
2037d1a890faSShreyas Bhatewara 			}
2038d1a890faSShreyas Bhatewara 		}
2039d1a890faSShreyas Bhatewara 
2041d1a890faSShreyas Bhatewara 	if (!(new_mode & VMXNET3_RXM_MCAST)) {
2042d1a890faSShreyas Bhatewara 		rxConf->mfTableLen = 0;
2043d1a890faSShreyas Bhatewara 		rxConf->mfTablePA = 0;
2044d1a890faSShreyas Bhatewara 	}
2045d1a890faSShreyas Bhatewara 
204683d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2047d1a890faSShreyas Bhatewara 	if (new_mode != rxConf->rxMode) {
2048115924b6SShreyas Bhatewara 		rxConf->rxMode = cpu_to_le32(new_mode);
2049d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2050d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_RX_MODE);
205172e85c45SJesse Gross 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
205272e85c45SJesse Gross 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2053d1a890faSShreyas Bhatewara 	}
2054d1a890faSShreyas Bhatewara 
2055d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2056d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
205783d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2058d1a890faSShreyas Bhatewara 
2059d1a890faSShreyas Bhatewara 	kfree(new_table);
2060d1a890faSShreyas Bhatewara }
2061d1a890faSShreyas Bhatewara 
206209c5088eSShreyas Bhatewara void
206309c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
206409c5088eSShreyas Bhatewara {
206509c5088eSShreyas Bhatewara 	int i;
206609c5088eSShreyas Bhatewara 
206709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
206809c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
206909c5088eSShreyas Bhatewara }
207009c5088eSShreyas Bhatewara 
2071d1a890faSShreyas Bhatewara 
2072d1a890faSShreyas Bhatewara /*
2073d1a890faSShreyas Bhatewara  *   Set up driver_shared based on settings in adapter.
2074d1a890faSShreyas Bhatewara  */
2075d1a890faSShreyas Bhatewara 
2076d1a890faSShreyas Bhatewara static void
2077d1a890faSShreyas Bhatewara vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2078d1a890faSShreyas Bhatewara {
2079d1a890faSShreyas Bhatewara 	struct Vmxnet3_DriverShared *shared = adapter->shared;
2080d1a890faSShreyas Bhatewara 	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2081d1a890faSShreyas Bhatewara 	struct Vmxnet3_TxQueueConf *tqc;
2082d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxQueueConf *rqc;
2083d1a890faSShreyas Bhatewara 	int i;
2084d1a890faSShreyas Bhatewara 
2085d1a890faSShreyas Bhatewara 	memset(shared, 0, sizeof(*shared));
2086d1a890faSShreyas Bhatewara 
2087d1a890faSShreyas Bhatewara 	/* driver settings */
2088115924b6SShreyas Bhatewara 	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2089115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.version = cpu_to_le32(
2090115924b6SShreyas Bhatewara 						VMXNET3_DRIVER_VERSION_NUM);
2091d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2092d1a890faSShreyas Bhatewara 				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2093d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
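	/* the gos bitfields above were filled in host byte order; convert
	 * the packed 32-bit word to little endian in place for the device */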
2094115924b6SShreyas Bhatewara 	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2095115924b6SShreyas Bhatewara 				*((u32 *)&devRead->misc.driverInfo.gos));
2096115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2097115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2098d1a890faSShreyas Bhatewara 
2099115924b6SShreyas Bhatewara 	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
2100115924b6SShreyas Bhatewara 	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2101d1a890faSShreyas Bhatewara 
2102d1a890faSShreyas Bhatewara 	/* set up feature flags */
2103a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_RXCSUM)
21043843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2105d1a890faSShreyas Bhatewara 
2106a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_LRO) {
21073843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_LRO;
2108115924b6SShreyas Bhatewara 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2109d1a890faSShreyas Bhatewara 	}
211054da3d00SShreyas Bhatewara 	if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
21113843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2112d1a890faSShreyas Bhatewara 
2113115924b6SShreyas Bhatewara 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2114115924b6SShreyas Bhatewara 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2115115924b6SShreyas Bhatewara 	devRead->misc.queueDescLen = cpu_to_le32(
211609c5088eSShreyas Bhatewara 		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
211709c5088eSShreyas Bhatewara 		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2118d1a890faSShreyas Bhatewara 
2119d1a890faSShreyas Bhatewara 	/* tx queue settings */
212009c5088eSShreyas Bhatewara 	devRead->misc.numTxQueues =  adapter->num_tx_queues;
212109c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
212209c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
212309c5088eSShreyas Bhatewara 		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
212409c5088eSShreyas Bhatewara 		tqc = &adapter->tqd_start[i].conf;
212509c5088eSShreyas Bhatewara 		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
212609c5088eSShreyas Bhatewara 		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
212709c5088eSShreyas Bhatewara 		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
212809c5088eSShreyas Bhatewara 		tqc->ddPA           = cpu_to_le64(virt_to_phys(tq->buf_info));
212909c5088eSShreyas Bhatewara 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
213009c5088eSShreyas Bhatewara 		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
213109c5088eSShreyas Bhatewara 		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
213209c5088eSShreyas Bhatewara 		tqc->ddLen          = cpu_to_le32(
213309c5088eSShreyas Bhatewara 					sizeof(struct vmxnet3_tx_buf_info) *
2134115924b6SShreyas Bhatewara 					tqc->txRingSize);
213509c5088eSShreyas Bhatewara 		tqc->intrIdx        = tq->comp_ring.intr_idx;
213609c5088eSShreyas Bhatewara 	}
2137d1a890faSShreyas Bhatewara 
2138d1a890faSShreyas Bhatewara 	/* rx queue settings */
213909c5088eSShreyas Bhatewara 	devRead->misc.numRxQueues = adapter->num_rx_queues;
214009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
214109c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
214209c5088eSShreyas Bhatewara 		rqc = &adapter->rqd_start[i].conf;
214309c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
214409c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
214509c5088eSShreyas Bhatewara 		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
2146115924b6SShreyas Bhatewara 		rqc->ddPA            = cpu_to_le64(virt_to_phys(
214709c5088eSShreyas Bhatewara 							rq->buf_info));
214809c5088eSShreyas Bhatewara 		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
214909c5088eSShreyas Bhatewara 		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
215009c5088eSShreyas Bhatewara 		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
215109c5088eSShreyas Bhatewara 		rqc->ddLen           = cpu_to_le32(
215209c5088eSShreyas Bhatewara 					sizeof(struct vmxnet3_rx_buf_info) *
215309c5088eSShreyas Bhatewara 					(rqc->rxRingSize[0] +
215409c5088eSShreyas Bhatewara 					 rqc->rxRingSize[1]));
215509c5088eSShreyas Bhatewara 		rqc->intrIdx         = rq->comp_ring.intr_idx;
215609c5088eSShreyas Bhatewara 	}
215709c5088eSShreyas Bhatewara 
215809c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
215909c5088eSShreyas Bhatewara 	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
216009c5088eSShreyas Bhatewara 
216109c5088eSShreyas Bhatewara 	if (adapter->rss) {
216209c5088eSShreyas Bhatewara 		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
216366d35910SStephen Hemminger 		static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
216466d35910SStephen Hemminger 			0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
216566d35910SStephen Hemminger 			0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
216666d35910SStephen Hemminger 			0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
216766d35910SStephen Hemminger 			0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
216866d35910SStephen Hemminger 			0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
216966d35910SStephen Hemminger 		};
217066d35910SStephen Hemminger 
217109c5088eSShreyas Bhatewara 		devRead->misc.uptFeatures |= UPT1_F_RSS;
217209c5088eSShreyas Bhatewara 		devRead->misc.numRxQueues = adapter->num_rx_queues;
217309c5088eSShreyas Bhatewara 		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
217409c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV4 |
217509c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
217609c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV6;
217709c5088eSShreyas Bhatewara 		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
217809c5088eSShreyas Bhatewara 		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
217909c5088eSShreyas Bhatewara 		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
218066d35910SStephen Hemminger 		memcpy(rssConf->hashKey, rss_key, sizeof(rss_key));
218166d35910SStephen Hemminger 
218209c5088eSShreyas Bhatewara 		for (i = 0; i < rssConf->indTableSize; i++)
2183278bc429SBen Hutchings 			rssConf->indTable[i] = ethtool_rxfh_indir_default(
2184278bc429SBen Hutchings 				i, adapter->num_rx_queues);
218509c5088eSShreyas Bhatewara 
218609c5088eSShreyas Bhatewara 		devRead->rssConfDesc.confVer = 1;
218709c5088eSShreyas Bhatewara 		devRead->rssConfDesc.confLen = sizeof(*rssConf);
218809c5088eSShreyas Bhatewara 		devRead->rssConfDesc.confPA  = virt_to_phys(rssConf);
218909c5088eSShreyas Bhatewara 	}
219009c5088eSShreyas Bhatewara 
219109c5088eSShreyas Bhatewara #endif /* VMXNET3_RSS */
2192d1a890faSShreyas Bhatewara 
2193d1a890faSShreyas Bhatewara 	/* intr settings */
2194d1a890faSShreyas Bhatewara 	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2195d1a890faSShreyas Bhatewara 				     VMXNET3_IMM_AUTO;
2196d1a890faSShreyas Bhatewara 	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2197d1a890faSShreyas Bhatewara 	for (i = 0; i < adapter->intr.num_intrs; i++)
2198d1a890faSShreyas Bhatewara 		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2199d1a890faSShreyas Bhatewara 
2200d1a890faSShreyas Bhatewara 	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
22016929fe8aSRonghua Zang 	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2202d1a890faSShreyas Bhatewara 
2203d1a890faSShreyas Bhatewara 	/* rx filter settings */
2204d1a890faSShreyas Bhatewara 	devRead->rxFilterConf.rxMode = 0;
2205d1a890faSShreyas Bhatewara 	vmxnet3_restore_vlan(adapter);
2206f9f25026SShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2207f9f25026SShreyas Bhatewara 
2208d1a890faSShreyas Bhatewara 	/* the rest are already zeroed */
2209d1a890faSShreyas Bhatewara }
2210d1a890faSShreyas Bhatewara 
2211d1a890faSShreyas Bhatewara 
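/*
 * Bring the device up: initialize the tx/rx rings, request irqs, fill in
 * the shared area and hand its PA to the device, then issue ACTIVATE_DEV.
 * On success the rx producer registers are primed, the rx filter settings
 * are applied, the link state is checked and napi and interrupts are
 * enabled. On failure every resource acquired so far is released and a
 * negative errno is returned.
 */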
2212d1a890faSShreyas Bhatewara int
2213d1a890faSShreyas Bhatewara vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2214d1a890faSShreyas Bhatewara {
221509c5088eSShreyas Bhatewara 	int err, i;
2216d1a890faSShreyas Bhatewara 	u32 ret;
221783d0feffSShreyas Bhatewara 	unsigned long flags;
2218d1a890faSShreyas Bhatewara 
2219fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
222009c5088eSShreyas Bhatewara 		" ring sizes %u %u %u\n", adapter->netdev->name,
222109c5088eSShreyas Bhatewara 		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
222209c5088eSShreyas Bhatewara 		adapter->tx_queue[0].tx_ring.size,
222309c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[0].size,
222409c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[1].size);
2225d1a890faSShreyas Bhatewara 
222609c5088eSShreyas Bhatewara 	vmxnet3_tq_init_all(adapter);
222709c5088eSShreyas Bhatewara 	err = vmxnet3_rq_init_all(adapter);
2228d1a890faSShreyas Bhatewara 	if (err) {
2229204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2230204a6e65SStephen Hemminger 			   "Failed to init rx queue error %d\n", err);
2231d1a890faSShreyas Bhatewara 		goto rq_err;
2232d1a890faSShreyas Bhatewara 	}
2233d1a890faSShreyas Bhatewara 
2234d1a890faSShreyas Bhatewara 	err = vmxnet3_request_irqs(adapter);
2235d1a890faSShreyas Bhatewara 	if (err) {
2236204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2237204a6e65SStephen Hemminger 			   "Failed to setup irq, error %d\n", err);
2238d1a890faSShreyas Bhatewara 		goto irq_err;
2239d1a890faSShreyas Bhatewara 	}
2240d1a890faSShreyas Bhatewara 
2241d1a890faSShreyas Bhatewara 	vmxnet3_setup_driver_shared(adapter);
2242d1a890faSShreyas Bhatewara 
2243115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2244115924b6SShreyas Bhatewara 			       adapter->shared_pa));
2245115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2246115924b6SShreyas Bhatewara 			       adapter->shared_pa));
224783d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2248d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2249d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_ACTIVATE_DEV);
2250d1a890faSShreyas Bhatewara 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
225183d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2252d1a890faSShreyas Bhatewara 
2253d1a890faSShreyas Bhatewara 	if (ret != 0) {
2254204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2255204a6e65SStephen Hemminger 			   "Failed to activate dev: error %u\n", ret);
2256d1a890faSShreyas Bhatewara 		err = -EINVAL;
2257d1a890faSShreyas Bhatewara 		goto activate_err;
2258d1a890faSShreyas Bhatewara 	}
225909c5088eSShreyas Bhatewara 
226009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
226109c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
226209c5088eSShreyas Bhatewara 				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
226309c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[0].next2fill);
226409c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
226509c5088eSShreyas Bhatewara 				(i * VMXNET3_REG_ALIGN)),
226609c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[1].next2fill);
226709c5088eSShreyas Bhatewara 	}
2268d1a890faSShreyas Bhatewara 
2269d1a890faSShreyas Bhatewara 	/* Apply the rx filter settings last. */
2270d1a890faSShreyas Bhatewara 	vmxnet3_set_mc(adapter->netdev);
2271d1a890faSShreyas Bhatewara 
2272d1a890faSShreyas Bhatewara 	/*
2273d1a890faSShreyas Bhatewara 	 * Check link state when first activating the device. It will start the
2274d1a890faSShreyas Bhatewara 	 * tx queue if the link is up.
2275d1a890faSShreyas Bhatewara 	 */
22764a1745fcSShreyas Bhatewara 	vmxnet3_check_link(adapter, true);
227709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
227809c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
2279d1a890faSShreyas Bhatewara 	vmxnet3_enable_all_intrs(adapter);
2280d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2281d1a890faSShreyas Bhatewara 	return 0;
2282d1a890faSShreyas Bhatewara 
2283d1a890faSShreyas Bhatewara activate_err:
2284d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2285d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2286d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
2287d1a890faSShreyas Bhatewara irq_err:
2288d1a890faSShreyas Bhatewara rq_err:
2289d1a890faSShreyas Bhatewara 	/* free up buffers we allocated */
229009c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
2291d1a890faSShreyas Bhatewara 	return err;
2292d1a890faSShreyas Bhatewara }
2293d1a890faSShreyas Bhatewara 
2294d1a890faSShreyas Bhatewara 
2295d1a890faSShreyas Bhatewara void
2296d1a890faSShreyas Bhatewara vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2297d1a890faSShreyas Bhatewara {
229883d0feffSShreyas Bhatewara 	unsigned long flags;
229983d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2300d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
230183d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2302d1a890faSShreyas Bhatewara }
2303d1a890faSShreyas Bhatewara 
2304d1a890faSShreyas Bhatewara 
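/*
 * Counterpart of vmxnet3_activate_dev(): issue QUIESCE_DEV, disable
 * interrupts and napi, stop the tx queues, drop the carrier and release
 * the ring buffers and irqs. Returns immediately if the QUIESCED state
 * bit was already set.
 */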
2305d1a890faSShreyas Bhatewara int
2306d1a890faSShreyas Bhatewara vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2307d1a890faSShreyas Bhatewara {
230809c5088eSShreyas Bhatewara 	int i;
230983d0feffSShreyas Bhatewara 	unsigned long flags;
2310d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2311d1a890faSShreyas Bhatewara 		return 0;
2312d1a890faSShreyas Bhatewara 
231483d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2315d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2316d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_QUIESCE_DEV);
231783d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2318d1a890faSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
2319d1a890faSShreyas Bhatewara 
232009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
232109c5088eSShreyas Bhatewara 		napi_disable(&adapter->rx_queue[i].napi);
2322d1a890faSShreyas Bhatewara 	netif_tx_disable(adapter->netdev);
2323d1a890faSShreyas Bhatewara 	adapter->link_speed = 0;
2324d1a890faSShreyas Bhatewara 	netif_carrier_off(adapter->netdev);
2325d1a890faSShreyas Bhatewara 
232609c5088eSShreyas Bhatewara 	vmxnet3_tq_cleanup_all(adapter);
232709c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
2328d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
2329d1a890faSShreyas Bhatewara 	return 0;
2330d1a890faSShreyas Bhatewara }
2331d1a890faSShreyas Bhatewara 
2332d1a890faSShreyas Bhatewara 
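/*
 * The device takes the MAC address through two registers: bytes 0-3 are
 * written to MACL as one 32-bit word, bytes 4-5 to the low half of MACH.
 */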
2333d1a890faSShreyas Bhatewara static void
2334d1a890faSShreyas Bhatewara vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2335d1a890faSShreyas Bhatewara {
2336d1a890faSShreyas Bhatewara 	u32 tmp;
2337d1a890faSShreyas Bhatewara 
2338d1a890faSShreyas Bhatewara 	tmp = *(u32 *)mac;
2339d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2340d1a890faSShreyas Bhatewara 
2341d1a890faSShreyas Bhatewara 	tmp = (mac[5] << 8) | mac[4];
2342d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2343d1a890faSShreyas Bhatewara }
2344d1a890faSShreyas Bhatewara 
2345d1a890faSShreyas Bhatewara 
2346d1a890faSShreyas Bhatewara static int
2347d1a890faSShreyas Bhatewara vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2348d1a890faSShreyas Bhatewara {
2349d1a890faSShreyas Bhatewara 	struct sockaddr *addr = p;
2350d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2351d1a890faSShreyas Bhatewara 
2352d1a890faSShreyas Bhatewara 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2353d1a890faSShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, addr->sa_data);
2354d1a890faSShreyas Bhatewara 
2355d1a890faSShreyas Bhatewara 	return 0;
2356d1a890faSShreyas Bhatewara }
2357d1a890faSShreyas Bhatewara 
2358d1a890faSShreyas Bhatewara 
2359d1a890faSShreyas Bhatewara /* ==================== initialization and cleanup routines ============ */
2360d1a890faSShreyas Bhatewara 
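/*
 * Enable the PCI device, choose a 64-bit DMA mask when available (falling
 * back to 32-bit and reporting the choice through *dma64), claim BARs 0
 * and 1 ((1 << 2) - 1 selects regions 0 and 1) and map them: hw_addr0
 * (BAR0) backs the per-packet registers such as RXPROD and IMR, hw_addr1
 * (BAR1) the command registers such as CMD, MACL/MACH and DSAL/DSAH.
 */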
2361d1a890faSShreyas Bhatewara static int
2362d1a890faSShreyas Bhatewara vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2363d1a890faSShreyas Bhatewara {
2364d1a890faSShreyas Bhatewara 	int err;
2365d1a890faSShreyas Bhatewara 	unsigned long mmio_start, mmio_len;
2366d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = adapter->pdev;
2367d1a890faSShreyas Bhatewara 
2368d1a890faSShreyas Bhatewara 	err = pci_enable_device(pdev);
2369d1a890faSShreyas Bhatewara 	if (err) {
2370204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2371d1a890faSShreyas Bhatewara 		return err;
2372d1a890faSShreyas Bhatewara 	}
2373d1a890faSShreyas Bhatewara 
2374d1a890faSShreyas Bhatewara 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2375d1a890faSShreyas Bhatewara 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2376204a6e65SStephen Hemminger 			dev_err(&pdev->dev,
2377204a6e65SStephen Hemminger 				"pci_set_consistent_dma_mask failed\n");
2378d1a890faSShreyas Bhatewara 			err = -EIO;
2379d1a890faSShreyas Bhatewara 			goto err_set_mask;
2380d1a890faSShreyas Bhatewara 		}
2381d1a890faSShreyas Bhatewara 		*dma64 = true;
2382d1a890faSShreyas Bhatewara 	} else {
2383d1a890faSShreyas Bhatewara 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2384204a6e65SStephen Hemminger 			dev_err(&pdev->dev,
2385204a6e65SStephen Hemminger 				"pci_set_dma_mask failed\n");
2386d1a890faSShreyas Bhatewara 			err = -EIO;
2387d1a890faSShreyas Bhatewara 			goto err_set_mask;
2388d1a890faSShreyas Bhatewara 		}
2389d1a890faSShreyas Bhatewara 		*dma64 = false;
2390d1a890faSShreyas Bhatewara 	}
2391d1a890faSShreyas Bhatewara 
2392d1a890faSShreyas Bhatewara 	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2393d1a890faSShreyas Bhatewara 					   vmxnet3_driver_name);
2394d1a890faSShreyas Bhatewara 	if (err) {
2395204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
2396204a6e65SStephen Hemminger 			"Failed to request region for adapter: error %d\n", err);
2397d1a890faSShreyas Bhatewara 		goto err_set_mask;
2398d1a890faSShreyas Bhatewara 	}
2399d1a890faSShreyas Bhatewara 
2400d1a890faSShreyas Bhatewara 	pci_set_master(pdev);
2401d1a890faSShreyas Bhatewara 
2402d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 0);
2403d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 0);
2404d1a890faSShreyas Bhatewara 	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2405d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr0) {
2406204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar0\n");
2407d1a890faSShreyas Bhatewara 		err = -EIO;
2408d1a890faSShreyas Bhatewara 		goto err_ioremap;
2409d1a890faSShreyas Bhatewara 	}
2410d1a890faSShreyas Bhatewara 
2411d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 1);
2412d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 1);
2413d1a890faSShreyas Bhatewara 	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2414d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr1) {
2415204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar1\n");
2416d1a890faSShreyas Bhatewara 		err = -EIO;
2417d1a890faSShreyas Bhatewara 		goto err_bar1;
2418d1a890faSShreyas Bhatewara 	}
2419d1a890faSShreyas Bhatewara 	return 0;
2420d1a890faSShreyas Bhatewara 
2421d1a890faSShreyas Bhatewara err_bar1:
2422d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
2423d1a890faSShreyas Bhatewara err_ioremap:
2424d1a890faSShreyas Bhatewara 	pci_release_selected_regions(pdev, (1 << 2) - 1);
2425d1a890faSShreyas Bhatewara err_set_mask:
2426d1a890faSShreyas Bhatewara 	pci_disable_device(pdev);
2427d1a890faSShreyas Bhatewara 	return err;
2428d1a890faSShreyas Bhatewara }
2429d1a890faSShreyas Bhatewara 
2430d1a890faSShreyas Bhatewara 
2431d1a890faSShreyas Bhatewara static void
2432d1a890faSShreyas Bhatewara vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2433d1a890faSShreyas Bhatewara {
2434d1a890faSShreyas Bhatewara 	BUG_ON(!adapter->pdev);
2435d1a890faSShreyas Bhatewara 
2436d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
2437d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr1);
2438d1a890faSShreyas Bhatewara 	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2439d1a890faSShreyas Bhatewara 	pci_disable_device(adapter->pdev);
2440d1a890faSShreyas Bhatewara }
2441d1a890faSShreyas Bhatewara 
2442d1a890faSShreyas Bhatewara 
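/*
 * Derive the rx buffer geometry from the MTU: a packet that fits in one
 * skb buffer needs a single rx buffer, while larger MTUs spill the
 * remainder into page-sized buffers (rx_buf_per_pkt > 1). Ring 0 is then
 * rounded to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN, the
 * completion ring is sized to cover both rx rings, and the result is
 * applied to every rx queue.
 */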
2443d1a890faSShreyas Bhatewara static void
2444d1a890faSShreyas Bhatewara vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2445d1a890faSShreyas Bhatewara {
244609c5088eSShreyas Bhatewara 	size_t sz, i, ring0_size, ring1_size, comp_size;
244709c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[0];
244809c5088eSShreyas Bhatewara 
2450d1a890faSShreyas Bhatewara 	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2451d1a890faSShreyas Bhatewara 				    VMXNET3_MAX_ETH_HDR_SIZE) {
2452d1a890faSShreyas Bhatewara 		adapter->skb_buf_size = adapter->netdev->mtu +
2453d1a890faSShreyas Bhatewara 					VMXNET3_MAX_ETH_HDR_SIZE;
2454d1a890faSShreyas Bhatewara 		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2455d1a890faSShreyas Bhatewara 			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2456d1a890faSShreyas Bhatewara 
2457d1a890faSShreyas Bhatewara 		adapter->rx_buf_per_pkt = 1;
2458d1a890faSShreyas Bhatewara 	} else {
2459d1a890faSShreyas Bhatewara 		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2460d1a890faSShreyas Bhatewara 		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2461d1a890faSShreyas Bhatewara 					    VMXNET3_MAX_ETH_HDR_SIZE;
2462d1a890faSShreyas Bhatewara 		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2463d1a890faSShreyas Bhatewara 	}
2464d1a890faSShreyas Bhatewara 
2465d1a890faSShreyas Bhatewara 	/*
2466d1a890faSShreyas Bhatewara 	 * for simplicity, force the ring0 size to be a multiple of
2467d1a890faSShreyas Bhatewara 	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2468d1a890faSShreyas Bhatewara 	 */
2469d1a890faSShreyas Bhatewara 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
247009c5088eSShreyas Bhatewara 	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
247109c5088eSShreyas Bhatewara 	ring0_size = (ring0_size + sz - 1) / sz * sz;
2472a53255d3SShreyas Bhatewara 	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
247309c5088eSShreyas Bhatewara 			   sz * sz);
247409c5088eSShreyas Bhatewara 	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
247509c5088eSShreyas Bhatewara 	comp_size = ring0_size + ring1_size;
247609c5088eSShreyas Bhatewara 
247709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
247809c5088eSShreyas Bhatewara 		rq = &adapter->rx_queue[i];
247909c5088eSShreyas Bhatewara 		rq->rx_ring[0].size = ring0_size;
248009c5088eSShreyas Bhatewara 		rq->rx_ring[1].size = ring1_size;
248109c5088eSShreyas Bhatewara 		rq->comp_ring.size = comp_size;
248209c5088eSShreyas Bhatewara 	}
2483d1a890faSShreyas Bhatewara }
2484d1a890faSShreyas Bhatewara 
2485d1a890faSShreyas Bhatewara 
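/*
 * Create all tx and rx rings. A failure on any tx queue or on rx queue 0
 * aborts the whole operation; a failure on a later rx queue merely
 * shrinks num_rx_queues to the number created so far.
 */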
2486d1a890faSShreyas Bhatewara int
2487d1a890faSShreyas Bhatewara vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2488d1a890faSShreyas Bhatewara 		      u32 rx_ring_size, u32 rx_ring2_size)
2489d1a890faSShreyas Bhatewara {
249009c5088eSShreyas Bhatewara 	int err = 0, i;
2491d1a890faSShreyas Bhatewara 
249209c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
249309c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
249409c5088eSShreyas Bhatewara 		tq->tx_ring.size   = tx_ring_size;
249509c5088eSShreyas Bhatewara 		tq->data_ring.size = tx_ring_size;
249609c5088eSShreyas Bhatewara 		tq->comp_ring.size = tx_ring_size;
249709c5088eSShreyas Bhatewara 		tq->shared = &adapter->tqd_start[i].ctrl;
249809c5088eSShreyas Bhatewara 		tq->stopped = true;
249909c5088eSShreyas Bhatewara 		tq->adapter = adapter;
250009c5088eSShreyas Bhatewara 		tq->qid = i;
250109c5088eSShreyas Bhatewara 		err = vmxnet3_tq_create(tq, adapter);
250209c5088eSShreyas Bhatewara 		/*
250309c5088eSShreyas Bhatewara 		 * Too late to change num_tx_queues. We cannot make do with
250409c5088eSShreyas Bhatewara 		 * fewer queues than what we asked for
250509c5088eSShreyas Bhatewara 		 */
2506d1a890faSShreyas Bhatewara 		if (err)
250709c5088eSShreyas Bhatewara 			goto queue_err;
250809c5088eSShreyas Bhatewara 	}
2509d1a890faSShreyas Bhatewara 
251009c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
251109c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2512d1a890faSShreyas Bhatewara 	vmxnet3_adjust_rx_ring_size(adapter);
251309c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
251409c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
251509c5088eSShreyas Bhatewara 		/* qid and qid2 for rx queues will be assigned later, once the
251609c5088eSShreyas Bhatewara 		 * number of rx queues is finalized after allocating intrs */
251709c5088eSShreyas Bhatewara 		rq->shared = &adapter->rqd_start[i].ctrl;
251809c5088eSShreyas Bhatewara 		rq->adapter = adapter;
251909c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(rq, adapter);
252009c5088eSShreyas Bhatewara 		if (err) {
252109c5088eSShreyas Bhatewara 			if (i == 0) {
2522204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
2523204a6e65SStephen Hemminger 					   "Could not allocate any rx queues. "
2524204a6e65SStephen Hemminger 					   "Aborting.\n");
252509c5088eSShreyas Bhatewara 				goto queue_err;
252609c5088eSShreyas Bhatewara 			} else {
2527204a6e65SStephen Hemminger 				netdev_info(adapter->netdev,
2528204a6e65SStephen Hemminger 					    "Number of rx queues changed "
252909c5088eSShreyas Bhatewara 					    "to %d.\n", i);
253009c5088eSShreyas Bhatewara 				adapter->num_rx_queues = i;
253109c5088eSShreyas Bhatewara 				err = 0;
253209c5088eSShreyas Bhatewara 				break;
253309c5088eSShreyas Bhatewara 			}
253409c5088eSShreyas Bhatewara 		}
253509c5088eSShreyas Bhatewara 	}
253609c5088eSShreyas Bhatewara 	return err;
253709c5088eSShreyas Bhatewara queue_err:
253809c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2539d1a890faSShreyas Bhatewara 	return err;
2540d1a890faSShreyas Bhatewara }
2541d1a890faSShreyas Bhatewara 
2542d1a890faSShreyas Bhatewara static int
2543d1a890faSShreyas Bhatewara vmxnet3_open(struct net_device *netdev)
2544d1a890faSShreyas Bhatewara {
2545d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
254609c5088eSShreyas Bhatewara 	int err, i;
2547d1a890faSShreyas Bhatewara 
2548d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
2549d1a890faSShreyas Bhatewara 
255009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
255109c5088eSShreyas Bhatewara 		spin_lock_init(&adapter->tx_queue[i].tx_lock);
2552d1a890faSShreyas Bhatewara 
2553d1a890faSShreyas Bhatewara 	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2554d1a890faSShreyas Bhatewara 				    VMXNET3_DEF_RX_RING_SIZE,
2555d1a890faSShreyas Bhatewara 				    VMXNET3_DEF_RX_RING_SIZE);
2556d1a890faSShreyas Bhatewara 	if (err)
2557d1a890faSShreyas Bhatewara 		goto queue_err;
2558d1a890faSShreyas Bhatewara 
2559d1a890faSShreyas Bhatewara 	err = vmxnet3_activate_dev(adapter);
2560d1a890faSShreyas Bhatewara 	if (err)
2561d1a890faSShreyas Bhatewara 		goto activate_err;
2562d1a890faSShreyas Bhatewara 
2563d1a890faSShreyas Bhatewara 	return 0;
2564d1a890faSShreyas Bhatewara 
2565d1a890faSShreyas Bhatewara activate_err:
256609c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
256709c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2568d1a890faSShreyas Bhatewara queue_err:
2569d1a890faSShreyas Bhatewara 	return err;
2570d1a890faSShreyas Bhatewara }
2571d1a890faSShreyas Bhatewara 
2572d1a890faSShreyas Bhatewara 
2573d1a890faSShreyas Bhatewara static int
2574d1a890faSShreyas Bhatewara vmxnet3_close(struct net_device *netdev)
2575d1a890faSShreyas Bhatewara {
2576d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2577d1a890faSShreyas Bhatewara 
2578d1a890faSShreyas Bhatewara 	/*
2579d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device; wait for its
2580d1a890faSShreyas Bhatewara 	 * completion.
2581d1a890faSShreyas Bhatewara 	 */
2582d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2583d1a890faSShreyas Bhatewara 		msleep(1);
2584d1a890faSShreyas Bhatewara 
2585d1a890faSShreyas Bhatewara 	vmxnet3_quiesce_dev(adapter);
2586d1a890faSShreyas Bhatewara 
258709c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
258809c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
2589d1a890faSShreyas Bhatewara 
2590d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2591d1a890faSShreyas Bhatewara 
2593d1a890faSShreyas Bhatewara 	return 0;
2594d1a890faSShreyas Bhatewara }
2595d1a890faSShreyas Bhatewara 
2596d1a890faSShreyas Bhatewara 
2597d1a890faSShreyas Bhatewara void
2598d1a890faSShreyas Bhatewara vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2599d1a890faSShreyas Bhatewara {
260009c5088eSShreyas Bhatewara 	int i;
260109c5088eSShreyas Bhatewara 
2602d1a890faSShreyas Bhatewara 	/*
2603d1a890faSShreyas Bhatewara 	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2604d1a890faSShreyas Bhatewara 	 * vmxnet3_close() will deadlock.
2605d1a890faSShreyas Bhatewara 	 */
2606d1a890faSShreyas Bhatewara 	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2607d1a890faSShreyas Bhatewara 
2608d1a890faSShreyas Bhatewara 	/* we need to enable NAPI, otherwise dev_close will deadlock */
260909c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
261009c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
2611d1a890faSShreyas Bhatewara 	dev_close(adapter->netdev);
2612d1a890faSShreyas Bhatewara }
2613d1a890faSShreyas Bhatewara 
2614d1a890faSShreyas Bhatewara 
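/*
 * An MTU change requires the rx rings to be rebuilt with the new buffer
 * geometry: quiesce and reset the device, re-create the rx queues, then
 * re-activate. Any failure on this path force-closes the interface.
 */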
2615d1a890faSShreyas Bhatewara static int
2616d1a890faSShreyas Bhatewara vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2617d1a890faSShreyas Bhatewara {
2618d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2619d1a890faSShreyas Bhatewara 	int err = 0;
2620d1a890faSShreyas Bhatewara 
2621d1a890faSShreyas Bhatewara 	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2622d1a890faSShreyas Bhatewara 		return -EINVAL;
2623d1a890faSShreyas Bhatewara 
2624d1a890faSShreyas Bhatewara 	netdev->mtu = new_mtu;
2625d1a890faSShreyas Bhatewara 
2626d1a890faSShreyas Bhatewara 	/*
2627d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device; wait for its
2628d1a890faSShreyas Bhatewara 	 * completion.
2629d1a890faSShreyas Bhatewara 	 */
2630d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2631d1a890faSShreyas Bhatewara 		msleep(1);
2632d1a890faSShreyas Bhatewara 
2633d1a890faSShreyas Bhatewara 	if (netif_running(netdev)) {
2634d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
2635d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
2636d1a890faSShreyas Bhatewara 
2637d1a890faSShreyas Bhatewara 		/* we need to re-create the rx queue based on the new mtu */
263809c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy_all(adapter);
2639d1a890faSShreyas Bhatewara 		vmxnet3_adjust_rx_ring_size(adapter);
264009c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create_all(adapter);
2641d1a890faSShreyas Bhatewara 		if (err) {
2642204a6e65SStephen Hemminger 			netdev_err(netdev,
2643204a6e65SStephen Hemminger 				   "failed to re-create rx queues, "
2644204a6e65SStephen Hemminger 				   "error %d. Closing it.\n", err);
2645d1a890faSShreyas Bhatewara 			goto out;
2646d1a890faSShreyas Bhatewara 		}
2647d1a890faSShreyas Bhatewara 
2648d1a890faSShreyas Bhatewara 		err = vmxnet3_activate_dev(adapter);
2649d1a890faSShreyas Bhatewara 		if (err) {
2650204a6e65SStephen Hemminger 			netdev_err(netdev,
2651204a6e65SStephen Hemminger 				   "failed to re-activate, error %d. "
2652204a6e65SStephen Hemminger 				   "Closing it\n", err);
2653d1a890faSShreyas Bhatewara 			goto out;
2654d1a890faSShreyas Bhatewara 		}
2655d1a890faSShreyas Bhatewara 	}
2656d1a890faSShreyas Bhatewara 
2657d1a890faSShreyas Bhatewara out:
2658d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2659d1a890faSShreyas Bhatewara 	if (err)
2660d1a890faSShreyas Bhatewara 		vmxnet3_force_close(adapter);
2661d1a890faSShreyas Bhatewara 
2662d1a890faSShreyas Bhatewara 	return err;
2663d1a890faSShreyas Bhatewara }
2664d1a890faSShreyas Bhatewara 
2665d1a890faSShreyas Bhatewara 
2666d1a890faSShreyas Bhatewara static void
2667d1a890faSShreyas Bhatewara vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2668d1a890faSShreyas Bhatewara {
2669d1a890faSShreyas Bhatewara 	struct net_device *netdev = adapter->netdev;
2670d1a890faSShreyas Bhatewara 
2671a0d2730cSMichał Mirosław 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2672a0d2730cSMichał Mirosław 		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
267372e85c45SJesse Gross 		NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
267472e85c45SJesse Gross 		NETIF_F_LRO;
2675a0d2730cSMichał Mirosław 	if (dma64)
2676ebbf9295SShreyas Bhatewara 		netdev->hw_features |= NETIF_F_HIGHDMA;
267772e85c45SJesse Gross 	netdev->vlan_features = netdev->hw_features &
267872e85c45SJesse Gross 				~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
267972e85c45SJesse Gross 	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
2680d1a890faSShreyas Bhatewara }
2681d1a890faSShreyas Bhatewara 
2682d1a890faSShreyas Bhatewara 
2683d1a890faSShreyas Bhatewara static void
2684d1a890faSShreyas Bhatewara vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2685d1a890faSShreyas Bhatewara {
2686d1a890faSShreyas Bhatewara 	u32 tmp;
2687d1a890faSShreyas Bhatewara 
2688d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2689d1a890faSShreyas Bhatewara 	*(u32 *)mac = tmp;
2690d1a890faSShreyas Bhatewara 
2691d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2692d1a890faSShreyas Bhatewara 	mac[4] = tmp & 0xff;
2693d1a890faSShreyas Bhatewara 	mac[5] = (tmp >> 8) & 0xff;
2694d1a890faSShreyas Bhatewara }
2695d1a890faSShreyas Bhatewara 
269609c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
269709c5088eSShreyas Bhatewara 
269809c5088eSShreyas Bhatewara /*
269909c5088eSShreyas Bhatewara  * Enable MSIx vectors.
270009c5088eSShreyas Bhatewara  * Returns :
270109c5088eSShreyas Bhatewara  *	0 on successful enabling of required vectors,
270225985edcSLucas De Marchi  *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors
270309c5088eSShreyas Bhatewara  *	 required could be enabled,
270409c5088eSShreyas Bhatewara  *	the number of vectors which can be enabled otherwise (this number is
270509c5088eSShreyas Bhatewara  *	 smaller than VMXNET3_LINUX_MIN_MSIX_VECT)
270609c5088eSShreyas Bhatewara  */
270709c5088eSShreyas Bhatewara 
270809c5088eSShreyas Bhatewara static int
270909c5088eSShreyas Bhatewara vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
271009c5088eSShreyas Bhatewara 			     int vectors)
271109c5088eSShreyas Bhatewara {
271209c5088eSShreyas Bhatewara 	int err = 0, vector_threshold;
271309c5088eSShreyas Bhatewara 	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
271409c5088eSShreyas Bhatewara 
271509c5088eSShreyas Bhatewara 	while (vectors >= vector_threshold) {
271609c5088eSShreyas Bhatewara 		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
271709c5088eSShreyas Bhatewara 				      vectors);
271809c5088eSShreyas Bhatewara 		if (!err) {
271909c5088eSShreyas Bhatewara 			adapter->intr.num_intrs = vectors;
272009c5088eSShreyas Bhatewara 			return 0;
272109c5088eSShreyas Bhatewara 		} else if (err < 0) {
27224bad25faSStephen Hemminger 			dev_err(&adapter->netdev->dev,
27234c1dc80aSShreyas Bhatewara 				   "Failed to enable MSI-X, error: %d\n", err);
272409c5088eSShreyas Bhatewara 			vectors = 0;
272509c5088eSShreyas Bhatewara 		} else if (err < vector_threshold) {
272609c5088eSShreyas Bhatewara 			break;
272709c5088eSShreyas Bhatewara 		} else {
272809c5088eSShreyas Bhatewara 			/* If we fail to enable the required number of MSI-X
27297e96fbf2SShreyas Bhatewara 			 * vectors, try enabling the minimum number required.
273009c5088eSShreyas Bhatewara 			 */
27314bad25faSStephen Hemminger 			dev_err(&adapter->netdev->dev,
27324c1dc80aSShreyas Bhatewara 				"Failed to enable %d MSI-X, trying %d instead\n",
27334c1dc80aSShreyas Bhatewara 				    vectors, vector_threshold);
273409c5088eSShreyas Bhatewara 			vectors = vector_threshold;
273509c5088eSShreyas Bhatewara 		}
273609c5088eSShreyas Bhatewara 	}
273709c5088eSShreyas Bhatewara 
27384bad25faSStephen Hemminger 	dev_info(&adapter->pdev->dev,
27394bad25faSStephen Hemminger 		 "Number of MSI-X interrupts which can be allocated "
27404bad25faSStephen Hemminger 		 "is lower than the minimum threshold required.\n");
274109c5088eSShreyas Bhatewara 	return err;
274209c5088eSShreyas Bhatewara }
274309c5088eSShreyas Bhatewara 
274409c5088eSShreyas Bhatewara 
274509c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
2746d1a890faSShreyas Bhatewara 
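/*
 * Pick the interrupt type: ask the device for its preference, then try
 * MSI-X, MSI and INTx in that order. The MSI-X vector budget is one per
 * tx queue (or one shared vector), plus one per rx queue unless rx
 * buddy-shares vectors with tx, plus one for link events; e.g. 4 tx and
 * 4 rx queues with buddy sharing request 4 + 0 + 1 = 5 vectors. Falling
 * back to MSI or INTx also drops the driver to a single rx queue.
 */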
2747d1a890faSShreyas Bhatewara static void
2748d1a890faSShreyas Bhatewara vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2749d1a890faSShreyas Bhatewara {
2750d1a890faSShreyas Bhatewara 	u32 cfg;
2751e328d410SRoland Dreier 	unsigned long flags;
2752d1a890faSShreyas Bhatewara 
2753d1a890faSShreyas Bhatewara 	/* intr settings */
2754e328d410SRoland Dreier 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2755d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2756d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_GET_CONF_INTR);
2757d1a890faSShreyas Bhatewara 	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2758e328d410SRoland Dreier 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2759d1a890faSShreyas Bhatewara 	adapter->intr.type = cfg & 0x3;
2760d1a890faSShreyas Bhatewara 	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2761d1a890faSShreyas Bhatewara 
2762d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_AUTO) {
27630bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSIX;
27640bdc0d70SShreyas Bhatewara 	}
2765d1a890faSShreyas Bhatewara 
27668f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
27670bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
276809c5088eSShreyas Bhatewara 		int vector, err = 0;
27690bdc0d70SShreyas Bhatewara 
277009c5088eSShreyas Bhatewara 		adapter->intr.num_intrs = (adapter->share_intr ==
277109c5088eSShreyas Bhatewara 					   VMXNET3_INTR_TXSHARE) ? 1 :
277209c5088eSShreyas Bhatewara 					   adapter->num_tx_queues;
277309c5088eSShreyas Bhatewara 		adapter->intr.num_intrs += (adapter->share_intr ==
277409c5088eSShreyas Bhatewara 					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
277509c5088eSShreyas Bhatewara 					   adapter->num_rx_queues;
277609c5088eSShreyas Bhatewara 		adapter->intr.num_intrs += 1;		/* for link event */
277709c5088eSShreyas Bhatewara 
277809c5088eSShreyas Bhatewara 		adapter->intr.num_intrs = (adapter->intr.num_intrs >
277909c5088eSShreyas Bhatewara 					   VMXNET3_LINUX_MIN_MSIX_VECT
278009c5088eSShreyas Bhatewara 					   ? adapter->intr.num_intrs :
278109c5088eSShreyas Bhatewara 					   VMXNET3_LINUX_MIN_MSIX_VECT);
278209c5088eSShreyas Bhatewara 
278309c5088eSShreyas Bhatewara 		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
278409c5088eSShreyas Bhatewara 			adapter->intr.msix_entries[vector].entry = vector;
278509c5088eSShreyas Bhatewara 
278609c5088eSShreyas Bhatewara 		err = vmxnet3_acquire_msix_vectors(adapter,
278709c5088eSShreyas Bhatewara 						   adapter->intr.num_intrs);
278809c5088eSShreyas Bhatewara 		/* If we cannot allocate one MSI-X vector per queue
278909c5088eSShreyas Bhatewara 		 * then limit the number of rx queues to 1
279009c5088eSShreyas Bhatewara 		 */
279109c5088eSShreyas Bhatewara 		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
279209c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
27937e96fbf2SShreyas Bhatewara 			    || adapter->num_rx_queues != 1) {
279409c5088eSShreyas Bhatewara 				adapter->share_intr = VMXNET3_INTR_TXSHARE;
2795204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
2796204a6e65SStephen Hemminger 					   "Number of rx queues : 1\n");
279709c5088eSShreyas Bhatewara 				adapter->num_rx_queues = 1;
279809c5088eSShreyas Bhatewara 				adapter->intr.num_intrs =
279909c5088eSShreyas Bhatewara 						VMXNET3_LINUX_MIN_MSIX_VECT;
280009c5088eSShreyas Bhatewara 			}
2801d1a890faSShreyas Bhatewara 			return;
2802d1a890faSShreyas Bhatewara 		}
280309c5088eSShreyas Bhatewara 		if (!err)
280409c5088eSShreyas Bhatewara 			return;
280509c5088eSShreyas Bhatewara 
280609c5088eSShreyas Bhatewara 		/* If we cannot allocate MSI-X vectors, use only one rx queue */
28074bad25faSStephen Hemminger 		dev_info(&adapter->pdev->dev,
28084bad25faSStephen Hemminger 			 "Failed to enable MSI-X, error %d. "
28094bad25faSStephen Hemminger 			 "Limiting #rx queues to 1, try MSI.\n", err);
281009c5088eSShreyas Bhatewara 
28110bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSI;
28120bdc0d70SShreyas Bhatewara 	}
2813d1a890faSShreyas Bhatewara 
28140bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSI) {
28150bdc0d70SShreyas Bhatewara 		int err;
2816d1a890faSShreyas Bhatewara 		err = pci_enable_msi(adapter->pdev);
2817d1a890faSShreyas Bhatewara 		if (!err) {
281809c5088eSShreyas Bhatewara 			adapter->num_rx_queues = 1;
2819d1a890faSShreyas Bhatewara 			adapter->intr.num_intrs = 1;
2820d1a890faSShreyas Bhatewara 			return;
2821d1a890faSShreyas Bhatewara 		}
2822d1a890faSShreyas Bhatewara 	}
28230bdc0d70SShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
2824d1a890faSShreyas Bhatewara 
282509c5088eSShreyas Bhatewara 	adapter->num_rx_queues = 1;
2826204a6e65SStephen Hemminger 	dev_info(&adapter->netdev->dev,
2827204a6e65SStephen Hemminger 		 "Using INTx interrupt, #Rx queues: 1.\n");
2828d1a890faSShreyas Bhatewara 	adapter->intr.type = VMXNET3_IT_INTX;
2829d1a890faSShreyas Bhatewara 
2830d1a890faSShreyas Bhatewara 	/* INT-X related setting */
2831d1a890faSShreyas Bhatewara 	adapter->intr.num_intrs = 1;
2832d1a890faSShreyas Bhatewara }
2833d1a890faSShreyas Bhatewara 
2834d1a890faSShreyas Bhatewara 
2835d1a890faSShreyas Bhatewara static void
2836d1a890faSShreyas Bhatewara vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2837d1a890faSShreyas Bhatewara {
2838d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX)
2839d1a890faSShreyas Bhatewara 		pci_disable_msix(adapter->pdev);
2840d1a890faSShreyas Bhatewara 	else if (adapter->intr.type == VMXNET3_IT_MSI)
2841d1a890faSShreyas Bhatewara 		pci_disable_msi(adapter->pdev);
2842d1a890faSShreyas Bhatewara 	else
2843d1a890faSShreyas Bhatewara 		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2844d1a890faSShreyas Bhatewara }
2845d1a890faSShreyas Bhatewara 
2846d1a890faSShreyas Bhatewara 
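/*
 * Watchdog callback: count the hang, schedule the reset work and wake
 * the tx queue.
 */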
2847d1a890faSShreyas Bhatewara static void
2848d1a890faSShreyas Bhatewara vmxnet3_tx_timeout(struct net_device *netdev)
2849d1a890faSShreyas Bhatewara {
2850d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2851d1a890faSShreyas Bhatewara 	adapter->tx_timeout_count++;
2852d1a890faSShreyas Bhatewara 
2853204a6e65SStephen Hemminger 	netdev_err(adapter->netdev, "tx hang\n");
2854d1a890faSShreyas Bhatewara 	schedule_work(&adapter->work);
285509c5088eSShreyas Bhatewara 	netif_wake_queue(adapter->netdev);
2856d1a890faSShreyas Bhatewara }
2857d1a890faSShreyas Bhatewara 
2858d1a890faSShreyas Bhatewara 
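/*
 * Deferred reset handler: quiesce, reset and re-activate the device
 * under rtnl_lock. Does nothing if another thread already holds the
 * RESETTING bit or if the interface has been closed in the meantime.
 */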
2859d1a890faSShreyas Bhatewara static void
2860d1a890faSShreyas Bhatewara vmxnet3_reset_work(struct work_struct *data)
2861d1a890faSShreyas Bhatewara {
2862d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
2863d1a890faSShreyas Bhatewara 
2864d1a890faSShreyas Bhatewara 	adapter = container_of(data, struct vmxnet3_adapter, work);
2865d1a890faSShreyas Bhatewara 
2866d1a890faSShreyas Bhatewara 	/* if another thread is resetting the device, no need to proceed */
2867d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2868d1a890faSShreyas Bhatewara 		return;
2869d1a890faSShreyas Bhatewara 
2870d1a890faSShreyas Bhatewara 	/* if the device is closed, we must leave it alone */
2871d9a5f210SShreyas Bhatewara 	rtnl_lock();
2872d1a890faSShreyas Bhatewara 	if (netif_running(adapter->netdev)) {
2873204a6e65SStephen Hemminger 		netdev_notice(adapter->netdev, "resetting\n");
2874d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
2875d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
2876d1a890faSShreyas Bhatewara 		vmxnet3_activate_dev(adapter);
2877d1a890faSShreyas Bhatewara 	} else {
2878204a6e65SStephen Hemminger 		netdev_info(adapter->netdev, "already closed\n");
2879d1a890faSShreyas Bhatewara 	}
2880d9a5f210SShreyas Bhatewara 	rtnl_unlock();
2881d1a890faSShreyas Bhatewara 
2882d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2883d1a890faSShreyas Bhatewara }
2884d1a890faSShreyas Bhatewara 
2885d1a890faSShreyas Bhatewara 
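/*
 * PCI probe: size the tx/rx queue counts from the number of online CPUs
 * (rounded down to a power of two and capped by the device maxima),
 * allocate the shared area and queue descriptors, verify h/w and UPT
 * version compatibility, then set up features, interrupts, RSS and napi.
 */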
28863a4751a3SBill Pemberton static int
2887d1a890faSShreyas Bhatewara vmxnet3_probe_device(struct pci_dev *pdev,
2888d1a890faSShreyas Bhatewara 		     const struct pci_device_id *id)
2889d1a890faSShreyas Bhatewara {
2890d1a890faSShreyas Bhatewara 	static const struct net_device_ops vmxnet3_netdev_ops = {
2891d1a890faSShreyas Bhatewara 		.ndo_open = vmxnet3_open,
2892d1a890faSShreyas Bhatewara 		.ndo_stop = vmxnet3_close,
2893d1a890faSShreyas Bhatewara 		.ndo_start_xmit = vmxnet3_xmit_frame,
2894d1a890faSShreyas Bhatewara 		.ndo_set_mac_address = vmxnet3_set_mac_addr,
2895d1a890faSShreyas Bhatewara 		.ndo_change_mtu = vmxnet3_change_mtu,
2896a0d2730cSMichał Mirosław 		.ndo_set_features = vmxnet3_set_features,
289795305f6cSstephen hemminger 		.ndo_get_stats64 = vmxnet3_get_stats64,
2898d1a890faSShreyas Bhatewara 		.ndo_tx_timeout = vmxnet3_tx_timeout,
2899afc4b13dSJiri Pirko 		.ndo_set_rx_mode = vmxnet3_set_mc,
2900d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2901d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2902d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
2903d1a890faSShreyas Bhatewara 		.ndo_poll_controller = vmxnet3_netpoll,
2904d1a890faSShreyas Bhatewara #endif
2905d1a890faSShreyas Bhatewara 	};
2906d1a890faSShreyas Bhatewara 	int err;
2907d1a890faSShreyas Bhatewara 	bool dma64 = false; /* initialized only to silence gcc */
2908d1a890faSShreyas Bhatewara 	u32 ver;
2909d1a890faSShreyas Bhatewara 	struct net_device *netdev;
2910d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
2911d1a890faSShreyas Bhatewara 	u8 mac[ETH_ALEN];
291209c5088eSShreyas Bhatewara 	int size;
291309c5088eSShreyas Bhatewara 	int num_tx_queues;
291409c5088eSShreyas Bhatewara 	int num_rx_queues;
2915d1a890faSShreyas Bhatewara 
2916e154b639SShreyas Bhatewara 	if (!pci_msi_enabled())
2917e154b639SShreyas Bhatewara 		enable_mq = 0;
2918e154b639SShreyas Bhatewara 
291909c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
292009c5088eSShreyas Bhatewara 	if (enable_mq)
292109c5088eSShreyas Bhatewara 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
292209c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
292309c5088eSShreyas Bhatewara 	else
292409c5088eSShreyas Bhatewara #endif
292509c5088eSShreyas Bhatewara 		num_rx_queues = 1;
2926eebb02b1SShreyas Bhatewara 	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
292709c5088eSShreyas Bhatewara 
292809c5088eSShreyas Bhatewara 	if (enable_mq)
292909c5088eSShreyas Bhatewara 		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
293009c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
293109c5088eSShreyas Bhatewara 	else
293209c5088eSShreyas Bhatewara 		num_tx_queues = 1;
293309c5088eSShreyas Bhatewara 
2934eebb02b1SShreyas Bhatewara 	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
293509c5088eSShreyas Bhatewara 	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
293609c5088eSShreyas Bhatewara 				   max(num_tx_queues, num_rx_queues));
2937204a6e65SStephen Hemminger 	dev_info(&pdev->dev,
2938204a6e65SStephen Hemminger 		 "# of Tx queues : %d, # of Rx queues : %d\n",
293909c5088eSShreyas Bhatewara 		 num_tx_queues, num_rx_queues);
294009c5088eSShreyas Bhatewara 
294141de8d4cSJoe Perches 	if (!netdev)
2942d1a890faSShreyas Bhatewara 		return -ENOMEM;
2943d1a890faSShreyas Bhatewara 
2944d1a890faSShreyas Bhatewara 	pci_set_drvdata(pdev, netdev);
2945d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
2946d1a890faSShreyas Bhatewara 	adapter->netdev = netdev;
2947d1a890faSShreyas Bhatewara 	adapter->pdev = pdev;
2948d1a890faSShreyas Bhatewara 
294983d0feffSShreyas Bhatewara 	spin_lock_init(&adapter->cmd_lock);
2950d1a890faSShreyas Bhatewara 	adapter->shared = pci_alloc_consistent(adapter->pdev,
2951d1a890faSShreyas Bhatewara 					       sizeof(struct Vmxnet3_DriverShared),
2952d1a890faSShreyas Bhatewara 					       &adapter->shared_pa);
2953d1a890faSShreyas Bhatewara 	if (!adapter->shared) {
2954204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to allocate memory\n");
2955d1a890faSShreyas Bhatewara 		err = -ENOMEM;
2956d1a890faSShreyas Bhatewara 		goto err_alloc_shared;
2957d1a890faSShreyas Bhatewara 	}
2958d1a890faSShreyas Bhatewara 
295909c5088eSShreyas Bhatewara 	adapter->num_rx_queues = num_rx_queues;
296009c5088eSShreyas Bhatewara 	adapter->num_tx_queues = num_tx_queues;
2961e4fabf2bSBhavesh Davda 	adapter->rx_buf_per_pkt = 1;
296209c5088eSShreyas Bhatewara 
296309c5088eSShreyas Bhatewara 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
296409c5088eSShreyas Bhatewara 	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
296509c5088eSShreyas Bhatewara 	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
2966d1a890faSShreyas Bhatewara 						  &adapter->queue_desc_pa);
2967d1a890faSShreyas Bhatewara 
2968d1a890faSShreyas Bhatewara 	if (!adapter->tqd_start) {
2969204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to allocate memory\n");
2970d1a890faSShreyas Bhatewara 		err = -ENOMEM;
2971d1a890faSShreyas Bhatewara 		goto err_alloc_queue_desc;
2972d1a890faSShreyas Bhatewara 	}
297309c5088eSShreyas Bhatewara 	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
297409c5088eSShreyas Bhatewara 							    adapter->num_tx_queues);
2975d1a890faSShreyas Bhatewara 
2976d1a890faSShreyas Bhatewara 	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2977d1a890faSShreyas Bhatewara 	if (adapter->pm_conf == NULL) {
2978d1a890faSShreyas Bhatewara 		err = -ENOMEM;
2979d1a890faSShreyas Bhatewara 		goto err_alloc_pm;
2980d1a890faSShreyas Bhatewara 	}
2981d1a890faSShreyas Bhatewara 
298209c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
298309c5088eSShreyas Bhatewara 
298409c5088eSShreyas Bhatewara 	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
298509c5088eSShreyas Bhatewara 	if (adapter->rss_conf == NULL) {
298609c5088eSShreyas Bhatewara 		err = -ENOMEM;
298709c5088eSShreyas Bhatewara 		goto err_alloc_rss;
298809c5088eSShreyas Bhatewara 	}
298909c5088eSShreyas Bhatewara #endif /* VMXNET3_RSS */
299009c5088eSShreyas Bhatewara 
	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible h/w version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible upt version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

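	/*
	 * When the Tx and Rx queue counts match, each Tx queue can share
	 * the interrupt vector of its Rx "buddy" queue (BUDDYSHARE);
	 * otherwise the queues are kept on separate vectors where the
	 * chosen interrupt type allows it.
	 */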
	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

	if (adapter->num_tx_queues == adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
	else
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

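	/*
	 * RSS only pays off with multiple Rx queues to spread flows
	 * across, and it relies on per-queue MSI-X vectors; fall back to
	 * single-queue behaviour otherwise.  NETIF_F_RXHASH advertises
	 * the device-computed flow hash to the stack.
	 */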
#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		netdev->hw_features |= NETIF_F_RXHASH;
		netdev->features |= NETIF_F_RXHASH;
		dev_dbg(&pdev->dev, "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

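	/*
	 * watchdog_timeo bounds how long a Tx queue may look stalled
	 * before the stack invokes the driver's tx_timeout handler, which
	 * in turn schedules the reset work initialized below.
	 */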
	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);

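	/*
	 * With MSI-X each Rx queue gets its own NAPI context and poll
	 * handler; with a shared interrupt a single NAPI context
	 * (vmxnet3_poll) services the whole device.  64 is the
	 * conventional NAPI weight.
	 */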
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

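	/*
	 * Tell the stack how many queues are actually usable: the netdev
	 * was allocated with max(tx, rx) sub-queues, which may exceed
	 * either count.  The carrier is also forced off before
	 * registration so no bogus link-up is seen ahead of the first
	 * real link event.
	 */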
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	netif_carrier_off(netdev);
	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register adapter\n");
		goto err_register;
	}

	vmxnet3_check_link(adapter, false);
	return 0;

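	/*
	 * Error unwind: the labels run in reverse order of the
	 * allocations above, so a failure at any step releases exactly
	 * what had been set up before it.
	 */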
err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
err_alloc_rss:
#endif
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}


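/*
 * Device teardown.  The Rx queue count is recomputed with the same rules
 * used at probe time (enable_mq, number of online CPUs, rounded down to
 * a power of two) because adapter->num_rx_queues may have been lowered
 * after the queue descriptors were allocated (e.g. when enough MSI-X
 * vectors could not be obtained), and the block freed here must match
 * the size that was originally allocated.
 */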
static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
#endif
	kfree(adapter->pm_conf);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}


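/*
 * Power management.  On suspend the datapath is torn down and the device
 * is programmed with wake-up filters matching the configured WoL modes;
 * on resume the filters are cleared and interrupts and NAPI polling are
 * brought back up.
 */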
#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

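	/*
	 * Filter format: pattern[] holds the frame bytes to match,
	 * starting at the destination MAC, and mask[] is a bitmap with
	 * one bit per pattern byte (bit N of mask[N / 8] covers pattern
	 * byte N).  For unicast wake-up only the 6-byte destination MAC
	 * is compared, hence mask[0] = 0x3F.
	 */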
	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN +  /* Ethernet header */
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +			/* 2 Ethernet addresses */
			2 * sizeof(u32);		/* 2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

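		/*
		 * The 42-byte ARP pattern built above is: Ethernet header
		 * (bytes 0-13), ARP header (14-21), sender MAC/IP
		 * (22-31), target MAC/IP (32-41).  Only h_proto (bytes
		 * 12-13), ar_op (bytes 20-21) and the target IP (bytes
		 * 38-41) are compared; the mask bytes below select
		 * exactly those byte positions.
		 */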
		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

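	/*
	 * Publish the PM configuration through the shared area (version,
	 * length and physical address of pmConf) and ask the device to
	 * re-read it before the PCI device is powered down.
	 */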
	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen =
		cpu_to_le32(sizeof(*pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(virt_to_phys(pmConf));

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}


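/*
 * Resume undoes suspend: clear the wake-up filters, re-enable and
 * restore the PCI device, then rebuild interrupts and restart NAPI
 * polling before traffic resumes.
 */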
static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen =
		cpu_to_le32(sizeof(*pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(virt_to_phys(pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};


static int __init
vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
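/*
 * Example usage from inside a guest, assuming the module is built as
 * vmxnet3.ko and the resulting interface happens to be named eth0 (the
 * interface name is only an example):
 *
 *	modprobe vmxnet3
 *	ethtool -i eth0		# reports driver "vmxnet3" and the version above
 */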