/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"
#include "vmxnet3_xdp.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);

/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	if (!VMXNET3_VERSION_GE_6(adapter) ||
	    !adapter->queuesExtEnabled) {
		adapter->shared->devRead.intrConf.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
	} else {
		adapter->shared->devReadExt.intrConfExt.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
	}
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	if (!VMXNET3_VERSION_GE_6(adapter) ||
	    !adapter->queuesExtEnabled) {
		adapter->shared->devRead.intrConf.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	} else {
		adapter->shared->devReadExt.intrConfExt.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	}
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}

/* Check whether the given capability is supported by the UPT device, or
 * whether UPT was even requested.
 */
bool
vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
{
	if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
	    cap_supported & (1UL << cap)) {
		return true;
	}

	return false;
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are
 * used to correctly read from and write to the ABI.
 * The general technique used here is: double word bitfields are defined in
 * the opposite order for big endian architectures. Then, before reading
 * them in the driver, the complete double word is translated using
 * le32_to_cpu. Similarly, after the driver writes into the bitfields,
 * cpu_to_le32 is used to translate the double words into the required
 * format.
 * To avoid touching bits in a shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to the functions below.
 */
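
/*
 * Illustrative sketch only (the real layouts live in vmxnet3_defs.h, and
 * the field widths below are examples): a descriptor dword carrying, say,
 * a 16-bit len, a 1-bit dtype and a 1-bit gen field would be declared in
 * opposite orders for the two endiannesses:
 *
 *	#ifdef __BIG_ENDIAN_BITFIELD
 *		u32 gen:1, dtype:1, rsvd:14, len:16;
 *	#else
 *		u32 len:16, rsvd:14, dtype:1, gen:1;
 *	#endif
 *
 * so that translating the whole dword with le32_to_cpu()/cpu_to_le32()
 * lands every field at the bit position the device expects.
 */
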
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;

	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;

	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;

	temp &= mask;
	temp >>= pos;
	return temp;
}



#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */
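
/*
 * Usage sketch for the accessors above: ring processing compares a
 * descriptor's gen bit against the ring's current gen to decide whether
 * the device has published that entry, e.g. (as in vmxnet3_tq_tx_complete()
 * below):
 *
 *	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
 *		...process the completion descriptor...
 *	}
 *
 * On little endian builds the accessors collapse to plain bitfield reads.
 */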


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	u32 map_type = tbi->map_type;

	if (map_type & VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 DMA_TO_DEVICE);
	else if (map_type & VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       DMA_TO_DEVICE);
	else
		BUG_ON(map_type & ~VMXNET3_MAP_XDP);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


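/*
 * Reclaim the tx ring entries of one completed packet: unmap every buffer
 * from tq->tx_ring.next2comp through the EOP descriptor at eop_idx, then
 * free the skb (or, for XDP, bulk-return the frame via @bq). Returns the
 * number of tx descriptors reclaimed.
 */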
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter,
		  struct xdp_frame_bulk *bq)
{
	struct vmxnet3_tx_buf_info *tbi;
	int entries = 0;
	u32 map_type;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	tbi = &tq->buf_info[eop_idx];
	BUG_ON(!tbi->skb);
	map_type = tbi->map_type;
	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	if (map_type & VMXNET3_MAP_XDP)
		xdp_return_frame_bulk(tbi->xdpf, bq);
	else
		dev_kfree_skb_any(tbi->skb);

	/* xdpf and skb are in an anonymous union. */
	tbi->skb = NULL;

	return entries;
}


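/*
 * Walk the completion ring, reclaiming descriptors for every packet the
 * device has finished transmitting, and wake the queue if it was stopped
 * and enough ring space has become available again.
 */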
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	union Vmxnet3_GenericDesc *gdesc;
	struct xdp_frame_bulk bq;
	int completed = 0;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		/* Prevent any &gdesc->tcd field from being (speculatively)
		 * read before (&gdesc->tcd)->gen is read.
		 */
		dma_rmb();

		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter, &bq);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}
	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	struct xdp_frame_bulk bq;
	u32 map_type;
	int i;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;
		map_type = tbi->map_type;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			if (map_type & VMXNET3_MAP_XDP)
				xdp_return_frame_bulk(tbi->xdpf, &bq);
			else
				dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

	/* sanity check, verify all buffers are indeed unmapped */
	for (i = 0; i < tq->tx_ring.size; i++)
		BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->data_ring.size * tq->txdata_desc_size,
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0,
	       tq->data_ring.size * tq->txdata_desc_size);

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * tq->txdata_desc_size,
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
		goto err;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
				    GFP_KERNEL,
				    dev_to_node(&adapter->pdev->dev));
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;
		rbi->comp_state = VMXNET3_RXD_COMP_PENDING;

		if (rbi->buf_type == VMXNET3_RX_BUF_XDP) {
			void *data = vmxnet3_pp_get_buff(rq->page_pool,
							 &rbi->dma_addr,
							 GFP_KERNEL);
			if (!data) {
				rq->stats.rx_buf_alloc_failure++;
				break;
			}
			rbi->page = virt_to_page(data);
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						DMA_FROM_DEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					dev_kfree_skb_any(rbi->skb);
					rbi->skb = NULL;
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len  != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						DMA_FROM_DEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					put_page(rbi->page);
					rbi->page = NULL;
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc) {
			rbi->comp_state = VMXNET3_RXD_COMP_DONE;
			break;
		}

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}


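/*
 * Fill tx descriptors for one packet: an SOP descriptor pointing into the
 * data ring when headers were copied there, then descriptors for the rest
 * of the linear part, then one or more per page fragment. The SOP
 * descriptor keeps the previous generation bit, so the device ignores the
 * packet until the caller flips it; ctx->eop_txd is left pointing at the
 * last descriptor written.
 */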
static int
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					tq->txdata_desc_size);
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
				skb->data + buf_offset, buf_size,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
			return -EFAULT;

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);
			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
				return -EFAULT;

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				"txd[%u]: 0x%llx %u %u\n",
				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;

	return 0;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 *    parse relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  an error occurred while parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is the number of bytes copied
 *    3. the portion to be copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_tx_ctx *ctx,
		  struct vmxnet3_adapter *adapter)
{
	u8 protocol = 0;

	if (ctx->mss) {	/* TSO */
		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
			ctx->l4_offset = skb_inner_transport_offset(skb);
			ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
		} else {
			ctx->l4_offset = skb_transport_offset(skb);
			ctx->l4_hdr_size = tcp_hdrlen(skb);
			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
		}
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			/* For encap packets, skb_checksum_start_offset refers
			 * to inner L4 offset. Thus, below works for encap as
			 * well as non-encap case
			 */
			ctx->l4_offset = skb_checksum_start_offset(skb);

			if (VMXNET3_VERSION_GE_4(adapter) &&
			    skb->encapsulation) {
				struct iphdr *iph = inner_ip_hdr(skb);

				if (iph->version == 4) {
					protocol = iph->protocol;
				} else {
					const struct ipv6hdr *ipv6h;

					ipv6h = inner_ipv6_hdr(skb);
					protocol = ipv6h->nexthdr;
				}
			} else {
				if (ctx->ipv4) {
					const struct iphdr *iph = ip_hdr(skb);

					protocol = iph->protocol;
				} else if (ctx->ipv6) {
					const struct ipv6hdr *ipv6h;

					ipv6h = ipv6_hdr(skb);
					protocol = ipv6h->nexthdr;
				}
			}

			switch (protocol) {
			case IPPROTO_TCP:
				ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
						   tcp_hdrlen(skb);
				break;
			case IPPROTO_UDP:
				ctx->l4_hdr_size = sizeof(struct udphdr);
				break;
			default:
				ctx->l4_hdr_size = 0;
				break;
			}

			ctx->copy_size = min(ctx->l4_offset +
					 ctx->l4_hdr_size, skb->len);
		} else {
			ctx->l4_offset = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min_t(unsigned int,
					       tq->txdata_desc_size,
					       skb_headlen(skb));
		}

		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
			ctx->copy_size = skb->len;

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	return 1;
err:
	return -1;
}
989cec05562SNeil Horman 
990cec05562SNeil Horman /*
991cec05562SNeil Horman  *    copy relevant protocol headers to the transmit ring:
992cec05562SNeil Horman  *      For a tso pkt, relevant headers are L2/3/4 including options
993cec05562SNeil Horman  *      For a pkt requesting csum offloading, they are L2/3 and may include L4
994cec05562SNeil Horman  *      if it's a TCP/UDP pkt
995cec05562SNeil Horman  *
997cec05562SNeil Horman  *    Note that this requires that vmxnet3_parse_hdr be called first to set
998cec05562SNeil Horman  *      the appropriate bits in ctx
999cec05562SNeil Horman  */
1000cec05562SNeil Horman static void
1001cec05562SNeil Horman vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1002cec05562SNeil Horman 		 struct vmxnet3_tx_ctx *ctx,
1003cec05562SNeil Horman 		 struct vmxnet3_adapter *adapter)
1004cec05562SNeil Horman {
1005cec05562SNeil Horman 	struct Vmxnet3_TxDataDesc *tdd;
1006cec05562SNeil Horman 
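	/* Each command-ring entry has a fixed-size companion slot in the
	 * per-queue tx data ring, so the slot for the descriptor about to be
	 * filled lives at next2fill * txdata_desc_size from the ring base.
	 */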
1007ff2e7d5dSShrikrishna Khare 	tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
1008ff2e7d5dSShrikrishna Khare 					    tq->tx_ring.next2fill *
1009ff2e7d5dSShrikrishna Khare 					    tq->txdata_desc_size);
1010d1a890faSShreyas Bhatewara 
1011d1a890faSShreyas Bhatewara 	memcpy(tdd->data, skb->data, ctx->copy_size);
1012fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev,
1013f6965582SRandy Dunlap 		"copy %u bytes to dataRing[%u]\n",
1014d1a890faSShreyas Bhatewara 		ctx->copy_size, tq->tx_ring.next2fill);
1015d1a890faSShreyas Bhatewara }
1016d1a890faSShreyas Bhatewara 
1017d1a890faSShreyas Bhatewara 
1018d1a890faSShreyas Bhatewara static void
1019dacce2beSRonak Doshi vmxnet3_prepare_inner_tso(struct sk_buff *skb,
1020dacce2beSRonak Doshi 			  struct vmxnet3_tx_ctx *ctx)
1021dacce2beSRonak Doshi {
1022dacce2beSRonak Doshi 	struct tcphdr *tcph = inner_tcp_hdr(skb);
1023dacce2beSRonak Doshi 	struct iphdr *iph = inner_ip_hdr(skb);
1024dacce2beSRonak Doshi 
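	/* Standard TSO preparation: clear the IPv4 header checksum and seed
	 * tcph->check with the pseudo-header checksum computed over a zero
	 * length, letting the device finalize the TCP checksum of each
	 * segment it emits.
	 */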
102536432797SRonak Doshi 	if (iph->version == 4) {
1026dacce2beSRonak Doshi 		iph->check = 0;
1027dacce2beSRonak Doshi 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1028dacce2beSRonak Doshi 						 IPPROTO_TCP, 0);
102936432797SRonak Doshi 	} else {
1030dacce2beSRonak Doshi 		struct ipv6hdr *iph = inner_ipv6_hdr(skb);
1031dacce2beSRonak Doshi 
1032dacce2beSRonak Doshi 		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
1033dacce2beSRonak Doshi 					       IPPROTO_TCP, 0);
1034dacce2beSRonak Doshi 	}
1035dacce2beSRonak Doshi }
1036dacce2beSRonak Doshi 
1037dacce2beSRonak Doshi static void
1038d1a890faSShreyas Bhatewara vmxnet3_prepare_tso(struct sk_buff *skb,
1039d1a890faSShreyas Bhatewara 		    struct vmxnet3_tx_ctx *ctx)
1040d1a890faSShreyas Bhatewara {
10418bca5d1eSEric Dumazet 	struct tcphdr *tcph = tcp_hdr(skb);
10428bca5d1eSEric Dumazet 
1043d1a890faSShreyas Bhatewara 	if (ctx->ipv4) {
10448bca5d1eSEric Dumazet 		struct iphdr *iph = ip_hdr(skb);
10458bca5d1eSEric Dumazet 
1046d1a890faSShreyas Bhatewara 		iph->check = 0;
1047d1a890faSShreyas Bhatewara 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1048d1a890faSShreyas Bhatewara 						 IPPROTO_TCP, 0);
1049759c9359SShrikrishna Khare 	} else if (ctx->ipv6) {
1050091c9f82SHeiner Kallweit 		tcp_v6_gso_csum_prep(skb);
1051d1a890faSShreyas Bhatewara 	}
1052d1a890faSShreyas Bhatewara }
1053d1a890faSShreyas Bhatewara 
1054a4d7e485SEric Dumazet static int txd_estimate(const struct sk_buff *skb)
1055a4d7e485SEric Dumazet {
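	/* Worst-case descriptor count: VMXNET3_TXD_NEEDED() rounds the linear
	 * area up to the per-descriptor size limit, and the extra descriptor
	 * presumably covers the headers copied to the tx data ring.
	 */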
1056a4d7e485SEric Dumazet 	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1057a4d7e485SEric Dumazet 	int i;
1058a4d7e485SEric Dumazet 
1059a4d7e485SEric Dumazet 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1060d7840976SMatthew Wilcox (Oracle) 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1061a4d7e485SEric Dumazet 
1062a4d7e485SEric Dumazet 		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
1063a4d7e485SEric Dumazet 	}
1064a4d7e485SEric Dumazet 	return count;
1065a4d7e485SEric Dumazet }
1066d1a890faSShreyas Bhatewara 
1067d1a890faSShreyas Bhatewara /*
1068d1a890faSShreyas Bhatewara  * Transmits a pkt thru a given tq
1069d1a890faSShreyas Bhatewara  * Returns:
1070d1a890faSShreyas Bhatewara  *    NETDEV_TX_OK:      descriptors are set up successfully
107125985edcSLucas De Marchi  *    NETDEV_TX_OK:      error occurred, the pkt is dropped
1072d1a890faSShreyas Bhatewara  *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
1073d1a890faSShreyas Bhatewara  *
1074d1a890faSShreyas Bhatewara  * Side-effects:
1075d1a890faSShreyas Bhatewara  *    1. tx ring may be changed
1076d1a890faSShreyas Bhatewara  *    2. tq stats may be updated accordingly
1077d1a890faSShreyas Bhatewara  *    3. shared->txNumDeferred may be updated
1078d1a890faSShreyas Bhatewara  */
1079d1a890faSShreyas Bhatewara 
1080d1a890faSShreyas Bhatewara static int
1081d1a890faSShreyas Bhatewara vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1082d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter *adapter, struct net_device *netdev)
1083d1a890faSShreyas Bhatewara {
1084d1a890faSShreyas Bhatewara 	int ret;
1085d1a890faSShreyas Bhatewara 	u32 count;
10867a4c003dSRonak Doshi 	int num_pkts;
10877a4c003dSRonak Doshi 	int tx_num_deferred;
1088d1a890faSShreyas Bhatewara 	unsigned long flags;
1089d1a890faSShreyas Bhatewara 	struct vmxnet3_tx_ctx ctx;
1090d1a890faSShreyas Bhatewara 	union Vmxnet3_GenericDesc *gdesc;
1091115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1092115924b6SShreyas Bhatewara 	/* Use temporary descriptor to avoid touching bits multiple times */
1093115924b6SShreyas Bhatewara 	union Vmxnet3_GenericDesc tempTxDesc;
1094115924b6SShreyas Bhatewara #endif
1095d1a890faSShreyas Bhatewara 
1096a4d7e485SEric Dumazet 	count = txd_estimate(skb);
1097d1a890faSShreyas Bhatewara 
109872e85c45SJesse Gross 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1099759c9359SShrikrishna Khare 	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1100d1a890faSShreyas Bhatewara 
1101d1a890faSShreyas Bhatewara 	ctx.mss = skb_shinfo(skb)->gso_size;
1102d1a890faSShreyas Bhatewara 	if (ctx.mss) {
1103d1a890faSShreyas Bhatewara 		if (skb_header_cloned(skb)) {
1104d1a890faSShreyas Bhatewara 			if (unlikely(pskb_expand_head(skb, 0, 0,
1105d1a890faSShreyas Bhatewara 						      GFP_ATOMIC) != 0)) {
1106d1a890faSShreyas Bhatewara 				tq->stats.drop_tso++;
1107d1a890faSShreyas Bhatewara 				goto drop_pkt;
1108d1a890faSShreyas Bhatewara 			}
1109d1a890faSShreyas Bhatewara 			tq->stats.copy_skb_header++;
1110d1a890faSShreyas Bhatewara 		}
1111d2857b99SRonak Doshi 		if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1112d2857b99SRonak Doshi 			/* tso pkts must not use more than
1113d2857b99SRonak Doshi 			 * VMXNET3_MAX_TSO_TXD_PER_PKT entries
1114d2857b99SRonak Doshi 			 */
1115d2857b99SRonak Doshi 			if (skb_linearize(skb) != 0) {
1116d2857b99SRonak Doshi 				tq->stats.drop_too_many_frags++;
1117d2857b99SRonak Doshi 				goto drop_pkt;
1118d2857b99SRonak Doshi 			}
1119d2857b99SRonak Doshi 			tq->stats.linearized++;
1120d2857b99SRonak Doshi 
1121d2857b99SRonak Doshi 			/* recalculate the # of descriptors to use */
1122d2857b99SRonak Doshi 			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1123d2857b99SRonak Doshi 			if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1124d2857b99SRonak Doshi 				tq->stats.drop_too_many_frags++;
1125d2857b99SRonak Doshi 				goto drop_pkt;
1126d2857b99SRonak Doshi 			}
1127d2857b99SRonak Doshi 		}
1128dacce2beSRonak Doshi 		if (skb->encapsulation) {
1129dacce2beSRonak Doshi 			vmxnet3_prepare_inner_tso(skb, &ctx);
1130dacce2beSRonak Doshi 		} else {
1131d1a890faSShreyas Bhatewara 			vmxnet3_prepare_tso(skb, &ctx);
1132dacce2beSRonak Doshi 		}
1133d1a890faSShreyas Bhatewara 	} else {
1134d1a890faSShreyas Bhatewara 		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1135d1a890faSShreyas Bhatewara 
1136d1a890faSShreyas Bhatewara 			/* non-tso pkts must not use more than
1137d1a890faSShreyas Bhatewara 			 * VMXNET3_MAX_TXD_PER_PKT entries
1138d1a890faSShreyas Bhatewara 			 */
1139d1a890faSShreyas Bhatewara 			if (skb_linearize(skb) != 0) {
1140d1a890faSShreyas Bhatewara 				tq->stats.drop_too_many_frags++;
1141d1a890faSShreyas Bhatewara 				goto drop_pkt;
1142d1a890faSShreyas Bhatewara 			}
1143d1a890faSShreyas Bhatewara 			tq->stats.linearized++;
1144d1a890faSShreyas Bhatewara 
1145d1a890faSShreyas Bhatewara 			/* recalculate the # of descriptors to use */
1146d1a890faSShreyas Bhatewara 			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1147d1a890faSShreyas Bhatewara 		}
1148d1a890faSShreyas Bhatewara 	}
1149d1a890faSShreyas Bhatewara 
1150cec05562SNeil Horman 	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1151d1a890faSShreyas Bhatewara 	if (ret >= 0) {
1152d1a890faSShreyas Bhatewara 		BUG_ON(ret <= 0 && ctx.copy_size != 0);
1153d1a890faSShreyas Bhatewara 		/* hdrs parsed, check against other limits */
1154d1a890faSShreyas Bhatewara 		if (ctx.mss) {
1155dacce2beSRonak Doshi 			if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1156d1a890faSShreyas Bhatewara 				     VMXNET3_MAX_TX_BUF_SIZE)) {
1157efc21d95SArnd Bergmann 				tq->stats.drop_oversized_hdr++;
1158efc21d95SArnd Bergmann 				goto drop_pkt;
1159d1a890faSShreyas Bhatewara 			}
1160d1a890faSShreyas Bhatewara 		} else {
1161d1a890faSShreyas Bhatewara 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
1162dacce2beSRonak Doshi 				if (unlikely(ctx.l4_offset +
1163d1a890faSShreyas Bhatewara 					     skb->csum_offset >
1164d1a890faSShreyas Bhatewara 					     VMXNET3_MAX_CSUM_OFFSET)) {
1165efc21d95SArnd Bergmann 					tq->stats.drop_oversized_hdr++;
1166efc21d95SArnd Bergmann 					goto drop_pkt;
1167d1a890faSShreyas Bhatewara 				}
1168d1a890faSShreyas Bhatewara 			}
1169d1a890faSShreyas Bhatewara 		}
1170d1a890faSShreyas Bhatewara 	} else {
1171d1a890faSShreyas Bhatewara 		tq->stats.drop_hdr_inspect_err++;
1172cec05562SNeil Horman 		goto drop_pkt;
1173d1a890faSShreyas Bhatewara 	}
1174d1a890faSShreyas Bhatewara 
1175cec05562SNeil Horman 	spin_lock_irqsave(&tq->tx_lock, flags);
1176cec05562SNeil Horman 
1177cec05562SNeil Horman 	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1178cec05562SNeil Horman 		tq->stats.tx_ring_full++;
1179cec05562SNeil Horman 		netdev_dbg(adapter->netdev,
1180cec05562SNeil Horman 			"tx queue stopped on %s, next2comp %u"
1181cec05562SNeil Horman 			" next2fill %u\n", adapter->netdev->name,
1182cec05562SNeil Horman 			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1183cec05562SNeil Horman 
1184cec05562SNeil Horman 		vmxnet3_tq_stop(tq, adapter);
1185cec05562SNeil Horman 		spin_unlock_irqrestore(&tq->tx_lock, flags);
1186cec05562SNeil Horman 		return NETDEV_TX_BUSY;
1187cec05562SNeil Horman 	}
1188cec05562SNeil Horman 
1189cec05562SNeil Horman 
1190cec05562SNeil Horman 	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1191cec05562SNeil Horman 
1192d1a890faSShreyas Bhatewara 	/* fill tx descs related to addr & len */
11935738a09dSAlexey Khoroshilov 	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
11945738a09dSAlexey Khoroshilov 		goto unlock_drop_pkt;
1195d1a890faSShreyas Bhatewara 
1196d1a890faSShreyas Bhatewara 	/* setup the EOP desc */
1197115924b6SShreyas Bhatewara 	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1198d1a890faSShreyas Bhatewara 
1199d1a890faSShreyas Bhatewara 	/* setup the SOP desc */
1200115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1201115924b6SShreyas Bhatewara 	gdesc = &tempTxDesc;
1202115924b6SShreyas Bhatewara 	gdesc->dword[2] = ctx.sop_txd->dword[2];
1203115924b6SShreyas Bhatewara 	gdesc->dword[3] = ctx.sop_txd->dword[3];
1204115924b6SShreyas Bhatewara #else
1205d1a890faSShreyas Bhatewara 	gdesc = ctx.sop_txd;
1206115924b6SShreyas Bhatewara #endif
12077a4c003dSRonak Doshi 	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1208d1a890faSShreyas Bhatewara 	if (ctx.mss) {
1209dacce2beSRonak Doshi 		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1210dacce2beSRonak Doshi 			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
121160cafa03SRonak Doshi 			if (VMXNET3_VERSION_GE_7(adapter)) {
121260cafa03SRonak Doshi 				gdesc->txd.om = VMXNET3_OM_TSO;
121360cafa03SRonak Doshi 				gdesc->txd.ext1 = 1;
121460cafa03SRonak Doshi 			} else {
1215dacce2beSRonak Doshi 				gdesc->txd.om = VMXNET3_OM_ENCAP;
121660cafa03SRonak Doshi 			}
1217dacce2beSRonak Doshi 			gdesc->txd.msscof = ctx.mss;
1218dacce2beSRonak Doshi 
12191dac3b1bSRonak Doshi 			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1220dacce2beSRonak Doshi 				gdesc->txd.oco = 1;
1221dacce2beSRonak Doshi 		} else {
1222dacce2beSRonak Doshi 			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1223d1a890faSShreyas Bhatewara 			gdesc->txd.om = VMXNET3_OM_TSO;
1224d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = ctx.mss;
1225dacce2beSRonak Doshi 		}
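		/* Number of segments the device will produce, i.e. the
		 * payload (total length minus header length) divided by the
		 * MSS, rounded up.
		 */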
12267a4c003dSRonak Doshi 		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1227d1a890faSShreyas Bhatewara 	} else {
1228d1a890faSShreyas Bhatewara 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1229dacce2beSRonak Doshi 			if (VMXNET3_VERSION_GE_4(adapter) &&
1230dacce2beSRonak Doshi 			    skb->encapsulation) {
1231dacce2beSRonak Doshi 				gdesc->txd.hlen = ctx.l4_offset +
1232dacce2beSRonak Doshi 						  ctx.l4_hdr_size;
123360cafa03SRonak Doshi 				if (VMXNET3_VERSION_GE_7(adapter)) {
123460cafa03SRonak Doshi 					gdesc->txd.om = VMXNET3_OM_CSUM;
123560cafa03SRonak Doshi 					gdesc->txd.msscof = ctx.l4_offset +
123660cafa03SRonak Doshi 							    skb->csum_offset;
123760cafa03SRonak Doshi 					gdesc->txd.ext1 = 1;
123860cafa03SRonak Doshi 				} else {
1239dacce2beSRonak Doshi 					gdesc->txd.om = VMXNET3_OM_ENCAP;
1240dacce2beSRonak Doshi 					gdesc->txd.msscof = 0;		/* Reserved */
124160cafa03SRonak Doshi 				}
1242dacce2beSRonak Doshi 			} else {
1243dacce2beSRonak Doshi 				gdesc->txd.hlen = ctx.l4_offset;
1244d1a890faSShreyas Bhatewara 				gdesc->txd.om = VMXNET3_OM_CSUM;
1245dacce2beSRonak Doshi 				gdesc->txd.msscof = ctx.l4_offset +
1246d1a890faSShreyas Bhatewara 						    skb->csum_offset;
1247dacce2beSRonak Doshi 			}
1248d1a890faSShreyas Bhatewara 		} else {
1249d1a890faSShreyas Bhatewara 			gdesc->txd.om = 0;
1250d1a890faSShreyas Bhatewara 			gdesc->txd.msscof = 0;
1251d1a890faSShreyas Bhatewara 		}
12527a4c003dSRonak Doshi 		num_pkts = 1;
1253d1a890faSShreyas Bhatewara 	}
12547a4c003dSRonak Doshi 	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
12557a4c003dSRonak Doshi 	tx_num_deferred += num_pkts;
1256d1a890faSShreyas Bhatewara 
1257df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
1258d1a890faSShreyas Bhatewara 		gdesc->txd.ti = 1;
1259df8a39deSJiri Pirko 		gdesc->txd.tci = skb_vlan_tag_get(skb);
1260d1a890faSShreyas Bhatewara 	}
1261d1a890faSShreyas Bhatewara 
1262f3002c13Shpreg@vmware.com 	/* Ensure that the write to (&gdesc->txd)->gen will be observed after
1263f3002c13Shpreg@vmware.com 	 * all other writes to &gdesc->txd.
1264f3002c13Shpreg@vmware.com 	 */
1265f3002c13Shpreg@vmware.com 	dma_wmb();
1266f3002c13Shpreg@vmware.com 
1267115924b6SShreyas Bhatewara 	/* finally flips the GEN bit of the SOP desc. */
1268115924b6SShreyas Bhatewara 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1269115924b6SShreyas Bhatewara 						  VMXNET3_TXD_GEN);
1270115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1271115924b6SShreyas Bhatewara 	/* Finished updating in bitfields of Tx Desc, so write them in original
1272115924b6SShreyas Bhatewara 	 * place.
1273115924b6SShreyas Bhatewara 	 */
1274115924b6SShreyas Bhatewara 	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1275115924b6SShreyas Bhatewara 			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1276115924b6SShreyas Bhatewara 	gdesc = ctx.sop_txd;
1277115924b6SShreyas Bhatewara #endif
1278fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev,
1279f6965582SRandy Dunlap 		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1280c2fd03a0SJoe Perches 		(u32)(ctx.sop_txd -
1281115924b6SShreyas Bhatewara 		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1282115924b6SShreyas Bhatewara 		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1283d1a890faSShreyas Bhatewara 
1284d1a890faSShreyas Bhatewara 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1285d1a890faSShreyas Bhatewara 
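	/* Batch doorbell writes: the device is told about new descriptors
	 * only once enough packets have been deferred, since each BAR0 write
	 * is comparatively expensive under virtualization.
	 */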
12867a4c003dSRonak Doshi 	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1287d1a890faSShreyas Bhatewara 		tq->shared->txNumDeferred = 0;
128809c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
1289543fb674SRonak Doshi 				       adapter->tx_prod_offset + tq->qid * 8,
1290d1a890faSShreyas Bhatewara 				       tq->tx_ring.next2fill);
1291d1a890faSShreyas Bhatewara 	}
1292d1a890faSShreyas Bhatewara 
1293d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1294d1a890faSShreyas Bhatewara 
1295f955e141SDan Carpenter unlock_drop_pkt:
1296f955e141SDan Carpenter 	spin_unlock_irqrestore(&tq->tx_lock, flags);
1297d1a890faSShreyas Bhatewara drop_pkt:
1298d1a890faSShreyas Bhatewara 	tq->stats.drop_total++;
1299b1b71817SEric W. Biederman 	dev_kfree_skb_any(skb);
1300d1a890faSShreyas Bhatewara 	return NETDEV_TX_OK;
1301d1a890faSShreyas Bhatewara }
1302d1a890faSShreyas Bhatewara 
130354f00cceSWilliam Tu static int
130454f00cceSWilliam Tu vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
130554f00cceSWilliam Tu 		  struct vmxnet3_rx_queue *rq, int size)
130654f00cceSWilliam Tu {
130754f00cceSWilliam Tu 	bool xdp_prog = vmxnet3_xdp_enabled(adapter);
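	/* Pages must be mapped bidirectionally when an XDP program is
	 * attached, since XDP_TX can hand a page back to the device for
	 * transmit; a receive-only mapping suffices otherwise.
	 */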
130854f00cceSWilliam Tu 	const struct page_pool_params pp_params = {
130954f00cceSWilliam Tu 		.order = 0,
131054f00cceSWilliam Tu 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
131154f00cceSWilliam Tu 		.pool_size = size,
131254f00cceSWilliam Tu 		.nid = NUMA_NO_NODE,
131354f00cceSWilliam Tu 		.dev = &adapter->pdev->dev,
131454f00cceSWilliam Tu 		.offset = VMXNET3_XDP_RX_OFFSET,
131554f00cceSWilliam Tu 		.max_len = VMXNET3_XDP_MAX_FRSIZE,
131654f00cceSWilliam Tu 		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
131754f00cceSWilliam Tu 	};
131854f00cceSWilliam Tu 	struct page_pool *pp;
131954f00cceSWilliam Tu 	int err;
132054f00cceSWilliam Tu 
132154f00cceSWilliam Tu 	pp = page_pool_create(&pp_params);
132254f00cceSWilliam Tu 	if (IS_ERR(pp))
132354f00cceSWilliam Tu 		return PTR_ERR(pp);
132454f00cceSWilliam Tu 
132554f00cceSWilliam Tu 	err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
132654f00cceSWilliam Tu 			       rq->napi.napi_id);
132754f00cceSWilliam Tu 	if (err < 0)
132854f00cceSWilliam Tu 		goto err_free_pp;
132954f00cceSWilliam Tu 
133054f00cceSWilliam Tu 	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
133154f00cceSWilliam Tu 	if (err)
133254f00cceSWilliam Tu 		goto err_unregister_rxq;
133354f00cceSWilliam Tu 
133454f00cceSWilliam Tu 	rq->page_pool = pp;
133554f00cceSWilliam Tu 
133654f00cceSWilliam Tu 	return 0;
133754f00cceSWilliam Tu 
133854f00cceSWilliam Tu err_unregister_rxq:
133954f00cceSWilliam Tu 	xdp_rxq_info_unreg(&rq->xdp_rxq);
134054f00cceSWilliam Tu err_free_pp:
134154f00cceSWilliam Tu 	page_pool_destroy(pp);
134254f00cceSWilliam Tu 
134354f00cceSWilliam Tu 	return err;
134454f00cceSWilliam Tu }
134554f00cceSWilliam Tu 
134654f00cceSWilliam Tu void *
134754f00cceSWilliam Tu vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
134854f00cceSWilliam Tu 		    gfp_t gfp_mask)
134954f00cceSWilliam Tu {
135054f00cceSWilliam Tu 	struct page *page;
135154f00cceSWilliam Tu 
135254f00cceSWilliam Tu 	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
135354f00cceSWilliam Tu 	if (unlikely(!page))
135454f00cceSWilliam Tu 		return NULL;
135554f00cceSWilliam Tu 
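	/* The DMA address handed to the device starts at the pool offset
	 * (VMXNET3_XDP_RX_OFFSET), preserving the headroom at the front of
	 * each page for XDP.
	 */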
135654f00cceSWilliam Tu 	*dma_addr = page_pool_get_dma_addr(page) + pp->p.offset;
135754f00cceSWilliam Tu 
135854f00cceSWilliam Tu 	return page_address(page);
135954f00cceSWilliam Tu }
1360d1a890faSShreyas Bhatewara 
1361d1a890faSShreyas Bhatewara static netdev_tx_t
1362d1a890faSShreyas Bhatewara vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1363d1a890faSShreyas Bhatewara {
1364d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1365d1a890faSShreyas Bhatewara 
136609c5088eSShreyas Bhatewara 	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
136709c5088eSShreyas Bhatewara 	return vmxnet3_tq_xmit(skb,
136809c5088eSShreyas Bhatewara 			       &adapter->tx_queue[skb->queue_mapping],
136909c5088eSShreyas Bhatewara 			       adapter, netdev);
1370d1a890faSShreyas Bhatewara }
1371d1a890faSShreyas Bhatewara 
1372d1a890faSShreyas Bhatewara 
1373d1a890faSShreyas Bhatewara static void
1374d1a890faSShreyas Bhatewara vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1375d1a890faSShreyas Bhatewara 		struct sk_buff *skb,
1376d1a890faSShreyas Bhatewara 		union Vmxnet3_GenericDesc *gdesc)
1377d1a890faSShreyas Bhatewara {
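	/* Trust the device's checksum verdict only when it actually computed
	 * one (cnc clear) and RX checksum offload is enabled; csum_level = 1
	 * additionally marks the inner checksum of an encapsulated packet as
	 * verified.
	 */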
1378a0d2730cSMichał Mirosław 	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1379f0d43780SShrikrishna Khare 		if (gdesc->rcd.v4 &&
1380f0d43780SShrikrishna Khare 		    (le32_to_cpu(gdesc->dword[3]) &
1381f0d43780SShrikrishna Khare 		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1382d1a890faSShreyas Bhatewara 			skb->ip_summed = CHECKSUM_UNNECESSARY;
13833d8f2c42SRonak Doshi 			if ((le32_to_cpu(gdesc->dword[0]) &
13843d8f2c42SRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
13853d8f2c42SRonak Doshi 				skb->csum_level = 1;
13863d8f2c42SRonak Doshi 			}
1387dacce2beSRonak Doshi 			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1388dacce2beSRonak Doshi 				     !(le32_to_cpu(gdesc->dword[0]) &
1389dacce2beSRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1390dacce2beSRonak Doshi 			WARN_ON_ONCE(gdesc->rcd.frg &&
1391dacce2beSRonak Doshi 				     !(le32_to_cpu(gdesc->dword[0]) &
1392dacce2beSRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1393f0d43780SShrikrishna Khare 		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1394f0d43780SShrikrishna Khare 					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
1395f0d43780SShrikrishna Khare 			skb->ip_summed = CHECKSUM_UNNECESSARY;
13963d8f2c42SRonak Doshi 			if ((le32_to_cpu(gdesc->dword[0]) &
13973d8f2c42SRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
13983d8f2c42SRonak Doshi 				skb->csum_level = 1;
13993d8f2c42SRonak Doshi 			}
1400dacce2beSRonak Doshi 			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1401dacce2beSRonak Doshi 				     !(le32_to_cpu(gdesc->dword[0]) &
1402dacce2beSRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1403dacce2beSRonak Doshi 			WARN_ON_ONCE(gdesc->rcd.frg &&
1404dacce2beSRonak Doshi 				     !(le32_to_cpu(gdesc->dword[0]) &
1405dacce2beSRonak Doshi 				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1406d1a890faSShreyas Bhatewara 		} else {
1407d1a890faSShreyas Bhatewara 			if (gdesc->rcd.csum) {
1408d1a890faSShreyas Bhatewara 				skb->csum = htons(gdesc->rcd.csum);
1409d1a890faSShreyas Bhatewara 				skb->ip_summed = CHECKSUM_PARTIAL;
1410d1a890faSShreyas Bhatewara 			} else {
1411bc8acf2cSEric Dumazet 				skb_checksum_none_assert(skb);
1412d1a890faSShreyas Bhatewara 			}
1413d1a890faSShreyas Bhatewara 		}
1414d1a890faSShreyas Bhatewara 	} else {
1415bc8acf2cSEric Dumazet 		skb_checksum_none_assert(skb);
1416d1a890faSShreyas Bhatewara 	}
1417d1a890faSShreyas Bhatewara }
1418d1a890faSShreyas Bhatewara 
1419d1a890faSShreyas Bhatewara 
1420d1a890faSShreyas Bhatewara static void
1421d1a890faSShreyas Bhatewara vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1422d1a890faSShreyas Bhatewara 		 struct vmxnet3_rx_ctx *ctx,  struct vmxnet3_adapter *adapter)
1423d1a890faSShreyas Bhatewara {
1424d1a890faSShreyas Bhatewara 	rq->stats.drop_err++;
1425d1a890faSShreyas Bhatewara 	if (!rcd->fcs)
1426d1a890faSShreyas Bhatewara 		rq->stats.drop_fcs++;
1427d1a890faSShreyas Bhatewara 
1428d1a890faSShreyas Bhatewara 	rq->stats.drop_total++;
1429d1a890faSShreyas Bhatewara 
1430d1a890faSShreyas Bhatewara 	/*
1431d1a890faSShreyas Bhatewara 	 * We do not unmap and chain the rx buffer to the skb.
1432d1a890faSShreyas Bhatewara 	 * We basically pretend this buffer is not used and will be recycled
1433d1a890faSShreyas Bhatewara 	 * by vmxnet3_rq_alloc_rx_buf()
1434d1a890faSShreyas Bhatewara 	 */
1435d1a890faSShreyas Bhatewara 
1436d1a890faSShreyas Bhatewara 	/*
1437d1a890faSShreyas Bhatewara 	 * ctx->skb may be NULL if this is the first and the only one
1438d1a890faSShreyas Bhatewara 	 * desc for the pkt
1439d1a890faSShreyas Bhatewara 	 */
1440d1a890faSShreyas Bhatewara 	if (ctx->skb)
1441d1a890faSShreyas Bhatewara 		dev_kfree_skb_irq(ctx->skb);
1442d1a890faSShreyas Bhatewara 
1443d1a890faSShreyas Bhatewara 	ctx->skb = NULL;
1444d1a890faSShreyas Bhatewara }
1445d1a890faSShreyas Bhatewara 
1446d1a890faSShreyas Bhatewara 
144745dac1d6SShreyas Bhatewara static u32
144845dac1d6SShreyas Bhatewara vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
144945dac1d6SShreyas Bhatewara 		    union Vmxnet3_GenericDesc *gdesc)
145045dac1d6SShreyas Bhatewara {
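	/* Estimate the L2+L3+L4 header length from the linear area so the
	 * LRO path can derive a gso_size; 0 is returned whenever the headers
	 * cannot be parsed safely from the skb_headlen() bytes.
	 */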
145145dac1d6SShreyas Bhatewara 	u32 hlen, maplen;
145245dac1d6SShreyas Bhatewara 	union {
145345dac1d6SShreyas Bhatewara 		void *ptr;
145445dac1d6SShreyas Bhatewara 		struct ethhdr *eth;
145565ec0bd1SRonak Doshi 		struct vlan_ethhdr *veth;
145645dac1d6SShreyas Bhatewara 		struct iphdr *ipv4;
145745dac1d6SShreyas Bhatewara 		struct ipv6hdr *ipv6;
145845dac1d6SShreyas Bhatewara 		struct tcphdr *tcp;
145945dac1d6SShreyas Bhatewara 	} hdr;
146045dac1d6SShreyas Bhatewara 	BUG_ON(gdesc->rcd.tcp == 0);
146145dac1d6SShreyas Bhatewara 
146245dac1d6SShreyas Bhatewara 	maplen = skb_headlen(skb);
146345dac1d6SShreyas Bhatewara 	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
146445dac1d6SShreyas Bhatewara 		return 0;
146545dac1d6SShreyas Bhatewara 
146665ec0bd1SRonak Doshi 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
146765ec0bd1SRonak Doshi 	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
146865ec0bd1SRonak Doshi 		hlen = sizeof(struct vlan_ethhdr);
146965ec0bd1SRonak Doshi 	else
147065ec0bd1SRonak Doshi 		hlen = sizeof(struct ethhdr);
147165ec0bd1SRonak Doshi 
147245dac1d6SShreyas Bhatewara 	hdr.eth = eth_hdr(skb);
147345dac1d6SShreyas Bhatewara 	if (gdesc->rcd.v4) {
147465ec0bd1SRonak Doshi 		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
147565ec0bd1SRonak Doshi 		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
147665ec0bd1SRonak Doshi 		hdr.ptr += hlen;
147745dac1d6SShreyas Bhatewara 		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
147845dac1d6SShreyas Bhatewara 		hlen = hdr.ipv4->ihl << 2;
147945dac1d6SShreyas Bhatewara 		hdr.ptr += hdr.ipv4->ihl << 2;
148045dac1d6SShreyas Bhatewara 	} else if (gdesc->rcd.v6) {
148165ec0bd1SRonak Doshi 		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
148265ec0bd1SRonak Doshi 		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
148365ec0bd1SRonak Doshi 		hdr.ptr += hlen;
148445dac1d6SShreyas Bhatewara 		/* Use an estimated value, since we also need to handle
148545dac1d6SShreyas Bhatewara 		 * TSO case.
148645dac1d6SShreyas Bhatewara 		 */
148745dac1d6SShreyas Bhatewara 		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
148845dac1d6SShreyas Bhatewara 			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
148945dac1d6SShreyas Bhatewara 		hlen = sizeof(struct ipv6hdr);
149045dac1d6SShreyas Bhatewara 		hdr.ptr += sizeof(struct ipv6hdr);
149145dac1d6SShreyas Bhatewara 	} else {
149245dac1d6SShreyas Bhatewara 		/* Non-IP pkt, don't estimate header length */
149345dac1d6SShreyas Bhatewara 		return 0;
149445dac1d6SShreyas Bhatewara 	}
149545dac1d6SShreyas Bhatewara 
149645dac1d6SShreyas Bhatewara 	if (hlen + sizeof(struct tcphdr) > maplen)
149745dac1d6SShreyas Bhatewara 		return 0;
149845dac1d6SShreyas Bhatewara 
149945dac1d6SShreyas Bhatewara 	return (hlen + (hdr.tcp->doff << 2));
150045dac1d6SShreyas Bhatewara }
150145dac1d6SShreyas Bhatewara 
1502d1a890faSShreyas Bhatewara static int
1503d1a890faSShreyas Bhatewara vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1504d1a890faSShreyas Bhatewara 		       struct vmxnet3_adapter *adapter, int quota)
1505d1a890faSShreyas Bhatewara {
1506543fb674SRonak Doshi 	u32 rxprod_reg[2] = {
1507543fb674SRonak Doshi 		adapter->rx_prod_offset, adapter->rx_prod2_offset
1508215faf9cSJoe Perches 	};
15090769636cSNeil Horman 	u32 num_pkts = 0;
15105318d809SShreyas Bhatewara 	bool skip_page_frags = false;
151140b8c2a1SRonak Doshi 	bool encap_lro = false;
1512d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxCompDesc *rcd;
1513d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
151445dac1d6SShreyas Bhatewara 	u16 segCnt = 0, mss = 0;
15152c5a5748SRonak Doshi 	int comp_offset, fill_offset;
1516115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1517115924b6SShreyas Bhatewara 	struct Vmxnet3_RxDesc rxCmdDesc;
1518115924b6SShreyas Bhatewara 	struct Vmxnet3_RxCompDesc rxComp;
1519115924b6SShreyas Bhatewara #endif
152054f00cceSWilliam Tu 	bool need_flush = false;
152154f00cceSWilliam Tu 
1522115924b6SShreyas Bhatewara 	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1523115924b6SShreyas Bhatewara 			  &rxComp);
1524d1a890faSShreyas Bhatewara 	while (rcd->gen == rq->comp_ring.gen) {
1525d1a890faSShreyas Bhatewara 		struct vmxnet3_rx_buf_info *rbi;
15265318d809SShreyas Bhatewara 		struct sk_buff *skb, *new_skb = NULL;
15275318d809SShreyas Bhatewara 		struct page *new_page = NULL;
15285738a09dSAlexey Khoroshilov 		dma_addr_t new_dma_addr;
1529d1a890faSShreyas Bhatewara 		int num_to_alloc;
1530d1a890faSShreyas Bhatewara 		struct Vmxnet3_RxDesc *rxd;
1531d1a890faSShreyas Bhatewara 		u32 idx, ring_idx;
15325318d809SShreyas Bhatewara 		struct vmxnet3_cmd_ring	*ring = NULL;
15330769636cSNeil Horman 		if (num_pkts >= quota) {
1534d1a890faSShreyas Bhatewara 			/* we may stop even before we see the EOP desc of
1535d1a890faSShreyas Bhatewara 			 * the current pkt
1536d1a890faSShreyas Bhatewara 			 */
1537d1a890faSShreyas Bhatewara 			break;
1538d1a890faSShreyas Bhatewara 		}
1539f3002c13Shpreg@vmware.com 
1540f3002c13Shpreg@vmware.com 		/* Prevent any rcd field from being (speculatively) read before
1541f3002c13Shpreg@vmware.com 		 * rcd->gen is read.
1542f3002c13Shpreg@vmware.com 		 */
1543f3002c13Shpreg@vmware.com 		dma_rmb();
1544f3002c13Shpreg@vmware.com 
154550a5ce3eSShrikrishna Khare 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
154650a5ce3eSShrikrishna Khare 		       rcd->rqID != rq->dataRingQid);
1547d1a890faSShreyas Bhatewara 		idx = rcd->rxdIdx;
154850a5ce3eSShrikrishna Khare 		ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
15495318d809SShreyas Bhatewara 		ring = rq->rx_ring + ring_idx;
1550115924b6SShreyas Bhatewara 		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1551115924b6SShreyas Bhatewara 				  &rxCmdDesc);
1552d1a890faSShreyas Bhatewara 		rbi = rq->buf_info[ring_idx] + idx;
1553d1a890faSShreyas Bhatewara 
1554115924b6SShreyas Bhatewara 		BUG_ON(rxd->addr != rbi->dma_addr ||
1555115924b6SShreyas Bhatewara 		       rxd->len != rbi->len);
1556d1a890faSShreyas Bhatewara 
1557d1a890faSShreyas Bhatewara 		if (unlikely(rcd->eop && rcd->err)) {
1558d1a890faSShreyas Bhatewara 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
1559d1a890faSShreyas Bhatewara 			goto rcd_done;
1560d1a890faSShreyas Bhatewara 		}
1561d1a890faSShreyas Bhatewara 
156254f00cceSWilliam Tu 		if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
156354f00cceSWilliam Tu 			struct sk_buff *skb_xdp_pass;
156454f00cceSWilliam Tu 			int act;
156554f00cceSWilliam Tu 
156654f00cceSWilliam Tu 			if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
156754f00cceSWilliam Tu 				ctx->skb = NULL;
156854f00cceSWilliam Tu 				goto skip_xdp; /* Handle it later. */
156954f00cceSWilliam Tu 			}
157054f00cceSWilliam Tu 
157154f00cceSWilliam Tu 			if (rbi->buf_type != VMXNET3_RX_BUF_XDP)
157254f00cceSWilliam Tu 				goto rcd_done;
157354f00cceSWilliam Tu 
157454f00cceSWilliam Tu 			act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
157554f00cceSWilliam Tu 						  &skb_xdp_pass);
157654f00cceSWilliam Tu 			if (act == XDP_PASS) {
157754f00cceSWilliam Tu 				ctx->skb = skb_xdp_pass;
157854f00cceSWilliam Tu 				goto sop_done;
157954f00cceSWilliam Tu 			}
158054f00cceSWilliam Tu 			ctx->skb = NULL;
158154f00cceSWilliam Tu 			need_flush |= act == XDP_REDIRECT;
158254f00cceSWilliam Tu 
158354f00cceSWilliam Tu 			goto rcd_done;
158454f00cceSWilliam Tu 		}
158554f00cceSWilliam Tu skip_xdp:
158654f00cceSWilliam Tu 
1587d1a890faSShreyas Bhatewara 		if (rcd->sop) { /* first buf of the pkt */
158850a5ce3eSShrikrishna Khare 			bool rxDataRingUsed;
158950a5ce3eSShrikrishna Khare 			u16 len;
159050a5ce3eSShrikrishna Khare 
1591d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
159250a5ce3eSShrikrishna Khare 			       (rcd->rqID != rq->qid &&
159350a5ce3eSShrikrishna Khare 				rcd->rqID != rq->dataRingQid));
1594d1a890faSShreyas Bhatewara 
159554f00cceSWilliam Tu 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB &&
159654f00cceSWilliam Tu 			       rbi->buf_type != VMXNET3_RX_BUF_XDP);
1597d1a890faSShreyas Bhatewara 			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1598d1a890faSShreyas Bhatewara 
1599d1a890faSShreyas Bhatewara 			if (unlikely(rcd->len == 0)) {
1600d1a890faSShreyas Bhatewara 				/* Pretend the rx buffer is skipped. */
1601d1a890faSShreyas Bhatewara 				BUG_ON(!(rcd->sop && rcd->eop));
1602fdcd79b9SStephen Hemminger 				netdev_dbg(adapter->netdev,
1603f6965582SRandy Dunlap 					"rxRing[%u][%u] 0 length\n",
1604d1a890faSShreyas Bhatewara 					ring_idx, idx);
1605d1a890faSShreyas Bhatewara 				goto rcd_done;
1606d1a890faSShreyas Bhatewara 			}
1607d1a890faSShreyas Bhatewara 
16085318d809SShreyas Bhatewara 			skip_page_frags = false;
1609d1a890faSShreyas Bhatewara 			ctx->skb = rbi->skb;
161050a5ce3eSShrikrishna Khare 
161150a5ce3eSShrikrishna Khare 			rxDataRingUsed =
161250a5ce3eSShrikrishna Khare 				VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
161350a5ce3eSShrikrishna Khare 			len = rxDataRingUsed ? rcd->len : rbi->len;
161454f00cceSWilliam Tu 
161554f00cceSWilliam Tu 			if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
161654f00cceSWilliam Tu 				struct sk_buff *skb_xdp_pass;
161754f00cceSWilliam Tu 				size_t sz;
161854f00cceSWilliam Tu 				int act;
161954f00cceSWilliam Tu 
162054f00cceSWilliam Tu 				sz = rcd->rxdIdx * rq->data_ring.desc_size;
162154f00cceSWilliam Tu 				act = vmxnet3_process_xdp_small(adapter, rq,
162254f00cceSWilliam Tu 								&rq->data_ring.base[sz],
162354f00cceSWilliam Tu 								rcd->len,
162454f00cceSWilliam Tu 								&skb_xdp_pass);
162554f00cceSWilliam Tu 				if (act == XDP_PASS) {
162654f00cceSWilliam Tu 					ctx->skb = skb_xdp_pass;
162754f00cceSWilliam Tu 					goto sop_done;
162854f00cceSWilliam Tu 				}
162954f00cceSWilliam Tu 				need_flush |= act == XDP_REDIRECT;
163054f00cceSWilliam Tu 
163154f00cceSWilliam Tu 				goto rcd_done;
163254f00cceSWilliam Tu 			}
16330d735f13SStephen Hemminger 			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
163450a5ce3eSShrikrishna Khare 							    len);
16355318d809SShreyas Bhatewara 			if (new_skb == NULL) {
16365318d809SShreyas Bhatewara 				/* Skb allocation failed, do not handover this
16375318d809SShreyas Bhatewara 				 * skb to stack. Reuse it. Drop the existing pkt
16385318d809SShreyas Bhatewara 				 */
16395318d809SShreyas Bhatewara 				rq->stats.rx_buf_alloc_failure++;
16405318d809SShreyas Bhatewara 				ctx->skb = NULL;
16415318d809SShreyas Bhatewara 				rq->stats.drop_total++;
16425318d809SShreyas Bhatewara 				skip_page_frags = true;
16435318d809SShreyas Bhatewara 				goto rcd_done;
16445318d809SShreyas Bhatewara 			}
164550a5ce3eSShrikrishna Khare 
16466f483338SSeiji Nishikawa 			if (rxDataRingUsed && adapter->rxdataring_enabled) {
164750a5ce3eSShrikrishna Khare 				size_t sz;
164850a5ce3eSShrikrishna Khare 
164950a5ce3eSShrikrishna Khare 				BUG_ON(rcd->len > rq->data_ring.desc_size);
165050a5ce3eSShrikrishna Khare 
165150a5ce3eSShrikrishna Khare 				ctx->skb = new_skb;
165250a5ce3eSShrikrishna Khare 				sz = rcd->rxdIdx * rq->data_ring.desc_size;
165350a5ce3eSShrikrishna Khare 				memcpy(new_skb->data,
165450a5ce3eSShrikrishna Khare 				       &rq->data_ring.base[sz], rcd->len);
165550a5ce3eSShrikrishna Khare 			} else {
165650a5ce3eSShrikrishna Khare 				ctx->skb = rbi->skb;
165750a5ce3eSShrikrishna Khare 
165850a5ce3eSShrikrishna Khare 				new_dma_addr =
165950a5ce3eSShrikrishna Khare 					dma_map_single(&adapter->pdev->dev,
16605738a09dSAlexey Khoroshilov 						       new_skb->data, rbi->len,
1661bf7bec46SChristophe JAILLET 						       DMA_FROM_DEVICE);
16625738a09dSAlexey Khoroshilov 				if (dma_mapping_error(&adapter->pdev->dev,
16635738a09dSAlexey Khoroshilov 						      new_dma_addr)) {
16645738a09dSAlexey Khoroshilov 					dev_kfree_skb(new_skb);
166550a5ce3eSShrikrishna Khare 					/* Skb allocation failed, do not
166650a5ce3eSShrikrishna Khare 					 * handover this skb to stack. Reuse
166750a5ce3eSShrikrishna Khare 					 * it. Drop the existing pkt.
16685738a09dSAlexey Khoroshilov 					 */
16695738a09dSAlexey Khoroshilov 					rq->stats.rx_buf_alloc_failure++;
16705738a09dSAlexey Khoroshilov 					ctx->skb = NULL;
16715738a09dSAlexey Khoroshilov 					rq->stats.drop_total++;
16725738a09dSAlexey Khoroshilov 					skip_page_frags = true;
16735738a09dSAlexey Khoroshilov 					goto rcd_done;
16745738a09dSAlexey Khoroshilov 				}
1675d1a890faSShreyas Bhatewara 
167650a5ce3eSShrikrishna Khare 				dma_unmap_single(&adapter->pdev->dev,
167750a5ce3eSShrikrishna Khare 						 rbi->dma_addr,
1678b0eb57cbSAndy King 						 rbi->len,
1679bf7bec46SChristophe JAILLET 						 DMA_FROM_DEVICE);
1680d1a890faSShreyas Bhatewara 
168150a5ce3eSShrikrishna Khare 				/* Immediate refill */
168250a5ce3eSShrikrishna Khare 				rbi->skb = new_skb;
168350a5ce3eSShrikrishna Khare 				rbi->dma_addr = new_dma_addr;
168450a5ce3eSShrikrishna Khare 				rxd->addr = cpu_to_le64(rbi->dma_addr);
168550a5ce3eSShrikrishna Khare 				rxd->len = rbi->len;
168650a5ce3eSShrikrishna Khare 			}
168750a5ce3eSShrikrishna Khare 
1688bdeed8b0SAndrey Turkin 			skb_record_rx_queue(ctx->skb, rq->qid);
1689d1a890faSShreyas Bhatewara 			skb_put(ctx->skb, rcd->len);
16905318d809SShreyas Bhatewara 
1691190af10fSShrikrishna Khare 			if (VMXNET3_VERSION_GE_2(adapter) &&
169245dac1d6SShreyas Bhatewara 			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
169345dac1d6SShreyas Bhatewara 				struct Vmxnet3_RxCompDescExt *rcdlro;
169440b8c2a1SRonak Doshi 				union Vmxnet3_GenericDesc *gdesc;
169540b8c2a1SRonak Doshi 
169645dac1d6SShreyas Bhatewara 				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
169740b8c2a1SRonak Doshi 				gdesc = (union Vmxnet3_GenericDesc *)rcd;
16985318d809SShreyas Bhatewara 
169945dac1d6SShreyas Bhatewara 				segCnt = rcdlro->segCnt;
170050219538SShrikrishna Khare 				WARN_ON_ONCE(segCnt == 0);
170145dac1d6SShreyas Bhatewara 				mss = rcdlro->mss;
170245dac1d6SShreyas Bhatewara 				if (unlikely(segCnt <= 1))
170345dac1d6SShreyas Bhatewara 					segCnt = 0;
170440b8c2a1SRonak Doshi 				encap_lro = (le32_to_cpu(gdesc->dword[0]) &
170540b8c2a1SRonak Doshi 					(1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
170645dac1d6SShreyas Bhatewara 			} else {
170745dac1d6SShreyas Bhatewara 				segCnt = 0;
170845dac1d6SShreyas Bhatewara 			}
1709d1a890faSShreyas Bhatewara 		} else {
17105318d809SShreyas Bhatewara 			BUG_ON(ctx->skb == NULL && !skip_page_frags);
17115318d809SShreyas Bhatewara 
1712d1a890faSShreyas Bhatewara 			/* non SOP buffer must be type 1 in most cases */
17135318d809SShreyas Bhatewara 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1714d1a890faSShreyas Bhatewara 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1715d1a890faSShreyas Bhatewara 
17165318d809SShreyas Bhatewara 			/* If an sop buffer was dropped, skip all
17175318d809SShreyas Bhatewara 			 * following non-sop fragments. They will be reused.
17185318d809SShreyas Bhatewara 			 */
17195318d809SShreyas Bhatewara 			if (skip_page_frags)
17205318d809SShreyas Bhatewara 				goto rcd_done;
17215318d809SShreyas Bhatewara 
1722c41fcce9SShreyas Bhatewara 			if (rcd->len) {
17235318d809SShreyas Bhatewara 				new_page = alloc_page(GFP_ATOMIC);
17245318d809SShreyas Bhatewara 				/* Replacement page frag could not be allocated.
17255318d809SShreyas Bhatewara 				 * Reuse this page. Drop the pkt and free the
17265318d809SShreyas Bhatewara 				 * skb which contained this page as a frag. Skip
17275318d809SShreyas Bhatewara 				 * processing all the following non-sop frags.
17285318d809SShreyas Bhatewara 				 */
1729c41fcce9SShreyas Bhatewara 				if (unlikely(!new_page)) {
17305318d809SShreyas Bhatewara 					rq->stats.rx_buf_alloc_failure++;
17315318d809SShreyas Bhatewara 					dev_kfree_skb(ctx->skb);
17325318d809SShreyas Bhatewara 					ctx->skb = NULL;
17335318d809SShreyas Bhatewara 					skip_page_frags = true;
17345318d809SShreyas Bhatewara 					goto rcd_done;
17355318d809SShreyas Bhatewara 				}
173658caf637SShrikrishna Khare 				new_dma_addr = dma_map_page(&adapter->pdev->dev,
173758caf637SShrikrishna Khare 							    new_page,
17385738a09dSAlexey Khoroshilov 							    0, PAGE_SIZE,
1739bf7bec46SChristophe JAILLET 							    DMA_FROM_DEVICE);
17405738a09dSAlexey Khoroshilov 				if (dma_mapping_error(&adapter->pdev->dev,
17415738a09dSAlexey Khoroshilov 						      new_dma_addr)) {
17425738a09dSAlexey Khoroshilov 					put_page(new_page);
17435738a09dSAlexey Khoroshilov 					rq->stats.rx_buf_alloc_failure++;
17445738a09dSAlexey Khoroshilov 					dev_kfree_skb(ctx->skb);
17455738a09dSAlexey Khoroshilov 					ctx->skb = NULL;
17465738a09dSAlexey Khoroshilov 					skip_page_frags = true;
17475738a09dSAlexey Khoroshilov 					goto rcd_done;
17485738a09dSAlexey Khoroshilov 				}
17495318d809SShreyas Bhatewara 
1750b0eb57cbSAndy King 				dma_unmap_page(&adapter->pdev->dev,
1751d1a890faSShreyas Bhatewara 					       rbi->dma_addr, rbi->len,
1752bf7bec46SChristophe JAILLET 					       DMA_FROM_DEVICE);
1753d1a890faSShreyas Bhatewara 
1754d1a890faSShreyas Bhatewara 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
17555318d809SShreyas Bhatewara 
17565318d809SShreyas Bhatewara 				/* Immediate refill */
17575318d809SShreyas Bhatewara 				rbi->page = new_page;
17585738a09dSAlexey Khoroshilov 				rbi->dma_addr = new_dma_addr;
17595318d809SShreyas Bhatewara 				rxd->addr = cpu_to_le64(rbi->dma_addr);
17605318d809SShreyas Bhatewara 				rxd->len = rbi->len;
1761d1a890faSShreyas Bhatewara 			}
1762c41fcce9SShreyas Bhatewara 		}
17635318d809SShreyas Bhatewara 
1764d1a890faSShreyas Bhatewara 
176554f00cceSWilliam Tu sop_done:
1766d1a890faSShreyas Bhatewara 		skb = ctx->skb;
1767d1a890faSShreyas Bhatewara 		if (rcd->eop) {
176845dac1d6SShreyas Bhatewara 			u32 mtu = adapter->netdev->mtu;
1769d1a890faSShreyas Bhatewara 			skb->len += skb->data_len;
1770d1a890faSShreyas Bhatewara 
1771ec76d0c2SRonak Doshi #ifdef VMXNET3_RSS
1772ec76d0c2SRonak Doshi 			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1773ec76d0c2SRonak Doshi 			    (adapter->netdev->features & NETIF_F_RXHASH)) {
1774ec76d0c2SRonak Doshi 				enum pkt_hash_types hash_type;
1775ec76d0c2SRonak Doshi 
1776ec76d0c2SRonak Doshi 				switch (rcd->rssType) {
1777ec76d0c2SRonak Doshi 				case VMXNET3_RCD_RSS_TYPE_IPV4:
1778ec76d0c2SRonak Doshi 				case VMXNET3_RCD_RSS_TYPE_IPV6:
1779ec76d0c2SRonak Doshi 					hash_type = PKT_HASH_TYPE_L3;
1780ec76d0c2SRonak Doshi 					break;
1781ec76d0c2SRonak Doshi 				case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1782ec76d0c2SRonak Doshi 				case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1783ec76d0c2SRonak Doshi 				case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
1784ec76d0c2SRonak Doshi 				case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
1785ec76d0c2SRonak Doshi 					hash_type = PKT_HASH_TYPE_L4;
1786ec76d0c2SRonak Doshi 					break;
1787ec76d0c2SRonak Doshi 				default:
1788ec76d0c2SRonak Doshi 					hash_type = PKT_HASH_TYPE_L3;
1789ec76d0c2SRonak Doshi 					break;
1790ec76d0c2SRonak Doshi 				}
1791ec76d0c2SRonak Doshi 				skb_set_hash(skb,
1792ec76d0c2SRonak Doshi 					     le32_to_cpu(rcd->rssHash),
1793ec76d0c2SRonak Doshi 					     hash_type);
1794ec76d0c2SRonak Doshi 			}
1795ec76d0c2SRonak Doshi #endif
1796d1a890faSShreyas Bhatewara 			vmxnet3_rx_csum(adapter, skb,
1797d1a890faSShreyas Bhatewara 					(union Vmxnet3_GenericDesc *)rcd);
1798d1a890faSShreyas Bhatewara 			skb->protocol = eth_type_trans(skb, adapter->netdev);
179940b8c2a1SRonak Doshi 			if ((!rcd->tcp && !encap_lro) ||
1800034f4057SRonak Doshi 			    !(adapter->netdev->features & NETIF_F_LRO))
180145dac1d6SShreyas Bhatewara 				goto not_lro;
1802d1a890faSShreyas Bhatewara 
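			/* The device coalesced this packet (LRO): restore GSO
			 * metadata so the stack can re-segment it.  When the
			 * completion descriptor lacks an MSS, one is derived
			 * below from the estimated header length.
			 */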
180345dac1d6SShreyas Bhatewara 			if (segCnt != 0 && mss != 0) {
180445dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_type = rcd->v4 ?
180545dac1d6SShreyas Bhatewara 					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
180645dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_size = mss;
180745dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_segs = segCnt;
180840b8c2a1SRonak Doshi 			} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
180945dac1d6SShreyas Bhatewara 				u32 hlen;
181045dac1d6SShreyas Bhatewara 
181145dac1d6SShreyas Bhatewara 				hlen = vmxnet3_get_hdr_len(adapter, skb,
181245dac1d6SShreyas Bhatewara 					(union Vmxnet3_GenericDesc *)rcd);
181345dac1d6SShreyas Bhatewara 				if (hlen == 0)
181445dac1d6SShreyas Bhatewara 					goto not_lro;
181545dac1d6SShreyas Bhatewara 
181645dac1d6SShreyas Bhatewara 				skb_shinfo(skb)->gso_type =
181745dac1d6SShreyas Bhatewara 					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
181845dac1d6SShreyas Bhatewara 				if (segCnt != 0) {
181945dac1d6SShreyas Bhatewara 					skb_shinfo(skb)->gso_segs = segCnt;
182045dac1d6SShreyas Bhatewara 					skb_shinfo(skb)->gso_size =
182145dac1d6SShreyas Bhatewara 						DIV_ROUND_UP(skb->len -
182245dac1d6SShreyas Bhatewara 							hlen, segCnt);
182345dac1d6SShreyas Bhatewara 				} else {
182445dac1d6SShreyas Bhatewara 					skb_shinfo(skb)->gso_size = mtu - hlen;
182545dac1d6SShreyas Bhatewara 				}
182645dac1d6SShreyas Bhatewara 			}
182745dac1d6SShreyas Bhatewara not_lro:
182872e85c45SJesse Gross 			if (unlikely(rcd->ts))
182986a9bad3SPatrick McHardy 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
183072e85c45SJesse Gross 
18313bced313SRonak Doshi 			/* Use GRO callback if UPT is enabled */
18323bced313SRonak Doshi 			if ((adapter->netdev->features & NETIF_F_LRO) &&
18333bced313SRonak Doshi 			    !rq->shared->updateRxProd)
1834d1a890faSShreyas Bhatewara 				netif_receive_skb(skb);
1835213ade8cSJesse Gross 			else
1836213ade8cSJesse Gross 				napi_gro_receive(&rq->napi, skb);
1837d1a890faSShreyas Bhatewara 
1838d1a890faSShreyas Bhatewara 			ctx->skb = NULL;
183940b8c2a1SRonak Doshi 			encap_lro = false;
18400769636cSNeil Horman 			num_pkts++;
1841d1a890faSShreyas Bhatewara 		}
1842d1a890faSShreyas Bhatewara 
1843d1a890faSShreyas Bhatewara rcd_done:
18445318d809SShreyas Bhatewara 		/* device may have skipped some rx descs */
18452c5a5748SRonak Doshi 		ring = rq->rx_ring + ring_idx;
18462c5a5748SRonak Doshi 		rbi->comp_state = VMXNET3_RXD_COMP_DONE;
18472c5a5748SRonak Doshi 
18482c5a5748SRonak Doshi 		comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
18492c5a5748SRonak Doshi 		fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
18502c5a5748SRonak Doshi 			      idx - ring->next2fill - 1;
18512c5a5748SRonak Doshi 		if (!ring->isOutOfOrder || fill_offset >= comp_offset)
18525318d809SShreyas Bhatewara 			ring->next2comp = idx;
18535318d809SShreyas Bhatewara 		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1854f3002c13Shpreg@vmware.com 
1855f3002c13Shpreg@vmware.com 		/* Ensure that the writes to rxd->gen bits will be observed
1856f3002c13Shpreg@vmware.com 		 * after all other writes to rxd objects.
1857f3002c13Shpreg@vmware.com 		 */
1858f3002c13Shpreg@vmware.com 		dma_wmb();
1859f3002c13Shpreg@vmware.com 
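		/* Refill in strict ring order and stop at the first buffer
		 * whose completion is still pending.  With out-of-order
		 * completions (VMXNET3_CAP_OOORX_COMP) a later buffer may
		 * complete first; such holes are tracked via isOutOfOrder and
		 * filled on a later pass.
		 */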
18605318d809SShreyas Bhatewara 		while (num_to_alloc) {
18612c5a5748SRonak Doshi 			rbi = rq->buf_info[ring_idx] + ring->next2fill;
18622c5a5748SRonak Doshi 			if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
18632c5a5748SRonak Doshi 				goto refill_buf;
18642c5a5748SRonak Doshi 			if (ring_idx == 0) {
18652c5a5748SRonak Doshi 				/* ring0 Type1 buffers can get skipped; re-fill them */
18662c5a5748SRonak Doshi 				if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
18672c5a5748SRonak Doshi 					goto refill_buf;
18682c5a5748SRonak Doshi 			}
18692c5a5748SRonak Doshi 			if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
18702c5a5748SRonak Doshi refill_buf:
18715318d809SShreyas Bhatewara 				vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
18725318d809SShreyas Bhatewara 						  &rxCmdDesc);
18732c5a5748SRonak Doshi 				WARN_ON(!rxd->addr);
1874d1a890faSShreyas Bhatewara 
18755318d809SShreyas Bhatewara 				/* Recv desc is ready to be used by the device */
18765318d809SShreyas Bhatewara 				rxd->gen = ring->gen;
18775318d809SShreyas Bhatewara 				vmxnet3_cmd_ring_adv_next2fill(ring);
18782c5a5748SRonak Doshi 				rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
18795318d809SShreyas Bhatewara 				num_to_alloc--;
18802c5a5748SRonak Doshi 			} else {
18812c5a5748SRonak Doshi 				/* rx completion hasn't occurred */
18822c5a5748SRonak Doshi 				ring->isOutOfOrder = 1;
18832c5a5748SRonak Doshi 				break;
18842c5a5748SRonak Doshi 			}
18852c5a5748SRonak Doshi 		}
18862c5a5748SRonak Doshi 
18872c5a5748SRonak Doshi 		if (num_to_alloc == 0) {
18882c5a5748SRonak Doshi 			ring->isOutOfOrder = 0;
18895318d809SShreyas Bhatewara 		}
1890d1a890faSShreyas Bhatewara 
1891d1a890faSShreyas Bhatewara 		/* if needed, update the register */
18922c5a5748SRonak Doshi 		if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
1893d1a890faSShreyas Bhatewara 			VMXNET3_WRITE_BAR0_REG(adapter,
1894d1a890faSShreyas Bhatewara 					       rxprod_reg[ring_idx] + rq->qid * 8,
18955318d809SShreyas Bhatewara 					       ring->next2fill);
1896d1a890faSShreyas Bhatewara 		}
1897d1a890faSShreyas Bhatewara 
1898d1a890faSShreyas Bhatewara 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1899115924b6SShreyas Bhatewara 		vmxnet3_getRxComp(rcd,
1900115924b6SShreyas Bhatewara 				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1901d1a890faSShreyas Bhatewara 	}
190254f00cceSWilliam Tu 	if (need_flush)
190354f00cceSWilliam Tu 		xdp_do_flush();
1904d1a890faSShreyas Bhatewara 
19050769636cSNeil Horman 	return num_pkts;
1906d1a890faSShreyas Bhatewara }
1907d1a890faSShreyas Bhatewara 
1908d1a890faSShreyas Bhatewara 
1909d1a890faSShreyas Bhatewara static void
1910d1a890faSShreyas Bhatewara vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1911d1a890faSShreyas Bhatewara 		   struct vmxnet3_adapter *adapter)
1912d1a890faSShreyas Bhatewara {
1913d1a890faSShreyas Bhatewara 	u32 i, ring_idx;
1914d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxDesc *rxd;
1915d1a890faSShreyas Bhatewara 
1916edf410cbSZixuan Fu 	/* ring has already been cleaned up */
1917edf410cbSZixuan Fu 	if (!rq->rx_ring[0].base)
1918edf410cbSZixuan Fu 		return;
1919edf410cbSZixuan Fu 
1920d1a890faSShreyas Bhatewara 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1921d1a890faSShreyas Bhatewara 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
192254f00cceSWilliam Tu 			struct vmxnet3_rx_buf_info *rbi;
1923115924b6SShreyas Bhatewara #ifdef __BIG_ENDIAN_BITFIELD
1924115924b6SShreyas Bhatewara 			struct Vmxnet3_RxDesc rxDesc;
1925115924b6SShreyas Bhatewara #endif
192654f00cceSWilliam Tu 
192754f00cceSWilliam Tu 			rbi = &rq->buf_info[ring_idx][i];
1928115924b6SShreyas Bhatewara 			vmxnet3_getRxDesc(rxd,
1929115924b6SShreyas Bhatewara 				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1930d1a890faSShreyas Bhatewara 
1931d1a890faSShreyas Bhatewara 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
193254f00cceSWilliam Tu 			    rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) {
193354f00cceSWilliam Tu 				page_pool_recycle_direct(rq->page_pool,
193454f00cceSWilliam Tu 							 rbi->page);
193554f00cceSWilliam Tu 				rbi->page = NULL;
193654f00cceSWilliam Tu 			} else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
193754f00cceSWilliam Tu 				   rbi->skb) {
1938b0eb57cbSAndy King 				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1939bf7bec46SChristophe JAILLET 						 rxd->len, DMA_FROM_DEVICE);
194054f00cceSWilliam Tu 				dev_kfree_skb(rbi->skb);
194154f00cceSWilliam Tu 				rbi->skb = NULL;
1942d1a890faSShreyas Bhatewara 			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
194354f00cceSWilliam Tu 				   rbi->page) {
1944b0eb57cbSAndy King 				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1945bf7bec46SChristophe JAILLET 					       rxd->len, DMA_FROM_DEVICE);
194654f00cceSWilliam Tu 				put_page(rbi->page);
194754f00cceSWilliam Tu 				rbi->page = NULL;
1948d1a890faSShreyas Bhatewara 			}
1949d1a890faSShreyas Bhatewara 		}
1950d1a890faSShreyas Bhatewara 
1951d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1952d1a890faSShreyas Bhatewara 		rq->rx_ring[ring_idx].next2fill =
1953d1a890faSShreyas Bhatewara 					rq->rx_ring[ring_idx].next2comp = 0;
1954d1a890faSShreyas Bhatewara 	}
1955d1a890faSShreyas Bhatewara 
1956d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1957d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
1958d1a890faSShreyas Bhatewara }
1959d1a890faSShreyas Bhatewara 
1960d1a890faSShreyas Bhatewara 
196109c5088eSShreyas Bhatewara static void
196209c5088eSShreyas Bhatewara vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
196309c5088eSShreyas Bhatewara {
196409c5088eSShreyas Bhatewara 	int i;
196509c5088eSShreyas Bhatewara 
196609c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
196709c5088eSShreyas Bhatewara 		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
196854f00cceSWilliam Tu 	rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
196909c5088eSShreyas Bhatewara }
197009c5088eSShreyas Bhatewara 
197109c5088eSShreyas Bhatewara 
1972280b74f7Sstephen hemminger static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1973d1a890faSShreyas Bhatewara 			       struct vmxnet3_adapter *adapter)
1974d1a890faSShreyas Bhatewara {
1975d1a890faSShreyas Bhatewara 	int i;
1976d1a890faSShreyas Bhatewara 	int j;
1977d1a890faSShreyas Bhatewara 
1978d1a890faSShreyas Bhatewara 	/* all rx buffers must have already been freed */
1979d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1980d1a890faSShreyas Bhatewara 		if (rq->buf_info[i]) {
1981d1a890faSShreyas Bhatewara 			for (j = 0; j < rq->rx_ring[i].size; j++)
1982d1a890faSShreyas Bhatewara 				BUG_ON(rq->buf_info[i][j].page != NULL);
1983d1a890faSShreyas Bhatewara 		}
1984d1a890faSShreyas Bhatewara 	}
1985d1a890faSShreyas Bhatewara 
1986d1a890faSShreyas Bhatewara 
1987d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
1988d1a890faSShreyas Bhatewara 		if (rq->rx_ring[i].base) {
1989b0eb57cbSAndy King 			dma_free_coherent(&adapter->pdev->dev,
1990b0eb57cbSAndy King 					  rq->rx_ring[i].size
1991d1a890faSShreyas Bhatewara 					  * sizeof(struct Vmxnet3_RxDesc),
1992d1a890faSShreyas Bhatewara 					  rq->rx_ring[i].base,
1993d1a890faSShreyas Bhatewara 					  rq->rx_ring[i].basePA);
1994d1a890faSShreyas Bhatewara 			rq->rx_ring[i].base = NULL;
1995d1a890faSShreyas Bhatewara 		}
1996d1a890faSShreyas Bhatewara 	}
1997d1a890faSShreyas Bhatewara 
199854f00cceSWilliam Tu 	if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
199954f00cceSWilliam Tu 		xdp_rxq_info_unreg(&rq->xdp_rxq);
200054f00cceSWilliam Tu 	page_pool_destroy(rq->page_pool);
200154f00cceSWilliam Tu 	rq->page_pool = NULL;
200254f00cceSWilliam Tu 
200350a5ce3eSShrikrishna Khare 	if (rq->data_ring.base) {
200450a5ce3eSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
200550a5ce3eSShrikrishna Khare 				  rq->rx_ring[0].size * rq->data_ring.desc_size,
200650a5ce3eSShrikrishna Khare 				  rq->data_ring.base, rq->data_ring.basePA);
200750a5ce3eSShrikrishna Khare 		rq->data_ring.base = NULL;
200850a5ce3eSShrikrishna Khare 	}
200950a5ce3eSShrikrishna Khare 
2010d1a890faSShreyas Bhatewara 	if (rq->comp_ring.base) {
2011b0eb57cbSAndy King 		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
2012b0eb57cbSAndy King 				  * sizeof(struct Vmxnet3_RxCompDesc),
2013d1a890faSShreyas Bhatewara 				  rq->comp_ring.base, rq->comp_ring.basePA);
2014d1a890faSShreyas Bhatewara 		rq->comp_ring.base = NULL;
2015d1a890faSShreyas Bhatewara 	}
2016b0eb57cbSAndy King 
2017de1da8bcSRonak Doshi 	kfree(rq->buf_info[0]);
2018de1da8bcSRonak Doshi 	rq->buf_info[0] = NULL;
2019de1da8bcSRonak Doshi 	rq->buf_info[1] = NULL;
2020d1a890faSShreyas Bhatewara }
2021d1a890faSShreyas Bhatewara 
2022bb40aca7SWei Yongjun static void
202350a5ce3eSShrikrishna Khare vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
202450a5ce3eSShrikrishna Khare {
202550a5ce3eSShrikrishna Khare 	int i;
202650a5ce3eSShrikrishna Khare 
202750a5ce3eSShrikrishna Khare 	for (i = 0; i < adapter->num_rx_queues; i++) {
202850a5ce3eSShrikrishna Khare 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
202950a5ce3eSShrikrishna Khare 
203050a5ce3eSShrikrishna Khare 		if (rq->data_ring.base) {
203150a5ce3eSShrikrishna Khare 			dma_free_coherent(&adapter->pdev->dev,
203250a5ce3eSShrikrishna Khare 					  (rq->rx_ring[0].size *
203350a5ce3eSShrikrishna Khare 					  rq->data_ring.desc_size),
203450a5ce3eSShrikrishna Khare 					  rq->data_ring.base,
203550a5ce3eSShrikrishna Khare 					  rq->data_ring.basePA);
203650a5ce3eSShrikrishna Khare 			rq->data_ring.base = NULL;
203750a5ce3eSShrikrishna Khare 		}
20389ee14af2SMatthias Stocker 		rq->data_ring.desc_size = 0;
203950a5ce3eSShrikrishna Khare 	}
204050a5ce3eSShrikrishna Khare }
2041d1a890faSShreyas Bhatewara 
2042d1a890faSShreyas Bhatewara static int
2043d1a890faSShreyas Bhatewara vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
2044d1a890faSShreyas Bhatewara 		struct vmxnet3_adapter  *adapter)
2045d1a890faSShreyas Bhatewara {
204654f00cceSWilliam Tu 	int i, err;
2047d1a890faSShreyas Bhatewara 
2048d1a890faSShreyas Bhatewara 	/* initialize buf_info */
2049d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[0].size; i++) {
205154f00cceSWilliam Tu 		/* 1st buf for a pkt is skbuff or xdp page */
2052d1a890faSShreyas Bhatewara 		if (i % adapter->rx_buf_per_pkt == 0) {
205354f00cceSWilliam Tu 			rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
205454f00cceSWilliam Tu 						      VMXNET3_RX_BUF_XDP :
205554f00cceSWilliam Tu 						      VMXNET3_RX_BUF_SKB;
2056d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = adapter->skb_buf_size;
2057d1a890faSShreyas Bhatewara 		} else { /* subsequent bufs for a pkt are frags */
2058d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
2059d1a890faSShreyas Bhatewara 			rq->buf_info[0][i].len = PAGE_SIZE;
2060d1a890faSShreyas Bhatewara 		}
2061d1a890faSShreyas Bhatewara 	}
2062d1a890faSShreyas Bhatewara 	for (i = 0; i < rq->rx_ring[1].size; i++) {
2063d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
2064d1a890faSShreyas Bhatewara 		rq->buf_info[1][i].len = PAGE_SIZE;
2065d1a890faSShreyas Bhatewara 	}
2066d1a890faSShreyas Bhatewara 
2067d1a890faSShreyas Bhatewara 	/* reset internal state and allocate buffers for both rings */
2068d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
2069d1a890faSShreyas Bhatewara 		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
2070d1a890faSShreyas Bhatewara 
2071d1a890faSShreyas Bhatewara 		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
2072d1a890faSShreyas Bhatewara 		       sizeof(struct Vmxnet3_RxDesc));
2073d1a890faSShreyas Bhatewara 		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
20742c5a5748SRonak Doshi 		rq->rx_ring[i].isOutOfOrder = 0;
2075d1a890faSShreyas Bhatewara 	}
207654f00cceSWilliam Tu 
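	/* one page pool serves both rx rings, so size it for their
	 * combined depth
	 */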
207754f00cceSWilliam Tu 	err = vmxnet3_create_pp(adapter, rq,
207854f00cceSWilliam Tu 				rq->rx_ring[0].size + rq->rx_ring[1].size);
207954f00cceSWilliam Tu 	if (err)
208054f00cceSWilliam Tu 		return err;
208154f00cceSWilliam Tu 
2082d1a890faSShreyas Bhatewara 	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
2083d1a890faSShreyas Bhatewara 				    adapter) == 0) {
208454f00cceSWilliam Tu 		xdp_rxq_info_unreg(&rq->xdp_rxq);
208554f00cceSWilliam Tu 		page_pool_destroy(rq->page_pool);
208654f00cceSWilliam Tu 		rq->page_pool = NULL;
208754f00cceSWilliam Tu 
2088d1a890faSShreyas Bhatewara 		/* the 1st ring must have at least 1 rx buffer */
2089d1a890faSShreyas Bhatewara 		return -ENOMEM;
2090d1a890faSShreyas Bhatewara 	}
2091d1a890faSShreyas Bhatewara 	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
2092d1a890faSShreyas Bhatewara 
2093d1a890faSShreyas Bhatewara 	/* reset the comp ring */
2094d1a890faSShreyas Bhatewara 	rq->comp_ring.next2proc = 0;
2095d1a890faSShreyas Bhatewara 	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
2096d1a890faSShreyas Bhatewara 	       sizeof(struct Vmxnet3_RxCompDesc));
2097d1a890faSShreyas Bhatewara 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
2098d1a890faSShreyas Bhatewara 
2099d1a890faSShreyas Bhatewara 	/* reset rxctx */
2100d1a890faSShreyas Bhatewara 	rq->rx_ctx.skb = NULL;
2101d1a890faSShreyas Bhatewara 
2102d1a890faSShreyas Bhatewara 	/* stats are not reset */
2103d1a890faSShreyas Bhatewara 	return 0;
2104d1a890faSShreyas Bhatewara }
2105d1a890faSShreyas Bhatewara 
2106d1a890faSShreyas Bhatewara 
2107d1a890faSShreyas Bhatewara static int
210809c5088eSShreyas Bhatewara vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
210909c5088eSShreyas Bhatewara {
211009c5088eSShreyas Bhatewara 	int i, err = 0;
211109c5088eSShreyas Bhatewara 
211209c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
211309c5088eSShreyas Bhatewara 		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
211409c5088eSShreyas Bhatewara 		if (unlikely(err)) {
211509c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev,
211609c5088eSShreyas Bhatewara 				"%s: failed to initialize rx queue%i\n",
211709c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
211809c5088eSShreyas Bhatewara 			break;
211909c5088eSShreyas Bhatewara 		}
212009c5088eSShreyas Bhatewara 	}
212109c5088eSShreyas Bhatewara 	return err;
212309c5088eSShreyas Bhatewara }
212409c5088eSShreyas Bhatewara 
212509c5088eSShreyas Bhatewara 
212609c5088eSShreyas Bhatewara static int
2127d1a890faSShreyas Bhatewara vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
2128d1a890faSShreyas Bhatewara {
2129d1a890faSShreyas Bhatewara 	int i;
2130d1a890faSShreyas Bhatewara 	size_t sz;
2131d1a890faSShreyas Bhatewara 	struct vmxnet3_rx_buf_info *bi;
2132d1a890faSShreyas Bhatewara 
2133d1a890faSShreyas Bhatewara 	for (i = 0; i < 2; i++) {
2135d1a890faSShreyas Bhatewara 		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
2136b0eb57cbSAndy King 		rq->rx_ring[i].base = dma_alloc_coherent(
2137b0eb57cbSAndy King 						&adapter->pdev->dev, sz,
2138b0eb57cbSAndy King 						&rq->rx_ring[i].basePA,
2139b0eb57cbSAndy King 						GFP_KERNEL);
2140d1a890faSShreyas Bhatewara 		if (!rq->rx_ring[i].base) {
2141204a6e65SStephen Hemminger 			netdev_err(adapter->netdev,
2142204a6e65SStephen Hemminger 				   "failed to allocate rx ring %d\n", i);
2143d1a890faSShreyas Bhatewara 			goto err;
2144d1a890faSShreyas Bhatewara 		}
2145d1a890faSShreyas Bhatewara 	}
2146d1a890faSShreyas Bhatewara 
214750a5ce3eSShrikrishna Khare 	if (adapter->rxdataring_enabled && rq->data_ring.desc_size != 0) {
214850a5ce3eSShrikrishna Khare 		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
214950a5ce3eSShrikrishna Khare 		rq->data_ring.base =
215050a5ce3eSShrikrishna Khare 			dma_alloc_coherent(&adapter->pdev->dev, sz,
215150a5ce3eSShrikrishna Khare 					   &rq->data_ring.basePA,
215250a5ce3eSShrikrishna Khare 					   GFP_KERNEL);
215350a5ce3eSShrikrishna Khare 		if (!rq->data_ring.base) {
215450a5ce3eSShrikrishna Khare 			netdev_err(adapter->netdev,
215550a5ce3eSShrikrishna Khare 				   "rx data ring will be disabled\n");
215650a5ce3eSShrikrishna Khare 			adapter->rxdataring_enabled = false;
215750a5ce3eSShrikrishna Khare 		}
215850a5ce3eSShrikrishna Khare 	} else {
215950a5ce3eSShrikrishna Khare 		rq->data_ring.base = NULL;
216050a5ce3eSShrikrishna Khare 		rq->data_ring.desc_size = 0;
216150a5ce3eSShrikrishna Khare 	}
216250a5ce3eSShrikrishna Khare 
2163d1a890faSShreyas Bhatewara 	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
2164b0eb57cbSAndy King 	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
2165b0eb57cbSAndy King 						&rq->comp_ring.basePA,
2166b0eb57cbSAndy King 						GFP_KERNEL);
2167d1a890faSShreyas Bhatewara 	if (!rq->comp_ring.base) {
2168204a6e65SStephen Hemminger 		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
2169d1a890faSShreyas Bhatewara 		goto err;
2170d1a890faSShreyas Bhatewara 	}
2171d1a890faSShreyas Bhatewara 
2172de1da8bcSRonak Doshi 	bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
2173de1da8bcSRonak Doshi 			  sizeof(rq->buf_info[0][0]), GFP_KERNEL,
2174de1da8bcSRonak Doshi 			  dev_to_node(&adapter->pdev->dev));
2175e404decbSJoe Perches 	if (!bi)
2176d1a890faSShreyas Bhatewara 		goto err;
2177e404decbSJoe Perches 
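	/* one allocation backs both rings: buf_info[1] simply points past
	 * ring 0's entries
	 */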
2178d1a890faSShreyas Bhatewara 	rq->buf_info[0] = bi;
2179d1a890faSShreyas Bhatewara 	rq->buf_info[1] = bi + rq->rx_ring[0].size;
2180d1a890faSShreyas Bhatewara 
2181d1a890faSShreyas Bhatewara 	return 0;
2182d1a890faSShreyas Bhatewara 
2183d1a890faSShreyas Bhatewara err:
2184d1a890faSShreyas Bhatewara 	vmxnet3_rq_destroy(rq, adapter);
2185d1a890faSShreyas Bhatewara 	return -ENOMEM;
2186d1a890faSShreyas Bhatewara }
2187d1a890faSShreyas Bhatewara 
2188d1a890faSShreyas Bhatewara 
218954f00cceSWilliam Tu int
219009c5088eSShreyas Bhatewara vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
219109c5088eSShreyas Bhatewara {
219209c5088eSShreyas Bhatewara 	int i, err = 0;
219309c5088eSShreyas Bhatewara 
219450a5ce3eSShrikrishna Khare 	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
219550a5ce3eSShrikrishna Khare 
219609c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
219709c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
219809c5088eSShreyas Bhatewara 		if (unlikely(err)) {
219909c5088eSShreyas Bhatewara 			dev_err(&adapter->netdev->dev,
220009c5088eSShreyas Bhatewara 				"%s: failed to create rx queue%i\n",
220109c5088eSShreyas Bhatewara 				adapter->netdev->name, i);
220209c5088eSShreyas Bhatewara 			goto err_out;
220309c5088eSShreyas Bhatewara 		}
220409c5088eSShreyas Bhatewara 	}
220550a5ce3eSShrikrishna Khare 
220650a5ce3eSShrikrishna Khare 	if (!adapter->rxdataring_enabled)
220750a5ce3eSShrikrishna Khare 		vmxnet3_rq_destroy_all_rxdataring(adapter);
220850a5ce3eSShrikrishna Khare 
220909c5088eSShreyas Bhatewara 	return err;
221009c5088eSShreyas Bhatewara err_out:
221109c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
221209c5088eSShreyas Bhatewara 	return err;
221409c5088eSShreyas Bhatewara }
221509c5088eSShreyas Bhatewara 
221609c5088eSShreyas Bhatewara /* Multiple queue aware polling function for tx and rx */
221709c5088eSShreyas Bhatewara 
221809c5088eSShreyas Bhatewara static int
2219d1a890faSShreyas Bhatewara vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
2220d1a890faSShreyas Bhatewara {
222109c5088eSShreyas Bhatewara 	int rcd_done = 0, i;
2222d1a890faSShreyas Bhatewara 	if (unlikely(adapter->shared->ecr))
2223d1a890faSShreyas Bhatewara 		vmxnet3_process_events(adapter);
222409c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
222509c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
2226d1a890faSShreyas Bhatewara 
222709c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
222809c5088eSShreyas Bhatewara 		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
222909c5088eSShreyas Bhatewara 						   adapter, budget);
223009c5088eSShreyas Bhatewara 	return rcd_done;
2231d1a890faSShreyas Bhatewara }
2232d1a890faSShreyas Bhatewara 
2233d1a890faSShreyas Bhatewara 
2234d1a890faSShreyas Bhatewara static int
2235d1a890faSShreyas Bhatewara vmxnet3_poll(struct napi_struct *napi, int budget)
2236d1a890faSShreyas Bhatewara {
223709c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
223809c5088eSShreyas Bhatewara 					  struct vmxnet3_rx_queue, napi);
2239d1a890faSShreyas Bhatewara 	int rxd_done;
2240d1a890faSShreyas Bhatewara 
224109c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
2242d1a890faSShreyas Bhatewara 
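	/* staying under budget means all work is done: ack NAPI and
	 * re-arm the interrupts
	 */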
2243d1a890faSShreyas Bhatewara 	if (rxd_done < budget) {
22446ad20165SEric Dumazet 		napi_complete_done(napi, rxd_done);
224509c5088eSShreyas Bhatewara 		vmxnet3_enable_all_intrs(rx_queue->adapter);
2246d1a890faSShreyas Bhatewara 	}
2247d1a890faSShreyas Bhatewara 	return rxd_done;
2248d1a890faSShreyas Bhatewara }
2249d1a890faSShreyas Bhatewara 
225009c5088eSShreyas Bhatewara /*
225109c5088eSShreyas Bhatewara  * NAPI polling function for MSI-X mode with multiple Rx queues
225209c5088eSShreyas Bhatewara  * Returns the # of NAPI credits consumed (# of rx descriptors processed)
225309c5088eSShreyas Bhatewara  */
225409c5088eSShreyas Bhatewara 
225509c5088eSShreyas Bhatewara static int
225609c5088eSShreyas Bhatewara vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
225709c5088eSShreyas Bhatewara {
225809c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = container_of(napi,
225909c5088eSShreyas Bhatewara 						struct vmxnet3_rx_queue, napi);
226009c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
226109c5088eSShreyas Bhatewara 	int rxd_done;
226209c5088eSShreyas Bhatewara 
226309c5088eSShreyas Bhatewara 	/* When sharing interrupt with corresponding tx queue, process
226409c5088eSShreyas Bhatewara 	 * tx completions in that queue as well
226509c5088eSShreyas Bhatewara 	 */
226609c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
226709c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue *tq =
226809c5088eSShreyas Bhatewara 				&adapter->tx_queue[rq - adapter->rx_queue];
226909c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
227009c5088eSShreyas Bhatewara 	}
227109c5088eSShreyas Bhatewara 
227209c5088eSShreyas Bhatewara 	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
227309c5088eSShreyas Bhatewara 
227409c5088eSShreyas Bhatewara 	if (rxd_done < budget) {
22756ad20165SEric Dumazet 		napi_complete_done(napi, rxd_done);
227609c5088eSShreyas Bhatewara 		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
227709c5088eSShreyas Bhatewara 	}
227809c5088eSShreyas Bhatewara 	return rxd_done;
227909c5088eSShreyas Bhatewara }
228009c5088eSShreyas Bhatewara 
228109c5088eSShreyas Bhatewara 
228209c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
228309c5088eSShreyas Bhatewara 
228409c5088eSShreyas Bhatewara /*
228509c5088eSShreyas Bhatewara  * Handle completion interrupts on tx queues
228609c5088eSShreyas Bhatewara  * Returns whether or not the intr is handled
228709c5088eSShreyas Bhatewara  */
228809c5088eSShreyas Bhatewara 
228909c5088eSShreyas Bhatewara static irqreturn_t
229009c5088eSShreyas Bhatewara vmxnet3_msix_tx(int irq, void *data)
229109c5088eSShreyas Bhatewara {
229209c5088eSShreyas Bhatewara 	struct vmxnet3_tx_queue *tq = data;
229309c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = tq->adapter;
229409c5088eSShreyas Bhatewara 
229509c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
229609c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
229709c5088eSShreyas Bhatewara 
229809c5088eSShreyas Bhatewara 	/* Handle the case where only one irq is allocated for all tx queues */
229909c5088eSShreyas Bhatewara 	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
230009c5088eSShreyas Bhatewara 		int i;
230109c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
230209c5088eSShreyas Bhatewara 			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
230309c5088eSShreyas Bhatewara 			vmxnet3_tq_tx_complete(txq, adapter);
230409c5088eSShreyas Bhatewara 		}
230509c5088eSShreyas Bhatewara 	} else {
230609c5088eSShreyas Bhatewara 		vmxnet3_tq_tx_complete(tq, adapter);
230709c5088eSShreyas Bhatewara 	}
230809c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
230909c5088eSShreyas Bhatewara 
231009c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
231109c5088eSShreyas Bhatewara }
231209c5088eSShreyas Bhatewara 
231309c5088eSShreyas Bhatewara 
231409c5088eSShreyas Bhatewara /*
231509c5088eSShreyas Bhatewara  * Handle completion interrupts on rx queues. Returns whether or not the
231609c5088eSShreyas Bhatewara  * intr is handled
231709c5088eSShreyas Bhatewara  */
231809c5088eSShreyas Bhatewara 
231909c5088eSShreyas Bhatewara static irqreturn_t
232009c5088eSShreyas Bhatewara vmxnet3_msix_rx(int irq, void *data)
232109c5088eSShreyas Bhatewara {
232209c5088eSShreyas Bhatewara 	struct vmxnet3_rx_queue *rq = data;
232309c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = rq->adapter;
232409c5088eSShreyas Bhatewara 
232509c5088eSShreyas Bhatewara 	/* disable intr if needed */
232609c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
232709c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
232809c5088eSShreyas Bhatewara 	napi_schedule(&rq->napi);
232909c5088eSShreyas Bhatewara 
233009c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
233109c5088eSShreyas Bhatewara }
233209c5088eSShreyas Bhatewara 
233309c5088eSShreyas Bhatewara /*
233409c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
233509c5088eSShreyas Bhatewara  *
233609c5088eSShreyas Bhatewara  * vmxnet3_msix_event --
233709c5088eSShreyas Bhatewara  *
233809c5088eSShreyas Bhatewara  *    vmxnet3 msix event intr handler
233909c5088eSShreyas Bhatewara  *
234009c5088eSShreyas Bhatewara  * Result:
234109c5088eSShreyas Bhatewara  *    whether or not the intr is handled
234209c5088eSShreyas Bhatewara  *
234309c5088eSShreyas Bhatewara  *----------------------------------------------------------------------------
234409c5088eSShreyas Bhatewara  */
234509c5088eSShreyas Bhatewara 
234609c5088eSShreyas Bhatewara static irqreturn_t
234709c5088eSShreyas Bhatewara vmxnet3_msix_event(int irq, void *data)
234809c5088eSShreyas Bhatewara {
234909c5088eSShreyas Bhatewara 	struct net_device *dev = data;
235009c5088eSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
235109c5088eSShreyas Bhatewara 
235209c5088eSShreyas Bhatewara 	/* disable intr if needed */
235309c5088eSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
235409c5088eSShreyas Bhatewara 		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
235509c5088eSShreyas Bhatewara 
235609c5088eSShreyas Bhatewara 	if (adapter->shared->ecr)
235709c5088eSShreyas Bhatewara 		vmxnet3_process_events(adapter);
235809c5088eSShreyas Bhatewara 
235909c5088eSShreyas Bhatewara 	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
236009c5088eSShreyas Bhatewara 
236109c5088eSShreyas Bhatewara 	return IRQ_HANDLED;
236209c5088eSShreyas Bhatewara }
236309c5088eSShreyas Bhatewara 
236409c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI  */
236509c5088eSShreyas Bhatewara 
2366d1a890faSShreyas Bhatewara 
2367d1a890faSShreyas Bhatewara /* Interrupt handler for vmxnet3  */
2368d1a890faSShreyas Bhatewara static irqreturn_t
2369d1a890faSShreyas Bhatewara vmxnet3_intr(int irq, void *dev_id)
2370d1a890faSShreyas Bhatewara {
2371d1a890faSShreyas Bhatewara 	struct net_device *dev = dev_id;
2372d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
2373d1a890faSShreyas Bhatewara 
237409c5088eSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_INTX) {
2375d1a890faSShreyas Bhatewara 		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2376d1a890faSShreyas Bhatewara 		if (unlikely(icr == 0))
2377d1a890faSShreyas Bhatewara 			/* not ours */
2378d1a890faSShreyas Bhatewara 			return IRQ_NONE;
2379d1a890faSShreyas Bhatewara 	}
2380d1a890faSShreyas Bhatewara 
2382d1a890faSShreyas Bhatewara 	/* disable intr if needed */
2383d1a890faSShreyas Bhatewara 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
238409c5088eSShreyas Bhatewara 		vmxnet3_disable_all_intrs(adapter);
2385d1a890faSShreyas Bhatewara 
238609c5088eSShreyas Bhatewara 	napi_schedule(&adapter->rx_queue[0].napi);
2387d1a890faSShreyas Bhatewara 
2388d1a890faSShreyas Bhatewara 	return IRQ_HANDLED;
2389d1a890faSShreyas Bhatewara }
2390d1a890faSShreyas Bhatewara 
2391d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
2392d1a890faSShreyas Bhatewara 
2393d1a890faSShreyas Bhatewara /* netpoll callback. */
2394d1a890faSShreyas Bhatewara static void
2395d1a890faSShreyas Bhatewara vmxnet3_netpoll(struct net_device *netdev)
2396d1a890faSShreyas Bhatewara {
2397d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2398d1a890faSShreyas Bhatewara 
2399d25f06eaSNeil Horman 	switch (adapter->intr.type) {
24000a8d8c44SArnd Bergmann #ifdef CONFIG_PCI_MSI
24010a8d8c44SArnd Bergmann 	case VMXNET3_IT_MSIX: {
24020a8d8c44SArnd Bergmann 		int i;
2403d25f06eaSNeil Horman 		for (i = 0; i < adapter->num_rx_queues; i++)
2404d25f06eaSNeil Horman 			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2405d25f06eaSNeil Horman 		break;
24060a8d8c44SArnd Bergmann 	}
24070a8d8c44SArnd Bergmann #endif
2408d25f06eaSNeil Horman 	case VMXNET3_IT_MSI:
2409d25f06eaSNeil Horman 	default:
2410d25f06eaSNeil Horman 		vmxnet3_intr(0, adapter->netdev);
2411d25f06eaSNeil Horman 		break;
2412d25f06eaSNeil Horman 	}
2414d1a890faSShreyas Bhatewara }
241509c5088eSShreyas Bhatewara #endif	/* CONFIG_NET_POLL_CONTROLLER */
2416d1a890faSShreyas Bhatewara 
2417d1a890faSShreyas Bhatewara static int
2418d1a890faSShreyas Bhatewara vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2419d1a890faSShreyas Bhatewara {
242009c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
242109c5088eSShreyas Bhatewara 	int err = 0, i;
242209c5088eSShreyas Bhatewara 	int vector = 0;
2423d1a890faSShreyas Bhatewara 
24248f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
2425d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
242609c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_tx_queues; i++) {
242709c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
242809c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
242909c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
243009c5088eSShreyas Bhatewara 				err = request_irq(
243109c5088eSShreyas Bhatewara 					      intr->msix_entries[vector].vector,
243209c5088eSShreyas Bhatewara 					      vmxnet3_msix_tx, 0,
243309c5088eSShreyas Bhatewara 					      adapter->tx_queue[i].name,
243409c5088eSShreyas Bhatewara 					      &adapter->tx_queue[i]);
243509c5088eSShreyas Bhatewara 			} else {
243609c5088eSShreyas Bhatewara 				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
243709c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
243809c5088eSShreyas Bhatewara 			}
243909c5088eSShreyas Bhatewara 			if (err) {
244009c5088eSShreyas Bhatewara 				dev_err(&adapter->netdev->dev,
244109c5088eSShreyas Bhatewara 					"Failed to request irq for MSIX, %s, error %d\n",
244309c5088eSShreyas Bhatewara 					adapter->tx_queue[i].name, err);
244409c5088eSShreyas Bhatewara 				return err;
244509c5088eSShreyas Bhatewara 			}
244609c5088eSShreyas Bhatewara 
244709c5088eSShreyas Bhatewara 			/* Handle the case where only 1 MSI-X vector was
244809c5088eSShreyas Bhatewara 			 * allocated for all tx queues */
244909c5088eSShreyas Bhatewara 			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
245009c5088eSShreyas Bhatewara 				for (; i < adapter->num_tx_queues; i++)
245109c5088eSShreyas Bhatewara 					adapter->tx_queue[i].comp_ring.intr_idx
245209c5088eSShreyas Bhatewara 								= vector;
245309c5088eSShreyas Bhatewara 				vector++;
245409c5088eSShreyas Bhatewara 				break;
245509c5088eSShreyas Bhatewara 			} else {
245609c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx
245709c5088eSShreyas Bhatewara 								= vector++;
245809c5088eSShreyas Bhatewara 			}
245909c5088eSShreyas Bhatewara 		}
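		/* with buddy sharing, rx queues reuse the tx vectors below */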
246009c5088eSShreyas Bhatewara 		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
246109c5088eSShreyas Bhatewara 			vector = 0;
246209c5088eSShreyas Bhatewara 
246309c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
246409c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
246509c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
246609c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
246709c5088eSShreyas Bhatewara 			else
246809c5088eSShreyas Bhatewara 				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
246909c5088eSShreyas Bhatewara 					adapter->netdev->name, vector);
247009c5088eSShreyas Bhatewara 			err = request_irq(intr->msix_entries[vector].vector,
247109c5088eSShreyas Bhatewara 					  vmxnet3_msix_rx, 0,
247209c5088eSShreyas Bhatewara 					  adapter->rx_queue[i].name,
247309c5088eSShreyas Bhatewara 					  &(adapter->rx_queue[i]));
247409c5088eSShreyas Bhatewara 			if (err) {
2475204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
2476204a6e65SStephen Hemminger 					   "Failed to request irq for MSIX, %s, error %d\n",
247809c5088eSShreyas Bhatewara 					   adapter->rx_queue[i].name, err);
247909c5088eSShreyas Bhatewara 				return err;
248009c5088eSShreyas Bhatewara 			}
248109c5088eSShreyas Bhatewara 
248209c5088eSShreyas Bhatewara 			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
248309c5088eSShreyas Bhatewara 		}
248409c5088eSShreyas Bhatewara 
248509c5088eSShreyas Bhatewara 		sprintf(intr->event_msi_vector_name, "%s-event-%d",
248609c5088eSShreyas Bhatewara 			adapter->netdev->name, vector);
248709c5088eSShreyas Bhatewara 		err = request_irq(intr->msix_entries[vector].vector,
248809c5088eSShreyas Bhatewara 				  vmxnet3_msix_event, 0,
248909c5088eSShreyas Bhatewara 				  intr->event_msi_vector_name, adapter->netdev);
249009c5088eSShreyas Bhatewara 		intr->event_intr_idx = vector;
249109c5088eSShreyas Bhatewara 
249209c5088eSShreyas Bhatewara 	} else if (intr->type == VMXNET3_IT_MSI) {
249309c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
2494d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2495d1a890faSShreyas Bhatewara 				  adapter->netdev->name, adapter->netdev);
249609c5088eSShreyas Bhatewara 	} else {
2497115924b6SShreyas Bhatewara #endif
249809c5088eSShreyas Bhatewara 		adapter->num_rx_queues = 1;
2499d1a890faSShreyas Bhatewara 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2500d1a890faSShreyas Bhatewara 				  IRQF_SHARED, adapter->netdev->name,
2501d1a890faSShreyas Bhatewara 				  adapter->netdev);
250209c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
250309c5088eSShreyas Bhatewara 	}
250409c5088eSShreyas Bhatewara #endif
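	/* for MSI-X, vector now indexes the event intr; for INTx/MSI it is
	 * still 0, giving a single interrupt
	 */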
250509c5088eSShreyas Bhatewara 	intr->num_intrs = vector + 1;
250609c5088eSShreyas Bhatewara 	if (err) {
2507204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
2508204a6e65SStephen Hemminger 			   "Failed to request irq (intr type:%d), error %d\n",
2509204a6e65SStephen Hemminger 			   intr->type, err);
251009c5088eSShreyas Bhatewara 	} else {
251109c5088eSShreyas Bhatewara 		/* Number of rx queues will not change after this */
251209c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
251309c5088eSShreyas Bhatewara 			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
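			/* ring 0, ring 1 and the rx data ring each take a
			 * consecutive block of queue ids
			 */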
251409c5088eSShreyas Bhatewara 			rq->qid = i;
251509c5088eSShreyas Bhatewara 			rq->qid2 = i + adapter->num_rx_queues;
251650a5ce3eSShrikrishna Khare 			rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2517d1a890faSShreyas Bhatewara 		}
2518d1a890faSShreyas Bhatewara 
2519d1a890faSShreyas Bhatewara 		/* init our intr settings */
252009c5088eSShreyas Bhatewara 		for (i = 0; i < intr->num_intrs; i++)
252109c5088eSShreyas Bhatewara 			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
252209c5088eSShreyas Bhatewara 		if (adapter->intr.type != VMXNET3_IT_MSIX) {
2523d1a890faSShreyas Bhatewara 			adapter->intr.event_intr_idx = 0;
252409c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++)
252509c5088eSShreyas Bhatewara 				adapter->tx_queue[i].comp_ring.intr_idx = 0;
252609c5088eSShreyas Bhatewara 			adapter->rx_queue[0].comp_ring.intr_idx = 0;
252709c5088eSShreyas Bhatewara 		}
2528d1a890faSShreyas Bhatewara 
2529204a6e65SStephen Hemminger 		netdev_info(adapter->netdev,
2530204a6e65SStephen Hemminger 			    "intr type %u, mode %u, %u vectors allocated\n",
2531204a6e65SStephen Hemminger 			    intr->type, intr->mask_mode, intr->num_intrs);
2532d1a890faSShreyas Bhatewara 	}
2533d1a890faSShreyas Bhatewara 
2534d1a890faSShreyas Bhatewara 	return err;
2535d1a890faSShreyas Bhatewara }
2536d1a890faSShreyas Bhatewara 
2537d1a890faSShreyas Bhatewara 
2538d1a890faSShreyas Bhatewara static void
2539d1a890faSShreyas Bhatewara vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2540d1a890faSShreyas Bhatewara {
254109c5088eSShreyas Bhatewara 	struct vmxnet3_intr *intr = &adapter->intr;
254209c5088eSShreyas Bhatewara 	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2543d1a890faSShreyas Bhatewara 
254409c5088eSShreyas Bhatewara 	switch (intr->type) {
25458f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
2546d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSIX:
2547d1a890faSShreyas Bhatewara 	{
254809c5088eSShreyas Bhatewara 		int i, vector = 0;
2549d1a890faSShreyas Bhatewara 
255009c5088eSShreyas Bhatewara 		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
255109c5088eSShreyas Bhatewara 			for (i = 0; i < adapter->num_tx_queues; i++) {
255209c5088eSShreyas Bhatewara 				free_irq(intr->msix_entries[vector++].vector,
255309c5088eSShreyas Bhatewara 					 &(adapter->tx_queue[i]));
255409c5088eSShreyas Bhatewara 				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
255509c5088eSShreyas Bhatewara 					break;
255609c5088eSShreyas Bhatewara 			}
255709c5088eSShreyas Bhatewara 		}
255809c5088eSShreyas Bhatewara 
255909c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
256009c5088eSShreyas Bhatewara 			free_irq(intr->msix_entries[vector++].vector,
256109c5088eSShreyas Bhatewara 				 &(adapter->rx_queue[i]));
256209c5088eSShreyas Bhatewara 		}
256309c5088eSShreyas Bhatewara 
256409c5088eSShreyas Bhatewara 		free_irq(intr->msix_entries[vector].vector,
2565d1a890faSShreyas Bhatewara 			 adapter->netdev);
256609c5088eSShreyas Bhatewara 		BUG_ON(vector >= intr->num_intrs);
2567d1a890faSShreyas Bhatewara 		break;
2568d1a890faSShreyas Bhatewara 	}
25698f7e524cSRandy Dunlap #endif
2570d1a890faSShreyas Bhatewara 	case VMXNET3_IT_MSI:
2571d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
2572d1a890faSShreyas Bhatewara 		break;
2573d1a890faSShreyas Bhatewara 	case VMXNET3_IT_INTX:
2574d1a890faSShreyas Bhatewara 		free_irq(adapter->pdev->irq, adapter->netdev);
2575d1a890faSShreyas Bhatewara 		break;
2576d1a890faSShreyas Bhatewara 	default:
2577c068e777SSasha Levin 		BUG();
2578d1a890faSShreyas Bhatewara 	}
2579d1a890faSShreyas Bhatewara }
2580d1a890faSShreyas Bhatewara 
2581d1a890faSShreyas Bhatewara 
2582d1a890faSShreyas Bhatewara static void
2583d1a890faSShreyas Bhatewara vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2584d1a890faSShreyas Bhatewara {
2585d1a890faSShreyas Bhatewara 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
258672e85c45SJesse Gross 	u16 vid;
2587d1a890faSShreyas Bhatewara 
258872e85c45SJesse Gross 	/* allow untagged pkts */
2589d1a890faSShreyas Bhatewara 	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
259072e85c45SJesse Gross 
259172e85c45SJesse Gross 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
259272e85c45SJesse Gross 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2593d1a890faSShreyas Bhatewara }
2594d1a890faSShreyas Bhatewara 
2595d1a890faSShreyas Bhatewara 
25968e586137SJiri Pirko static int
259780d5c368SPatrick McHardy vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2598d1a890faSShreyas Bhatewara {
2599d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2600f6957f88SJesse Gross 
2601f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
2602d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
260383d0feffSShreyas Bhatewara 		unsigned long flags;
2604d1a890faSShreyas Bhatewara 
2605d1a890faSShreyas Bhatewara 		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
260683d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
2607d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2608d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
260983d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2610f6957f88SJesse Gross 	}
261172e85c45SJesse Gross 
261272e85c45SJesse Gross 	set_bit(vid, adapter->active_vlans);
26138e586137SJiri Pirko 
26148e586137SJiri Pirko 	return 0;
2615d1a890faSShreyas Bhatewara }
2616d1a890faSShreyas Bhatewara 
2617d1a890faSShreyas Bhatewara 
26188e586137SJiri Pirko static int
261980d5c368SPatrick McHardy vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2620d1a890faSShreyas Bhatewara {
2621d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2622f6957f88SJesse Gross 
2623f6957f88SJesse Gross 	if (!(netdev->flags & IFF_PROMISC)) {
2624d1a890faSShreyas Bhatewara 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
262583d0feffSShreyas Bhatewara 		unsigned long flags;
2626d1a890faSShreyas Bhatewara 
2627d1a890faSShreyas Bhatewara 		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
262883d0feffSShreyas Bhatewara 		spin_lock_irqsave(&adapter->cmd_lock, flags);
2629d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2630d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
263183d0feffSShreyas Bhatewara 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2632f6957f88SJesse Gross 	}
263372e85c45SJesse Gross 
263472e85c45SJesse Gross 	clear_bit(vid, adapter->active_vlans);
26358e586137SJiri Pirko 
26368e586137SJiri Pirko 	return 0;
2637d1a890faSShreyas Bhatewara }
2638d1a890faSShreyas Bhatewara 
2639d1a890faSShreyas Bhatewara 
2640d1a890faSShreyas Bhatewara static u8 *
2641d1a890faSShreyas Bhatewara vmxnet3_copy_mc(struct net_device *netdev)
2642d1a890faSShreyas Bhatewara {
2643d1a890faSShreyas Bhatewara 	u8 *buf = NULL;
26444cd24eafSJiri Pirko 	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2645d1a890faSShreyas Bhatewara 
2646d1a890faSShreyas Bhatewara 	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2647d1a890faSShreyas Bhatewara 	if (sz <= 0xffff) {
2648d1a890faSShreyas Bhatewara 		/* We may be called with BH disabled */
2649d1a890faSShreyas Bhatewara 		buf = kmalloc(sz, GFP_ATOMIC);
2650d1a890faSShreyas Bhatewara 		if (buf) {
265122bedad3SJiri Pirko 			struct netdev_hw_addr *ha;
2652567ec874SJiri Pirko 			int i = 0;
2653d1a890faSShreyas Bhatewara 
265422bedad3SJiri Pirko 			netdev_for_each_mc_addr(ha, netdev)
265522bedad3SJiri Pirko 				memcpy(buf + i++ * ETH_ALEN, ha->addr,
2656d1a890faSShreyas Bhatewara 				       ETH_ALEN);
2657d1a890faSShreyas Bhatewara 		}
2658d1a890faSShreyas Bhatewara 	}
2659d1a890faSShreyas Bhatewara 	return buf;
2660d1a890faSShreyas Bhatewara }
2661d1a890faSShreyas Bhatewara 
2662d1a890faSShreyas Bhatewara 
2663d1a890faSShreyas Bhatewara static void
2664d1a890faSShreyas Bhatewara vmxnet3_set_mc(struct net_device *netdev)
2665d1a890faSShreyas Bhatewara {
2666d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
266783d0feffSShreyas Bhatewara 	unsigned long flags;
2668d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxFilterConf *rxConf =
2669d1a890faSShreyas Bhatewara 					&adapter->shared->devRead.rxFilterConf;
2670d1a890faSShreyas Bhatewara 	u8 *new_table = NULL;
2671b0eb57cbSAndy King 	dma_addr_t new_table_pa = 0;
2672fb5c6cfaSAlexey Khoroshilov 	bool new_table_pa_valid = false;
2673d1a890faSShreyas Bhatewara 	u32 new_mode = VMXNET3_RXM_UCAST;
2674d1a890faSShreyas Bhatewara 
267572e85c45SJesse Gross 	if (netdev->flags & IFF_PROMISC) {
267672e85c45SJesse Gross 		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
267772e85c45SJesse Gross 		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
267872e85c45SJesse Gross 
2679d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_PROMISC;
268072e85c45SJesse Gross 	} else {
268172e85c45SJesse Gross 		vmxnet3_restore_vlan(adapter);
268272e85c45SJesse Gross 	}
2683d1a890faSShreyas Bhatewara 
2684d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_BROADCAST)
2685d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_BCAST;
2686d1a890faSShreyas Bhatewara 
2687d1a890faSShreyas Bhatewara 	if (netdev->flags & IFF_ALLMULTI)
2688d1a890faSShreyas Bhatewara 		new_mode |= VMXNET3_RXM_ALL_MULTI;
2689d1a890faSShreyas Bhatewara 	else
26904cd24eafSJiri Pirko 		if (!netdev_mc_empty(netdev)) {
2691d1a890faSShreyas Bhatewara 			new_table = vmxnet3_copy_mc(netdev);
2692d1a890faSShreyas Bhatewara 			if (new_table) {
2693d37d5ec8SShrikrishna Khare 				size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2694d37d5ec8SShrikrishna Khare 
2695d37d5ec8SShrikrishna Khare 				rxConf->mfTableLen = cpu_to_le16(sz);
2696b0eb57cbSAndy King 				new_table_pa = dma_map_single(
2697b0eb57cbSAndy King 							&adapter->pdev->dev,
2698b0eb57cbSAndy King 							new_table,
2699d37d5ec8SShrikrishna Khare 							sz,
2700bf7bec46SChristophe JAILLET 							DMA_TO_DEVICE);
27015738a09dSAlexey Khoroshilov 				if (!dma_mapping_error(&adapter->pdev->dev,
27025738a09dSAlexey Khoroshilov 						       new_table_pa)) {
27034ad9a64fSAndy King 					new_mode |= VMXNET3_RXM_MCAST;
2704fb5c6cfaSAlexey Khoroshilov 					new_table_pa_valid = true;
2705fb5c6cfaSAlexey Khoroshilov 					rxConf->mfTablePA = cpu_to_le64(
2706fb5c6cfaSAlexey Khoroshilov 								new_table_pa);
2707fb5c6cfaSAlexey Khoroshilov 				}
2708fb5c6cfaSAlexey Khoroshilov 			}
2709fb5c6cfaSAlexey Khoroshilov 			if (!new_table_pa_valid) {
27104ad9a64fSAndy King 				netdev_info(netdev,
27114ad9a64fSAndy King 					    "failed to copy mcast list, setting ALL_MULTI\n");
2712d1a890faSShreyas Bhatewara 				new_mode |= VMXNET3_RXM_ALL_MULTI;
2713d1a890faSShreyas Bhatewara 			}
2714d1a890faSShreyas Bhatewara 		}
2715d1a890faSShreyas Bhatewara 
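	/* multicast filter table unused: hand the device an empty table */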
2716d1a890faSShreyas Bhatewara 	if (!(new_mode & VMXNET3_RXM_MCAST)) {
2717d1a890faSShreyas Bhatewara 		rxConf->mfTableLen = 0;
2718d1a890faSShreyas Bhatewara 		rxConf->mfTablePA = 0;
2719d1a890faSShreyas Bhatewara 	}
2720d1a890faSShreyas Bhatewara 
272183d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2722d1a890faSShreyas Bhatewara 	if (new_mode != rxConf->rxMode) {
2723115924b6SShreyas Bhatewara 		rxConf->rxMode = cpu_to_le32(new_mode);
2724d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2725d1a890faSShreyas Bhatewara 				       VMXNET3_CMD_UPDATE_RX_MODE);
272672e85c45SJesse Gross 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
272772e85c45SJesse Gross 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2728d1a890faSShreyas Bhatewara 	}
2729d1a890faSShreyas Bhatewara 
2730d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2731d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
273283d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2733d1a890faSShreyas Bhatewara 
2734fb5c6cfaSAlexey Khoroshilov 	if (new_table_pa_valid)
2735b0eb57cbSAndy King 		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2736bf7bec46SChristophe JAILLET 				 rxConf->mfTableLen, DMA_TO_DEVICE);
2737d1a890faSShreyas Bhatewara 	kfree(new_table);
2738d1a890faSShreyas Bhatewara }
2739d1a890faSShreyas Bhatewara 
274009c5088eSShreyas Bhatewara void
274109c5088eSShreyas Bhatewara vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
274209c5088eSShreyas Bhatewara {
274309c5088eSShreyas Bhatewara 	int i;
274409c5088eSShreyas Bhatewara 
274509c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
274609c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
274709c5088eSShreyas Bhatewara }
274809c5088eSShreyas Bhatewara 
2749d1a890faSShreyas Bhatewara 
2750d1a890faSShreyas Bhatewara /*
2751d1a890faSShreyas Bhatewara  *   Set up driver_shared based on settings in adapter.
2752d1a890faSShreyas Bhatewara  */
2753d1a890faSShreyas Bhatewara 
2754d1a890faSShreyas Bhatewara static void
2755d1a890faSShreyas Bhatewara vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2756d1a890faSShreyas Bhatewara {
2757d1a890faSShreyas Bhatewara 	struct Vmxnet3_DriverShared *shared = adapter->shared;
2758d1a890faSShreyas Bhatewara 	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
275939f9895aSRonak Doshi 	struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2760d1a890faSShreyas Bhatewara 	struct Vmxnet3_TxQueueConf *tqc;
2761d1a890faSShreyas Bhatewara 	struct Vmxnet3_RxQueueConf *rqc;
2762d1a890faSShreyas Bhatewara 	int i;
2763d1a890faSShreyas Bhatewara 
2764d1a890faSShreyas Bhatewara 	memset(shared, 0, sizeof(*shared));
2765d1a890faSShreyas Bhatewara 
2766d1a890faSShreyas Bhatewara 	/* driver settings */
2767115924b6SShreyas Bhatewara 	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2768115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.version = cpu_to_le32(
2769115924b6SShreyas Bhatewara 						VMXNET3_DRIVER_VERSION_NUM);
2770d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2771d1a890faSShreyas Bhatewara 				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2772d1a890faSShreyas Bhatewara 	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
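	/* the gos bitfields share one u32; byte-swap them as a single word */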
2773115924b6SShreyas Bhatewara 	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2774115924b6SShreyas Bhatewara 				*((u32 *)&devRead->misc.driverInfo.gos));
2775115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2776115924b6SShreyas Bhatewara 	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2777d1a890faSShreyas Bhatewara 
2778b0eb57cbSAndy King 	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2779115924b6SShreyas Bhatewara 	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2780d1a890faSShreyas Bhatewara 
2781d1a890faSShreyas Bhatewara 	/* set up feature flags */
2782a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_RXCSUM)
27833843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2784d1a890faSShreyas Bhatewara 
2785a0d2730cSMichał Mirosław 	if (adapter->netdev->features & NETIF_F_LRO) {
27863843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_LRO;
2787115924b6SShreyas Bhatewara 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2788d1a890faSShreyas Bhatewara 	}
2789f646968fSPatrick McHardy 	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
27903843e515SHarvey Harrison 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2791d1a890faSShreyas Bhatewara 
2792dacce2beSRonak Doshi 	if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2793dacce2beSRonak Doshi 					 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2794dacce2beSRonak Doshi 		devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2795dacce2beSRonak Doshi 
2796115924b6SShreyas Bhatewara 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
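	/* all tx and rx queue descriptors live in one contiguous region */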
2797115924b6SShreyas Bhatewara 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2798115924b6SShreyas Bhatewara 	devRead->misc.queueDescLen = cpu_to_le32(
279909c5088eSShreyas Bhatewara 		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
280009c5088eSShreyas Bhatewara 		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2801d1a890faSShreyas Bhatewara 
2802d1a890faSShreyas Bhatewara 	/* tx queue settings */
280309c5088eSShreyas Bhatewara 	devRead->misc.numTxQueues =  adapter->num_tx_queues;
280409c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
280509c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
280609c5088eSShreyas Bhatewara 		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
280709c5088eSShreyas Bhatewara 		tqc = &adapter->tqd_start[i].conf;
280809c5088eSShreyas Bhatewara 		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
280909c5088eSShreyas Bhatewara 		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
281009c5088eSShreyas Bhatewara 		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2811de1da8bcSRonak Doshi 		tqc->ddPA           = cpu_to_le64(~0ULL);
281209c5088eSShreyas Bhatewara 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
281309c5088eSShreyas Bhatewara 		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
28143c8b3efcSShrikrishna Khare 		tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
281509c5088eSShreyas Bhatewara 		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
2816de1da8bcSRonak Doshi 		tqc->ddLen          = cpu_to_le32(0);
281709c5088eSShreyas Bhatewara 		tqc->intrIdx        = tq->comp_ring.intr_idx;
281809c5088eSShreyas Bhatewara 	}
2819d1a890faSShreyas Bhatewara 
2820d1a890faSShreyas Bhatewara 	/* rx queue settings */
282109c5088eSShreyas Bhatewara 	devRead->misc.numRxQueues = adapter->num_rx_queues;
282209c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
282309c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
282409c5088eSShreyas Bhatewara 		rqc = &adapter->rqd_start[i].conf;
282509c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
282609c5088eSShreyas Bhatewara 		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
282709c5088eSShreyas Bhatewara 		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
2828de1da8bcSRonak Doshi 		rqc->ddPA            = cpu_to_le64(~0ULL);
282909c5088eSShreyas Bhatewara 		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
283009c5088eSShreyas Bhatewara 		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
283109c5088eSShreyas Bhatewara 		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
2832de1da8bcSRonak Doshi 		rqc->ddLen           = cpu_to_le32(0);
283309c5088eSShreyas Bhatewara 		rqc->intrIdx         = rq->comp_ring.intr_idx;
283450a5ce3eSShrikrishna Khare 		if (VMXNET3_VERSION_GE_3(adapter)) {
283550a5ce3eSShrikrishna Khare 			rqc->rxDataRingBasePA =
283650a5ce3eSShrikrishna Khare 				cpu_to_le64(rq->data_ring.basePA);
283750a5ce3eSShrikrishna Khare 			rqc->rxDataRingDescSize =
283850a5ce3eSShrikrishna Khare 				cpu_to_le16(rq->data_ring.desc_size);
283950a5ce3eSShrikrishna Khare 		}
284009c5088eSShreyas Bhatewara 	}
284109c5088eSShreyas Bhatewara 
284209c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
284309c5088eSShreyas Bhatewara 	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
284409c5088eSShreyas Bhatewara 
284509c5088eSShreyas Bhatewara 	if (adapter->rss) {
284609c5088eSShreyas Bhatewara 		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
284766d35910SStephen Hemminger 
284809c5088eSShreyas Bhatewara 		devRead->misc.uptFeatures |= UPT1_F_RSS;
284909c5088eSShreyas Bhatewara 		devRead->misc.numRxQueues = adapter->num_rx_queues;
285009c5088eSShreyas Bhatewara 		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
285109c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV4 |
285209c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
285309c5088eSShreyas Bhatewara 				    UPT1_RSS_HASH_TYPE_IPV6;
285409c5088eSShreyas Bhatewara 		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
285509c5088eSShreyas Bhatewara 		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
285609c5088eSShreyas Bhatewara 		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
28576bf79cddSEric Dumazet 		netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
285866d35910SStephen Hemminger 
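		/* spread flows round-robin across the rx queues by default */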
285909c5088eSShreyas Bhatewara 		for (i = 0; i < rssConf->indTableSize; i++)
2860278bc429SBen Hutchings 			rssConf->indTable[i] = ethtool_rxfh_indir_default(
2861278bc429SBen Hutchings 				i, adapter->num_rx_queues);
286209c5088eSShreyas Bhatewara 
286309c5088eSShreyas Bhatewara 		devRead->rssConfDesc.confVer = 1;
2864b0eb57cbSAndy King 		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2865b0eb57cbSAndy King 		devRead->rssConfDesc.confPA =
2866b0eb57cbSAndy King 			cpu_to_le64(adapter->rss_conf_pa);
286709c5088eSShreyas Bhatewara 	}
286809c5088eSShreyas Bhatewara 
286909c5088eSShreyas Bhatewara #endif /* VMXNET3_RSS */
2870d1a890faSShreyas Bhatewara 
2871d1a890faSShreyas Bhatewara 	/* intr settings */
287239f9895aSRonak Doshi 	if (!VMXNET3_VERSION_GE_6(adapter) ||
287339f9895aSRonak Doshi 	    !adapter->queuesExtEnabled) {
2874d1a890faSShreyas Bhatewara 		devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2875d1a890faSShreyas Bhatewara 					     VMXNET3_IMM_AUTO;
2876d1a890faSShreyas Bhatewara 		devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2877d1a890faSShreyas Bhatewara 		for (i = 0; i < adapter->intr.num_intrs; i++)
2878d1a890faSShreyas Bhatewara 			devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2879d1a890faSShreyas Bhatewara 
2880d1a890faSShreyas Bhatewara 		devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
28816929fe8aSRonghua Zang 		devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
288239f9895aSRonak Doshi 	} else {
288339f9895aSRonak Doshi 		devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
288439f9895aSRonak Doshi 						   VMXNET3_IMM_AUTO;
288539f9895aSRonak Doshi 		devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
288639f9895aSRonak Doshi 		for (i = 0; i < adapter->intr.num_intrs; i++)
288739f9895aSRonak Doshi 			devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
288839f9895aSRonak Doshi 
288939f9895aSRonak Doshi 		devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
289039f9895aSRonak Doshi 		devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
289139f9895aSRonak Doshi 	}
2892d1a890faSShreyas Bhatewara 
2893d1a890faSShreyas Bhatewara 	/* rx filter settings */
2894d1a890faSShreyas Bhatewara 	devRead->rxFilterConf.rxMode = 0;
2895d1a890faSShreyas Bhatewara 	vmxnet3_restore_vlan(adapter);
2896f9f25026SShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2897f9f25026SShreyas Bhatewara 
2898d1a890faSShreyas Bhatewara 	/* the rest are already zeroed */
2899d1a890faSShreyas Bhatewara }
2900d1a890faSShreyas Bhatewara 
29014edef40eSShrikrishna Khare static void
2902c7112ebdSRonak Doshi vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
2903c7112ebdSRonak Doshi {
2904c7112ebdSRonak Doshi 	struct Vmxnet3_DriverShared *shared = adapter->shared;
2905c7112ebdSRonak Doshi 	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2906c7112ebdSRonak Doshi 	unsigned long flags;
2907c7112ebdSRonak Doshi 
2908c7112ebdSRonak Doshi 	if (!VMXNET3_VERSION_GE_7(adapter))
2909c7112ebdSRonak Doshi 		return;
2910c7112ebdSRonak Doshi 
2911c7112ebdSRonak Doshi 	cmdInfo->ringBufSize = adapter->ringBufSize;
2912c7112ebdSRonak Doshi 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2913c7112ebdSRonak Doshi 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2914c7112ebdSRonak Doshi 			       VMXNET3_CMD_SET_RING_BUFFER_SIZE);
2915c7112ebdSRonak Doshi 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2916c7112ebdSRonak Doshi }
2917c7112ebdSRonak Doshi 
2918c7112ebdSRonak Doshi static void
29194edef40eSShrikrishna Khare vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
29204edef40eSShrikrishna Khare {
29214edef40eSShrikrishna Khare 	struct Vmxnet3_DriverShared *shared = adapter->shared;
29224edef40eSShrikrishna Khare 	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
29234edef40eSShrikrishna Khare 	unsigned long flags;
29244edef40eSShrikrishna Khare 
29254edef40eSShrikrishna Khare 	if (!VMXNET3_VERSION_GE_3(adapter))
29264edef40eSShrikrishna Khare 		return;
29274edef40eSShrikrishna Khare 
29284edef40eSShrikrishna Khare 	spin_lock_irqsave(&adapter->cmd_lock, flags);
29294edef40eSShrikrishna Khare 	cmdInfo->varConf.confVer = 1;
29304edef40eSShrikrishna Khare 	cmdInfo->varConf.confLen =
29314edef40eSShrikrishna Khare 		cpu_to_le32(sizeof(*adapter->coal_conf));
29324edef40eSShrikrishna Khare 	cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);
29334edef40eSShrikrishna Khare 
29344edef40eSShrikrishna Khare 	if (adapter->default_coal_mode) {
29354edef40eSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
29364edef40eSShrikrishna Khare 				       VMXNET3_CMD_GET_COALESCE);
29374edef40eSShrikrishna Khare 	} else {
29384edef40eSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
29394edef40eSShrikrishna Khare 				       VMXNET3_CMD_SET_COALESCE);
29404edef40eSShrikrishna Khare 	}
29414edef40eSShrikrishna Khare 
29424edef40eSShrikrishna Khare 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
29434edef40eSShrikrishna Khare }
2944d1a890faSShreyas Bhatewara 
2945d3a8a9e5SRonak Doshi static void
2946d3a8a9e5SRonak Doshi vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2947d3a8a9e5SRonak Doshi {
2948d3a8a9e5SRonak Doshi 	struct Vmxnet3_DriverShared *shared = adapter->shared;
2949d3a8a9e5SRonak Doshi 	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2950d3a8a9e5SRonak Doshi 	unsigned long flags;
2951d3a8a9e5SRonak Doshi 
2952d3a8a9e5SRonak Doshi 	if (!VMXNET3_VERSION_GE_4(adapter))
2953d3a8a9e5SRonak Doshi 		return;
2954d3a8a9e5SRonak Doshi 
2955d3a8a9e5SRonak Doshi 	spin_lock_irqsave(&adapter->cmd_lock, flags);
2956d3a8a9e5SRonak Doshi 
2957d3a8a9e5SRonak Doshi 	if (adapter->default_rss_fields) {
2958d3a8a9e5SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2959d3a8a9e5SRonak Doshi 				       VMXNET3_CMD_GET_RSS_FIELDS);
2960d3a8a9e5SRonak Doshi 		adapter->rss_fields =
2961d3a8a9e5SRonak Doshi 			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2962d3a8a9e5SRonak Doshi 	} else {
29636f91f4baSRonak Doshi 		if (VMXNET3_VERSION_GE_7(adapter)) {
29646f91f4baSRonak Doshi 			if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
29656f91f4baSRonak Doshi 			     adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
29666f91f4baSRonak Doshi 			    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
29676f91f4baSRonak Doshi 						       VMXNET3_CAP_UDP_RSS)) {
29686f91f4baSRonak Doshi 				adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
29696f91f4baSRonak Doshi 			} else {
29706f91f4baSRonak Doshi 				adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
29716f91f4baSRonak Doshi 			}
29726f91f4baSRonak Doshi 
29736f91f4baSRonak Doshi 			if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
29746f91f4baSRonak Doshi 			    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
29756f91f4baSRonak Doshi 						       VMXNET3_CAP_ESP_RSS_IPV4)) {
29766f91f4baSRonak Doshi 				adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
29776f91f4baSRonak Doshi 			} else {
29786f91f4baSRonak Doshi 				adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
29796f91f4baSRonak Doshi 			}
29806f91f4baSRonak Doshi 
29816f91f4baSRonak Doshi 			if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
29826f91f4baSRonak Doshi 			    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
29836f91f4baSRonak Doshi 						       VMXNET3_CAP_ESP_RSS_IPV6)) {
29846f91f4baSRonak Doshi 				adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
29856f91f4baSRonak Doshi 			} else {
29866f91f4baSRonak Doshi 				adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
29876f91f4baSRonak Doshi 			}
29886f91f4baSRonak Doshi 
29896f91f4baSRonak Doshi 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
29906f91f4baSRonak Doshi 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
29916f91f4baSRonak Doshi 			adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
29926f91f4baSRonak Doshi 		}
2993d3a8a9e5SRonak Doshi 		cmdInfo->setRssFields = adapter->rss_fields;
2994d3a8a9e5SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2995d3a8a9e5SRonak Doshi 				       VMXNET3_CMD_SET_RSS_FIELDS);
2996d3a8a9e5SRonak Doshi 		/* Not all requested RSS may get applied, so get and
2997d3a8a9e5SRonak Doshi 		 * cache what was actually applied.
2998d3a8a9e5SRonak Doshi 		 */
2999d3a8a9e5SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3000d3a8a9e5SRonak Doshi 				       VMXNET3_CMD_GET_RSS_FIELDS);
3001d3a8a9e5SRonak Doshi 		adapter->rss_fields =
3002d3a8a9e5SRonak Doshi 			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3003d3a8a9e5SRonak Doshi 	}
3004d3a8a9e5SRonak Doshi 
3005d3a8a9e5SRonak Doshi 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3006d3a8a9e5SRonak Doshi }
3007d3a8a9e5SRonak Doshi 
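/*
 * Bring the device to the active state: initialize the tx/rx queues,
 * request interrupts, hand the shared memory area to the device and
 * issue VMXNET3_CMD_ACTIVATE_DEV. On success, prime the rx producer
 * registers, apply the rx filter and start the tx queues.
 */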
3008d1a890faSShreyas Bhatewara int
3009d1a890faSShreyas Bhatewara vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
3010d1a890faSShreyas Bhatewara {
301109c5088eSShreyas Bhatewara 	int err, i;
3012d1a890faSShreyas Bhatewara 	u32 ret;
301383d0feffSShreyas Bhatewara 	unsigned long flags;
3014d1a890faSShreyas Bhatewara 
3015fdcd79b9SStephen Hemminger 	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
301609c5088eSShreyas Bhatewara 		" ring sizes %u %u %u\n", adapter->netdev->name,
301709c5088eSShreyas Bhatewara 		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
301809c5088eSShreyas Bhatewara 		adapter->tx_queue[0].tx_ring.size,
301909c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[0].size,
302009c5088eSShreyas Bhatewara 		adapter->rx_queue[0].rx_ring[1].size);
3021d1a890faSShreyas Bhatewara 
302209c5088eSShreyas Bhatewara 	vmxnet3_tq_init_all(adapter);
302309c5088eSShreyas Bhatewara 	err = vmxnet3_rq_init_all(adapter);
3024d1a890faSShreyas Bhatewara 	if (err) {
3025204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
3026204a6e65SStephen Hemminger 			   "Failed to init rx queue: error %d\n", err);
3027d1a890faSShreyas Bhatewara 		goto rq_err;
3028d1a890faSShreyas Bhatewara 	}
3029d1a890faSShreyas Bhatewara 
3030d1a890faSShreyas Bhatewara 	err = vmxnet3_request_irqs(adapter);
3031d1a890faSShreyas Bhatewara 	if (err) {
3032204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
3033204a6e65SStephen Hemminger 			   "Failed to setup irq: error %d\n", err);
3034d1a890faSShreyas Bhatewara 		goto irq_err;
3035d1a890faSShreyas Bhatewara 	}
3036d1a890faSShreyas Bhatewara 
3037d1a890faSShreyas Bhatewara 	vmxnet3_setup_driver_shared(adapter);
3038d1a890faSShreyas Bhatewara 
3039115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
3040115924b6SShreyas Bhatewara 			       adapter->shared_pa));
3041115924b6SShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
3042115924b6SShreyas Bhatewara 			       adapter->shared_pa));
304383d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3044d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3045d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_ACTIVATE_DEV);
3046d1a890faSShreyas Bhatewara 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
304783d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3048d1a890faSShreyas Bhatewara 
3049d1a890faSShreyas Bhatewara 	if (ret != 0) {
3050204a6e65SStephen Hemminger 		netdev_err(adapter->netdev,
3051204a6e65SStephen Hemminger 			   "Failed to activate dev: error %u\n", ret);
3052d1a890faSShreyas Bhatewara 		err = -EINVAL;
3053d1a890faSShreyas Bhatewara 		goto activate_err;
3054d1a890faSShreyas Bhatewara 	}
305509c5088eSShreyas Bhatewara 
3056c7112ebdSRonak Doshi 	vmxnet3_init_bufsize(adapter);
30574edef40eSShrikrishna Khare 	vmxnet3_init_coalesce(adapter);
3058d3a8a9e5SRonak Doshi 	vmxnet3_init_rssfields(adapter);
30594edef40eSShrikrishna Khare 
306009c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
306109c5088eSShreyas Bhatewara 		VMXNET3_WRITE_BAR0_REG(adapter,
3062543fb674SRonak Doshi 				adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
306309c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[0].next2fill);
3064543fb674SRonak Doshi 		VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
306509c5088eSShreyas Bhatewara 				(i * VMXNET3_REG_ALIGN)),
306609c5088eSShreyas Bhatewara 				adapter->rx_queue[i].rx_ring[1].next2fill);
306709c5088eSShreyas Bhatewara 	}
3068d1a890faSShreyas Bhatewara 
3069d1a890faSShreyas Bhatewara 	/* Apply the rx filter settings last. */
3070d1a890faSShreyas Bhatewara 	vmxnet3_set_mc(adapter->netdev);
3071d1a890faSShreyas Bhatewara 
3072d1a890faSShreyas Bhatewara 	/*
3073d1a890faSShreyas Bhatewara 	 * Check link state when first activating device. It will start the
3074d1a890faSShreyas Bhatewara 	 * tx queue if the link is up.
3075d1a890faSShreyas Bhatewara 	 */
30764a1745fcSShreyas Bhatewara 	vmxnet3_check_link(adapter, true);
307739f9895aSRonak Doshi 	netif_tx_wake_all_queues(adapter->netdev);
307809c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
307909c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
3080d1a890faSShreyas Bhatewara 	vmxnet3_enable_all_intrs(adapter);
3081d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3082d1a890faSShreyas Bhatewara 	return 0;
3083d1a890faSShreyas Bhatewara 
3084d1a890faSShreyas Bhatewara activate_err:
3085d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
3086d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
3087d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
3088d1a890faSShreyas Bhatewara irq_err:
3089d1a890faSShreyas Bhatewara rq_err:
3090d1a890faSShreyas Bhatewara 	/* free up buffers we allocated */
309109c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
3092d1a890faSShreyas Bhatewara 	return err;
3093d1a890faSShreyas Bhatewara }
3094d1a890faSShreyas Bhatewara 
3095d1a890faSShreyas Bhatewara 
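/* Ask the device to reset itself, serialized by the command lock. */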
3096d1a890faSShreyas Bhatewara void
3097d1a890faSShreyas Bhatewara vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
3098d1a890faSShreyas Bhatewara {
309983d0feffSShreyas Bhatewara 	unsigned long flags;
310083d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3101d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
310283d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3103d1a890faSShreyas Bhatewara }
3104d1a890faSShreyas Bhatewara 
3105d1a890faSShreyas Bhatewara 
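/*
 * Quiesce the device; idempotent via the QUIESCED state bit. Issues
 * VMXNET3_CMD_QUIESCE_DEV, disables interrupts and NAPI, stops tx,
 * takes the carrier down and releases queue buffers and irqs.
 */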
3106d1a890faSShreyas Bhatewara int
3107d1a890faSShreyas Bhatewara vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
3108d1a890faSShreyas Bhatewara {
310909c5088eSShreyas Bhatewara 	int i;
311083d0feffSShreyas Bhatewara 	unsigned long flags;
3111d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
3112d1a890faSShreyas Bhatewara 		return 0;
3113d1a890faSShreyas Bhatewara 
3114d1a890faSShreyas Bhatewara 
311583d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3116d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3117d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_QUIESCE_DEV);
311883d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3119d1a890faSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
3120d1a890faSShreyas Bhatewara 
312109c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
312209c5088eSShreyas Bhatewara 		napi_disable(&adapter->rx_queue[i].napi);
3123d1a890faSShreyas Bhatewara 	netif_tx_disable(adapter->netdev);
3124d1a890faSShreyas Bhatewara 	adapter->link_speed = 0;
3125d1a890faSShreyas Bhatewara 	netif_carrier_off(adapter->netdev);
3126d1a890faSShreyas Bhatewara 
312709c5088eSShreyas Bhatewara 	vmxnet3_tq_cleanup_all(adapter);
312809c5088eSShreyas Bhatewara 	vmxnet3_rq_cleanup_all(adapter);
3129d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
3130d1a890faSShreyas Bhatewara 	return 0;
3131d1a890faSShreyas Bhatewara }
3132d1a890faSShreyas Bhatewara 
3133d1a890faSShreyas Bhatewara 
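/*
 * Program the MAC address: the first four bytes go into the MACL
 * register, the remaining two into MACH.
 */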
3134d1a890faSShreyas Bhatewara static void
31358bc7823eSJakub Kicinski vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
3136d1a890faSShreyas Bhatewara {
3137d1a890faSShreyas Bhatewara 	u32 tmp;
3138d1a890faSShreyas Bhatewara 
3139d1a890faSShreyas Bhatewara 	tmp = *(u32 *)mac;
3140d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
3141d1a890faSShreyas Bhatewara 
3142d1a890faSShreyas Bhatewara 	tmp = (mac[5] << 8) | mac[4];
3143d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
3144d1a890faSShreyas Bhatewara }
3145d1a890faSShreyas Bhatewara 
3146d1a890faSShreyas Bhatewara 
3147d1a890faSShreyas Bhatewara static int
3148d1a890faSShreyas Bhatewara vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
3149d1a890faSShreyas Bhatewara {
3150d1a890faSShreyas Bhatewara 	struct sockaddr *addr = p;
3151d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3152d1a890faSShreyas Bhatewara 
3153ea52a0b5SJakub Kicinski 	dev_addr_set(netdev, addr->sa_data);
3154d1a890faSShreyas Bhatewara 	vmxnet3_write_mac_addr(adapter, addr->sa_data);
3155d1a890faSShreyas Bhatewara 
3156d1a890faSShreyas Bhatewara 	return 0;
3157d1a890faSShreyas Bhatewara }
3158d1a890faSShreyas Bhatewara 
3159d1a890faSShreyas Bhatewara 
3160d1a890faSShreyas Bhatewara /* ==================== initialization and cleanup routines ============ */
3161d1a890faSShreyas Bhatewara 
3162d1a890faSShreyas Bhatewara static int
316361aeeceaShpreg@vmware.com vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
3164d1a890faSShreyas Bhatewara {
3165d1a890faSShreyas Bhatewara 	int err;
3166d1a890faSShreyas Bhatewara 	unsigned long mmio_start, mmio_len;
3167d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = adapter->pdev;
3168d1a890faSShreyas Bhatewara 
3169d1a890faSShreyas Bhatewara 	err = pci_enable_device(pdev);
3170d1a890faSShreyas Bhatewara 	if (err) {
3171204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
3172d1a890faSShreyas Bhatewara 		return err;
3173d1a890faSShreyas Bhatewara 	}
3174d1a890faSShreyas Bhatewara 
3175d1a890faSShreyas Bhatewara 	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
3176d1a890faSShreyas Bhatewara 					   vmxnet3_driver_name);
3177d1a890faSShreyas Bhatewara 	if (err) {
3178204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
3179204a6e65SStephen Hemminger 			"Failed to request region for adapter: error %d\n", err);
318061aeeceaShpreg@vmware.com 		goto err_enable_device;
3181d1a890faSShreyas Bhatewara 	}
3182d1a890faSShreyas Bhatewara 
3183d1a890faSShreyas Bhatewara 	pci_set_master(pdev);
3184d1a890faSShreyas Bhatewara 
3185d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 0);
3186d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 0);
3187d1a890faSShreyas Bhatewara 	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
3188d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr0) {
3189204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar0\n");
3190d1a890faSShreyas Bhatewara 		err = -EIO;
3191d1a890faSShreyas Bhatewara 		goto err_ioremap;
3192d1a890faSShreyas Bhatewara 	}
3193d1a890faSShreyas Bhatewara 
3194d1a890faSShreyas Bhatewara 	mmio_start = pci_resource_start(pdev, 1);
3195d1a890faSShreyas Bhatewara 	mmio_len = pci_resource_len(pdev, 1);
3196d1a890faSShreyas Bhatewara 	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
3197d1a890faSShreyas Bhatewara 	if (!adapter->hw_addr1) {
3198204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to map bar1\n");
3199d1a890faSShreyas Bhatewara 		err = -EIO;
3200d1a890faSShreyas Bhatewara 		goto err_bar1;
3201d1a890faSShreyas Bhatewara 	}
3202d1a890faSShreyas Bhatewara 	return 0;
3203d1a890faSShreyas Bhatewara 
3204d1a890faSShreyas Bhatewara err_bar1:
3205d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
3206d1a890faSShreyas Bhatewara err_ioremap:
3207d1a890faSShreyas Bhatewara 	pci_release_selected_regions(pdev, (1 << 2) - 1);
320861aeeceaShpreg@vmware.com err_enable_device:
3209d1a890faSShreyas Bhatewara 	pci_disable_device(pdev);
3210d1a890faSShreyas Bhatewara 	return err;
3211d1a890faSShreyas Bhatewara }
3212d1a890faSShreyas Bhatewara 
3213d1a890faSShreyas Bhatewara 
3214d1a890faSShreyas Bhatewara static void
3215d1a890faSShreyas Bhatewara vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
3216d1a890faSShreyas Bhatewara {
3217d1a890faSShreyas Bhatewara 	BUG_ON(!adapter->pdev);
3218d1a890faSShreyas Bhatewara 
3219d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr0);
3220d1a890faSShreyas Bhatewara 	iounmap(adapter->hw_addr1);
3221d1a890faSShreyas Bhatewara 	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
3222d1a890faSShreyas Bhatewara 	pci_disable_device(adapter->pdev);
3223d1a890faSShreyas Bhatewara }
3224d1a890faSShreyas Bhatewara 
3225d1a890faSShreyas Bhatewara 
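/*
 * Size the rx buffers and rings for the current MTU. Before version 7,
 * an MTU larger than the maximum skb buffer size spills over into
 * additional page-sized (type 1) buffers. Version 7 devices use only
 * type 0 buffers on ring1 and round the ring sizes down to a power of
 * two for UPT.
 */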
322654f00cceSWilliam Tu void
3227d1a890faSShreyas Bhatewara vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
3228d1a890faSShreyas Bhatewara {
322909c5088eSShreyas Bhatewara 	size_t sz, i, ring0_size, ring1_size, comp_size;
3230c7112ebdSRonak Doshi 	/* With version 7, ring1 will have only T0 buffers */
3231c7112ebdSRonak Doshi 	if (!VMXNET3_VERSION_GE_7(adapter)) {
3232d1a890faSShreyas Bhatewara 		if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
3233d1a890faSShreyas Bhatewara 					    VMXNET3_MAX_ETH_HDR_SIZE) {
3234d1a890faSShreyas Bhatewara 			adapter->skb_buf_size = adapter->netdev->mtu +
3235d1a890faSShreyas Bhatewara 						VMXNET3_MAX_ETH_HDR_SIZE;
3236d1a890faSShreyas Bhatewara 			if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
3237d1a890faSShreyas Bhatewara 				adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
3238d1a890faSShreyas Bhatewara 
3239d1a890faSShreyas Bhatewara 			adapter->rx_buf_per_pkt = 1;
3240d1a890faSShreyas Bhatewara 		} else {
3241d1a890faSShreyas Bhatewara 			adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
3242d1a890faSShreyas Bhatewara 			sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
3243d1a890faSShreyas Bhatewara 						    VMXNET3_MAX_ETH_HDR_SIZE;
3244d1a890faSShreyas Bhatewara 			adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
3245d1a890faSShreyas Bhatewara 		}
3246c7112ebdSRonak Doshi 	} else {
3247c7112ebdSRonak Doshi 		adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
3248c7112ebdSRonak Doshi 					    VMXNET3_MAX_SKB_BUF_SIZE);
3249c7112ebdSRonak Doshi 		adapter->rx_buf_per_pkt = 1;
3250c7112ebdSRonak Doshi 		adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
3251c7112ebdSRonak Doshi 		adapter->ringBufSize.ring1BufSizeType1 = 0;
3252c7112ebdSRonak Doshi 		adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
3253c7112ebdSRonak Doshi 	}
3254d1a890faSShreyas Bhatewara 
3255d1a890faSShreyas Bhatewara 	/*
3256d1a890faSShreyas Bhatewara 	 * for simplicity, force the ring0 size to be a multiple of
3257d1a890faSShreyas Bhatewara 	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
3258d1a890faSShreyas Bhatewara 	 */
3259d1a890faSShreyas Bhatewara 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
326009c5088eSShreyas Bhatewara 	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
326109c5088eSShreyas Bhatewara 	ring0_size = (ring0_size + sz - 1) / sz * sz;
3262a53255d3SShreyas Bhatewara 	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
326309c5088eSShreyas Bhatewara 			   sz * sz);
326409c5088eSShreyas Bhatewara 	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
326553831aa1SShrikrishna Khare 	ring1_size = (ring1_size + sz - 1) / sz * sz;
326653831aa1SShrikrishna Khare 	ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
326753831aa1SShrikrishna Khare 			   sz * sz);
3268c7112ebdSRonak Doshi 	/* For v7 and later, keep ring size power of 2 for UPT */
3269c7112ebdSRonak Doshi 	if (VMXNET3_VERSION_GE_7(adapter)) {
3270c7112ebdSRonak Doshi 		ring0_size = rounddown_pow_of_two(ring0_size);
3271c7112ebdSRonak Doshi 		ring1_size = rounddown_pow_of_two(ring1_size);
3272c7112ebdSRonak Doshi 	}
327309c5088eSShreyas Bhatewara 	comp_size = ring0_size + ring1_size;
327409c5088eSShreyas Bhatewara 
327509c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
32765e264e2bSColin Ian King 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
32775e264e2bSColin Ian King 
327809c5088eSShreyas Bhatewara 		rq->rx_ring[0].size = ring0_size;
327909c5088eSShreyas Bhatewara 		rq->rx_ring[1].size = ring1_size;
328009c5088eSShreyas Bhatewara 		rq->comp_ring.size = comp_size;
328109c5088eSShreyas Bhatewara 	}
3282d1a890faSShreyas Bhatewara }
3283d1a890faSShreyas Bhatewara 
3284d1a890faSShreyas Bhatewara 
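/*
 * Create all tx and rx queues with the given ring and data-descriptor
 * sizes. A tx queue failure aborts the whole operation; an rx queue
 * failure past the first queue merely shrinks num_rx_queues. The rx
 * data ring is requested on version 3+ devices and torn down again if
 * it did not end up enabled.
 */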
3285d1a890faSShreyas Bhatewara int
3286d1a890faSShreyas Bhatewara vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
32873c8b3efcSShrikrishna Khare 		      u32 rx_ring_size, u32 rx_ring2_size,
328850a5ce3eSShrikrishna Khare 		      u16 txdata_desc_size, u16 rxdata_desc_size)
3289d1a890faSShreyas Bhatewara {
329009c5088eSShreyas Bhatewara 	int err = 0, i;
3291d1a890faSShreyas Bhatewara 
329209c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++) {
329309c5088eSShreyas Bhatewara 		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
329409c5088eSShreyas Bhatewara 		tq->tx_ring.size   = tx_ring_size;
329509c5088eSShreyas Bhatewara 		tq->data_ring.size = tx_ring_size;
329609c5088eSShreyas Bhatewara 		tq->comp_ring.size = tx_ring_size;
32973c8b3efcSShrikrishna Khare 		tq->txdata_desc_size = txdata_desc_size;
329809c5088eSShreyas Bhatewara 		tq->shared = &adapter->tqd_start[i].ctrl;
329909c5088eSShreyas Bhatewara 		tq->stopped = true;
330009c5088eSShreyas Bhatewara 		tq->adapter = adapter;
330109c5088eSShreyas Bhatewara 		tq->qid = i;
330209c5088eSShreyas Bhatewara 		err = vmxnet3_tq_create(tq, adapter);
330309c5088eSShreyas Bhatewara 		/*
330409c5088eSShreyas Bhatewara 		 * Too late to change num_tx_queues. We cannot make do with
330509c5088eSShreyas Bhatewara 		 * fewer queues than we asked for.
330609c5088eSShreyas Bhatewara 		 */
3307d1a890faSShreyas Bhatewara 		if (err)
330809c5088eSShreyas Bhatewara 			goto queue_err;
330909c5088eSShreyas Bhatewara 	}
3310d1a890faSShreyas Bhatewara 
331109c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
331209c5088eSShreyas Bhatewara 	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
3313d1a890faSShreyas Bhatewara 	vmxnet3_adjust_rx_ring_size(adapter);
331450a5ce3eSShrikrishna Khare 
331550a5ce3eSShrikrishna Khare 	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
331609c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++) {
331709c5088eSShreyas Bhatewara 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
331809c5088eSShreyas Bhatewara 		/* qid and qid2 for rx queues will be assigned later, when the
331909c5088eSShreyas Bhatewara 		 * number of rx queues is finalized after allocating intrs */
332009c5088eSShreyas Bhatewara 		rq->shared = &adapter->rqd_start[i].ctrl;
332109c5088eSShreyas Bhatewara 		rq->adapter = adapter;
332250a5ce3eSShrikrishna Khare 		rq->data_ring.desc_size = rxdata_desc_size;
332309c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create(rq, adapter);
332409c5088eSShreyas Bhatewara 		if (err) {
332509c5088eSShreyas Bhatewara 			if (i == 0) {
3326204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
3327204a6e65SStephen Hemminger 					   "Could not allocate any rx queues. "
3328204a6e65SStephen Hemminger 					   "Aborting.\n");
332909c5088eSShreyas Bhatewara 				goto queue_err;
333009c5088eSShreyas Bhatewara 			} else {
3331204a6e65SStephen Hemminger 				netdev_info(adapter->netdev,
3332204a6e65SStephen Hemminger 					    "Number of rx queues changed "
333309c5088eSShreyas Bhatewara 					    "to: %d.\n", i);
333409c5088eSShreyas Bhatewara 				adapter->num_rx_queues = i;
333509c5088eSShreyas Bhatewara 				err = 0;
333609c5088eSShreyas Bhatewara 				break;
333709c5088eSShreyas Bhatewara 			}
333809c5088eSShreyas Bhatewara 		}
333909c5088eSShreyas Bhatewara 	}
334050a5ce3eSShrikrishna Khare 
334150a5ce3eSShrikrishna Khare 	if (!adapter->rxdataring_enabled)
334250a5ce3eSShrikrishna Khare 		vmxnet3_rq_destroy_all_rxdataring(adapter);
334350a5ce3eSShrikrishna Khare 
334409c5088eSShreyas Bhatewara 	return err;
334509c5088eSShreyas Bhatewara queue_err:
334609c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
3347d1a890faSShreyas Bhatewara 	return err;
3348d1a890faSShreyas Bhatewara }
3349d1a890faSShreyas Bhatewara 
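/*
 * ndo_open handler. On version 3+ devices the tx data descriptor size
 * is queried from the device and validated against the supported
 * bounds before the queues are created and the device is activated.
 */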
3350d1a890faSShreyas Bhatewara static int
3351d1a890faSShreyas Bhatewara vmxnet3_open(struct net_device *netdev)
3352d1a890faSShreyas Bhatewara {
3353d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
335409c5088eSShreyas Bhatewara 	int err, i;
3355d1a890faSShreyas Bhatewara 
3356d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
3357d1a890faSShreyas Bhatewara 
335809c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_tx_queues; i++)
335909c5088eSShreyas Bhatewara 		spin_lock_init(&adapter->tx_queue[i].tx_lock);
3360d1a890faSShreyas Bhatewara 
33613c8b3efcSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
33623c8b3efcSShrikrishna Khare 		unsigned long flags;
33633c8b3efcSShrikrishna Khare 		u16 txdata_desc_size;
33643c8b3efcSShrikrishna Khare 
33653c8b3efcSShrikrishna Khare 		spin_lock_irqsave(&adapter->cmd_lock, flags);
33663c8b3efcSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
33673c8b3efcSShrikrishna Khare 				       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
33683c8b3efcSShrikrishna Khare 		txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
33693c8b3efcSShrikrishna Khare 							 VMXNET3_REG_CMD);
33703c8b3efcSShrikrishna Khare 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
33713c8b3efcSShrikrishna Khare 
33723c8b3efcSShrikrishna Khare 		if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
33733c8b3efcSShrikrishna Khare 		    (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
33743c8b3efcSShrikrishna Khare 		    (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
33753c8b3efcSShrikrishna Khare 			adapter->txdata_desc_size =
33763c8b3efcSShrikrishna Khare 				sizeof(struct Vmxnet3_TxDataDesc);
33773c8b3efcSShrikrishna Khare 		} else {
33783c8b3efcSShrikrishna Khare 			adapter->txdata_desc_size = txdata_desc_size;
33793c8b3efcSShrikrishna Khare 		}
33803c8b3efcSShrikrishna Khare 	} else {
33813c8b3efcSShrikrishna Khare 		adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
33823c8b3efcSShrikrishna Khare 	}
33833c8b3efcSShrikrishna Khare 
33843c8b3efcSShrikrishna Khare 	err = vmxnet3_create_queues(adapter,
33853c8b3efcSShrikrishna Khare 				    adapter->tx_ring_size,
3386f00e2b0aSNeil Horman 				    adapter->rx_ring_size,
33873c8b3efcSShrikrishna Khare 				    adapter->rx_ring2_size,
338850a5ce3eSShrikrishna Khare 				    adapter->txdata_desc_size,
338950a5ce3eSShrikrishna Khare 				    adapter->rxdata_desc_size);
3390d1a890faSShreyas Bhatewara 	if (err)
3391d1a890faSShreyas Bhatewara 		goto queue_err;
3392d1a890faSShreyas Bhatewara 
3393d1a890faSShreyas Bhatewara 	err = vmxnet3_activate_dev(adapter);
3394d1a890faSShreyas Bhatewara 	if (err)
3395d1a890faSShreyas Bhatewara 		goto activate_err;
3396d1a890faSShreyas Bhatewara 
3397d1a890faSShreyas Bhatewara 	return 0;
3398d1a890faSShreyas Bhatewara 
3399d1a890faSShreyas Bhatewara activate_err:
340009c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
340109c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
3402d1a890faSShreyas Bhatewara queue_err:
3403d1a890faSShreyas Bhatewara 	return err;
3404d1a890faSShreyas Bhatewara }
3405d1a890faSShreyas Bhatewara 
3406d1a890faSShreyas Bhatewara 
3407d1a890faSShreyas Bhatewara static int
3408d1a890faSShreyas Bhatewara vmxnet3_close(struct net_device *netdev)
3409d1a890faSShreyas Bhatewara {
3410d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3411d1a890faSShreyas Bhatewara 
3412d1a890faSShreyas Bhatewara 	/*
3413d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device, wait for its
3414d1a890faSShreyas Bhatewara 	 * completion.
3415d1a890faSShreyas Bhatewara 	 */
3416d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
341793c65d13SYueHaibing 		usleep_range(1000, 2000);
3418d1a890faSShreyas Bhatewara 
3419d1a890faSShreyas Bhatewara 	vmxnet3_quiesce_dev(adapter);
3420d1a890faSShreyas Bhatewara 
342109c5088eSShreyas Bhatewara 	vmxnet3_rq_destroy_all(adapter);
342209c5088eSShreyas Bhatewara 	vmxnet3_tq_destroy_all(adapter);
3423d1a890faSShreyas Bhatewara 
3424d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3425d1a890faSShreyas Bhatewara 
3426d1a890faSShreyas Bhatewara 
3427d1a890faSShreyas Bhatewara 	return 0;
3428d1a890faSShreyas Bhatewara }
3429d1a890faSShreyas Bhatewara 
3430d1a890faSShreyas Bhatewara 
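/*
 * Force the device closed after a failed re-activation: re-enable NAPI
 * and clear the QUIESCED bit so that dev_close() can run the normal
 * vmxnet3_close() path without deadlocking.
 */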
3431d1a890faSShreyas Bhatewara void
3432d1a890faSShreyas Bhatewara vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3433d1a890faSShreyas Bhatewara {
343409c5088eSShreyas Bhatewara 	int i;
343509c5088eSShreyas Bhatewara 
3436d1a890faSShreyas Bhatewara 	/*
3437d1a890faSShreyas Bhatewara 	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3438d1a890faSShreyas Bhatewara 	 * vmxnet3_close() will deadlock.
3439d1a890faSShreyas Bhatewara 	 */
3440d1a890faSShreyas Bhatewara 	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3441d1a890faSShreyas Bhatewara 
3442d1a890faSShreyas Bhatewara 	/* we need to enable NAPI, otherwise dev_close will deadlock */
344309c5088eSShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
344409c5088eSShreyas Bhatewara 		napi_enable(&adapter->rx_queue[i].napi);
34451c4d5f51SNeil Horman 	/*
34461c4d5f51SNeil Horman 	 * Need to clear the quiesce bit to ensure that vmxnet3_close
34471c4d5f51SNeil Horman 	 * can quiesce the device properly
34481c4d5f51SNeil Horman 	 */
34491c4d5f51SNeil Horman 	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3450d1a890faSShreyas Bhatewara 	dev_close(adapter->netdev);
3451d1a890faSShreyas Bhatewara }
3452d1a890faSShreyas Bhatewara 
3453d1a890faSShreyas Bhatewara 
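/*
 * ndo_change_mtu handler. If the device is running, quiesce and reset
 * it and re-create the rx queues to match the buffer sizes required by
 * the new MTU; on failure the device is force-closed.
 */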
3454d1a890faSShreyas Bhatewara static int
3455d1a890faSShreyas Bhatewara vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3456d1a890faSShreyas Bhatewara {
3457d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3458d1a890faSShreyas Bhatewara 	int err = 0;
3459d1a890faSShreyas Bhatewara 
3460d1a890faSShreyas Bhatewara 	netdev->mtu = new_mtu;
3461d1a890faSShreyas Bhatewara 
3462d1a890faSShreyas Bhatewara 	/*
3463d1a890faSShreyas Bhatewara 	 * Reset_work may be in the middle of resetting the device, wait for its
3464d1a890faSShreyas Bhatewara 	 * completion.
3465d1a890faSShreyas Bhatewara 	 */
3466d1a890faSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
346793c65d13SYueHaibing 		usleep_range(1000, 2000);
3468d1a890faSShreyas Bhatewara 
3469d1a890faSShreyas Bhatewara 	if (netif_running(netdev)) {
3470d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
3471d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
3472d1a890faSShreyas Bhatewara 
3473d1a890faSShreyas Bhatewara 		/* we need to re-create the rx queue based on the new mtu */
347409c5088eSShreyas Bhatewara 		vmxnet3_rq_destroy_all(adapter);
3475d1a890faSShreyas Bhatewara 		vmxnet3_adjust_rx_ring_size(adapter);
347609c5088eSShreyas Bhatewara 		err = vmxnet3_rq_create_all(adapter);
3477d1a890faSShreyas Bhatewara 		if (err) {
3478204a6e65SStephen Hemminger 			netdev_err(netdev,
3479204a6e65SStephen Hemminger 				   "failed to re-create rx queues, "
3480204a6e65SStephen Hemminger 				   "error %d. Closing it.\n", err);
3481d1a890faSShreyas Bhatewara 			goto out;
3482d1a890faSShreyas Bhatewara 		}
3483d1a890faSShreyas Bhatewara 
3484d1a890faSShreyas Bhatewara 		err = vmxnet3_activate_dev(adapter);
3485d1a890faSShreyas Bhatewara 		if (err) {
3486204a6e65SStephen Hemminger 			netdev_err(netdev,
3487204a6e65SStephen Hemminger 				   "failed to re-activate, error %d. "
3488204a6e65SStephen Hemminger 				   "Closing it\n", err);
3489d1a890faSShreyas Bhatewara 			goto out;
3490d1a890faSShreyas Bhatewara 		}
3491d1a890faSShreyas Bhatewara 	}
3492d1a890faSShreyas Bhatewara 
3493d1a890faSShreyas Bhatewara out:
3494d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3495d1a890faSShreyas Bhatewara 	if (err)
3496d1a890faSShreyas Bhatewara 		vmxnet3_force_close(adapter);
3497d1a890faSShreyas Bhatewara 
3498d1a890faSShreyas Bhatewara 	return err;
3499d1a890faSShreyas Bhatewara }
3500d1a890faSShreyas Bhatewara 
3501d1a890faSShreyas Bhatewara 
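/*
 * Declare the netdev feature set. Version 4 adds UDP tunnel offloads;
 * version 7 additionally gates them on the device's passthrough
 * capabilities and strips any offload the device refused from the
 * feature masks again.
 */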
3502d1a890faSShreyas Bhatewara static void
3503c38f3068SChristophe JAILLET vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3504d1a890faSShreyas Bhatewara {
3505d1a890faSShreyas Bhatewara 	struct net_device *netdev = adapter->netdev;
3506d1a890faSShreyas Bhatewara 
3507a0d2730cSMichał Mirosław 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3508f646968fSPatrick McHardy 		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3509f646968fSPatrick McHardy 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3510c38f3068SChristophe JAILLET 		NETIF_F_LRO | NETIF_F_HIGHDMA;
3511dacce2beSRonak Doshi 
3512dacce2beSRonak Doshi 	if (VMXNET3_VERSION_GE_4(adapter)) {
3513dacce2beSRonak Doshi 		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3514dacce2beSRonak Doshi 				NETIF_F_GSO_UDP_TUNNEL_CSUM;
3515dacce2beSRonak Doshi 
3516dacce2beSRonak Doshi 		netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3517dacce2beSRonak Doshi 			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3518dacce2beSRonak Doshi 			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3519dacce2beSRonak Doshi 			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3520dacce2beSRonak Doshi 			NETIF_F_GSO_UDP_TUNNEL_CSUM;
3521dacce2beSRonak Doshi 	}
3522dacce2beSRonak Doshi 
35236f91f4baSRonak Doshi 	if (VMXNET3_VERSION_GE_7(adapter)) {
35246f91f4baSRonak Doshi 		unsigned long flags;
35256f91f4baSRonak Doshi 
35266f91f4baSRonak Doshi 		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
35276f91f4baSRonak Doshi 					       VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
35286f91f4baSRonak Doshi 			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
35296f91f4baSRonak Doshi 		}
35306f91f4baSRonak Doshi 		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
35316f91f4baSRonak Doshi 					       VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
35326f91f4baSRonak Doshi 			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
35336f91f4baSRonak Doshi 		}
35346f91f4baSRonak Doshi 		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
35356f91f4baSRonak Doshi 					       VMXNET3_CAP_GENEVE_TSO)) {
35366f91f4baSRonak Doshi 			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
35376f91f4baSRonak Doshi 		}
35386f91f4baSRonak Doshi 		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
35396f91f4baSRonak Doshi 					       VMXNET3_CAP_VXLAN_TSO)) {
35406f91f4baSRonak Doshi 			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
35416f91f4baSRonak Doshi 		}
35426f91f4baSRonak Doshi 		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
35436f91f4baSRonak Doshi 					       VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
35446f91f4baSRonak Doshi 			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
35456f91f4baSRonak Doshi 		}
35466f91f4baSRonak Doshi 		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
35476f91f4baSRonak Doshi 					       VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
35486f91f4baSRonak Doshi 			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
35496f91f4baSRonak Doshi 		}
35506f91f4baSRonak Doshi 
35516f91f4baSRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
35526f91f4baSRonak Doshi 		spin_lock_irqsave(&adapter->cmd_lock, flags);
35536f91f4baSRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
35546f91f4baSRonak Doshi 		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
35556f91f4baSRonak Doshi 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
35566f91f4baSRonak Doshi 
3557a56b158aSRonak Doshi 		if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
3558a56b158aSRonak Doshi 		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
3559a56b158aSRonak Doshi 		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
3560a56b158aSRonak Doshi 		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
3561a56b158aSRonak Doshi 			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3562a56b158aSRonak Doshi 			netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3563a56b158aSRonak Doshi 		}
35646f91f4baSRonak Doshi 		if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
35656f91f4baSRonak Doshi 		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
35666f91f4baSRonak Doshi 			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3567a56b158aSRonak Doshi 			netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
35686f91f4baSRonak Doshi 		}
35696f91f4baSRonak Doshi 	}
35706f91f4baSRonak Doshi 
357172e85c45SJesse Gross 	netdev->vlan_features = netdev->hw_features &
3572f646968fSPatrick McHardy 				~(NETIF_F_HW_VLAN_CTAG_TX |
3573f646968fSPatrick McHardy 				  NETIF_F_HW_VLAN_CTAG_RX);
3574f646968fSPatrick McHardy 	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3575d1a890faSShreyas Bhatewara }
3576d1a890faSShreyas Bhatewara 
3577d1a890faSShreyas Bhatewara 
3578d1a890faSShreyas Bhatewara static void
3579d1a890faSShreyas Bhatewara vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3580d1a890faSShreyas Bhatewara {
3581d1a890faSShreyas Bhatewara 	u32 tmp;
3582d1a890faSShreyas Bhatewara 
3583d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3584d1a890faSShreyas Bhatewara 	*(u32 *)mac = tmp;
3585d1a890faSShreyas Bhatewara 
3586d1a890faSShreyas Bhatewara 	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3587d1a890faSShreyas Bhatewara 	mac[4] = tmp & 0xff;
3588d1a890faSShreyas Bhatewara 	mac[5] = (tmp >> 8) & 0xff;
3589d1a890faSShreyas Bhatewara }
3590d1a890faSShreyas Bhatewara 
359109c5088eSShreyas Bhatewara #ifdef CONFIG_PCI_MSI
359209c5088eSShreyas Bhatewara 
359309c5088eSShreyas Bhatewara /*
359409c5088eSShreyas Bhatewara  * Enable MSI-X vectors.
359509c5088eSShreyas Bhatewara  * Returns:
359625985edcSLucas De Marchi  *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
3597b60b869dSAlexander Gordeev  *	 vectors could be enabled;
3598b60b869dSAlexander Gordeev  *	the number of vectors that were enabled otherwise (this number is
359909c5088eSShreyas Bhatewara  *	 greater than VMXNET3_LINUX_MIN_MSIX_VECT)
360009c5088eSShreyas Bhatewara  */
360109c5088eSShreyas Bhatewara 
360209c5088eSShreyas Bhatewara static int
3603b60b869dSAlexander Gordeev vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
360409c5088eSShreyas Bhatewara {
3605c0a1be38SAlexander Gordeev 	int ret = pci_enable_msix_range(adapter->pdev,
3606c0a1be38SAlexander Gordeev 					adapter->intr.msix_entries, nvec, nvec);
3607c0a1be38SAlexander Gordeev 
3608c0a1be38SAlexander Gordeev 	if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
36094bad25faSStephen Hemminger 		dev_err(&adapter->netdev->dev,
3610b60b869dSAlexander Gordeev 			"Failed to enable %d MSI-X, trying %d\n",
3611b60b869dSAlexander Gordeev 			nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
361209c5088eSShreyas Bhatewara 
3613c0a1be38SAlexander Gordeev 		ret = pci_enable_msix_range(adapter->pdev,
3614c0a1be38SAlexander Gordeev 					    adapter->intr.msix_entries,
3615c0a1be38SAlexander Gordeev 					    VMXNET3_LINUX_MIN_MSIX_VECT,
3616c0a1be38SAlexander Gordeev 					    VMXNET3_LINUX_MIN_MSIX_VECT);
3617c0a1be38SAlexander Gordeev 	}
3618c0a1be38SAlexander Gordeev 
3619c0a1be38SAlexander Gordeev 	if (ret < 0) {
3620c0a1be38SAlexander Gordeev 		dev_err(&adapter->netdev->dev,
3621c0a1be38SAlexander Gordeev 			"Failed to enable MSI-X, error: %d\n", ret);
3622c0a1be38SAlexander Gordeev 	}
3623c0a1be38SAlexander Gordeev 
3624c0a1be38SAlexander Gordeev 	return ret;
362509c5088eSShreyas Bhatewara }
362609c5088eSShreyas Bhatewara 
362709c5088eSShreyas Bhatewara 
362809c5088eSShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
3629d1a890faSShreyas Bhatewara 
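/*
 * Pick an interrupt mode. The device's preference is queried first.
 * MSI-X is tried with one vector per tx queue (unless tx interrupts
 * are shared), one per rx queue (unless buddied with tx) and one for
 * link events; on failure the driver falls back to MSI and finally
 * INTx, limiting the number of rx queues to one.
 */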
3630d1a890faSShreyas Bhatewara static void
3631d1a890faSShreyas Bhatewara vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3632d1a890faSShreyas Bhatewara {
3633d1a890faSShreyas Bhatewara 	u32 cfg;
3634e328d410SRoland Dreier 	unsigned long flags;
3635d1a890faSShreyas Bhatewara 
3636d1a890faSShreyas Bhatewara 	/* intr settings */
3637e328d410SRoland Dreier 	spin_lock_irqsave(&adapter->cmd_lock, flags);
3638d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3639d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_GET_CONF_INTR);
3640d1a890faSShreyas Bhatewara 	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3641e328d410SRoland Dreier 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3642d1a890faSShreyas Bhatewara 	adapter->intr.type = cfg & 0x3;
3643d1a890faSShreyas Bhatewara 	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3644d1a890faSShreyas Bhatewara 
3645d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_AUTO) {
36460bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSIX;
36470bdc0d70SShreyas Bhatewara 	}
3648d1a890faSShreyas Bhatewara 
36498f7e524cSRandy Dunlap #ifdef CONFIG_PCI_MSI
36500bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
3651f71ef02fSRonak Doshi 		int i, nvec, nvec_allocated;
36520bdc0d70SShreyas Bhatewara 
3653b60b869dSAlexander Gordeev 		nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3654b60b869dSAlexander Gordeev 			1 : adapter->num_tx_queues;
3655b60b869dSAlexander Gordeev 		nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3656b60b869dSAlexander Gordeev 			0 : adapter->num_rx_queues;
3657b60b869dSAlexander Gordeev 		nvec += 1;	/* for link event */
3658b60b869dSAlexander Gordeev 		nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3659b60b869dSAlexander Gordeev 		       nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
366009c5088eSShreyas Bhatewara 
3661b60b869dSAlexander Gordeev 		for (i = 0; i < nvec; i++)
3662b60b869dSAlexander Gordeev 			adapter->intr.msix_entries[i].entry = i;
366309c5088eSShreyas Bhatewara 
3664f71ef02fSRonak Doshi 		nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3665f71ef02fSRonak Doshi 		if (nvec_allocated < 0)
3666b60b869dSAlexander Gordeev 			goto msix_err;
366709c5088eSShreyas Bhatewara 
366809c5088eSShreyas Bhatewara 		/* If we cannot allocate one MSIx vector per queue
366909c5088eSShreyas Bhatewara 		 * then limit the number of rx queues to 1
367009c5088eSShreyas Bhatewara 		 */
3671f71ef02fSRonak Doshi 		if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
3672f71ef02fSRonak Doshi 		    nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
367309c5088eSShreyas Bhatewara 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
36747e96fbf2SShreyas Bhatewara 			    || adapter->num_rx_queues != 1) {
367509c5088eSShreyas Bhatewara 				adapter->share_intr = VMXNET3_INTR_TXSHARE;
3676204a6e65SStephen Hemminger 				netdev_err(adapter->netdev,
3677204a6e65SStephen Hemminger 					   "Number of rx queues : 1\n");
367809c5088eSShreyas Bhatewara 				adapter->num_rx_queues = 1;
367909c5088eSShreyas Bhatewara 			}
3680d1a890faSShreyas Bhatewara 		}
3681b60b869dSAlexander Gordeev 
3682f71ef02fSRonak Doshi 		adapter->intr.num_intrs = nvec_allocated;
368309c5088eSShreyas Bhatewara 		return;
368409c5088eSShreyas Bhatewara 
3685b60b869dSAlexander Gordeev msix_err:
368609c5088eSShreyas Bhatewara 		/* If we cannot allocate MSIx vectors use only one rx queue */
36874bad25faSStephen Hemminger 		dev_info(&adapter->pdev->dev,
36884bad25faSStephen Hemminger 			 "Failed to enable MSI-X, error %d. "
3689f71ef02fSRonak Doshi 			 "Limiting #rx queues to 1, trying MSI.\n", nvec_allocated);
369009c5088eSShreyas Bhatewara 
36910bdc0d70SShreyas Bhatewara 		adapter->intr.type = VMXNET3_IT_MSI;
36920bdc0d70SShreyas Bhatewara 	}
3693d1a890faSShreyas Bhatewara 
36940bdc0d70SShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSI) {
3695b60b869dSAlexander Gordeev 		if (!pci_enable_msi(adapter->pdev)) {
369609c5088eSShreyas Bhatewara 			adapter->num_rx_queues = 1;
3697d1a890faSShreyas Bhatewara 			adapter->intr.num_intrs = 1;
3698d1a890faSShreyas Bhatewara 			return;
3699d1a890faSShreyas Bhatewara 		}
3700d1a890faSShreyas Bhatewara 	}
37010bdc0d70SShreyas Bhatewara #endif /* CONFIG_PCI_MSI */
3702d1a890faSShreyas Bhatewara 
370309c5088eSShreyas Bhatewara 	adapter->num_rx_queues = 1;
3704204a6e65SStephen Hemminger 	dev_info(&adapter->netdev->dev,
3705204a6e65SStephen Hemminger 		 "Using INTx interrupt, #Rx queues: 1.\n");
3706d1a890faSShreyas Bhatewara 	adapter->intr.type = VMXNET3_IT_INTX;
3707d1a890faSShreyas Bhatewara 
3708d1a890faSShreyas Bhatewara 	/* INT-X related setting */
3709d1a890faSShreyas Bhatewara 	adapter->intr.num_intrs = 1;
3710d1a890faSShreyas Bhatewara }
3711d1a890faSShreyas Bhatewara 
3712d1a890faSShreyas Bhatewara 
3713d1a890faSShreyas Bhatewara static void
3714d1a890faSShreyas Bhatewara vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3715d1a890faSShreyas Bhatewara {
3716d1a890faSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX)
3717d1a890faSShreyas Bhatewara 		pci_disable_msix(adapter->pdev);
3718d1a890faSShreyas Bhatewara 	else if (adapter->intr.type == VMXNET3_IT_MSI)
3719d1a890faSShreyas Bhatewara 		pci_disable_msi(adapter->pdev);
3720d1a890faSShreyas Bhatewara 	else
3721d1a890faSShreyas Bhatewara 		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3722d1a890faSShreyas Bhatewara }
3723d1a890faSShreyas Bhatewara 
3724d1a890faSShreyas Bhatewara 
3725d1a890faSShreyas Bhatewara static void
37260290bd29SMichael S. Tsirkin vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3727d1a890faSShreyas Bhatewara {
3728d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3729d1a890faSShreyas Bhatewara 	adapter->tx_timeout_count++;
3730d1a890faSShreyas Bhatewara 
3731204a6e65SStephen Hemminger 	netdev_err(adapter->netdev, "tx hang\n");
3732d1a890faSShreyas Bhatewara 	schedule_work(&adapter->work);
3733d1a890faSShreyas Bhatewara }
3734d1a890faSShreyas Bhatewara 
3735d1a890faSShreyas Bhatewara 
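/*
 * Deferred reset worker. The RESETTING state bit serializes against
 * other resetters; under the rtnl lock the device is quiesced, reset
 * and re-activated, but only if it is still running.
 */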
3736d1a890faSShreyas Bhatewara static void
3737d1a890faSShreyas Bhatewara vmxnet3_reset_work(struct work_struct *data)
3738d1a890faSShreyas Bhatewara {
3739d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
3740d1a890faSShreyas Bhatewara 
3741d1a890faSShreyas Bhatewara 	adapter = container_of(data, struct vmxnet3_adapter, work);
3742d1a890faSShreyas Bhatewara 
3743d1a890faSShreyas Bhatewara 	/* if another thread is resetting the device, no need to proceed */
3744d1a890faSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3745d1a890faSShreyas Bhatewara 		return;
3746d1a890faSShreyas Bhatewara 
3747d1a890faSShreyas Bhatewara 	/* if the device is closed, we must leave it alone */
3748d9a5f210SShreyas Bhatewara 	rtnl_lock();
3749d1a890faSShreyas Bhatewara 	if (netif_running(adapter->netdev)) {
3750204a6e65SStephen Hemminger 		netdev_notice(adapter->netdev, "resetting\n");
3751d1a890faSShreyas Bhatewara 		vmxnet3_quiesce_dev(adapter);
3752d1a890faSShreyas Bhatewara 		vmxnet3_reset_dev(adapter);
3753d1a890faSShreyas Bhatewara 		vmxnet3_activate_dev(adapter);
3754d1a890faSShreyas Bhatewara 	} else {
3755204a6e65SStephen Hemminger 		netdev_info(adapter->netdev, "already closed\n");
3756d1a890faSShreyas Bhatewara 	}
3757d9a5f210SShreyas Bhatewara 	rtnl_unlock();
3758d1a890faSShreyas Bhatewara 
3759277964e1SBenjamin Poirier 	netif_wake_queue(adapter->netdev);
3760d1a890faSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3761d1a890faSShreyas Bhatewara }
3762d1a890faSShreyas Bhatewara 
3763d1a890faSShreyas Bhatewara 
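/*
 * PCI probe. Queue counts default to the number of online CPUs (capped
 * by device limits), the adapter and shared memory areas are allocated
 * and mapped, and the highest device revision supported by both sides
 * (7 down to 1) is negotiated before the capability and queue-count
 * registers are read.
 */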
37643a4751a3SBill Pemberton static int
3765d1a890faSShreyas Bhatewara vmxnet3_probe_device(struct pci_dev *pdev,
3766d1a890faSShreyas Bhatewara 		     const struct pci_device_id *id)
3767d1a890faSShreyas Bhatewara {
3768d1a890faSShreyas Bhatewara 	static const struct net_device_ops vmxnet3_netdev_ops = {
3769d1a890faSShreyas Bhatewara 		.ndo_open = vmxnet3_open,
3770d1a890faSShreyas Bhatewara 		.ndo_stop = vmxnet3_close,
3771d1a890faSShreyas Bhatewara 		.ndo_start_xmit = vmxnet3_xmit_frame,
3772d1a890faSShreyas Bhatewara 		.ndo_set_mac_address = vmxnet3_set_mac_addr,
3773d1a890faSShreyas Bhatewara 		.ndo_change_mtu = vmxnet3_change_mtu,
37743dd7400bSRonak Doshi 		.ndo_fix_features = vmxnet3_fix_features,
3775a0d2730cSMichał Mirosław 		.ndo_set_features = vmxnet3_set_features,
37761dac3b1bSRonak Doshi 		.ndo_features_check = vmxnet3_features_check,
377795305f6cSstephen hemminger 		.ndo_get_stats64 = vmxnet3_get_stats64,
3778d1a890faSShreyas Bhatewara 		.ndo_tx_timeout = vmxnet3_tx_timeout,
3779afc4b13dSJiri Pirko 		.ndo_set_rx_mode = vmxnet3_set_mc,
3780d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3781d1a890faSShreyas Bhatewara 		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3782d1a890faSShreyas Bhatewara #ifdef CONFIG_NET_POLL_CONTROLLER
3783d1a890faSShreyas Bhatewara 		.ndo_poll_controller = vmxnet3_netpoll,
3784d1a890faSShreyas Bhatewara #endif
378554f00cceSWilliam Tu 		.ndo_bpf = vmxnet3_xdp,
378654f00cceSWilliam Tu 		.ndo_xdp_xmit = vmxnet3_xdp_xmit,
3787d1a890faSShreyas Bhatewara 	};
3788d1a890faSShreyas Bhatewara 	int err;
3789d1a890faSShreyas Bhatewara 	u32 ver;
3790d1a890faSShreyas Bhatewara 	struct net_device *netdev;
3791d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter;
3792d1a890faSShreyas Bhatewara 	u8 mac[ETH_ALEN];
379309c5088eSShreyas Bhatewara 	int size;
379409c5088eSShreyas Bhatewara 	int num_tx_queues;
379509c5088eSShreyas Bhatewara 	int num_rx_queues;
379639f9895aSRonak Doshi 	int queues;
379739f9895aSRonak Doshi 	unsigned long flags;
3798d1a890faSShreyas Bhatewara 
3799e154b639SShreyas Bhatewara 	if (!pci_msi_enabled())
3800e154b639SShreyas Bhatewara 		enable_mq = 0;
3801e154b639SShreyas Bhatewara 
380209c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
380309c5088eSShreyas Bhatewara 	if (enable_mq)
380409c5088eSShreyas Bhatewara 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
380509c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
380609c5088eSShreyas Bhatewara 	else
380709c5088eSShreyas Bhatewara #endif
380809c5088eSShreyas Bhatewara 		num_rx_queues = 1;
380909c5088eSShreyas Bhatewara 
381009c5088eSShreyas Bhatewara 	if (enable_mq)
381109c5088eSShreyas Bhatewara 		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
381209c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
381309c5088eSShreyas Bhatewara 	else
381409c5088eSShreyas Bhatewara 		num_tx_queues = 1;
381509c5088eSShreyas Bhatewara 
381609c5088eSShreyas Bhatewara 	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
381709c5088eSShreyas Bhatewara 				   max(num_tx_queues, num_rx_queues));
381841de8d4cSJoe Perches 	if (!netdev)
3819d1a890faSShreyas Bhatewara 		return -ENOMEM;
3820d1a890faSShreyas Bhatewara 
3821d1a890faSShreyas Bhatewara 	pci_set_drvdata(pdev, netdev);
3822d1a890faSShreyas Bhatewara 	adapter = netdev_priv(netdev);
3823d1a890faSShreyas Bhatewara 	adapter->netdev = netdev;
3824d1a890faSShreyas Bhatewara 	adapter->pdev = pdev;
3825d1a890faSShreyas Bhatewara 
3826f00e2b0aSNeil Horman 	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3827f00e2b0aSNeil Horman 	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
382853831aa1SShrikrishna Khare 	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3829f00e2b0aSNeil Horman 
3830c38f3068SChristophe JAILLET 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3831bf7bec46SChristophe JAILLET 	if (err) {
3832bf7bec46SChristophe JAILLET 		dev_err(&pdev->dev, "dma_set_mask failed\n");
383361aeeceaShpreg@vmware.com 		goto err_set_mask;
383461aeeceaShpreg@vmware.com 	}
383561aeeceaShpreg@vmware.com 
383683d0feffSShreyas Bhatewara 	spin_lock_init(&adapter->cmd_lock);
3837b0eb57cbSAndy King 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3838b0eb57cbSAndy King 					     sizeof(struct vmxnet3_adapter),
3839bf7bec46SChristophe JAILLET 					     DMA_TO_DEVICE);
38405738a09dSAlexey Khoroshilov 	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
38415738a09dSAlexey Khoroshilov 		dev_err(&pdev->dev, "Failed to map dma\n");
38425738a09dSAlexey Khoroshilov 		err = -EFAULT;
384361aeeceaShpreg@vmware.com 		goto err_set_mask;
38445738a09dSAlexey Khoroshilov 	}
3845b0eb57cbSAndy King 	adapter->shared = dma_alloc_coherent(
3846b0eb57cbSAndy King 				&adapter->pdev->dev,
3847d1a890faSShreyas Bhatewara 				sizeof(struct Vmxnet3_DriverShared),
3848b0eb57cbSAndy King 				&adapter->shared_pa, GFP_KERNEL);
3849d1a890faSShreyas Bhatewara 	if (!adapter->shared) {
3850204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to allocate memory\n");
3851d1a890faSShreyas Bhatewara 		err = -ENOMEM;
3852d1a890faSShreyas Bhatewara 		goto err_alloc_shared;
3853d1a890faSShreyas Bhatewara 	}
3854d1a890faSShreyas Bhatewara 
385561aeeceaShpreg@vmware.com 	err = vmxnet3_alloc_pci_resources(adapter);
3856d1a890faSShreyas Bhatewara 	if (err < 0)
3857d1a890faSShreyas Bhatewara 		goto err_alloc_pci;
3858d1a890faSShreyas Bhatewara 
3859d1a890faSShreyas Bhatewara 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3860acc38e04SRonak Doshi 	if (ver & (1 << VMXNET3_REV_7)) {
3861acc38e04SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter,
3862acc38e04SRonak Doshi 				       VMXNET3_REG_VRRS,
3863acc38e04SRonak Doshi 				       1 << VMXNET3_REV_7);
3864acc38e04SRonak Doshi 		adapter->version = VMXNET3_REV_7 + 1;
3865acc38e04SRonak Doshi 	} else if (ver & (1 << VMXNET3_REV_6)) {
3866ce2639adSRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter,
3867ce2639adSRonak Doshi 				       VMXNET3_REG_VRRS,
3868ce2639adSRonak Doshi 				       1 << VMXNET3_REV_6);
3869ce2639adSRonak Doshi 		adapter->version = VMXNET3_REV_6 + 1;
3870ce2639adSRonak Doshi 	} else if (ver & (1 << VMXNET3_REV_5)) {
3871ce2639adSRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter,
3872ce2639adSRonak Doshi 				       VMXNET3_REG_VRRS,
3873ce2639adSRonak Doshi 				       1 << VMXNET3_REV_5);
3874ce2639adSRonak Doshi 		adapter->version = VMXNET3_REV_5 + 1;
3875ce2639adSRonak Doshi 	} else if (ver & (1 << VMXNET3_REV_4)) {
3876a31135e3SRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter,
3877a31135e3SRonak Doshi 				       VMXNET3_REG_VRRS,
3878a31135e3SRonak Doshi 				       1 << VMXNET3_REV_4);
3879a31135e3SRonak Doshi 		adapter->version = VMXNET3_REV_4 + 1;
3880a31135e3SRonak Doshi 	} else if (ver & (1 << VMXNET3_REV_3)) {
38816af9d787SShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter,
38826af9d787SShrikrishna Khare 				       VMXNET3_REG_VRRS,
38836af9d787SShrikrishna Khare 				       1 << VMXNET3_REV_3);
38846af9d787SShrikrishna Khare 		adapter->version = VMXNET3_REV_3 + 1;
38856af9d787SShrikrishna Khare 	} else if (ver & (1 << VMXNET3_REV_2)) {
3886190af10fSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter,
3887190af10fSShrikrishna Khare 				       VMXNET3_REG_VRRS,
3888190af10fSShrikrishna Khare 				       1 << VMXNET3_REV_2);
3889190af10fSShrikrishna Khare 		adapter->version = VMXNET3_REV_2 + 1;
3890190af10fSShrikrishna Khare 	} else if (ver & (1 << VMXNET3_REV_1)) {
3891190af10fSShrikrishna Khare 		VMXNET3_WRITE_BAR1_REG(adapter,
3892190af10fSShrikrishna Khare 				       VMXNET3_REG_VRRS,
3893190af10fSShrikrishna Khare 				       1 << VMXNET3_REV_1);
3894190af10fSShrikrishna Khare 		adapter->version = VMXNET3_REV_1 + 1;
3895d1a890faSShreyas Bhatewara 	} else {
3896204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
3897204a6e65SStephen Hemminger 			"Incompatible h/w version (0x%x) for adapter\n", ver);
3898d1a890faSShreyas Bhatewara 		err = -EBUSY;
3899d1a890faSShreyas Bhatewara 		goto err_ver;
3900d1a890faSShreyas Bhatewara 	}
390145dac1d6SShreyas Bhatewara 	dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3902d1a890faSShreyas Bhatewara 
3903d1a890faSShreyas Bhatewara 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3904d1a890faSShreyas Bhatewara 	if (ver & 1) {
3905d1a890faSShreyas Bhatewara 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3906d1a890faSShreyas Bhatewara 	} else {
3907204a6e65SStephen Hemminger 		dev_err(&pdev->dev,
3908204a6e65SStephen Hemminger 			"Incompatible upt version (0x%x) for adapter\n", ver);
3909d1a890faSShreyas Bhatewara 		err = -EBUSY;
3910d1a890faSShreyas Bhatewara 		goto err_ver;
3911d1a890faSShreyas Bhatewara 	}
3912d1a890faSShreyas Bhatewara 
39136f91f4baSRonak Doshi 	if (VMXNET3_VERSION_GE_7(adapter)) {
39146f91f4baSRonak Doshi 		adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
39156f91f4baSRonak Doshi 		adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
3916543fb674SRonak Doshi 		if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3917543fb674SRonak Doshi 			adapter->dev_caps[0] = adapter->devcap_supported[0] &
3918543fb674SRonak Doshi 							(1UL << VMXNET3_CAP_LARGE_BAR);
3919543fb674SRonak Doshi 		}
39202c5a5748SRonak Doshi 		if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
39212c5a5748SRonak Doshi 		    adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
39222c5a5748SRonak Doshi 		    adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
39232c5a5748SRonak Doshi 			adapter->dev_caps[0] |= adapter->devcap_supported[0] &
39242c5a5748SRonak Doshi 						(1UL << VMXNET3_CAP_OOORX_COMP);
39252c5a5748SRonak Doshi 		}
39266f91f4baSRonak Doshi 		if (adapter->dev_caps[0])
39276f91f4baSRonak Doshi 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
39286f91f4baSRonak Doshi 
39296f91f4baSRonak Doshi 		spin_lock_irqsave(&adapter->cmd_lock, flags);
39306f91f4baSRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
39316f91f4baSRonak Doshi 		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
39326f91f4baSRonak Doshi 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
39336f91f4baSRonak Doshi 	}
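	/*
	 * Capability negotiation on version 7 devices is a three-step
	 * exchange: read the device (DCR) and passthru (PTCR) capability
	 * masks, write back the subset of capabilities the driver wants
	 * enabled, then issue VMXNET3_CMD_GET_DCR0_REG to learn which of
	 * them the device actually granted.  A hedged sketch of the
	 * request-then-confirm step (vmxnet3_request_caps() is illustrative
	 * and not a real driver helper):
	 *
	 *	static u32
	 *	vmxnet3_request_caps(struct vmxnet3_adapter *adapter, u32 wanted)
	 *	{
	 *		unsigned long flags;
	 *		u32 granted;
	 *
	 *		wanted &= adapter->devcap_supported[0];
	 *		if (wanted)
	 *			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR,
	 *					       wanted);
	 *		spin_lock_irqsave(&adapter->cmd_lock, flags);
	 *		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
	 *				       VMXNET3_CMD_GET_DCR0_REG);
	 *		granted = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	 *		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	 *		return granted;
	 *	}
	 */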
39346f91f4baSRonak Doshi 
3935543fb674SRonak Doshi 	if (VMXNET3_VERSION_GE_7(adapter) &&
3936543fb674SRonak Doshi 	    adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3937543fb674SRonak Doshi 		adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
3938543fb674SRonak Doshi 		adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
3939543fb674SRonak Doshi 		adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
3940543fb674SRonak Doshi 	} else {
3941543fb674SRonak Doshi 		adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
3942543fb674SRonak Doshi 		adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
3943543fb674SRonak Doshi 		adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
3944543fb674SRonak Doshi 	}
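	/*
	 * When the large-BAR capability is granted, the doorbell (producer)
	 * registers live at the VMXNET3_REG_LB_* offsets instead; caching
	 * the chosen offsets here keeps the hot paths branch-free.  The
	 * transmit path, for instance, can ring the per-queue doorbell with
	 * a single BAR0 write regardless of layout (a sketch of the kick
	 * the xmit routine issues, where tq is the Tx queue in use):
	 *
	 *	VMXNET3_WRITE_BAR0_REG(adapter,
	 *			       adapter->tx_prod_offset + tq->qid * 8,
	 *			       tq->tx_ring.next2fill);
	 */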
3945543fb674SRonak Doshi 
394639f9895aSRonak Doshi 	if (VMXNET3_VERSION_GE_6(adapter)) {
394739f9895aSRonak Doshi 		spin_lock_irqsave(&adapter->cmd_lock, flags);
394839f9895aSRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
394939f9895aSRonak Doshi 				       VMXNET3_CMD_GET_MAX_QUEUES_CONF);
395039f9895aSRonak Doshi 		queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
395139f9895aSRonak Doshi 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
395239f9895aSRonak Doshi 		if (queues > 0) {
395339f9895aSRonak Doshi 			adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
395439f9895aSRonak Doshi 			adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
395539f9895aSRonak Doshi 		} else {
395639f9895aSRonak Doshi 			adapter->num_rx_queues = min(num_rx_queues,
395739f9895aSRonak Doshi 						     VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
395839f9895aSRonak Doshi 			adapter->num_tx_queues = min(num_tx_queues,
395939f9895aSRonak Doshi 						     VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
396039f9895aSRonak Doshi 		}
396139f9895aSRonak Doshi 		if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
396239f9895aSRonak Doshi 		    adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
396339f9895aSRonak Doshi 			adapter->queuesExtEnabled = true;
396439f9895aSRonak Doshi 		} else {
396539f9895aSRonak Doshi 			adapter->queuesExtEnabled = false;
396639f9895aSRonak Doshi 		}
396739f9895aSRonak Doshi 	} else {
396839f9895aSRonak Doshi 		adapter->queuesExtEnabled = false;
396915ccf2f4SRonak Doshi 		num_rx_queues = rounddown_pow_of_two(num_rx_queues);
397015ccf2f4SRonak Doshi 		num_tx_queues = rounddown_pow_of_two(num_tx_queues);
397139f9895aSRonak Doshi 		adapter->num_rx_queues = min(num_rx_queues,
397239f9895aSRonak Doshi 					     VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
397339f9895aSRonak Doshi 		adapter->num_tx_queues = min(num_tx_queues,
397439f9895aSRonak Doshi 					     VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
397539f9895aSRonak Doshi 	}
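	/*
	 * VMXNET3_CMD_GET_MAX_QUEUES_CONF packs both limits into a single
	 * register: bits 15:8 carry the maximum number of Rx queues and
	 * bits 7:0 the maximum number of Tx queues, hence the shift/mask
	 * decode above.  Pre-version-6 devices offer no such query, so the
	 * requested counts are rounded down to a power of two (which those
	 * devices expect) and clamped to the defaults instead.
	 */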
397639f9895aSRonak Doshi 	dev_info(&pdev->dev,
397739f9895aSRonak Doshi 		 "# of Tx queues : %d, # of Rx queues : %d\n",
397839f9895aSRonak Doshi 		 adapter->num_tx_queues, adapter->num_rx_queues);
397939f9895aSRonak Doshi 
398039f9895aSRonak Doshi 	adapter->rx_buf_per_pkt = 1;
398139f9895aSRonak Doshi 
398239f9895aSRonak Doshi 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
398339f9895aSRonak Doshi 	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
398439f9895aSRonak Doshi 	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
398539f9895aSRonak Doshi 						&adapter->queue_desc_pa,
398639f9895aSRonak Doshi 						GFP_KERNEL);
398739f9895aSRonak Doshi 
398839f9895aSRonak Doshi 	if (!adapter->tqd_start) {
398939f9895aSRonak Doshi 		dev_err(&pdev->dev, "Failed to allocate memory\n");
399039f9895aSRonak Doshi 		err = -ENOMEM;
399139f9895aSRonak Doshi 		goto err_ver;
399239f9895aSRonak Doshi 	}
399339f9895aSRonak Doshi 	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
399439f9895aSRonak Doshi 							    adapter->num_tx_queues);
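	/*
	 * All queue descriptors share one coherent DMA block: the Tx queue
	 * descriptors come first, immediately followed by the Rx queue
	 * descriptors, so rqd_start is derived by pointer arithmetic from
	 * tqd_start rather than by a second allocation:
	 *
	 *	tqd_start -> | Vmxnet3_TxQueueDesc * num_tx_queues |
	 *	rqd_start -> | Vmxnet3_RxQueueDesc * num_rx_queues |
	 */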
399539f9895aSRonak Doshi 
399639f9895aSRonak Doshi 	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
399739f9895aSRonak Doshi 					      sizeof(struct Vmxnet3_PMConf),
399839f9895aSRonak Doshi 					      &adapter->pm_conf_pa,
399939f9895aSRonak Doshi 					      GFP_KERNEL);
400039f9895aSRonak Doshi 	if (!adapter->pm_conf) {
400139f9895aSRonak Doshi 		err = -ENOMEM;
400239f9895aSRonak Doshi 		goto err_alloc_pm;
400339f9895aSRonak Doshi 	}
400439f9895aSRonak Doshi 
400539f9895aSRonak Doshi #ifdef VMXNET3_RSS
400639f9895aSRonak Doshi 
400739f9895aSRonak Doshi 	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
400839f9895aSRonak Doshi 					       sizeof(struct UPT1_RSSConf),
400939f9895aSRonak Doshi 					       &adapter->rss_conf_pa,
401039f9895aSRonak Doshi 					       GFP_KERNEL);
401139f9895aSRonak Doshi 	if (!adapter->rss_conf) {
401239f9895aSRonak Doshi 		err = -ENOMEM;
401339f9895aSRonak Doshi 		goto err_alloc_rss;
401439f9895aSRonak Doshi 	}
401539f9895aSRonak Doshi #endif /* VMXNET3_RSS */
401639f9895aSRonak Doshi 
40174edef40eSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
40184edef40eSShrikrishna Khare 		adapter->coal_conf =
40194edef40eSShrikrishna Khare 			dma_alloc_coherent(&adapter->pdev->dev,
40204edef40eSShrikrishna Khare 					   sizeof(struct Vmxnet3_CoalesceScheme),
40224edef40eSShrikrishna Khare 					   &adapter->coal_conf_pa,
40234edef40eSShrikrishna Khare 					   GFP_KERNEL);
40244edef40eSShrikrishna Khare 		if (!adapter->coal_conf) {
40254edef40eSShrikrishna Khare 			err = -ENOMEM;
402639f9895aSRonak Doshi 			goto err_coal_conf;
40274edef40eSShrikrishna Khare 		}
40284edef40eSShrikrishna Khare 		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
40294edef40eSShrikrishna Khare 		adapter->default_coal_mode = true;
40304edef40eSShrikrishna Khare 	}
40314edef40eSShrikrishna Khare 
4032d3a8a9e5SRonak Doshi 	if (VMXNET3_VERSION_GE_4(adapter)) {
4033d3a8a9e5SRonak Doshi 		adapter->default_rss_fields = true;
4034d3a8a9e5SRonak Doshi 		adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
4035d3a8a9e5SRonak Doshi 	}
4036d3a8a9e5SRonak Doshi 
4037e101e7ddSShreyas Bhatewara 	SET_NETDEV_DEV(netdev, &pdev->dev);
4038c38f3068SChristophe JAILLET 	vmxnet3_declare_features(adapter);
403954f00cceSWilliam Tu 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
404054f00cceSWilliam Tu 			       NETDEV_XDP_ACT_NDO_XMIT;
4041d1a890faSShreyas Bhatewara 
404250a5ce3eSShrikrishna Khare 	adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
404350a5ce3eSShrikrishna Khare 		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
404450a5ce3eSShrikrishna Khare 
40454db37a78SStephen Hemminger 	if (adapter->num_tx_queues == adapter->num_rx_queues)
40464db37a78SStephen Hemminger 		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
40474db37a78SStephen Hemminger 	else
404809c5088eSShreyas Bhatewara 		adapter->share_intr = VMXNET3_INTR_DONTSHARE;
404909c5088eSShreyas Bhatewara 
4050d1a890faSShreyas Bhatewara 	vmxnet3_alloc_intr_resources(adapter);
4051d1a890faSShreyas Bhatewara 
405209c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
405309c5088eSShreyas Bhatewara 	if (adapter->num_rx_queues > 1 &&
405409c5088eSShreyas Bhatewara 	    adapter->intr.type == VMXNET3_IT_MSIX) {
405509c5088eSShreyas Bhatewara 		adapter->rss = true;
40567db11f75SStephen Hemminger 		netdev->hw_features |= NETIF_F_RXHASH;
40577db11f75SStephen Hemminger 		netdev->features |= NETIF_F_RXHASH;
4058204a6e65SStephen Hemminger 		dev_dbg(&pdev->dev, "RSS is enabled.\n");
405909c5088eSShreyas Bhatewara 	} else {
406009c5088eSShreyas Bhatewara 		adapter->rss = false;
406109c5088eSShreyas Bhatewara 	}
406209c5088eSShreyas Bhatewara #endif
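	/*
	 * RSS only pays off when there is more than one Rx queue to spread
	 * flows across, and it needs MSI-X so that each queue can own an
	 * interrupt vector; with INTx/MSI every packet funnels through one
	 * vector anyway, so RSS stays off.
	 */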
406309c5088eSShreyas Bhatewara 
4064d1a890faSShreyas Bhatewara 	vmxnet3_read_mac_addr(adapter, mac);
4065ea52a0b5SJakub Kicinski 	dev_addr_set(netdev, mac);
4066d1a890faSShreyas Bhatewara 
4067d1a890faSShreyas Bhatewara 	netdev->netdev_ops = &vmxnet3_netdev_ops;
4068d1a890faSShreyas Bhatewara 	vmxnet3_set_ethtool_ops(netdev);
406909c5088eSShreyas Bhatewara 	netdev->watchdog_timeo = 5 * HZ;
4070d1a890faSShreyas Bhatewara 
40718c5663e4SRonak Doshi 	/* MTU range: 60 - 9190 (version 6 and newer), 60 - 9000 otherwise */
4072d0c2c997SJarod Wilson 	netdev->min_mtu = VMXNET3_MIN_MTU;
40738c5663e4SRonak Doshi 	if (VMXNET3_VERSION_GE_6(adapter))
40748c5663e4SRonak Doshi 		netdev->max_mtu = VMXNET3_V6_MAX_MTU;
40758c5663e4SRonak Doshi 	else
4076d0c2c997SJarod Wilson 		netdev->max_mtu = VMXNET3_MAX_MTU;
4077d0c2c997SJarod Wilson 
4078d1a890faSShreyas Bhatewara 	INIT_WORK(&adapter->work, vmxnet3_reset_work);
4079e3bc4ffbSSteve Hodgson 	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
4080d1a890faSShreyas Bhatewara 
408109c5088eSShreyas Bhatewara 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
408209c5088eSShreyas Bhatewara 		int i;
408309c5088eSShreyas Bhatewara 		for (i = 0; i < adapter->num_rx_queues; i++) {
408409c5088eSShreyas Bhatewara 			netif_napi_add(adapter->netdev,
408509c5088eSShreyas Bhatewara 				       &adapter->rx_queue[i].napi,
4086b48b89f9SJakub Kicinski 				       vmxnet3_poll_rx_only);
408709c5088eSShreyas Bhatewara 		}
408809c5088eSShreyas Bhatewara 	} else {
408909c5088eSShreyas Bhatewara 		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
4090b48b89f9SJakub Kicinski 			       vmxnet3_poll);
409109c5088eSShreyas Bhatewara 	}
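	/*
	 * The interrupt mode drives the NAPI layout: with MSI-X every Rx
	 * queue gets its own NAPI context polled via vmxnet3_poll_rx_only()
	 * from its own vector, while INTx/MSI devices share a single vector
	 * and therefore a single NAPI context (vmxnet3_poll()) that services
	 * Tx and Rx together.
	 */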
409209c5088eSShreyas Bhatewara 
409309c5088eSShreyas Bhatewara 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
409409c5088eSShreyas Bhatewara 	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
409509c5088eSShreyas Bhatewara 
40966cdd20c3SNeil Horman 	netif_carrier_off(netdev);
4097d1a890faSShreyas Bhatewara 	err = register_netdev(netdev);
4098d1a890faSShreyas Bhatewara 
4099d1a890faSShreyas Bhatewara 	if (err) {
4100204a6e65SStephen Hemminger 		dev_err(&pdev->dev, "Failed to register adapter\n");
4101d1a890faSShreyas Bhatewara 		goto err_register;
4102d1a890faSShreyas Bhatewara 	}
4103d1a890faSShreyas Bhatewara 
41044a1745fcSShreyas Bhatewara 	vmxnet3_check_link(adapter, false);
4105d1a890faSShreyas Bhatewara 	return 0;
4106d1a890faSShreyas Bhatewara 
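	/*
	 * The error labels below unwind strictly in reverse order of
	 * allocation; err_ver, err_alloc_pci, err_alloc_shared and
	 * err_set_mask are also the landing points for failures earlier in
	 * the probe path.
	 */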
4107d1a890faSShreyas Bhatewara err_register:
41084edef40eSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
41094edef40eSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
41104edef40eSShrikrishna Khare 				  sizeof(struct Vmxnet3_CoalesceScheme),
41114edef40eSShrikrishna Khare 				  adapter->coal_conf, adapter->coal_conf_pa);
41124edef40eSShrikrishna Khare 	}
4113d1a890faSShreyas Bhatewara 	vmxnet3_free_intr_resources(adapter);
411439f9895aSRonak Doshi err_coal_conf:
411509c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
4116b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4117b0eb57cbSAndy King 			  adapter->rss_conf, adapter->rss_conf_pa);
411809c5088eSShreyas Bhatewara err_alloc_rss:
411909c5088eSShreyas Bhatewara #endif
4120b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4121b0eb57cbSAndy King 			  adapter->pm_conf, adapter->pm_conf_pa);
4122d1a890faSShreyas Bhatewara err_alloc_pm:
4123b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
412409c5088eSShreyas Bhatewara 			  adapter->queue_desc_pa);
412539f9895aSRonak Doshi err_ver:
412639f9895aSRonak Doshi 	vmxnet3_free_pci_resources(adapter);
412739f9895aSRonak Doshi err_alloc_pci:
4128b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev,
4129b0eb57cbSAndy King 			  sizeof(struct Vmxnet3_DriverShared),
4130d1a890faSShreyas Bhatewara 			  adapter->shared, adapter->shared_pa);
4131d1a890faSShreyas Bhatewara err_alloc_shared:
4132b0eb57cbSAndy King 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4133bf7bec46SChristophe JAILLET 			 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
413461aeeceaShpreg@vmware.com err_set_mask:
4135d1a890faSShreyas Bhatewara 	free_netdev(netdev);
4136d1a890faSShreyas Bhatewara 	return err;
4137d1a890faSShreyas Bhatewara }
4138d1a890faSShreyas Bhatewara 
4139d1a890faSShreyas Bhatewara 
41403a4751a3SBill Pemberton static void
4141d1a890faSShreyas Bhatewara vmxnet3_remove_device(struct pci_dev *pdev)
4142d1a890faSShreyas Bhatewara {
4143d1a890faSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
4144d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
414509c5088eSShreyas Bhatewara 	int size = 0;
414639f9895aSRonak Doshi 	int num_rx_queues, rx_queues;
414739f9895aSRonak Doshi 	unsigned long flags;
414809c5088eSShreyas Bhatewara 
414909c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
415009c5088eSShreyas Bhatewara 	if (enable_mq)
415109c5088eSShreyas Bhatewara 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
415209c5088eSShreyas Bhatewara 				    (int)num_online_cpus());
415309c5088eSShreyas Bhatewara 	else
415409c5088eSShreyas Bhatewara #endif
415509c5088eSShreyas Bhatewara 		num_rx_queues = 1;
415615ccf2f4SRonak Doshi 	if (!VMXNET3_VERSION_GE_6(adapter))
4157eebb02b1SShreyas Bhatewara 		num_rx_queues = rounddown_pow_of_two(num_rx_queues);
415939f9895aSRonak Doshi 	if (VMXNET3_VERSION_GE_6(adapter)) {
416039f9895aSRonak Doshi 		spin_lock_irqsave(&adapter->cmd_lock, flags);
416139f9895aSRonak Doshi 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
416239f9895aSRonak Doshi 				       VMXNET3_CMD_GET_MAX_QUEUES_CONF);
416339f9895aSRonak Doshi 		rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
416439f9895aSRonak Doshi 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
416539f9895aSRonak Doshi 		if (rx_queues > 0)
416639f9895aSRonak Doshi 			rx_queues = (rx_queues >> 8) & 0xff;
416739f9895aSRonak Doshi 		else
416839f9895aSRonak Doshi 			rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
416939f9895aSRonak Doshi 		num_rx_queues = min(num_rx_queues, rx_queues);
417039f9895aSRonak Doshi 	} else {
417139f9895aSRonak Doshi 		num_rx_queues = min(num_rx_queues,
417239f9895aSRonak Doshi 				    VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
417339f9895aSRonak Doshi 	}
4174d1a890faSShreyas Bhatewara 
417523f333a2STejun Heo 	cancel_work_sync(&adapter->work);
4176d1a890faSShreyas Bhatewara 
4177d1a890faSShreyas Bhatewara 	unregister_netdev(netdev);
4178d1a890faSShreyas Bhatewara 
4179d1a890faSShreyas Bhatewara 	vmxnet3_free_intr_resources(adapter);
4180d1a890faSShreyas Bhatewara 	vmxnet3_free_pci_resources(adapter);
41814edef40eSShrikrishna Khare 	if (VMXNET3_VERSION_GE_3(adapter)) {
41824edef40eSShrikrishna Khare 		dma_free_coherent(&adapter->pdev->dev,
41834edef40eSShrikrishna Khare 				  sizeof(struct Vmxnet3_CoalesceScheme),
41844edef40eSShrikrishna Khare 				  adapter->coal_conf, adapter->coal_conf_pa);
41854edef40eSShrikrishna Khare 	}
418609c5088eSShreyas Bhatewara #ifdef VMXNET3_RSS
4187b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4188b0eb57cbSAndy King 			  adapter->rss_conf, adapter->rss_conf_pa);
418909c5088eSShreyas Bhatewara #endif
4190b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4191b0eb57cbSAndy King 			  adapter->pm_conf, adapter->pm_conf_pa);
419209c5088eSShreyas Bhatewara 
419309c5088eSShreyas Bhatewara 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
419409c5088eSShreyas Bhatewara 	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
4195b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
419609c5088eSShreyas Bhatewara 			  adapter->queue_desc_pa);
4197b0eb57cbSAndy King 	dma_free_coherent(&adapter->pdev->dev,
4198b0eb57cbSAndy King 			  sizeof(struct Vmxnet3_DriverShared),
4199d1a890faSShreyas Bhatewara 			  adapter->shared, adapter->shared_pa);
4200b0eb57cbSAndy King 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4201bf7bec46SChristophe JAILLET 			 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4202d1a890faSShreyas Bhatewara 	free_netdev(netdev);
4203d1a890faSShreyas Bhatewara }
4204d1a890faSShreyas Bhatewara 
4205e9ba47bfSShreyas Bhatewara static void vmxnet3_shutdown_device(struct pci_dev *pdev)
4206e9ba47bfSShreyas Bhatewara {
4207e9ba47bfSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
4208e9ba47bfSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4209e9ba47bfSShreyas Bhatewara 	unsigned long flags;
4210e9ba47bfSShreyas Bhatewara 
4211e9ba47bfSShreyas Bhatewara 	/* Reset_work may be in the middle of resetting the device; wait for
4212e9ba47bfSShreyas Bhatewara 	 * it to complete.
4213e9ba47bfSShreyas Bhatewara 	 */
4214e9ba47bfSShreyas Bhatewara 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
421593c65d13SYueHaibing 		usleep_range(1000, 2000);
4216e9ba47bfSShreyas Bhatewara 
4217e9ba47bfSShreyas Bhatewara 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
4218e9ba47bfSShreyas Bhatewara 			     &adapter->state)) {
4219e9ba47bfSShreyas Bhatewara 		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4220e9ba47bfSShreyas Bhatewara 		return;
4221e9ba47bfSShreyas Bhatewara 	}
4222e9ba47bfSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
4223e9ba47bfSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4224e9ba47bfSShreyas Bhatewara 			       VMXNET3_CMD_QUIESCE_DEV);
4225e9ba47bfSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4226e9ba47bfSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
4227e9ba47bfSShreyas Bhatewara 
4228e9ba47bfSShreyas Bhatewara 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4229e9ba47bfSShreyas Bhatewara }
4230e9ba47bfSShreyas Bhatewara 
4231d1a890faSShreyas Bhatewara 
4232d1a890faSShreyas Bhatewara #ifdef CONFIG_PM
4233d1a890faSShreyas Bhatewara 
4234d1a890faSShreyas Bhatewara static int
4235d1a890faSShreyas Bhatewara vmxnet3_suspend(struct device *device)
4236d1a890faSShreyas Bhatewara {
4237d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = to_pci_dev(device);
4238d1a890faSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
4239d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4240d1a890faSShreyas Bhatewara 	struct Vmxnet3_PMConf *pmConf;
4241d1a890faSShreyas Bhatewara 	struct ethhdr *ehdr;
4242d1a890faSShreyas Bhatewara 	struct arphdr *ahdr;
4243d1a890faSShreyas Bhatewara 	u8 *arpreq;
4244d1a890faSShreyas Bhatewara 	struct in_device *in_dev;
4245d1a890faSShreyas Bhatewara 	struct in_ifaddr *ifa;
424683d0feffSShreyas Bhatewara 	unsigned long flags;
4247d1a890faSShreyas Bhatewara 	int i = 0;
4248d1a890faSShreyas Bhatewara 
4249d1a890faSShreyas Bhatewara 	if (!netif_running(netdev))
4250d1a890faSShreyas Bhatewara 		return 0;
4251d1a890faSShreyas Bhatewara 
425251956cd6SShreyas Bhatewara 	for (i = 0; i < adapter->num_rx_queues; i++)
425351956cd6SShreyas Bhatewara 		napi_disable(&adapter->rx_queue[i].napi);
425451956cd6SShreyas Bhatewara 
4255d1a890faSShreyas Bhatewara 	vmxnet3_disable_all_intrs(adapter);
4256d1a890faSShreyas Bhatewara 	vmxnet3_free_irqs(adapter);
4257d1a890faSShreyas Bhatewara 	vmxnet3_free_intr_resources(adapter);
4258d1a890faSShreyas Bhatewara 
4259d1a890faSShreyas Bhatewara 	netif_device_detach(netdev);
4260d1a890faSShreyas Bhatewara 
4261d1a890faSShreyas Bhatewara 	/* Create wake-up filters. */
4262d1a890faSShreyas Bhatewara 	pmConf = adapter->pm_conf;
4263d1a890faSShreyas Bhatewara 	memset(pmConf, 0, sizeof(*pmConf));
4264d1a890faSShreyas Bhatewara 
4265d1a890faSShreyas Bhatewara 	if (adapter->wol & WAKE_UCAST) {
4266d1a890faSShreyas Bhatewara 		pmConf->filters[i].patternSize = ETH_ALEN;
4267d1a890faSShreyas Bhatewara 		pmConf->filters[i].maskSize = 1;
4268d1a890faSShreyas Bhatewara 		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
4269d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
4270d1a890faSShreyas Bhatewara 
42713843e515SHarvey Harrison 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4272d1a890faSShreyas Bhatewara 		i++;
4273d1a890faSShreyas Bhatewara 	}
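	/*
	 * Wake-up filter masks are bitmaps over the pattern bytes: bit n of
	 * mask[j] covers pattern byte j * 8 + n.  For the unicast filter
	 * above, the pattern is the 6-byte MAC address, so mask[0] = 0x3F
	 * (bits 0-5 set) requires all six bytes to match.
	 */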
4274d1a890faSShreyas Bhatewara 
4275d1a890faSShreyas Bhatewara 	if (adapter->wol & WAKE_ARP) {
42762638eb8bSFlorian Westphal 		rcu_read_lock();
4277d1a890faSShreyas Bhatewara 
42782638eb8bSFlorian Westphal 		in_dev = __in_dev_get_rcu(netdev);
42792638eb8bSFlorian Westphal 		if (!in_dev) {
42802638eb8bSFlorian Westphal 			rcu_read_unlock();
4281d1a890faSShreyas Bhatewara 			goto skip_arp;
42822638eb8bSFlorian Westphal 		}
42832638eb8bSFlorian Westphal 
42842638eb8bSFlorian Westphal 		ifa = rcu_dereference(in_dev->ifa_list);
42852638eb8bSFlorian Westphal 		if (!ifa) {
42862638eb8bSFlorian Westphal 			rcu_read_unlock();
42872638eb8bSFlorian Westphal 			goto skip_arp;
42882638eb8bSFlorian Westphal 		}
4289d1a890faSShreyas Bhatewara 
4290d1a890faSShreyas Bhatewara 		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
4291d1a890faSShreyas Bhatewara 			sizeof(struct arphdr) +		/* ARP header */
4292d1a890faSShreyas Bhatewara 			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
4293d1a890faSShreyas Bhatewara 			2 * sizeof(u32);	/*2 IPv4 addresses */
4294d1a890faSShreyas Bhatewara 		pmConf->filters[i].maskSize =
4295d1a890faSShreyas Bhatewara 			(pmConf->filters[i].patternSize - 1) / 8 + 1;
4296d1a890faSShreyas Bhatewara 
4297d1a890faSShreyas Bhatewara 		/* ETH_P_ARP in Ethernet header. */
4298d1a890faSShreyas Bhatewara 		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
4299d1a890faSShreyas Bhatewara 		ehdr->h_proto = htons(ETH_P_ARP);
4300d1a890faSShreyas Bhatewara 
4301d1a890faSShreyas Bhatewara 		/* ARPOP_REQUEST in ARP header. */
4302d1a890faSShreyas Bhatewara 		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
4303d1a890faSShreyas Bhatewara 		ahdr->ar_op = htons(ARPOP_REQUEST);
4304d1a890faSShreyas Bhatewara 		arpreq = (u8 *)(ahdr + 1);
4305d1a890faSShreyas Bhatewara 
4306d1a890faSShreyas Bhatewara 		/* The Unicast IPv4 address in 'tip' field. */
4307d1a890faSShreyas Bhatewara 		arpreq += 2 * ETH_ALEN + sizeof(u32);
43082638eb8bSFlorian Westphal 		*(__be32 *)arpreq = ifa->ifa_address;
43092638eb8bSFlorian Westphal 
43102638eb8bSFlorian Westphal 		rcu_read_unlock();
4311d1a890faSShreyas Bhatewara 
4312d1a890faSShreyas Bhatewara 		/* The mask for the relevant bits. */
4313d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[0] = 0x00;
4314d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
4315d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
4316d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[3] = 0x00;
4317d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
4318d1a890faSShreyas Bhatewara 		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
4319d1a890faSShreyas Bhatewara 
43203843e515SHarvey Harrison 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4321d1a890faSShreyas Bhatewara 		i++;
4322d1a890faSShreyas Bhatewara 	}
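	/*
	 * The same mask arithmetic, worked through for this ARP filter: the
	 * EtherType occupies pattern bytes 12-13, which fall in mask[1]
	 * (bytes 8-15) at bits 4-5, i.e. 0x30; the ARP opcode at bytes 20-21
	 * lands in mask[2] at bits 4-5, again 0x30; and the 4-byte target IP
	 * at bytes 38-41 straddles mask[4] (bits 6-7, 0xC0) and mask[5]
	 * (bits 0-1, 0x03).
	 */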
4323d1a890faSShreyas Bhatewara 
4324d1a890faSShreyas Bhatewara skip_arp:
4325d1a890faSShreyas Bhatewara 	if (adapter->wol & WAKE_MAGIC)
43263843e515SHarvey Harrison 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
4327d1a890faSShreyas Bhatewara 
4328d1a890faSShreyas Bhatewara 	pmConf->numFilters = i;
4329d1a890faSShreyas Bhatewara 
4330115924b6SShreyas Bhatewara 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
4331115924b6SShreyas Bhatewara 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
4332115924b6SShreyas Bhatewara 								  *pmConf));
4333b0eb57cbSAndy King 	adapter->shared->devRead.pmConfDesc.confPA =
4334b0eb57cbSAndy King 		cpu_to_le64(adapter->pm_conf_pa);
4335d1a890faSShreyas Bhatewara 
433683d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
4337d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4338d1a890faSShreyas Bhatewara 			       VMXNET3_CMD_UPDATE_PMCFG);
433983d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4340d1a890faSShreyas Bhatewara 
4341d1a890faSShreyas Bhatewara 	pci_save_state(pdev);
4342d1a890faSShreyas Bhatewara 	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
4343d1a890faSShreyas Bhatewara 			adapter->wol);
4344d1a890faSShreyas Bhatewara 	pci_disable_device(pdev);
4345d1a890faSShreyas Bhatewara 	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
4346d1a890faSShreyas Bhatewara 
4347d1a890faSShreyas Bhatewara 	return 0;
4348d1a890faSShreyas Bhatewara }
4349d1a890faSShreyas Bhatewara 
4350d1a890faSShreyas Bhatewara 
4351d1a890faSShreyas Bhatewara static int
4352d1a890faSShreyas Bhatewara vmxnet3_resume(struct device *device)
4353d1a890faSShreyas Bhatewara {
43545ec82c1eSShrikrishna Khare 	int err;
435583d0feffSShreyas Bhatewara 	unsigned long flags;
4356d1a890faSShreyas Bhatewara 	struct pci_dev *pdev = to_pci_dev(device);
4357d1a890faSShreyas Bhatewara 	struct net_device *netdev = pci_get_drvdata(pdev);
4358d1a890faSShreyas Bhatewara 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4359d1a890faSShreyas Bhatewara 
4360d1a890faSShreyas Bhatewara 	if (!netif_running(netdev))
4361d1a890faSShreyas Bhatewara 		return 0;
4362d1a890faSShreyas Bhatewara 
4363d1a890faSShreyas Bhatewara 	pci_set_power_state(pdev, PCI_D0);
4364d1a890faSShreyas Bhatewara 	pci_restore_state(pdev);
4365d1a890faSShreyas Bhatewara 	err = pci_enable_device_mem(pdev);
4366d1a890faSShreyas Bhatewara 	if (err != 0)
4367d1a890faSShreyas Bhatewara 		return err;
4368d1a890faSShreyas Bhatewara 
4369d1a890faSShreyas Bhatewara 	pci_enable_wake(pdev, PCI_D0, 0);
4370d1a890faSShreyas Bhatewara 
43715ec82c1eSShrikrishna Khare 	vmxnet3_alloc_intr_resources(adapter);
43725ec82c1eSShrikrishna Khare 
43735ec82c1eSShrikrishna Khare 	/* During hibernate and suspend, the device has to be reinitialized,
43745ec82c1eSShrikrishna Khare 	 * as the device state is not guaranteed to be preserved.
43755ec82c1eSShrikrishna Khare 	 */
43765ec82c1eSShrikrishna Khare 
43775ec82c1eSShrikrishna Khare 	/* There is no need to check the adapter state here, as no other
43785ec82c1eSShrikrishna Khare 	 * reset task can run while the device is resuming.
43795ec82c1eSShrikrishna Khare 	 */
438083d0feffSShreyas Bhatewara 	spin_lock_irqsave(&adapter->cmd_lock, flags);
4381d1a890faSShreyas Bhatewara 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
43825ec82c1eSShrikrishna Khare 			       VMXNET3_CMD_QUIESCE_DEV);
438383d0feffSShreyas Bhatewara 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
43845ec82c1eSShrikrishna Khare 	vmxnet3_tq_cleanup_all(adapter);
43855ec82c1eSShrikrishna Khare 	vmxnet3_rq_cleanup_all(adapter);
43865ec82c1eSShrikrishna Khare 
43875ec82c1eSShrikrishna Khare 	vmxnet3_reset_dev(adapter);
43885ec82c1eSShrikrishna Khare 	err = vmxnet3_activate_dev(adapter);
43895ec82c1eSShrikrishna Khare 	if (err != 0) {
43905ec82c1eSShrikrishna Khare 		netdev_err(netdev,
43915ec82c1eSShrikrishna Khare 			   "failed to re-activate on resume, error: %d\n", err);
43925ec82c1eSShrikrishna Khare 		vmxnet3_force_close(adapter);
43935ec82c1eSShrikrishna Khare 		return err;
43945ec82c1eSShrikrishna Khare 	}
43955ec82c1eSShrikrishna Khare 	netif_device_attach(netdev);
4396d1a890faSShreyas Bhatewara 
4397d1a890faSShreyas Bhatewara 	return 0;
4398d1a890faSShreyas Bhatewara }
4399d1a890faSShreyas Bhatewara 
440047145210SAlexey Dobriyan static const struct dev_pm_ops vmxnet3_pm_ops = {
4401d1a890faSShreyas Bhatewara 	.suspend = vmxnet3_suspend,
4402d1a890faSShreyas Bhatewara 	.resume = vmxnet3_resume,
44035ec82c1eSShrikrishna Khare 	.freeze = vmxnet3_suspend,
44045ec82c1eSShrikrishna Khare 	.restore = vmxnet3_resume,
4405d1a890faSShreyas Bhatewara };
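/*
 * Hibernation reuses the suspend/resume pair: .freeze runs before the
 * hibernation image is written and .restore after it is loaded.  Since
 * vmxnet3_resume() quiesces and fully re-activates the device rather than
 * restoring saved state, the same callbacks serve both transitions.
 */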
4406d1a890faSShreyas Bhatewara #endif
4407d1a890faSShreyas Bhatewara 
4408d1a890faSShreyas Bhatewara static struct pci_driver vmxnet3_driver = {
4409d1a890faSShreyas Bhatewara 	.name		= vmxnet3_driver_name,
4410d1a890faSShreyas Bhatewara 	.id_table	= vmxnet3_pciid_table,
4411d1a890faSShreyas Bhatewara 	.probe		= vmxnet3_probe_device,
44123a4751a3SBill Pemberton 	.remove		= vmxnet3_remove_device,
4413e9ba47bfSShreyas Bhatewara 	.shutdown	= vmxnet3_shutdown_device,
4414d1a890faSShreyas Bhatewara #ifdef CONFIG_PM
4415d1a890faSShreyas Bhatewara 	.driver.pm	= &vmxnet3_pm_ops,
4416d1a890faSShreyas Bhatewara #endif
4417d1a890faSShreyas Bhatewara };
4418d1a890faSShreyas Bhatewara 
4419d1a890faSShreyas Bhatewara 
4420d1a890faSShreyas Bhatewara static int __init
4421d1a890faSShreyas Bhatewara vmxnet3_init_module(void)
4422d1a890faSShreyas Bhatewara {
4423204a6e65SStephen Hemminger 	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
4424d1a890faSShreyas Bhatewara 		VMXNET3_DRIVER_VERSION_REPORT);
4425d1a890faSShreyas Bhatewara 	return pci_register_driver(&vmxnet3_driver);
4426d1a890faSShreyas Bhatewara }
4427d1a890faSShreyas Bhatewara 
4428d1a890faSShreyas Bhatewara module_init(vmxnet3_init_module);
4429d1a890faSShreyas Bhatewara 
4430d1a890faSShreyas Bhatewara 
4431d1a890faSShreyas Bhatewara static void
4432d1a890faSShreyas Bhatewara vmxnet3_exit_module(void)
4433d1a890faSShreyas Bhatewara {
4434d1a890faSShreyas Bhatewara 	pci_unregister_driver(&vmxnet3_driver);
4435d1a890faSShreyas Bhatewara }
4436d1a890faSShreyas Bhatewara 
4437d1a890faSShreyas Bhatewara module_exit(vmxnet3_exit_module);
4438d1a890faSShreyas Bhatewara 
4439d1a890faSShreyas Bhatewara MODULE_AUTHOR("VMware, Inc.");
4440d1a890faSShreyas Bhatewara MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4441d1a890faSShreyas Bhatewara MODULE_LICENSE("GPL v2");
4442d1a890faSShreyas Bhatewara MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);