1 /*
2  * Linux driver for VMware's vmxnet3 ethernet NIC.
3  *
4  * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License as published by the
8  * Free Software Foundation; version 2 of the License and no later version.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  * NON INFRINGEMENT.  See the GNU General Public License for more
14  * details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19  *
20  * The full GNU General Public License is included in this distribution in
21  * the file called "COPYING".
22  *
23  * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
24  *
25  */
26 
27 
28 #include "vmxnet3_int.h"
29 
30 struct vmxnet3_stat_desc {
31 	char desc[ETH_GSTRING_LEN];
32 	int  offset;
33 };
34 
35 
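/* Hook for ethtool's get_rx_csum: report whether rx checksum offload is on */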
36 static u32
37 vmxnet3_get_rx_csum(struct net_device *netdev)
38 {
39 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
40 	return adapter->rxcsum;
41 }
42 
43 
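/*
 * Hook for ethtool's set_rx_csum: record the new setting and, if the
 * device is running, push the updated feature bits to it.
 */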
44 static int
45 vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
46 {
47 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
48 	unsigned long flags;
49 
50 	if (adapter->rxcsum != val) {
51 		adapter->rxcsum = val;
52 		if (netif_running(netdev)) {
53 			if (val)
54 				adapter->shared->devRead.misc.uptFeatures |=
55 				UPT1_F_RXCSUM;
56 			else
57 				adapter->shared->devRead.misc.uptFeatures &=
58 				~UPT1_F_RXCSUM;
59 
60 			spin_lock_irqsave(&adapter->cmd_lock, flags);
61 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
62 					       VMXNET3_CMD_UPDATE_FEATURE);
63 			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
64 		}
65 	}
66 	return 0;
67 }
68 
69 
70 /* per tq stats maintained by the device */
71 static const struct vmxnet3_stat_desc
72 vmxnet3_tq_dev_stats[] = {
73 	/* description,         offset */
74 	{ "Tx Queue#",        0 },
75 	{ "  TSO pkts tx",	offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
76 	{ "  TSO bytes tx",	offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
77 	{ "  ucast pkts tx",	offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
78 	{ "  ucast bytes tx",	offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
79 	{ "  mcast pkts tx",	offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
80 	{ "  mcast bytes tx",	offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
81 	{ "  bcast pkts tx",	offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
82 	{ "  bcast bytes tx",	offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
83 	{ "  pkts tx err",	offsetof(struct UPT1_TxStats, pktsTxError) },
84 	{ "  pkts tx discard",	offsetof(struct UPT1_TxStats, pktsTxDiscard) },
85 };
86 
87 /* per tq stats maintained by the driver */
88 static const struct vmxnet3_stat_desc
89 vmxnet3_tq_driver_stats[] = {
90 	/* description,         offset */
91 	{"  drv dropped tx total",	offsetof(struct vmxnet3_tq_driver_stats,
92 						 drop_total) },
93 	{ "     too many frags", offsetof(struct vmxnet3_tq_driver_stats,
94 					  drop_too_many_frags) },
95 	{ "     giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
96 					 drop_oversized_hdr) },
97 	{ "     hdr err",	offsetof(struct vmxnet3_tq_driver_stats,
98 					 drop_hdr_inspect_err) },
99 	{ "     tso",		offsetof(struct vmxnet3_tq_driver_stats,
100 					 drop_tso) },
101 	{ "  ring full",	offsetof(struct vmxnet3_tq_driver_stats,
102 					 tx_ring_full) },
103 	{ "  pkts linearized",	offsetof(struct vmxnet3_tq_driver_stats,
104 					 linearized) },
105 	{ "  hdr cloned",	offsetof(struct vmxnet3_tq_driver_stats,
106 					 copy_skb_header) },
107 	{ "  giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
108 					 oversized_hdr) },
109 };
110 
111 /* per rq stats maintained by the device */
112 static const struct vmxnet3_stat_desc
113 vmxnet3_rq_dev_stats[] = {
114 	{ "Rx Queue#",        0 },
115 	{ "  LRO pkts rx",	offsetof(struct UPT1_RxStats, LROPktsRxOK) },
116 	{ "  LRO byte rx",	offsetof(struct UPT1_RxStats, LROBytesRxOK) },
117 	{ "  ucast pkts rx",	offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
118 	{ "  ucast bytes rx",	offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
119 	{ "  mcast pkts rx",	offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
120 	{ "  mcast bytes rx",	offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
121 	{ "  bcast pkts rx",	offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
122 	{ "  bcast bytes rx",	offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
123 	{ "  pkts rx OOB",	offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
124 	{ "  pkts rx err",	offsetof(struct UPT1_RxStats, pktsRxError) },
125 };
126 
127 /* per rq stats maintained by the driver */
128 static const struct vmxnet3_stat_desc
129 vmxnet3_rq_driver_stats[] = {
130 	/* description,         offset */
131 	{ "  drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
132 					     drop_total) },
133 	{ "     err",		offsetof(struct vmxnet3_rq_driver_stats,
134 					 drop_err) },
135 	{ "     fcs",		offsetof(struct vmxnet3_rq_driver_stats,
136 					 drop_fcs) },
137 	{ "  rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
138 					  rx_buf_alloc_failure) },
139 };
140 
/* global stats maintained by the driver */
142 static const struct vmxnet3_stat_desc
143 vmxnet3_global_stats[] = {
144 	/* description,         offset */
145 	{ "tx timeout count",	offsetof(struct vmxnet3_adapter,
146 					 tx_timeout_count) }
147 };
148 
149 
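/*
 * Gather the device-maintained counters from the shared area and fold
 * them, together with the driver's own drop counts, into netdev->stats.
 */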
150 struct net_device_stats *
151 vmxnet3_get_stats(struct net_device *netdev)
152 {
153 	struct vmxnet3_adapter *adapter;
154 	struct vmxnet3_tq_driver_stats *drvTxStats;
155 	struct vmxnet3_rq_driver_stats *drvRxStats;
156 	struct UPT1_TxStats *devTxStats;
157 	struct UPT1_RxStats *devRxStats;
158 	struct net_device_stats *net_stats = &netdev->stats;
159 	unsigned long flags;
160 	int i;
161 
162 	adapter = netdev_priv(netdev);
163 
164 	/* Collect the dev stats into the shared area */
165 	spin_lock_irqsave(&adapter->cmd_lock, flags);
166 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
167 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
168 
169 	memset(net_stats, 0, sizeof(*net_stats));
170 	for (i = 0; i < adapter->num_tx_queues; i++) {
171 		devTxStats = &adapter->tqd_start[i].stats;
172 		drvTxStats = &adapter->tx_queue[i].stats;
173 		net_stats->tx_packets += devTxStats->ucastPktsTxOK +
174 					devTxStats->mcastPktsTxOK +
175 					devTxStats->bcastPktsTxOK;
176 		net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
177 				      devTxStats->mcastBytesTxOK +
178 				      devTxStats->bcastBytesTxOK;
179 		net_stats->tx_errors += devTxStats->pktsTxError;
180 		net_stats->tx_dropped += drvTxStats->drop_total;
181 	}
182 
183 	for (i = 0; i < adapter->num_rx_queues; i++) {
184 		devRxStats = &adapter->rqd_start[i].stats;
185 		drvRxStats = &adapter->rx_queue[i].stats;
186 		net_stats->rx_packets += devRxStats->ucastPktsRxOK +
187 					devRxStats->mcastPktsRxOK +
188 					devRxStats->bcastPktsRxOK;
189 
190 		net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
191 				      devRxStats->mcastBytesRxOK +
192 				      devRxStats->bcastBytesRxOK;
193 
194 		net_stats->rx_errors += devRxStats->pktsRxError;
195 		net_stats->rx_dropped += drvRxStats->drop_total;
		net_stats->multicast += devRxStats->mcastPktsRxOK;
197 	}
198 	return net_stats;
199 }
200 
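/*
 * Number of counters exposed through "ethtool -S": the per-queue device
 * and driver stats for every tx and rx queue, plus the global stats.
 */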
201 static int
202 vmxnet3_get_sset_count(struct net_device *netdev, int sset)
203 {
204 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
205 	switch (sset) {
206 	case ETH_SS_STATS:
207 		return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
208 			ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
209 		       adapter->num_tx_queues +
210 		       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
211 			ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
212 		       adapter->num_rx_queues +
213 			ARRAY_SIZE(vmxnet3_global_stats);
214 	default:
215 		return -EOPNOTSUPP;
216 	}
217 }
218 
219 
220 /* Should be multiple of 4 */
221 #define NUM_TX_REGS	8
222 #define NUM_RX_REGS	12
223 
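/*
 * Size in bytes of the dump produced by vmxnet3_get_regs(): NUM_TX_REGS
 * u32s per tx queue and NUM_RX_REGS u32s per rx queue.
 */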
224 static int
225 vmxnet3_get_regs_len(struct net_device *netdev)
226 {
227 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
228 	return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
229 		adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
230 }
231 
232 
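/* Fill in the driver identification reported by "ethtool -i" */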
233 static void
234 vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
235 {
236 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
237 
238 	strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
239 	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
240 
241 	strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
242 		sizeof(drvinfo->version));
	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
244 
245 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
246 	drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
247 
248 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
249 		ETHTOOL_BUSINFO_LEN);
250 	drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
251 	drvinfo->testinfo_len = 0;
252 	drvinfo->eedump_len   = 0;
253 	drvinfo->regdump_len  = vmxnet3_get_regs_len(netdev);
254 }
255 
256 
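/*
 * Emit the counter names for "ethtool -S", in the same order that
 * vmxnet3_get_ethtool_stats() writes the values.
 */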
257 static void
258 vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
259 {
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
261 	if (stringset == ETH_SS_STATS) {
262 		int i, j;
263 		for (j = 0; j < adapter->num_tx_queues; j++) {
264 			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
265 				memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
266 				       ETH_GSTRING_LEN);
267 				buf += ETH_GSTRING_LEN;
268 			}
269 			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
270 			     i++) {
271 				memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
272 				       ETH_GSTRING_LEN);
273 				buf += ETH_GSTRING_LEN;
274 			}
275 		}
276 
277 		for (j = 0; j < adapter->num_rx_queues; j++) {
278 			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
279 				memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
280 				       ETH_GSTRING_LEN);
281 				buf += ETH_GSTRING_LEN;
282 			}
283 			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
284 			     i++) {
285 				memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
286 				       ETH_GSTRING_LEN);
287 				buf += ETH_GSTRING_LEN;
288 			}
289 		}
290 
291 		for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
292 			memcpy(buf, vmxnet3_global_stats[i].desc,
293 				ETH_GSTRING_LEN);
294 			buf += ETH_GSTRING_LEN;
295 		}
296 	}
297 }
298 
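/*
 * Hook for ethtool's set_flags: only the LRO flag may be changed here.
 * A toggle is mirrored into the shared area and pushed to the device
 * with VMXNET3_CMD_UPDATE_FEATURE.
 */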
299 static int
300 vmxnet3_set_flags(struct net_device *netdev, u32 data)
301 {
302 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
303 	u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
304 	u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
305 	unsigned long flags;
306 
307 	if (data & ~ETH_FLAG_LRO)
308 		return -EOPNOTSUPP;
309 
310 	if (lro_requested ^ lro_present) {
		/* toggle the LRO feature */
		netdev->features ^= NETIF_F_LRO;

		/* update hardware LRO capability accordingly */
315 		if (lro_requested)
316 			adapter->shared->devRead.misc.uptFeatures |=
317 							UPT1_F_LRO;
318 		else
319 			adapter->shared->devRead.misc.uptFeatures &=
320 							~UPT1_F_LRO;
321 		spin_lock_irqsave(&adapter->cmd_lock, flags);
322 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
323 				       VMXNET3_CMD_UPDATE_FEATURE);
324 		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
325 	}
326 	return 0;
327 }
328 
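/*
 * Fetch fresh device counters and copy all values into buf, matching the
 * layout advertised by vmxnet3_get_strings()/vmxnet3_get_sset_count().
 */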
329 static void
330 vmxnet3_get_ethtool_stats(struct net_device *netdev,
331 			  struct ethtool_stats *stats, u64  *buf)
332 {
333 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
334 	unsigned long flags;
335 	u8 *base;
336 	int i;
337 	int j = 0;
338 
339 	spin_lock_irqsave(&adapter->cmd_lock, flags);
340 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
341 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
342 
343 	/* this does assume each counter is 64-bit wide */
344 	for (j = 0; j < adapter->num_tx_queues; j++) {
345 		base = (u8 *)&adapter->tqd_start[j].stats;
346 		*buf++ = (u64)j;
347 		for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
348 			*buf++ = *(u64 *)(base +
349 					  vmxnet3_tq_dev_stats[i].offset);
350 
351 		base = (u8 *)&adapter->tx_queue[j].stats;
352 		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
353 			*buf++ = *(u64 *)(base +
354 					  vmxnet3_tq_driver_stats[i].offset);
355 	}
356 
	for (j = 0; j < adapter->num_rx_queues; j++) {
		base = (u8 *)&adapter->rqd_start[j].stats;
		*buf++ = (u64)j;
360 		for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
361 			*buf++ = *(u64 *)(base +
362 					  vmxnet3_rq_dev_stats[i].offset);
363 
364 		base = (u8 *)&adapter->rx_queue[j].stats;
365 		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
366 			*buf++ = *(u64 *)(base +
367 					  vmxnet3_rq_driver_stats[i].offset);
368 	}
369 
370 	base = (u8 *)adapter;
371 	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
372 		*buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
373 }
374 
375 
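/*
 * Dump the per-queue ring state for "ethtool -d". These are not device
 * registers but the driver's view of ring indices and generation bits.
 */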
376 static void
377 vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
378 {
379 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
380 	u32 *buf = p;
381 	int i = 0, j = 0;
382 
383 	memset(p, 0, vmxnet3_get_regs_len(netdev));
384 
385 	regs->version = 1;
386 
387 	/* Update vmxnet3_get_regs_len if we want to dump more registers */
388 
389 	/* make each ring use multiple of 16 bytes */
390 	for (i = 0; i < adapter->num_tx_queues; i++) {
391 		buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
392 		buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
393 		buf[j++] = adapter->tx_queue[i].tx_ring.gen;
394 		buf[j++] = 0;
395 
396 		buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
397 		buf[j++] = adapter->tx_queue[i].comp_ring.gen;
398 		buf[j++] = adapter->tx_queue[i].stopped;
399 		buf[j++] = 0;
400 	}
401 
402 	for (i = 0; i < adapter->num_rx_queues; i++) {
403 		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
404 		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
405 		buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
406 		buf[j++] = 0;
407 
408 		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
409 		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
410 		buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
411 		buf[j++] = 0;
412 
413 		buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
414 		buf[j++] = adapter->rx_queue[i].comp_ring.gen;
415 		buf[j++] = 0;
416 		buf[j++] = 0;
417 	}
418 
419 }
420 
421 
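/* Report the supported and currently enabled Wake-on-LAN modes */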
422 static void
423 vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
424 {
425 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
426 
427 	wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
428 	wol->wolopts = adapter->wol;
429 }
430 
431 
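/*
 * Set the Wake-on-LAN mode ("ethtool -s ethX wol ..."); modes the device
 * cannot wake on are rejected.
 */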
432 static int
433 vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
434 {
435 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
436 
437 	if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
438 			    WAKE_MAGICSECURE)) {
439 		return -EOPNOTSUPP;
440 	}
441 
442 	adapter->wol = wol->wolopts;
443 
444 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
445 
446 	return 0;
447 }
448 
449 
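/*
 * Report link settings for "ethtool ethX". The virtual NIC has no real
 * PHY, so a fixed TP port with an internal transceiver is reported and
 * any link is treated as full duplex.
 */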
450 static int
451 vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
452 {
453 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
454 
455 	ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
456 			  SUPPORTED_TP;
457 	ecmd->advertising = ADVERTISED_TP;
458 	ecmd->port = PORT_TP;
459 	ecmd->transceiver = XCVR_INTERNAL;
460 
461 	if (adapter->link_speed) {
462 		ecmd->speed = adapter->link_speed;
463 		ecmd->duplex = DUPLEX_FULL;
464 	} else {
465 		ecmd->speed = -1;
466 		ecmd->duplex = -1;
467 	}
468 	return 0;
469 }
470 
471 
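/*
 * Report current and maximum ring sizes ("ethtool -g"). The pending
 * values are aggregated over all queues.
 */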
472 static void
473 vmxnet3_get_ringparam(struct net_device *netdev,
474 		      struct ethtool_ringparam *param)
475 {
476 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
477 
478 	param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
479 	param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
480 	param->rx_mini_max_pending = 0;
481 	param->rx_jumbo_max_pending = 0;
482 
483 	param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
484 			    adapter->num_rx_queues;
485 	param->tx_pending = adapter->tx_queue[0].tx_ring.size *
486 			    adapter->num_tx_queues;
487 	param->rx_mini_pending = 0;
488 	param->rx_jumbo_pending = 0;
489 }
490 
491 
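/*
 * Resize the rings ("ethtool -G"). The requested sizes are rounded up to
 * the required alignment; if the device is running, its queues are
 * destroyed and recreated with the new sizes.
 */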
492 static int
493 vmxnet3_set_ringparam(struct net_device *netdev,
494 		      struct ethtool_ringparam *param)
495 {
496 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
497 	u32 new_tx_ring_size, new_rx_ring_size;
498 	u32 sz;
499 	int err = 0;
500 
501 	if (param->tx_pending == 0 || param->tx_pending >
502 						VMXNET3_TX_RING_MAX_SIZE)
503 		return -EINVAL;
504 
505 	if (param->rx_pending == 0 || param->rx_pending >
506 						VMXNET3_RX_RING_MAX_SIZE)
507 		return -EINVAL;
508 
509 
510 	/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
511 	new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
512 							~VMXNET3_RING_SIZE_MASK;
513 	new_tx_ring_size = min_t(u32, new_tx_ring_size,
514 				 VMXNET3_TX_RING_MAX_SIZE);
515 	if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
516 						VMXNET3_RING_SIZE_ALIGN) != 0)
517 		return -EINVAL;
518 
519 	/* ring0 has to be a multiple of
520 	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
521 	 */
522 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
523 	new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
524 	new_rx_ring_size = min_t(u32, new_rx_ring_size,
525 				 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
526 	if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
527 							   sz) != 0)
528 		return -EINVAL;
529 
530 	if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
531 	    new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
532 		return 0;
533 	}
534 
535 	/*
536 	 * Reset_work may be in the middle of resetting the device, wait for its
537 	 * completion.
538 	 */
539 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
540 		msleep(1);
541 
542 	if (netif_running(netdev)) {
543 		vmxnet3_quiesce_dev(adapter);
544 		vmxnet3_reset_dev(adapter);
545 
546 		/* recreate the rx queue and the tx queue based on the
547 		 * new sizes */
548 		vmxnet3_tq_destroy_all(adapter);
549 		vmxnet3_rq_destroy_all(adapter);
550 
551 		err = vmxnet3_create_queues(adapter, new_tx_ring_size,
552 			new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
553 
554 		if (err) {
555 			/* failed, most likely because of OOM, try default
556 			 * size */
557 			printk(KERN_ERR "%s: failed to apply new sizes, try the"
558 				" default ones\n", netdev->name);
559 			err = vmxnet3_create_queues(adapter,
560 						    VMXNET3_DEF_TX_RING_SIZE,
561 						    VMXNET3_DEF_RX_RING_SIZE,
562 						    VMXNET3_DEF_RX_RING_SIZE);
563 			if (err) {
564 				printk(KERN_ERR "%s: failed to create queues "
565 					"with default sizes. Closing it\n",
566 					netdev->name);
567 				goto out;
568 			}
569 		}
570 
571 		err = vmxnet3_activate_dev(adapter);
572 		if (err)
573 			printk(KERN_ERR "%s: failed to re-activate, error %d."
574 				" Closing it\n", netdev->name, err);
575 	}
576 
577 out:
578 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
579 	if (err)
580 		vmxnet3_force_close(adapter);
581 
582 	return err;
583 }
584 
585 
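/* "ethtool -n": only the rx ring count query is supported */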
586 static int
587 vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
588 		  void *rules)
589 {
590 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
591 	switch (info->cmd) {
592 	case ETHTOOL_GRXRINGS:
593 		info->data = adapter->num_rx_queues;
594 		return 0;
595 	}
596 	return -EOPNOTSUPP;
597 }
598 
599 #ifdef VMXNET3_RSS
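/* Return the RSS indirection table ("ethtool -x") */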
600 static int
601 vmxnet3_get_rss_indir(struct net_device *netdev,
602 		      struct ethtool_rxfh_indir *p)
603 {
604 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
605 	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
606 	unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
607 
608 	p->size = rssConf->indTableSize;
609 	while (n--)
610 		p->ring_index[n] = rssConf->indTable[n];
	return 0;
}
614 
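/*
 * Replace the RSS indirection table ("ethtool -X") and tell the device
 * to re-read it.
 */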
615 static int
616 vmxnet3_set_rss_indir(struct net_device *netdev,
617 		      const struct ethtool_rxfh_indir *p)
618 {
619 	unsigned int i;
620 	unsigned long flags;
621 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
622 	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
623 
624 	if (p->size != rssConf->indTableSize)
625 		return -EINVAL;
626 	for (i = 0; i < rssConf->indTableSize; i++) {
627 		/*
628 		 * Return with error code if any of the queue indices
629 		 * is out of range
630 		 */
		if (p->ring_index[i] >= adapter->num_rx_queues)
			return -EINVAL;
634 	}
635 
636 	for (i = 0; i < rssConf->indTableSize; i++)
637 		rssConf->indTable[i] = p->ring_index[i];
638 
639 	spin_lock_irqsave(&adapter->cmd_lock, flags);
640 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
641 			       VMXNET3_CMD_UPDATE_RSSIDT);
642 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
643 
	return 0;
}
647 #endif
648 
static const struct ethtool_ops vmxnet3_ethtool_ops = {
650 	.get_settings      = vmxnet3_get_settings,
651 	.get_drvinfo       = vmxnet3_get_drvinfo,
652 	.get_regs_len      = vmxnet3_get_regs_len,
653 	.get_regs          = vmxnet3_get_regs,
654 	.get_wol           = vmxnet3_get_wol,
655 	.set_wol           = vmxnet3_set_wol,
656 	.get_link          = ethtool_op_get_link,
657 	.get_rx_csum       = vmxnet3_get_rx_csum,
658 	.set_rx_csum       = vmxnet3_set_rx_csum,
659 	.get_tx_csum       = ethtool_op_get_tx_csum,
660 	.set_tx_csum       = ethtool_op_set_tx_hw_csum,
661 	.get_sg            = ethtool_op_get_sg,
662 	.set_sg            = ethtool_op_set_sg,
663 	.get_tso           = ethtool_op_get_tso,
664 	.set_tso           = ethtool_op_set_tso,
665 	.get_strings       = vmxnet3_get_strings,
666 	.get_flags	   = ethtool_op_get_flags,
667 	.set_flags	   = vmxnet3_set_flags,
668 	.get_sset_count	   = vmxnet3_get_sset_count,
669 	.get_ethtool_stats = vmxnet3_get_ethtool_stats,
670 	.get_ringparam     = vmxnet3_get_ringparam,
671 	.set_ringparam     = vmxnet3_set_ringparam,
672 	.get_rxnfc         = vmxnet3_get_rxnfc,
673 #ifdef VMXNET3_RSS
674 	.get_rxfh_indir    = vmxnet3_get_rss_indir,
675 	.set_rxfh_indir    = vmxnet3_set_rss_indir,
676 #endif
677 };
678 
679 void vmxnet3_set_ethtool_ops(struct net_device *netdev)
680 {
681 	SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
682 }
683