/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "cna.h"

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

#include "bna.h"

#include "bnad.h"

#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
#define BNAD_NUM_RXQ_COUNTERS 7
#define BNAD_NUM_TXQ_COUNTERS 5

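/*
 * Flat list of stat names exported via "ethtool -S", in the order in
 * which bnad_get_ethtool_stats() fills in the values: netdev stats
 * first, then driver stats, then the MAC/BPC/RAD/FC hardware stats.
 * Per-function (txf/rxf) and per-queue (cq/rxq/txq) names are
 * generated at run time in bnad_get_strings().
 */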
static const char *bnad_net_stats_strings[] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",
	"rx_length_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"tx_fifo_errors",

	"netif_queue_stop",
	"netif_queue_wakeup",
	"netif_queue_stopped",
	"tso4",
	"tso6",
	"tso_err",
	"tcpcsum_offload",
	"udpcsum_offload",
	"csum_help",
	"tx_skb_too_short",
	"tx_skb_stopping",
	"tx_skb_max_vectors",
	"tx_skb_mss_too_long",
	"tx_skb_tso_too_short",
	"tx_skb_tso_prepare",
	"tx_skb_non_tso_too_long",
	"tx_skb_tcp_hdr",
	"tx_skb_udp_hdr",
	"tx_skb_csum_err",
	"tx_skb_headlen_too_long",
	"tx_skb_headlen_zero",
	"tx_skb_frag_zero",
	"tx_skb_len_mismatch",
	"tx_skb_map_failed",
	"hw_stats_updates",
	"netif_rx_dropped",

	"link_toggle",
	"cee_toggle",

	"rxp_info_alloc_failed",
	"mbox_intr_disabled",
	"mbox_intr_enabled",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"rxbuf_alloc_failed",
	"rxbuf_map_failed",

	"mac_stats_clr_cnt",
	"mac_frame_64",
	"mac_frame_65_127",
	"mac_frame_128_255",
	"mac_frame_256_511",
	"mac_frame_512_1023",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_bytes",
	"mac_rx_packets",
	"mac_rx_fcs_error",
	"mac_rx_multicast",
	"mac_rx_broadcast",
	"mac_rx_control_frames",
	"mac_rx_pause",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_code_error",
	"mac_rx_carrier_sense_error",
	"mac_rx_undersize",
	"mac_rx_oversize",
	"mac_rx_fragments",
	"mac_rx_jabber",
	"mac_rx_drop",

	"mac_tx_bytes",
	"mac_tx_packets",
	"mac_tx_multicast",
	"mac_tx_broadcast",
	"mac_tx_pause",
	"mac_tx_deferral",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
	"mac_tx_multiple_collision",
	"mac_tx_late_collision",
	"mac_tx_excessive_collision",
	"mac_tx_total_collision",
	"mac_tx_pause_honored",
	"mac_tx_drop",
	"mac_tx_jabber",
	"mac_tx_fcs_error",
	"mac_tx_control_frame",
	"mac_tx_oversize",
	"mac_tx_undersize",
	"mac_tx_fragments",

	"bpc_tx_pause_0",
	"bpc_tx_pause_1",
	"bpc_tx_pause_2",
	"bpc_tx_pause_3",
	"bpc_tx_pause_4",
	"bpc_tx_pause_5",
	"bpc_tx_pause_6",
	"bpc_tx_pause_7",
	"bpc_tx_zero_pause_0",
	"bpc_tx_zero_pause_1",
	"bpc_tx_zero_pause_2",
	"bpc_tx_zero_pause_3",
	"bpc_tx_zero_pause_4",
	"bpc_tx_zero_pause_5",
	"bpc_tx_zero_pause_6",
	"bpc_tx_zero_pause_7",
	"bpc_tx_first_pause_0",
	"bpc_tx_first_pause_1",
	"bpc_tx_first_pause_2",
	"bpc_tx_first_pause_3",
	"bpc_tx_first_pause_4",
	"bpc_tx_first_pause_5",
	"bpc_tx_first_pause_6",
	"bpc_tx_first_pause_7",

	"bpc_rx_pause_0",
	"bpc_rx_pause_1",
	"bpc_rx_pause_2",
	"bpc_rx_pause_3",
	"bpc_rx_pause_4",
	"bpc_rx_pause_5",
	"bpc_rx_pause_6",
	"bpc_rx_pause_7",
	"bpc_rx_zero_pause_0",
	"bpc_rx_zero_pause_1",
	"bpc_rx_zero_pause_2",
	"bpc_rx_zero_pause_3",
	"bpc_rx_zero_pause_4",
	"bpc_rx_zero_pause_5",
	"bpc_rx_zero_pause_6",
	"bpc_rx_zero_pause_7",
	"bpc_rx_first_pause_0",
	"bpc_rx_first_pause_1",
	"bpc_rx_first_pause_2",
	"bpc_rx_first_pause_3",
	"bpc_rx_first_pause_4",
	"bpc_rx_first_pause_5",
	"bpc_rx_first_pause_6",
	"bpc_rx_first_pause_7",

	"rad_rx_frames",
	"rad_rx_octets",
	"rad_rx_vlan_frames",
	"rad_rx_ucast",
	"rad_rx_ucast_octets",
	"rad_rx_ucast_vlan",
	"rad_rx_mcast",
	"rad_rx_mcast_octets",
	"rad_rx_mcast_vlan",
	"rad_rx_bcast",
	"rad_rx_bcast_octets",
	"rad_rx_bcast_vlan",
	"rad_rx_drops",

	"rlb_rad_rx_frames",
	"rlb_rad_rx_octets",
	"rlb_rad_rx_vlan_frames",
	"rlb_rad_rx_ucast",
	"rlb_rad_rx_ucast_octets",
	"rlb_rad_rx_ucast_vlan",
	"rlb_rad_rx_mcast",
	"rlb_rad_rx_mcast_octets",
	"rlb_rad_rx_mcast_vlan",
	"rlb_rad_rx_bcast",
	"rlb_rad_rx_bcast_octets",
	"rlb_rad_rx_bcast_vlan",
	"rlb_rad_rx_drops",

	"fc_rx_ucast_octets",
	"fc_rx_ucast",
	"fc_rx_ucast_vlan",
	"fc_rx_mcast_octets",
	"fc_rx_mcast",
	"fc_rx_mcast_vlan",
	"fc_rx_bcast_octets",
	"fc_rx_bcast",
	"fc_rx_bcast_vlan",

	"fc_tx_ucast_octets",
	"fc_tx_ucast",
	"fc_tx_ucast_vlan",
	"fc_tx_mcast_octets",
	"fc_tx_mcast",
	"fc_tx_mcast_vlan",
	"fc_tx_bcast_octets",
	"fc_tx_bcast",
	"fc_tx_bcast_vlan",
	"fc_tx_parity_errors",
	"fc_tx_timeout",
	"fc_tx_fid_parity_errors",
};

#define BNAD_ETHTOOL_STATS_NUM	ARRAY_SIZE(bnad_net_stats_strings)

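/*
 * The BR-series adapters run at a fixed 10G full duplex over fibre, so
 * the link parameters reported here are constants; only the speed and
 * duplex fields change with carrier state.
 */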
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported |= SUPPORTED_FIBRE;
	cmd->advertising |= ADVERTISED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(cmd, SPEED_10000);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	return 0;
}

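/*
 * Nothing is programmable here: the only accepted "setting" is the
 * fixed 10G full-duplex mode the hardware already runs at; everything
 * else is rejected with -EOPNOTSUPP.
 */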
static int
bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	/* Only the fixed 10G full-duplex mode is supported */
	if (cmd->autoneg == AUTONEG_ENABLE)
		return -EOPNOTSUPP;

	if (ethtool_cmd_speed(cmd) == SPEED_10000 &&
	    cmd->duplex == DUPLEX_FULL)
		return 0;

	return -EOPNOTSUPP;
}

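/*
 * The firmware version is read from the IOC attributes under bna_lock;
 * if the attribute buffer cannot be allocated, fw_version is simply
 * left empty.
 */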
static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bfa_ioc_attr *ioc_attr;
	unsigned long flags;

	strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));

	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
			sizeof(drvinfo->fw_version));
		kfree(ioc_attr);
	}

	strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
		sizeof(drvinfo->bus_info));
}

static void
bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
{
	wolinfo->supported = 0;
	wolinfo->wolopts = 0;
}

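/*
 * Coalescing timeouts are kept internally in units of
 * BFI_COALESCING_TIMER_UNIT microseconds, so they are scaled back to
 * plain microseconds before being handed to ethtool.
 */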
static int
bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	/* bna_lock is required to access bnad->cfg_flags */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	coalesce->use_adaptive_rx_coalesce =
		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;

	return 0;
}

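/*
 * Enabling adaptive RX coalescing turns on the driver's dynamic
 * interrupt moderation (DIM) timer; disabling it stops the timer and
 * reprograms the static RX timeout. Note that bna_lock must be
 * dropped around del_timer_sync() to avoid deadlocking against a
 * timer callback that takes the same lock.
 *
 * From userspace this maps onto, for example (the interface name is
 * just a placeholder):
 *	ethtool -C eth0 adaptive-rx on
 *	ethtool -C eth0 adaptive-rx off rx-usecs 60 tx-usecs 60
 */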
static int
bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;
	int to_del = 0;

	if (coalesce->rx_coalesce_usecs == 0 ||
	    coalesce->rx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	if (coalesce->tx_coalesce_usecs == 0 ||
	    coalesce->tx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * There is no need to store rx_coalesce_usecs here: whenever
	 * DIM is disabled, the value can be fetched from the stack
	 * again.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (coalesce->use_adaptive_rx_coalesce) {
		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
			bnad_dim_timer_start(bnad);
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
			/* The flag was cleared above, so only the timer
			 * state decides whether it must be stopped.
			 */
			if (test_bit(BNAD_RF_DIM_TIMER_RUNNING,
				     &bnad->run_flags)) {
				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
					  &bnad->run_flags);
				to_del = 1;
			}
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			if (to_del)
				del_timer_sync(&bnad->dim_timer);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_rx_coalescing_timeo_set(bnad);
		}
	}
	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;
		bnad_tx_coalescing_timeo_set(bnad);
	}

	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;

		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
			bnad_rx_coalescing_timeo_set(bnad);
	}

	/* Add Tx Inter-pkt DMA count?  */

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return 0;
}

static void
bnad_get_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
	ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;

	ringparam->rx_pending = bnad->rxq_depth;
	ringparam->tx_pending = bnad->txq_depth;
}

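/*
 * Changing a ring size tears the affected Tx/Rx objects down and
 * re-creates them with the new depth, so a brief traffic disruption
 * is expected. Depths must be powers of two within the
 * BNAD_MIN_Q_DEPTH..BNAD_MAX_*Q_DEPTH range. A typical invocation
 * might be (interface name and sizes are placeholders):
 *	ethtool -G eth0 rx 2048 tx 2048
 */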
static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
	    !is_power_of_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
	    !is_power_of_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_destroy_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}

		if (!err && bnad->rx_info[0].rx) {
			/* restore rx configuration */
			bnad_restore_vlans(bnad, 0);
			bnad_enable_default_bcast(bnad);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
					     BNAD_CF_PROMISC);
			bnad_set_rx_mode(netdev);
		}
	}
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_destroy_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}

static void
bnad_get_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	pauseparam->autoneg = 0;
	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
}

static int
bnad_set_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	if (pauseparam->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
		pause_config.rx_pause = pauseparam->rx_pause;
		pause_config.tx_pause = pauseparam->tx_pause;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_enet_pause_config(&bnad->bna.enet, &pause_config);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
	mutex_unlock(&bnad->conf_mutex);
	return 0;
}

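/*
 * Build the ETH_SS_STATS string table. The ordering here is a strict
 * contract with bnad_get_ethtool_stats() and
 * bnad_get_stats_count_locked(): fixed names first, then txf/rxf
 * names for every function enabled in the rid masks, then per-CQ,
 * per-RXQ and per-TXQ names for the queues that currently exist.
 */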
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, q_num;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
			BUG_ON(strlen(bnad_net_stats_strings[i]) >=
			       ETH_GSTRING_LEN);
			memcpy(string, bnad_net_stats_strings[i],
			       ETH_GSTRING_LEN);
			string += ETH_GSTRING_LEN;
		}
		bmap = bna_tx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "txf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_errors", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_mac_sa", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		bmap = bna_rx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "rxf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_frame_drops", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "cq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_hw_producer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_intr", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_schedule", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_keep_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_complete", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "rxq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_packets_with_error",
								q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_allocbuf_failed", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_mapbuf_failed", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					sprintf(string, "rxq%d_packets", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_bytes", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string,
					"rxq%d_packets_with_error", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_allocbuf_failed",
								q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_mapbuf_failed",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_producer_index",
								q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_consumer_index",
								q_num);
					string += ETH_GSTRING_LEN;
					q_num++;
				}
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			for (j = 0; j < bnad->num_txq_per_tx; j++) {
				sprintf(string, "txq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_hw_consumer_index",
									q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		break;

	default:
		break;
	}

	mutex_unlock(&bnad->conf_mutex);
}

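/*
 * Compute how many u64 values the stats dump will hold. The
 * BNAD_NUM_*_COUNTERS defines above must match the number of strings
 * emitted per object in bnad_get_strings(); a CQ whose second rcb is
 * backed by an rxq contributes another BNAD_NUM_RXQ_COUNTERS.
 */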
static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
	u32 bmap;

	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			txf_active_num++;
		bmap >>= 1;
	}
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			rxf_active_num++;
		bmap >>= 1;
	}
	count = BNAD_ETHTOOL_STATS_NUM +
		txf_active_num * BNAD_NUM_TXF_COUNTERS +
		rxf_active_num * BNAD_NUM_RXF_COUNTERS;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
				count += BNAD_NUM_RXQ_COUNTERS;
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
	}
	return count;
}

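/*
 * Fill the per-queue portion of the stats buffer, mirroring the
 * string layout: all CQ counters first, then RXQ counters (rcb[0]
 * followed by rcb[1] where present), then TXQ counters. The CQ
 * consumer index is not tracked by the driver and is reported as 0.
 */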
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				buf[bi++] = 0; /* ccb->consumer_index */
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);

				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_intr_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_poll_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_schedule;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_keep_poll;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_complete;
			}
	}
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->rxq->rxbuf_map_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
								ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->rxq->rxbuf_map_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
				bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}

	return bi;
}

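/*
 * Dump all counters in one pass. The count is re-validated under
 * conf_mutex first, since queue setup may have changed between the
 * caller's ETH_SS_STATS query and this call.
 */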
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi = 0;
	unsigned long flags;
	struct rtnl_link_stats64 net_stats64;
	u64 *stats64;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * bna_lock is used to synchronize reads of bna_stats with the
	 * writers, which update it under the same lock.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);

	memset(&net_stats64, 0, sizeof(net_stats64));
	bnad_netdev_qstats_fill(bnad, &net_stats64);
	bnad_netdev_hwstats_fill(bnad, &net_stats64);

	buf[bi++] = net_stats64.rx_packets;
	buf[bi++] = net_stats64.tx_packets;
	buf[bi++] = net_stats64.rx_bytes;
	buf[bi++] = net_stats64.tx_bytes;
	buf[bi++] = net_stats64.rx_errors;
	buf[bi++] = net_stats64.tx_errors;
	buf[bi++] = net_stats64.rx_dropped;
	buf[bi++] = net_stats64.tx_dropped;
	buf[bi++] = net_stats64.multicast;
	buf[bi++] = net_stats64.collisions;
	buf[bi++] = net_stats64.rx_length_errors;
	buf[bi++] = net_stats64.rx_crc_errors;
	buf[bi++] = net_stats64.rx_frame_errors;
	buf[bi++] = net_stats64.tx_fifo_errors;

	/* Get netif_queue_stopped from stack */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
		sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill rxf stats into ethtool buffers */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}

static int
bnad_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return bnad_get_stats_count_locked(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

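/*
 * Map a byte offset in the flash address space onto a partition type
 * and that partition's base offset. The flash attribute query is
 * asynchronous: it is fired under bna_lock and then waited for on a
 * completion, the usual pattern for the bfa_nw_flash_* calls in this
 * file. Returns 0 (an invalid partition) on any failure.
 */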
static u32
bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
				u32 *base_offset)
{
	struct bfa_flash_attr *flash_attr;
	struct bnad_iocmd_comp fcomp;
	u32 i, flash_part = 0, ret;
	unsigned long flags = 0;

	flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
	if (!flash_attr)
		return 0;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
				bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		kfree(flash_attr);
		return 0;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;

	/* Check for the flash type & base offset value */
	if (ret == BFA_STATUS_OK) {
		for (i = 0; i < flash_attr->npart; i++) {
			if (offset >= flash_attr->part[i].part_off &&
			    offset < (flash_attr->part[i].part_off +
				      flash_attr->part[i].part_size)) {
				flash_part = flash_attr->part[i].part_type;
				*base_offset = flash_attr->part[i].part_off;
				break;
			}
		}
	}
	kfree(flash_attr);
	return flash_part;
}

static int
bnad_get_eeprom_len(struct net_device *netdev)
{
	return BFA_TOTAL_FLASH_SIZE;
}

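/*
 * ethtool's "EEPROM" accessors are backed by the adapter flash here.
 * The magic word encodes the PCI vendor ID in the low 16 bits and the
 * device ID in the high 16 bits, and the requested offset is
 * translated to an offset within its flash partition.
 */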
static int
bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
		u8 *bytes)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	u32 flash_part = 0, base_offset = 0;
	unsigned long flags = 0;
	int ret = 0;

	/* Fill the magic value */
	eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);

	/* Query the flash partition based on the offset */
	flash_part = bnad_get_flash_partition_by_offset(bnad,
				eeprom->offset, &base_offset);
	if (flash_part == 0)
		return -EFAULT;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
				bnad->id, bytes, eeprom->len,
				eeprom->offset - base_offset,
				bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto done;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;
done:
	return ret;
}

static int
bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
		u8 *bytes)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	u32 flash_part = 0, base_offset = 0;
	unsigned long flags = 0;
	int ret = 0;

	/* Check if the flash update request is valid */
	if (eeprom->magic != (bnad->pcidev->vendor |
			     (bnad->pcidev->device << 16)))
		return -EINVAL;

	/* Query the flash partition based on the offset */
	flash_part = bnad_get_flash_partition_by_offset(bnad,
				eeprom->offset, &base_offset);
	if (flash_part == 0)
		return -EFAULT;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
				bnad->id, bytes, eeprom->len,
				eeprom->offset - base_offset,
				bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto done;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;
done:
	return ret;
}

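/*
 * "ethtool -f" entry point: loads the named firmware image via
 * request_firmware() and writes it into the BFA_FLASH_PART_FWIMG
 * partition. For example (interface and firmware file names are
 * placeholders):
 *	ethtool -f eth0 ctfw-3.2.5.1.bin
 */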
static int
bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	const struct firmware *fw;
	int ret = 0;

	ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
	if (ret) {
		netdev_err(netdev, "can't load firmware %s\n", eflash->data);
		goto out;
	}

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irq(&bnad->bna_lock);
	ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
				bnad->id, (u8 *)fw->data, fw->size, 0,
				bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		netdev_warn(netdev, "flash update failed with err=%d\n", ret);
		ret = -EIO;
		spin_unlock_irq(&bnad->bna_lock);
		goto out;
	}

	spin_unlock_irq(&bnad->bna_lock);
	wait_for_completion(&fcomp.comp);
	if (fcomp.comp_status != BFA_STATUS_OK) {
		ret = -EIO;
		netdev_warn(netdev,
			    "firmware image update failed with err=%d\n",
			    fcomp.comp_status);
	}
out:
	release_firmware(fw);
	return ret;
}

static const struct ethtool_ops bnad_ethtool_ops = {
	.get_settings = bnad_get_settings,
	.set_settings = bnad_set_settings,
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count,
	.get_eeprom_len = bnad_get_eeprom_len,
	.get_eeprom = bnad_get_eeprom,
	.set_eeprom = bnad_set_eeprom,
	.flash_device = bnad_flash_device,
	.get_ts_info = ethtool_op_get_ts_info,
};

void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &bnad_ethtool_ops;
}