1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 
20 #include "cna.h"
21 
22 #include <linux/netdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/ethtool.h>
25 #include <linux/rtnetlink.h>
26 
27 #include "bna.h"
28 
29 #include "bnad.h"
30 
/* Number of stat names emitted per tx-function, rx-function, CQ, RXQ and
 * TXQ by bnad_get_strings(); these must stay in sync with
 * bnad_get_stats_count_locked() and bnad_per_q_stats_fill().
 */
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
#define BNAD_NUM_RXQ_COUNTERS 6
#define BNAD_NUM_TXQ_COUNTERS 5

/* Number of entries in bnad_net_stats_strings[]: netdev stats, then
 * driver stats, then hardware stats up to (excluding) the per-function
 * rxf/txf arrays — all counted in u64 units.
 */
#define BNAD_ETHTOOL_STATS_NUM						\
	(sizeof(struct rtnl_link_stats64) / sizeof(u64) +	\
	sizeof(struct bnad_drv_stats) / sizeof(u64) +		\
	offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
41 
/*
 * Names for the fixed portion of the ethtool statistics, in the exact
 * order the values are written by bnad_get_ethtool_stats():
 * struct rtnl_link_stats64 fields first, then struct bnad_drv_stats,
 * then the hardware stats up to (excluding) the per-function rxf/txf
 * arrays.
 *
 * NOTE(review): "mac_tx_muliple_collision" below is a typo, but these
 * names are exposed to userspace tools and must not be changed.
 */
static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
	/* struct rtnl_link_stats64 fields */
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",

	"rx_length_errors",
	"rx_over_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"rx_fifo_errors",
	"rx_missed_errors",

	"tx_aborted_errors",
	"tx_carrier_errors",
	"tx_fifo_errors",
	"tx_heartbeat_errors",
	"tx_window_errors",

	"rx_compressed",
	"tx_compressed",

	/* struct bnad_drv_stats fields */
	"netif_queue_stop",
	"netif_queue_wakeup",
	"netif_queue_stopped",
	"tso4",
	"tso6",
	"tso_err",
	"tcpcsum_offload",
	"udpcsum_offload",
	"csum_help",
	"tx_skb_too_short",
	"tx_skb_stopping",
	"tx_skb_max_vectors",
	"tx_skb_mss_too_long",
	"tx_skb_tso_too_short",
	"tx_skb_tso_prepare",
	"tx_skb_non_tso_too_long",
	"tx_skb_tcp_hdr",
	"tx_skb_udp_hdr",
	"tx_skb_csum_err",
	"tx_skb_headlen_too_long",
	"tx_skb_headlen_zero",
	"tx_skb_frag_zero",
	"tx_skb_len_mismatch",
	"hw_stats_updates",
	"netif_rx_dropped",

	"link_toggle",
	"cee_toggle",

	"rxp_info_alloc_failed",
	"mbox_intr_disabled",
	"mbox_intr_enabled",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"rxbuf_alloc_failed",

	/* hardware (bfi_enet_stats) counters up to the rxf/txf arrays */
	"mac_stats_clr_cnt",
	"mac_frame_64",
	"mac_frame_65_127",
	"mac_frame_128_255",
	"mac_frame_256_511",
	"mac_frame_512_1023",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_bytes",
	"mac_rx_packets",
	"mac_rx_fcs_error",
	"mac_rx_multicast",
	"mac_rx_broadcast",
	"mac_rx_control_frames",
	"mac_rx_pause",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_code_error",
	"mac_rx_carrier_sense_error",
	"mac_rx_undersize",
	"mac_rx_oversize",
	"mac_rx_fragments",
	"mac_rx_jabber",
	"mac_rx_drop",

	"mac_tx_bytes",
	"mac_tx_packets",
	"mac_tx_multicast",
	"mac_tx_broadcast",
	"mac_tx_pause",
	"mac_tx_deferral",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
	"mac_tx_muliple_collision",
	"mac_tx_late_collision",
	"mac_tx_excessive_collision",
	"mac_tx_total_collision",
	"mac_tx_pause_honored",
	"mac_tx_drop",
	"mac_tx_jabber",
	"mac_tx_fcs_error",
	"mac_tx_control_frame",
	"mac_tx_oversize",
	"mac_tx_undersize",
	"mac_tx_fragments",

	"bpc_tx_pause_0",
	"bpc_tx_pause_1",
	"bpc_tx_pause_2",
	"bpc_tx_pause_3",
	"bpc_tx_pause_4",
	"bpc_tx_pause_5",
	"bpc_tx_pause_6",
	"bpc_tx_pause_7",
	"bpc_tx_zero_pause_0",
	"bpc_tx_zero_pause_1",
	"bpc_tx_zero_pause_2",
	"bpc_tx_zero_pause_3",
	"bpc_tx_zero_pause_4",
	"bpc_tx_zero_pause_5",
	"bpc_tx_zero_pause_6",
	"bpc_tx_zero_pause_7",
	"bpc_tx_first_pause_0",
	"bpc_tx_first_pause_1",
	"bpc_tx_first_pause_2",
	"bpc_tx_first_pause_3",
	"bpc_tx_first_pause_4",
	"bpc_tx_first_pause_5",
	"bpc_tx_first_pause_6",
	"bpc_tx_first_pause_7",

	"bpc_rx_pause_0",
	"bpc_rx_pause_1",
	"bpc_rx_pause_2",
	"bpc_rx_pause_3",
	"bpc_rx_pause_4",
	"bpc_rx_pause_5",
	"bpc_rx_pause_6",
	"bpc_rx_pause_7",
	"bpc_rx_zero_pause_0",
	"bpc_rx_zero_pause_1",
	"bpc_rx_zero_pause_2",
	"bpc_rx_zero_pause_3",
	"bpc_rx_zero_pause_4",
	"bpc_rx_zero_pause_5",
	"bpc_rx_zero_pause_6",
	"bpc_rx_zero_pause_7",
	"bpc_rx_first_pause_0",
	"bpc_rx_first_pause_1",
	"bpc_rx_first_pause_2",
	"bpc_rx_first_pause_3",
	"bpc_rx_first_pause_4",
	"bpc_rx_first_pause_5",
	"bpc_rx_first_pause_6",
	"bpc_rx_first_pause_7",

	"rad_rx_frames",
	"rad_rx_octets",
	"rad_rx_vlan_frames",
	"rad_rx_ucast",
	"rad_rx_ucast_octets",
	"rad_rx_ucast_vlan",
	"rad_rx_mcast",
	"rad_rx_mcast_octets",
	"rad_rx_mcast_vlan",
	"rad_rx_bcast",
	"rad_rx_bcast_octets",
	"rad_rx_bcast_vlan",
	"rad_rx_drops",

	"rlb_rad_rx_frames",
	"rlb_rad_rx_octets",
	"rlb_rad_rx_vlan_frames",
	"rlb_rad_rx_ucast",
	"rlb_rad_rx_ucast_octets",
	"rlb_rad_rx_ucast_vlan",
	"rlb_rad_rx_mcast",
	"rlb_rad_rx_mcast_octets",
	"rlb_rad_rx_mcast_vlan",
	"rlb_rad_rx_bcast",
	"rlb_rad_rx_bcast_octets",
	"rlb_rad_rx_bcast_vlan",
	"rlb_rad_rx_drops",

	"fc_rx_ucast_octets",
	"fc_rx_ucast",
	"fc_rx_ucast_vlan",
	"fc_rx_mcast_octets",
	"fc_rx_mcast",
	"fc_rx_mcast_vlan",
	"fc_rx_bcast_octets",
	"fc_rx_bcast",
	"fc_rx_bcast_vlan",

	"fc_tx_ucast_octets",
	"fc_tx_ucast",
	"fc_tx_ucast_vlan",
	"fc_tx_mcast_octets",
	"fc_tx_mcast",
	"fc_tx_mcast_vlan",
	"fc_tx_bcast_octets",
	"fc_tx_bcast",
	"fc_tx_bcast_vlan",
	"fc_tx_parity_errors",
	"fc_tx_timeout",
	"fc_tx_fid_parity_errors",
};
254 
255 static int
256 bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
257 {
258 	cmd->supported = SUPPORTED_10000baseT_Full;
259 	cmd->advertising = ADVERTISED_10000baseT_Full;
260 	cmd->autoneg = AUTONEG_DISABLE;
261 	cmd->supported |= SUPPORTED_FIBRE;
262 	cmd->advertising |= ADVERTISED_FIBRE;
263 	cmd->port = PORT_FIBRE;
264 	cmd->phy_address = 0;
265 
266 	if (netif_carrier_ok(netdev)) {
267 		ethtool_cmd_speed_set(cmd, SPEED_10000);
268 		cmd->duplex = DUPLEX_FULL;
269 	} else {
270 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
271 		cmd->duplex = DUPLEX_UNKNOWN;
272 	}
273 	cmd->transceiver = XCVR_EXTERNAL;
274 	cmd->maxtxpkt = 0;
275 	cmd->maxrxpkt = 0;
276 
277 	return 0;
278 }
279 
280 static int
281 bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
282 {
283 	/* 10G full duplex setting supported only */
284 	if (cmd->autoneg == AUTONEG_ENABLE)
285 		return -EOPNOTSUPP; else {
286 		if ((ethtool_cmd_speed(cmd) == SPEED_10000)
287 		    && (cmd->duplex == DUPLEX_FULL))
288 			return 0;
289 	}
290 
291 	return -EOPNOTSUPP;
292 }
293 
294 static void
295 bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
296 {
297 	struct bnad *bnad = netdev_priv(netdev);
298 	struct bfa_ioc_attr *ioc_attr;
299 	unsigned long flags;
300 
301 	strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
302 	strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
303 
304 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
305 	if (ioc_attr) {
306 		spin_lock_irqsave(&bnad->bna_lock, flags);
307 		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
308 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
309 
310 		strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
311 			sizeof(drvinfo->fw_version));
312 		kfree(ioc_attr);
313 	}
314 
315 	strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
316 		sizeof(drvinfo->bus_info));
317 }
318 
319 static void
320 bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
321 {
322 	wolinfo->supported = 0;
323 	wolinfo->wolopts = 0;
324 }
325 
326 static int
327 bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
328 {
329 	struct bnad *bnad = netdev_priv(netdev);
330 	unsigned long flags;
331 
332 	/* Lock rqd. to access bnad->bna_lock */
333 	spin_lock_irqsave(&bnad->bna_lock, flags);
334 	coalesce->use_adaptive_rx_coalesce =
335 		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
336 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
337 
338 	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
339 					BFI_COALESCING_TIMER_UNIT;
340 	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
341 					BFI_COALESCING_TIMER_UNIT;
342 	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
343 
344 	return 0;
345 }
346 
347 static int
348 bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
349 {
350 	struct bnad *bnad = netdev_priv(netdev);
351 	unsigned long flags;
352 	int to_del = 0;
353 
354 	if (coalesce->rx_coalesce_usecs == 0 ||
355 	    coalesce->rx_coalesce_usecs >
356 	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
357 		return -EINVAL;
358 
359 	if (coalesce->tx_coalesce_usecs == 0 ||
360 	    coalesce->tx_coalesce_usecs >
361 	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
362 		return -EINVAL;
363 
364 	mutex_lock(&bnad->conf_mutex);
365 	/*
366 	 * Do not need to store rx_coalesce_usecs here
367 	 * Every time DIM is disabled, we can get it from the
368 	 * stack.
369 	 */
370 	spin_lock_irqsave(&bnad->bna_lock, flags);
371 	if (coalesce->use_adaptive_rx_coalesce) {
372 		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
373 			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
374 			bnad_dim_timer_start(bnad);
375 		}
376 	} else {
377 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
378 			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
379 			if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
380 			    test_bit(BNAD_RF_DIM_TIMER_RUNNING,
381 			    &bnad->run_flags)) {
382 				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
383 							&bnad->run_flags);
384 				to_del = 1;
385 			}
386 			spin_unlock_irqrestore(&bnad->bna_lock, flags);
387 			if (to_del)
388 				del_timer_sync(&bnad->dim_timer);
389 			spin_lock_irqsave(&bnad->bna_lock, flags);
390 			bnad_rx_coalescing_timeo_set(bnad);
391 		}
392 	}
393 	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
394 					BFI_COALESCING_TIMER_UNIT) {
395 		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
396 						BFI_COALESCING_TIMER_UNIT;
397 		bnad_tx_coalescing_timeo_set(bnad);
398 	}
399 
400 	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
401 					BFI_COALESCING_TIMER_UNIT) {
402 		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
403 						BFI_COALESCING_TIMER_UNIT;
404 
405 		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
406 			bnad_rx_coalescing_timeo_set(bnad);
407 
408 	}
409 
410 	/* Add Tx Inter-pkt DMA count?  */
411 
412 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
413 
414 	mutex_unlock(&bnad->conf_mutex);
415 	return 0;
416 }
417 
418 static void
419 bnad_get_ringparam(struct net_device *netdev,
420 		   struct ethtool_ringparam *ringparam)
421 {
422 	struct bnad *bnad = netdev_priv(netdev);
423 
424 	ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
425 	ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;
426 
427 	ringparam->rx_pending = bnad->rxq_depth;
428 	ringparam->tx_pending = bnad->txq_depth;
429 }
430 
431 static int
432 bnad_set_ringparam(struct net_device *netdev,
433 		   struct ethtool_ringparam *ringparam)
434 {
435 	int i, current_err, err = 0;
436 	struct bnad *bnad = netdev_priv(netdev);
437 	unsigned long flags;
438 
439 	mutex_lock(&bnad->conf_mutex);
440 	if (ringparam->rx_pending == bnad->rxq_depth &&
441 	    ringparam->tx_pending == bnad->txq_depth) {
442 		mutex_unlock(&bnad->conf_mutex);
443 		return 0;
444 	}
445 
446 	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
447 	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
448 	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
449 		mutex_unlock(&bnad->conf_mutex);
450 		return -EINVAL;
451 	}
452 	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
453 	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
454 	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
455 		mutex_unlock(&bnad->conf_mutex);
456 		return -EINVAL;
457 	}
458 
459 	if (ringparam->rx_pending != bnad->rxq_depth) {
460 		bnad->rxq_depth = ringparam->rx_pending;
461 		if (!netif_running(netdev)) {
462 			mutex_unlock(&bnad->conf_mutex);
463 			return 0;
464 		}
465 
466 		for (i = 0; i < bnad->num_rx; i++) {
467 			if (!bnad->rx_info[i].rx)
468 				continue;
469 			bnad_destroy_rx(bnad, i);
470 			current_err = bnad_setup_rx(bnad, i);
471 			if (current_err && !err)
472 				err = current_err;
473 		}
474 
475 		if (!err && bnad->rx_info[0].rx) {
476 			/* restore rx configuration */
477 			bnad_restore_vlans(bnad, 0);
478 			bnad_enable_default_bcast(bnad);
479 			spin_lock_irqsave(&bnad->bna_lock, flags);
480 			bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
481 			spin_unlock_irqrestore(&bnad->bna_lock, flags);
482 			bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
483 					     BNAD_CF_PROMISC);
484 			bnad_set_rx_mode(netdev);
485 		}
486 	}
487 	if (ringparam->tx_pending != bnad->txq_depth) {
488 		bnad->txq_depth = ringparam->tx_pending;
489 		if (!netif_running(netdev)) {
490 			mutex_unlock(&bnad->conf_mutex);
491 			return 0;
492 		}
493 
494 		for (i = 0; i < bnad->num_tx; i++) {
495 			if (!bnad->tx_info[i].tx)
496 				continue;
497 			bnad_destroy_tx(bnad, i);
498 			current_err = bnad_setup_tx(bnad, i);
499 			if (current_err && !err)
500 				err = current_err;
501 		}
502 	}
503 
504 	mutex_unlock(&bnad->conf_mutex);
505 	return err;
506 }
507 
static void
bnad_get_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	/* Pause autonegotiation is not supported; report the settings
	 * currently held in the enet pause configuration.
	 */
	pauseparam->autoneg = 0;
	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
}
518 
519 static int
520 bnad_set_pauseparam(struct net_device *netdev,
521 		    struct ethtool_pauseparam *pauseparam)
522 {
523 	struct bnad *bnad = netdev_priv(netdev);
524 	struct bna_pause_config pause_config;
525 	unsigned long flags;
526 
527 	if (pauseparam->autoneg == AUTONEG_ENABLE)
528 		return -EINVAL;
529 
530 	mutex_lock(&bnad->conf_mutex);
531 	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
532 	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
533 		pause_config.rx_pause = pauseparam->rx_pause;
534 		pause_config.tx_pause = pauseparam->tx_pause;
535 		spin_lock_irqsave(&bnad->bna_lock, flags);
536 		bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
537 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
538 	}
539 	mutex_unlock(&bnad->conf_mutex);
540 	return 0;
541 }
542 
543 static void
544 bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
545 {
546 	struct bnad *bnad = netdev_priv(netdev);
547 	int i, j, q_num;
548 	u32 bmap;
549 
550 	mutex_lock(&bnad->conf_mutex);
551 
552 	switch (stringset) {
553 	case ETH_SS_STATS:
554 		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
555 			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
556 				   ETH_GSTRING_LEN));
557 			memcpy(string, bnad_net_stats_strings[i],
558 			       ETH_GSTRING_LEN);
559 			string += ETH_GSTRING_LEN;
560 		}
561 		bmap = bna_tx_rid_mask(&bnad->bna);
562 		for (i = 0; bmap; i++) {
563 			if (bmap & 1) {
564 				sprintf(string, "txf%d_ucast_octets", i);
565 				string += ETH_GSTRING_LEN;
566 				sprintf(string, "txf%d_ucast", i);
567 				string += ETH_GSTRING_LEN;
568 				sprintf(string, "txf%d_ucast_vlan", i);
569 				string += ETH_GSTRING_LEN;
570 				sprintf(string, "txf%d_mcast_octets", i);
571 				string += ETH_GSTRING_LEN;
572 				sprintf(string, "txf%d_mcast", i);
573 				string += ETH_GSTRING_LEN;
574 				sprintf(string, "txf%d_mcast_vlan", i);
575 				string += ETH_GSTRING_LEN;
576 				sprintf(string, "txf%d_bcast_octets", i);
577 				string += ETH_GSTRING_LEN;
578 				sprintf(string, "txf%d_bcast", i);
579 				string += ETH_GSTRING_LEN;
580 				sprintf(string, "txf%d_bcast_vlan", i);
581 				string += ETH_GSTRING_LEN;
582 				sprintf(string, "txf%d_errors", i);
583 				string += ETH_GSTRING_LEN;
584 				sprintf(string, "txf%d_filter_vlan", i);
585 				string += ETH_GSTRING_LEN;
586 				sprintf(string, "txf%d_filter_mac_sa", i);
587 				string += ETH_GSTRING_LEN;
588 			}
589 			bmap >>= 1;
590 		}
591 
592 		bmap = bna_rx_rid_mask(&bnad->bna);
593 		for (i = 0; bmap; i++) {
594 			if (bmap & 1) {
595 				sprintf(string, "rxf%d_ucast_octets", i);
596 				string += ETH_GSTRING_LEN;
597 				sprintf(string, "rxf%d_ucast", i);
598 				string += ETH_GSTRING_LEN;
599 				sprintf(string, "rxf%d_ucast_vlan", i);
600 				string += ETH_GSTRING_LEN;
601 				sprintf(string, "rxf%d_mcast_octets", i);
602 				string += ETH_GSTRING_LEN;
603 				sprintf(string, "rxf%d_mcast", i);
604 				string += ETH_GSTRING_LEN;
605 				sprintf(string, "rxf%d_mcast_vlan", i);
606 				string += ETH_GSTRING_LEN;
607 				sprintf(string, "rxf%d_bcast_octets", i);
608 				string += ETH_GSTRING_LEN;
609 				sprintf(string, "rxf%d_bcast", i);
610 				string += ETH_GSTRING_LEN;
611 				sprintf(string, "rxf%d_bcast_vlan", i);
612 				string += ETH_GSTRING_LEN;
613 				sprintf(string, "rxf%d_frame_drops", i);
614 				string += ETH_GSTRING_LEN;
615 			}
616 			bmap >>= 1;
617 		}
618 
619 		q_num = 0;
620 		for (i = 0; i < bnad->num_rx; i++) {
621 			if (!bnad->rx_info[i].rx)
622 				continue;
623 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
624 				sprintf(string, "cq%d_producer_index", q_num);
625 				string += ETH_GSTRING_LEN;
626 				sprintf(string, "cq%d_consumer_index", q_num);
627 				string += ETH_GSTRING_LEN;
628 				sprintf(string, "cq%d_hw_producer_index",
629 					q_num);
630 				string += ETH_GSTRING_LEN;
631 				sprintf(string, "cq%d_intr", q_num);
632 				string += ETH_GSTRING_LEN;
633 				sprintf(string, "cq%d_poll", q_num);
634 				string += ETH_GSTRING_LEN;
635 				sprintf(string, "cq%d_schedule", q_num);
636 				string += ETH_GSTRING_LEN;
637 				sprintf(string, "cq%d_keep_poll", q_num);
638 				string += ETH_GSTRING_LEN;
639 				sprintf(string, "cq%d_complete", q_num);
640 				string += ETH_GSTRING_LEN;
641 				q_num++;
642 			}
643 		}
644 
645 		q_num = 0;
646 		for (i = 0; i < bnad->num_rx; i++) {
647 			if (!bnad->rx_info[i].rx)
648 				continue;
649 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
650 				sprintf(string, "rxq%d_packets", q_num);
651 				string += ETH_GSTRING_LEN;
652 				sprintf(string, "rxq%d_bytes", q_num);
653 				string += ETH_GSTRING_LEN;
654 				sprintf(string, "rxq%d_packets_with_error",
655 								q_num);
656 				string += ETH_GSTRING_LEN;
657 				sprintf(string, "rxq%d_allocbuf_failed", q_num);
658 				string += ETH_GSTRING_LEN;
659 				sprintf(string, "rxq%d_producer_index", q_num);
660 				string += ETH_GSTRING_LEN;
661 				sprintf(string, "rxq%d_consumer_index", q_num);
662 				string += ETH_GSTRING_LEN;
663 				q_num++;
664 				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
665 					bnad->rx_info[i].rx_ctrl[j].ccb->
666 					rcb[1] &&
667 					bnad->rx_info[i].rx_ctrl[j].ccb->
668 					rcb[1]->rxq) {
669 					sprintf(string, "rxq%d_packets", q_num);
670 					string += ETH_GSTRING_LEN;
671 					sprintf(string, "rxq%d_bytes", q_num);
672 					string += ETH_GSTRING_LEN;
673 					sprintf(string,
674 					"rxq%d_packets_with_error", q_num);
675 					string += ETH_GSTRING_LEN;
676 					sprintf(string, "rxq%d_allocbuf_failed",
677 								q_num);
678 					string += ETH_GSTRING_LEN;
679 					sprintf(string, "rxq%d_producer_index",
680 								q_num);
681 					string += ETH_GSTRING_LEN;
682 					sprintf(string, "rxq%d_consumer_index",
683 								q_num);
684 					string += ETH_GSTRING_LEN;
685 					q_num++;
686 				}
687 			}
688 		}
689 
690 		q_num = 0;
691 		for (i = 0; i < bnad->num_tx; i++) {
692 			if (!bnad->tx_info[i].tx)
693 				continue;
694 			for (j = 0; j < bnad->num_txq_per_tx; j++) {
695 				sprintf(string, "txq%d_packets", q_num);
696 				string += ETH_GSTRING_LEN;
697 				sprintf(string, "txq%d_bytes", q_num);
698 				string += ETH_GSTRING_LEN;
699 				sprintf(string, "txq%d_producer_index", q_num);
700 				string += ETH_GSTRING_LEN;
701 				sprintf(string, "txq%d_consumer_index", q_num);
702 				string += ETH_GSTRING_LEN;
703 				sprintf(string, "txq%d_hw_consumer_index",
704 									q_num);
705 				string += ETH_GSTRING_LEN;
706 				q_num++;
707 			}
708 		}
709 
710 		break;
711 
712 	default:
713 		break;
714 	}
715 
716 	mutex_unlock(&bnad->conf_mutex);
717 }
718 
719 static int
720 bnad_get_stats_count_locked(struct net_device *netdev)
721 {
722 	struct bnad *bnad = netdev_priv(netdev);
723 	int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
724 	u32 bmap;
725 
726 	bmap = bna_tx_rid_mask(&bnad->bna);
727 	for (i = 0; bmap; i++) {
728 		if (bmap & 1)
729 			txf_active_num++;
730 		bmap >>= 1;
731 	}
732 	bmap = bna_rx_rid_mask(&bnad->bna);
733 	for (i = 0; bmap; i++) {
734 		if (bmap & 1)
735 			rxf_active_num++;
736 		bmap >>= 1;
737 	}
738 	count = BNAD_ETHTOOL_STATS_NUM +
739 		txf_active_num * BNAD_NUM_TXF_COUNTERS +
740 		rxf_active_num * BNAD_NUM_RXF_COUNTERS;
741 
742 	for (i = 0; i < bnad->num_rx; i++) {
743 		if (!bnad->rx_info[i].rx)
744 			continue;
745 		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
746 		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
747 		for (j = 0; j < bnad->num_rxp_per_rx; j++)
748 			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
749 				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
750 				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
751 				count +=  BNAD_NUM_RXQ_COUNTERS;
752 	}
753 
754 	for (i = 0; i < bnad->num_tx; i++) {
755 		if (!bnad->tx_info[i].tx)
756 			continue;
757 		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
758 	}
759 	return count;
760 }
761 
/*
 * Append the per-queue (CQ, RXQ, TXQ) counters to the ethtool stats buffer.
 *
 * @bnad: driver state whose rx/tx paths are walked
 * @buf:  ethtool u64 stats buffer
 * @bi:   index in @buf at which to start writing
 *
 * Returns the index one past the last entry written.  The traversal order
 * must mirror the string order produced by bnad_get_strings() and the
 * count from bnad_get_stats_count_locked().  Called with bna_lock held
 * (see bnad_get_ethtool_stats()).
 */
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	/* CQ-level counters for every rx path of every active rx. */
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				buf[bi++] = 0; /* ccb->consumer_index */
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);

				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_intr_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_poll_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_schedule;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_keep_poll;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_complete;
			}
	}
	/* RXQ counters for rcb[0], and for rcb[1] when it is present. */
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
								ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	/* TXQ counters for every tx path of every active tx. */
	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
				bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}

	return bi;
}
847 
/*
 * ethtool .get_ethtool_stats: fill @buf with all counters in the exact
 * order advertised by bnad_get_strings().  Bails out silently if the
 * stat count changed since the caller sized the buffer.
 */
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi;
	unsigned long flags;
	struct rtnl_link_stats64 *net_stats64;
	u64 *stats64;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * Used bna_lock to sync reads from bna_stats, which is written
	 * under the same lock
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bi = 0;
	memset(buf, 0, stats->n_stats * sizeof(u64));

	/* The first entries are the rtnl_link_stats64 fields. */
	net_stats64 = (struct rtnl_link_stats64 *)buf;
	bnad_netdev_qstats_fill(bnad, net_stats64);
	bnad_netdev_hwstats_fill(bnad, net_stats64);

	bi = sizeof(*net_stats64) / sizeof(u64);

	/* Get netif_queue_stopped from stack */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
		sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/*  Fill rxf stats into ethtool buffers */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
928 
929 static int
930 bnad_get_sset_count(struct net_device *netdev, int sset)
931 {
932 	switch (sset) {
933 	case ETH_SS_STATS:
934 		return bnad_get_stats_count_locked(netdev);
935 	default:
936 		return -EOPNOTSUPP;
937 	}
938 }
939 
940 static u32
941 bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
942 				u32 *base_offset)
943 {
944 	struct bfa_flash_attr *flash_attr;
945 	struct bnad_iocmd_comp fcomp;
946 	u32 i, flash_part = 0, ret;
947 	unsigned long flags = 0;
948 
949 	flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
950 	if (!flash_attr)
951 		return 0;
952 
953 	fcomp.bnad = bnad;
954 	fcomp.comp_status = 0;
955 
956 	init_completion(&fcomp.comp);
957 	spin_lock_irqsave(&bnad->bna_lock, flags);
958 	ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
959 				bnad_cb_completion, &fcomp);
960 	if (ret != BFA_STATUS_OK) {
961 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
962 		kfree(flash_attr);
963 		return 0;
964 	}
965 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
966 	wait_for_completion(&fcomp.comp);
967 	ret = fcomp.comp_status;
968 
969 	/* Check for the flash type & base offset value */
970 	if (ret == BFA_STATUS_OK) {
971 		for (i = 0; i < flash_attr->npart; i++) {
972 			if (offset >= flash_attr->part[i].part_off &&
973 			    offset < (flash_attr->part[i].part_off +
974 				      flash_attr->part[i].part_size)) {
975 				flash_part = flash_attr->part[i].part_type;
976 				*base_offset = flash_attr->part[i].part_off;
977 				break;
978 			}
979 		}
980 	}
981 	kfree(flash_attr);
982 	return flash_part;
983 }
984 
static int
bnad_get_eeprom_len(struct net_device *netdev)
{
	/* The whole flash is exposed through the ethtool EEPROM interface. */
	return BFA_TOTAL_FLASH_SIZE;
}
990 
991 static int
992 bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
993 		u8 *bytes)
994 {
995 	struct bnad *bnad = netdev_priv(netdev);
996 	struct bnad_iocmd_comp fcomp;
997 	u32 flash_part = 0, base_offset = 0;
998 	unsigned long flags = 0;
999 	int ret = 0;
1000 
1001 	/* Fill the magic value */
1002 	eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);
1003 
1004 	/* Query the flash partition based on the offset */
1005 	flash_part = bnad_get_flash_partition_by_offset(bnad,
1006 				eeprom->offset, &base_offset);
1007 	if (flash_part == 0)
1008 		return -EFAULT;
1009 
1010 	fcomp.bnad = bnad;
1011 	fcomp.comp_status = 0;
1012 
1013 	init_completion(&fcomp.comp);
1014 	spin_lock_irqsave(&bnad->bna_lock, flags);
1015 	ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
1016 				bnad->id, bytes, eeprom->len,
1017 				eeprom->offset - base_offset,
1018 				bnad_cb_completion, &fcomp);
1019 	if (ret != BFA_STATUS_OK) {
1020 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
1021 		goto done;
1022 	}
1023 
1024 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1025 	wait_for_completion(&fcomp.comp);
1026 	ret = fcomp.comp_status;
1027 done:
1028 	return ret;
1029 }
1030 
1031 static int
1032 bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
1033 		u8 *bytes)
1034 {
1035 	struct bnad *bnad = netdev_priv(netdev);
1036 	struct bnad_iocmd_comp fcomp;
1037 	u32 flash_part = 0, base_offset = 0;
1038 	unsigned long flags = 0;
1039 	int ret = 0;
1040 
1041 	/* Check if the flash update request is valid */
1042 	if (eeprom->magic != (bnad->pcidev->vendor |
1043 			     (bnad->pcidev->device << 16)))
1044 		return -EINVAL;
1045 
1046 	/* Query the flash partition based on the offset */
1047 	flash_part = bnad_get_flash_partition_by_offset(bnad,
1048 				eeprom->offset, &base_offset);
1049 	if (flash_part == 0)
1050 		return -EFAULT;
1051 
1052 	fcomp.bnad = bnad;
1053 	fcomp.comp_status = 0;
1054 
1055 	init_completion(&fcomp.comp);
1056 	spin_lock_irqsave(&bnad->bna_lock, flags);
1057 	ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
1058 				bnad->id, bytes, eeprom->len,
1059 				eeprom->offset - base_offset,
1060 				bnad_cb_completion, &fcomp);
1061 	if (ret != BFA_STATUS_OK) {
1062 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
1063 		goto done;
1064 	}
1065 
1066 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1067 	wait_for_completion(&fcomp.comp);
1068 	ret = fcomp.comp_status;
1069 done:
1070 	return ret;
1071 }
1072 
1073 static int
1074 bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
1075 {
1076 	struct bnad *bnad = netdev_priv(netdev);
1077 	struct bnad_iocmd_comp fcomp;
1078 	const struct firmware *fw;
1079 	int ret = 0;
1080 
1081 	ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
1082 	if (ret) {
1083 		pr_err("BNA: Can't locate firmware %s\n", eflash->data);
1084 		goto out;
1085 	}
1086 
1087 	fcomp.bnad = bnad;
1088 	fcomp.comp_status = 0;
1089 
1090 	init_completion(&fcomp.comp);
1091 	spin_lock_irq(&bnad->bna_lock);
1092 	ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
1093 				bnad->id, (u8 *)fw->data, fw->size, 0,
1094 				bnad_cb_completion, &fcomp);
1095 	if (ret != BFA_STATUS_OK) {
1096 		pr_warn("BNA: Flash update failed with err: %d\n", ret);
1097 		ret = -EIO;
1098 		spin_unlock_irq(&bnad->bna_lock);
1099 		goto out;
1100 	}
1101 
1102 	spin_unlock_irq(&bnad->bna_lock);
1103 	wait_for_completion(&fcomp.comp);
1104 	if (fcomp.comp_status != BFA_STATUS_OK) {
1105 		ret = -EIO;
1106 		pr_warn("BNA: Firmware image update to flash failed with: %d\n",
1107 			fcomp.comp_status);
1108 	}
1109 out:
1110 	release_firmware(fw);
1111 	return ret;
1112 }
1113 
/* ethtool entry points for the bna driver; operations not listed here
 * fall back to the ethtool core defaults.
 */
static const struct ethtool_ops bnad_ethtool_ops = {
	.get_settings = bnad_get_settings,
	.set_settings = bnad_set_settings,
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count,
	.get_eeprom_len = bnad_get_eeprom_len,
	.get_eeprom = bnad_get_eeprom,
	.set_eeprom = bnad_set_eeprom,
	.flash_device = bnad_flash_device,
	.get_ts_info = ethtool_op_get_ts_info,
};
1135 
/* Attach the bna ethtool operations to a newly created net_device. */
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &bnad_ethtool_ops;
}
1141