1 /*
2  * Copyright (C) 2005 - 2016 Broadcom
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <linux/ethtool.h>
21 
22 struct be_ethtool_stat {
23 	char desc[ETH_GSTRING_LEN];
24 	int type;
25 	int size;
26 	int offset;
27 };
28 
29 enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
30 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 					offsetof(_struct, field)
32 #define DRVSTAT_TX_INFO(field)	#field, DRVSTAT_TX,\
33 					FIELDINFO(struct be_tx_stats, field)
34 #define DRVSTAT_RX_INFO(field)	#field, DRVSTAT_RX,\
35 					FIELDINFO(struct be_rx_stats, field)
36 #define	DRVSTAT_INFO(field)	#field, DRVSTAT,\
37 					FIELDINFO(struct be_drv_stats, field)
38 
39 static const struct be_ethtool_stat et_stats[] = {
40 	{DRVSTAT_INFO(rx_crc_errors)},
41 	{DRVSTAT_INFO(rx_alignment_symbol_errors)},
42 	{DRVSTAT_INFO(rx_pause_frames)},
43 	{DRVSTAT_INFO(rx_control_frames)},
44 	/* Received packets dropped when the Ethernet length field
45 	 * is not equal to the actual Ethernet data length.
46 	 */
47 	{DRVSTAT_INFO(rx_in_range_errors)},
48 	/* Received packets dropped when their length field is >= 1501 bytes
49 	 * and <= 1535 bytes.
50 	 */
51 	{DRVSTAT_INFO(rx_out_range_errors)},
52 	/* Received packets dropped when they are longer than 9216 bytes */
53 	{DRVSTAT_INFO(rx_frame_too_long)},
54 	/* Received packets dropped when they don't pass the unicast or
55 	 * multicast address filtering.
56 	 */
57 	{DRVSTAT_INFO(rx_address_filtered)},
58 	/* Received packets dropped when IP packet length field is less than
59 	 * the IP header length field.
60 	 */
61 	{DRVSTAT_INFO(rx_dropped_too_small)},
62 	/* Received packets dropped when IP length field is greater than
63 	 * the actual packet length.
64 	 */
65 	{DRVSTAT_INFO(rx_dropped_too_short)},
66 	/* Received packets dropped when the IP header length field is less
67 	 * than 5.
68 	 */
69 	{DRVSTAT_INFO(rx_dropped_header_too_small)},
70 	/* Received packets dropped when the TCP header length field is less
71 	 * than 5 or the TCP header length + IP header length is more
72 	 * than IP packet length.
73 	 */
74 	{DRVSTAT_INFO(rx_dropped_tcp_length)},
75 	{DRVSTAT_INFO(rx_dropped_runt)},
76 	/* Number of received packets dropped when a fifo for descriptors going
77 	 * into the packet demux block overflows. In normal operation, this
78 	 * fifo must never overflow.
79 	 */
80 	{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
81 	/* Received packets dropped when the RX block runs out of space in
82 	 * one of its input FIFOs. This could happen due to a long burst of
83 	 * minimum-sized (64-byte) frames in the receive path.
84 	 * In rare cases this counter may also be incremented erroneously.
85 	 */
86 	{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
87 	{DRVSTAT_INFO(rx_ip_checksum_errs)},
88 	{DRVSTAT_INFO(rx_tcp_checksum_errs)},
89 	{DRVSTAT_INFO(rx_udp_checksum_errs)},
90 	{DRVSTAT_INFO(tx_pauseframes)},
91 	{DRVSTAT_INFO(tx_controlframes)},
92 	{DRVSTAT_INFO(rx_priority_pause_frames)},
93 	{DRVSTAT_INFO(tx_priority_pauseframes)},
94 	/* Received packets dropped when an internal fifo going into
95 	 * main packet buffer tank (PMEM) overflows.
96 	 */
97 	{DRVSTAT_INFO(pmem_fifo_overflow_drop)},
98 	{DRVSTAT_INFO(jabber_events)},
99 	/* Received packets dropped due to lack of available HW packet buffers
100 	 * used to temporarily hold the received packets.
101 	 */
102 	{DRVSTAT_INFO(rx_drops_no_pbuf)},
103 	/* Received packets dropped due to input receive buffer
104 	 * descriptor fifo overflowing.
105 	 */
106 	{DRVSTAT_INFO(rx_drops_no_erx_descr)},
107 	/* Packets dropped because the internal FIFO to the offloaded TCP
108 	 * receive processing block is full. This could happen only for
109 	 * offloaded iSCSI or FCoE traffic.
110 	 */
111 	{DRVSTAT_INFO(rx_drops_no_tpre_descr)},
112 	/* Received packets dropped when they need more than 8
113 	 * receive buffers. This cannot happen as the driver configures
114 	 * 2048-byte receive buffers.
115 	 */
116 	{DRVSTAT_INFO(rx_drops_too_many_frags)},
117 	{DRVSTAT_INFO(forwarded_packets)},
118 	/* Received packets dropped when the frame length
119 	 * is more than 9018 bytes.
120 	 */
121 	{DRVSTAT_INFO(rx_drops_mtu)},
122 	/* Number of DMA mapping errors */
123 	{DRVSTAT_INFO(dma_map_errors)},
124 	/* Number of packets dropped due to random early drop function */
125 	{DRVSTAT_INFO(eth_red_drops)},
126 	{DRVSTAT_INFO(rx_roce_bytes_lsd)},
127 	{DRVSTAT_INFO(rx_roce_bytes_msd)},
128 	{DRVSTAT_INFO(rx_roce_frames)},
129 	{DRVSTAT_INFO(roce_drops_payload_len)},
130 	{DRVSTAT_INFO(roce_drops_crc)}
131 };
132 
133 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
134 
135 /* Stats related to multi RX queues: the get_stats routine assumes that
136  * bytes and pkts are the first and second members, respectively.
137  */
138 static const struct be_ethtool_stat et_rx_stats[] = {
139 	{DRVSTAT_RX_INFO(rx_bytes)}, /* If moving this member see above note */
140 	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
141 	{DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
142 	{DRVSTAT_RX_INFO(rx_compl)},
143 	{DRVSTAT_RX_INFO(rx_compl_err)},
144 	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
145 	/* Number of page allocation failures while posting receive buffers
146 	 * to HW.
147 	 */
148 	{DRVSTAT_RX_INFO(rx_post_fail)},
149 	/* Received packets dropped due to skb allocation failure */
150 	{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
151 	/* Received packets dropped due to lack of available fetched buffers
152 	 * posted by the driver.
153 	 */
154 	{DRVSTAT_RX_INFO(rx_drops_no_frags)}
155 };
156 
157 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
158 
159 /* Stats related to multi TX queues: the get_stats routine assumes that
160  * compl is the first member.
161  */
162 static const struct be_ethtool_stat et_tx_stats[] = {
163 	{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
164 	/* This counter is incremented when the HW encounters an error while
165 	 * parsing the packet header of an outgoing TX request. This counter is
166 	 * applicable only for BE2, BE3 and Skyhawk based adapters.
167 	 */
168 	{DRVSTAT_TX_INFO(tx_hdr_parse_err)},
169 	/* This counter is incremented when an error occurs in the DMA
170 	 * operation associated with the TX request from the host to the device.
171 	 */
172 	{DRVSTAT_TX_INFO(tx_dma_err)},
173 	/* This counter is incremented when MAC or VLAN spoof checking is
174 	 * enabled on the interface and the TX request fails the spoof check
175 	 * in HW.
176 	 */
177 	{DRVSTAT_TX_INFO(tx_spoof_check_err)},
178 	/* This counter is incremented when the HW encounters an error while
179 	 * performing TSO offload. This counter is applicable only for Lancer
180 	 * adapters.
181 	 */
182 	{DRVSTAT_TX_INFO(tx_tso_err)},
183 	/* This counter is incremented when the HW detects Q-in-Q style VLAN
184 	 * tagging in a packet and such tagging is not expected on the outgoing
185 	 * interface. This counter is applicable only for Lancer adapters.
186 	 */
187 	{DRVSTAT_TX_INFO(tx_qinq_err)},
188 	/* This counter is incremented when the HW detects parity errors in the
189 	 * packet data. This counter is applicable only for Lancer adapters.
190 	 */
191 	{DRVSTAT_TX_INFO(tx_internal_parity_err)},
192 	{DRVSTAT_TX_INFO(tx_bytes)},
193 	{DRVSTAT_TX_INFO(tx_pkts)},
194 	{DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
195 	/* Number of skbs queued for transmission by the driver */
196 	{DRVSTAT_TX_INFO(tx_reqs)},
197 	/* Number of times the TX queue was stopped due to lack
198 	 * of space in the TXQ.
199 	 */
200 	{DRVSTAT_TX_INFO(tx_stops)},
201 	/* Pkts dropped in the driver's transmit path */
202 	{DRVSTAT_TX_INFO(tx_drv_drops)}
203 };
204 
205 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
206 
207 static const char et_self_tests[][ETH_GSTRING_LEN] = {
208 	"MAC Loopback test",
209 	"PHY Loopback test",
210 	"External Loopback test",
211 	"DDR DMA test",
212 	"Link test"
213 };
214 
215 #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
216 #define BE_MAC_LOOPBACK 0x0
217 #define BE_PHY_LOOPBACK 0x1
218 #define BE_ONE_PORT_EXT_LOOPBACK 0x2
219 #define BE_NO_LOOPBACK 0xff
220 
221 static void be_get_drvinfo(struct net_device *netdev,
222 			   struct ethtool_drvinfo *drvinfo)
223 {
224 	struct be_adapter *adapter = netdev_priv(netdev);
225 
226 	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
227 	strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
228 	if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
229 		strlcpy(drvinfo->fw_version, adapter->fw_ver,
230 			sizeof(drvinfo->fw_version));
231 	else
232 		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
233 			 "%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
234 
235 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
236 		sizeof(drvinfo->bus_info));
237 }
238 
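/* Return the length (in bytes) of a file/object stored on Lancer flash.
 * A read-object cmd issued with data_offset and data_size of 0 only
 * returns the object's length (in data_read) without transferring data.
 */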
239 static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
240 {
241 	u32 data_read = 0, eof;
242 	u8 addn_status;
243 	struct be_dma_mem data_len_cmd;
244 
245 	memset(&data_len_cmd, 0, sizeof(data_len_cmd));
246 	/* data_offset and data_size should be 0 to get reg len */
247 	lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, file_name,
248 			       &data_read, &eof, &addn_status);
249 
250 	return data_read;
251 }
252 
253 static int be_get_dump_len(struct be_adapter *adapter)
254 {
255 	u32 dump_size = 0;
256 
257 	if (lancer_chip(adapter))
258 		dump_size = lancer_cmd_get_file_len(adapter,
259 						    LANCER_FW_DUMP_FILE);
260 	else
261 		dump_size = adapter->fat_dump_len;
262 
263 	return dump_size;
264 }
265 
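/* Read up to buf_len bytes of the named Lancer flash object into buf.
 * The object is read in LANCER_READ_FILE_CHUNK sized pieces until either
 * buf_len bytes have been copied or the FW reports end-of-file.
 */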
266 static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
267 				u32 buf_len, void *buf)
268 {
269 	struct be_dma_mem read_cmd;
270 	u32 read_len = 0, total_read_len = 0, chunk_size;
271 	u32 eof = 0;
272 	u8 addn_status;
273 	int status = 0;
274 
275 	read_cmd.size = LANCER_READ_FILE_CHUNK;
276 	read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
277 					  &read_cmd.dma, GFP_ATOMIC);
278 
279 	if (!read_cmd.va) {
280 		dev_err(&adapter->pdev->dev,
281 			"Memory allocation failure while reading dump\n");
282 		return -ENOMEM;
283 	}
284 
285 	while ((total_read_len < buf_len) && !eof) {
286 		chunk_size = min_t(u32, (buf_len - total_read_len),
287 				   LANCER_READ_FILE_CHUNK);
288 		chunk_size = ALIGN(chunk_size, 4);
289 		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
290 						total_read_len, file_name,
291 						&read_len, &eof, &addn_status);
292 		if (!status) {
293 			memcpy(buf + total_read_len, read_cmd.va, read_len);
294 			total_read_len += read_len;
295 			eof &= LANCER_READ_FILE_EOF_MASK;
296 		} else {
297 			status = -EIO;
298 			break;
299 		}
300 	}
301 	dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
302 			  read_cmd.dma);
303 
304 	return status;
305 }
306 
307 static int be_read_dump_data(struct be_adapter *adapter, u32 dump_len,
308 			     void *buf)
309 {
310 	int status = 0;
311 
312 	if (lancer_chip(adapter))
313 		status = lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
314 					      dump_len, buf);
315 	else
316 		status = be_cmd_get_fat_dump(adapter, dump_len, buf);
317 
318 	return status;
319 }
320 
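/* ethtool exposes a single set of interrupt-coalescing (EQ delay) values;
 * report the values tracked in the first event queue's AIC object.
 */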
321 static int be_get_coalesce(struct net_device *netdev,
322 			   struct ethtool_coalesce *et)
323 {
324 	struct be_adapter *adapter = netdev_priv(netdev);
325 	struct be_aic_obj *aic = &adapter->aic_obj[0];
326 
327 	et->rx_coalesce_usecs = aic->prev_eqd;
328 	et->rx_coalesce_usecs_high = aic->max_eqd;
329 	et->rx_coalesce_usecs_low = aic->min_eqd;
330 
331 	et->tx_coalesce_usecs = aic->prev_eqd;
332 	et->tx_coalesce_usecs_high = aic->max_eqd;
333 	et->tx_coalesce_usecs_low = aic->min_eqd;
334 
335 	et->use_adaptive_rx_coalesce = aic->enable;
336 	et->use_adaptive_tx_coalesce = aic->enable;
337 
338 	return 0;
339 }
340 
341 /* TX attributes are ignored. Only RX attributes are considered.
342  * The EQ-delay (eqd) cmd is issued from the worker thread.
343  */
344 static int be_set_coalesce(struct net_device *netdev,
345 			   struct ethtool_coalesce *et)
346 {
347 	struct be_adapter *adapter = netdev_priv(netdev);
348 	struct be_aic_obj *aic = &adapter->aic_obj[0];
349 	struct be_eq_obj *eqo;
350 	int i;
351 
352 	for_all_evt_queues(adapter, eqo, i) {
353 		aic->enable = et->use_adaptive_rx_coalesce;
354 		aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
355 		aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
356 		aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
357 		aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
358 		aic++;
359 	}
360 
361 	/* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
362 	 * When AIC is disabled, persistently force-set the EQD value via the
363 	 * FW cmd, so that we don't have to calculate the delay multiplier
364 	 * encode value each time EQ_DB is rung.
365 	 */
366 	if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
367 		be_eqd_update(adapter, true);
368 
369 	return 0;
370 }
371 
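/* Fill the flat ethtool data[] array: adapter-wide driver stats first,
 * followed by per-RX-queue and per-TX-queue stats, in the same order in
 * which be_get_stat_strings() emits the corresponding names.
 */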
372 static void be_get_ethtool_stats(struct net_device *netdev,
373 				 struct ethtool_stats *stats, uint64_t *data)
374 {
375 	struct be_adapter *adapter = netdev_priv(netdev);
376 	struct be_rx_obj *rxo;
377 	struct be_tx_obj *txo;
378 	void *p;
379 	unsigned int i, j, base = 0, start;
380 
381 	for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
382 		p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
383 		data[i] = *(u32 *)p;
384 	}
385 	base += ETHTOOL_STATS_NUM;
386 
387 	for_all_rx_queues(adapter, rxo, j) {
388 		struct be_rx_stats *stats = rx_stats(rxo);
389 
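		/* Snapshot the 64-bit byte/pkt counters under the u64_stats
		 * seqcount so the pair is read consistently on 32-bit hosts.
		 */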
390 		do {
391 			start = u64_stats_fetch_begin_irq(&stats->sync);
392 			data[base] = stats->rx_bytes;
393 			data[base + 1] = stats->rx_pkts;
394 		} while (u64_stats_fetch_retry_irq(&stats->sync, start));
395 
396 		for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
397 			p = (u8 *)stats + et_rx_stats[i].offset;
398 			data[base + i] = *(u32 *)p;
399 		}
400 		base += ETHTOOL_RXSTATS_NUM;
401 	}
402 
403 	for_all_tx_queues(adapter, txo, j) {
404 		struct be_tx_stats *stats = tx_stats(txo);
405 
406 		do {
407 			start = u64_stats_fetch_begin_irq(&stats->sync_compl);
408 			data[base] = stats->tx_compl;
409 		} while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
410 
411 		do {
412 			start = u64_stats_fetch_begin_irq(&stats->sync);
413 			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
414 				p = (u8 *)stats + et_tx_stats[i].offset;
415 				data[base + i] =
416 					(et_tx_stats[i].size == sizeof(u64)) ?
417 						*(u64 *)p : *(u32 *)p;
418 			}
419 		} while (u64_stats_fetch_retry_irq(&stats->sync, start));
420 		base += ETHTOOL_TXSTATS_NUM;
421 	}
422 }
423 
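/* Emit stat and self-test name strings; the ETH_SS_STATS ordering must
 * match the order in which be_get_ethtool_stats() fills the data array.
 */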
424 static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
425 				uint8_t *data)
426 {
427 	struct be_adapter *adapter = netdev_priv(netdev);
428 	int i, j;
429 
430 	switch (stringset) {
431 	case ETH_SS_STATS:
432 		for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
433 			memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
434 			data += ETH_GSTRING_LEN;
435 		}
436 		for (i = 0; i < adapter->num_rx_qs; i++) {
437 			for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
438 				sprintf(data, "rxq%d: %s", i,
439 					et_rx_stats[j].desc);
440 				data += ETH_GSTRING_LEN;
441 			}
442 		}
443 		for (i = 0; i < adapter->num_tx_qs; i++) {
444 			for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
445 				sprintf(data, "txq%d: %s", i,
446 					et_tx_stats[j].desc);
447 				data += ETH_GSTRING_LEN;
448 			}
449 		}
450 		break;
451 	case ETH_SS_TEST:
452 		for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
453 			memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
454 			data += ETH_GSTRING_LEN;
455 		}
456 		break;
457 	}
458 }
459 
460 static int be_get_sset_count(struct net_device *netdev, int stringset)
461 {
462 	struct be_adapter *adapter = netdev_priv(netdev);
463 
464 	switch (stringset) {
465 	case ETH_SS_TEST:
466 		return ETHTOOL_TESTS_NUM;
467 	case ETH_SS_STATS:
468 		return ETHTOOL_STATS_NUM +
469 			adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
470 			adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
471 	default:
472 		return -EINVAL;
473 	}
474 }
475 
476 static u32 be_get_port_type(struct be_adapter *adapter)
477 {
478 	u32 port;
479 
480 	switch (adapter->phy.interface_type) {
481 	case PHY_TYPE_BASET_1GB:
482 	case PHY_TYPE_BASEX_1GB:
483 	case PHY_TYPE_SGMII:
484 		port = PORT_TP;
485 		break;
486 	case PHY_TYPE_SFP_PLUS_10GB:
487 		if (adapter->phy.cable_type & SFP_PLUS_COPPER_CABLE)
488 			port = PORT_DA;
489 		else
490 			port = PORT_FIBRE;
491 		break;
492 	case PHY_TYPE_QSFP:
493 		if (adapter->phy.cable_type & QSFP_PLUS_CR4_CABLE)
494 			port = PORT_DA;
495 		else
496 			port = PORT_FIBRE;
497 		break;
498 	case PHY_TYPE_XFP_10GB:
499 	case PHY_TYPE_SFP_1GB:
500 		port = PORT_FIBRE;
501 		break;
502 	case PHY_TYPE_BASET_10GB:
503 		port = PORT_TP;
504 		break;
505 	default:
506 		port = PORT_OTHER;
507 	}
508 
509 	return port;
510 }
511 
512 static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
513 {
514 	u32 val = 0;
515 
516 	switch (adapter->phy.interface_type) {
517 	case PHY_TYPE_BASET_1GB:
518 	case PHY_TYPE_BASEX_1GB:
519 	case PHY_TYPE_SGMII:
520 		val |= SUPPORTED_TP;
521 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
522 			val |= SUPPORTED_1000baseT_Full;
523 		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
524 			val |= SUPPORTED_100baseT_Full;
525 		if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
526 			val |= SUPPORTED_10baseT_Full;
527 		break;
528 	case PHY_TYPE_KX4_10GB:
529 		val |= SUPPORTED_Backplane;
530 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
531 			val |= SUPPORTED_1000baseKX_Full;
532 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
533 			val |= SUPPORTED_10000baseKX4_Full;
534 		break;
535 	case PHY_TYPE_KR2_20GB:
536 		val |= SUPPORTED_Backplane;
537 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
538 			val |= SUPPORTED_10000baseKR_Full;
539 		if (if_speeds & BE_SUPPORTED_SPEED_20GBPS)
540 			val |= SUPPORTED_20000baseKR2_Full;
541 		break;
542 	case PHY_TYPE_KR_10GB:
543 		val |= SUPPORTED_Backplane |
544 				SUPPORTED_10000baseKR_Full;
545 		break;
546 	case PHY_TYPE_KR4_40GB:
547 		val |= SUPPORTED_Backplane;
548 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
549 			val |= SUPPORTED_10000baseKR_Full;
550 		if (if_speeds & BE_SUPPORTED_SPEED_40GBPS)
551 			val |= SUPPORTED_40000baseKR4_Full;
552 		break;
553 	case PHY_TYPE_QSFP:
554 		if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) {
555 			switch (adapter->phy.cable_type) {
556 			case QSFP_PLUS_CR4_CABLE:
557 				val |= SUPPORTED_40000baseCR4_Full;
558 				break;
559 			case QSFP_PLUS_LR4_CABLE:
560 				val |= SUPPORTED_40000baseLR4_Full;
561 				break;
562 			default:
563 				val |= SUPPORTED_40000baseSR4_Full;
564 				break;
565 			}
566 		}
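		/* fall through - QSFP also reports the fibre modes below */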
567 	case PHY_TYPE_SFP_PLUS_10GB:
568 	case PHY_TYPE_XFP_10GB:
569 	case PHY_TYPE_SFP_1GB:
570 		val |= SUPPORTED_FIBRE;
571 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
572 			val |= SUPPORTED_10000baseT_Full;
573 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
574 			val |= SUPPORTED_1000baseT_Full;
575 		break;
576 	case PHY_TYPE_BASET_10GB:
577 		val |= SUPPORTED_TP;
578 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
579 			val |= SUPPORTED_10000baseT_Full;
580 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
581 			val |= SUPPORTED_1000baseT_Full;
582 		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
583 			val |= SUPPORTED_100baseT_Full;
584 		break;
585 	default:
586 		val |= SUPPORTED_TP;
587 	}
588 
589 	return val;
590 }
591 
592 bool be_pause_supported(struct be_adapter *adapter)
593 {
594 	return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
595 		adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
596 		false : true;
597 }
598 
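/* When no link-speed value is cached (phy.link_speed < 0), query the FW for
 * link status and PHY info and cache the results in adapter->phy; otherwise
 * report the previously cached values.
 */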
599 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
600 {
601 	struct be_adapter *adapter = netdev_priv(netdev);
602 	u8 link_status;
603 	u16 link_speed = 0;
604 	int status;
605 	u32 auto_speeds;
606 	u32 fixed_speeds;
607 
608 	if (adapter->phy.link_speed < 0) {
609 		status = be_cmd_link_status_query(adapter, &link_speed,
610 						  &link_status, 0);
611 		if (!status)
612 			be_link_status_update(adapter, link_status);
613 		ethtool_cmd_speed_set(ecmd, link_speed);
614 
615 		status = be_cmd_get_phy_info(adapter);
616 		if (!status) {
617 			auto_speeds = adapter->phy.auto_speeds_supported;
618 			fixed_speeds = adapter->phy.fixed_speeds_supported;
619 
620 			be_cmd_query_cable_type(adapter);
621 
622 			ecmd->supported =
623 				convert_to_et_setting(adapter,
624 						      auto_speeds |
625 						      fixed_speeds);
626 			ecmd->advertising =
627 				convert_to_et_setting(adapter, auto_speeds);
628 
629 			ecmd->port = be_get_port_type(adapter);
630 
631 			if (adapter->phy.auto_speeds_supported) {
632 				ecmd->supported |= SUPPORTED_Autoneg;
633 				ecmd->autoneg = AUTONEG_ENABLE;
634 				ecmd->advertising |= ADVERTISED_Autoneg;
635 			}
636 
637 			ecmd->supported |= SUPPORTED_Pause;
638 			if (be_pause_supported(adapter))
639 				ecmd->advertising |= ADVERTISED_Pause;
640 
641 			switch (adapter->phy.interface_type) {
642 			case PHY_TYPE_KR_10GB:
643 			case PHY_TYPE_KX4_10GB:
644 				ecmd->transceiver = XCVR_INTERNAL;
645 				break;
646 			default:
647 				ecmd->transceiver = XCVR_EXTERNAL;
648 				break;
649 			}
650 		} else {
651 			ecmd->port = PORT_OTHER;
652 			ecmd->autoneg = AUTONEG_DISABLE;
653 			ecmd->transceiver = XCVR_DUMMY1;
654 		}
655 
656 		/* Save for future use */
657 		adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
658 		adapter->phy.port_type = ecmd->port;
659 		adapter->phy.transceiver = ecmd->transceiver;
660 		adapter->phy.autoneg = ecmd->autoneg;
661 		adapter->phy.advertising = ecmd->advertising;
662 		adapter->phy.supported = ecmd->supported;
663 	} else {
664 		ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
665 		ecmd->port = adapter->phy.port_type;
666 		ecmd->transceiver = adapter->phy.transceiver;
667 		ecmd->autoneg = adapter->phy.autoneg;
668 		ecmd->advertising = adapter->phy.advertising;
669 		ecmd->supported = adapter->phy.supported;
670 	}
671 
672 	ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
673 	ecmd->phy_address = adapter->port_num;
674 
675 	return 0;
676 }
677 
678 static void be_get_ringparam(struct net_device *netdev,
679 			     struct ethtool_ringparam *ring)
680 {
681 	struct be_adapter *adapter = netdev_priv(netdev);
682 
683 	ring->rx_max_pending = adapter->rx_obj[0].q.len;
684 	ring->rx_pending = adapter->rx_obj[0].q.len;
685 	ring->tx_max_pending = adapter->tx_obj[0].q.len;
686 	ring->tx_pending = adapter->tx_obj[0].q.len;
687 }
688 
689 static void
690 be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
691 {
692 	struct be_adapter *adapter = netdev_priv(netdev);
693 
694 	be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
695 	ecmd->autoneg = adapter->phy.fc_autoneg;
696 }
697 
698 static int
699 be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
700 {
701 	struct be_adapter *adapter = netdev_priv(netdev);
702 	int status;
703 
704 	if (ecmd->autoneg != adapter->phy.fc_autoneg)
705 		return -EINVAL;
706 
707 	status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
708 					 ecmd->rx_pause);
709 	if (status) {
710 		dev_warn(&adapter->pdev->dev, "Pause param set failed\n");
711 		return be_cmd_status(status);
712 	}
713 
714 	adapter->tx_fc = ecmd->tx_pause;
715 	adapter->rx_fc = ecmd->rx_pause;
716 	return 0;
717 }
718 
719 static int be_set_phys_id(struct net_device *netdev,
720 			  enum ethtool_phys_id_state state)
721 {
722 	struct be_adapter *adapter = netdev_priv(netdev);
723 	int status = 0;
724 
725 	switch (state) {
726 	case ETHTOOL_ID_ACTIVE:
727 		status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
728 						 &adapter->beacon_state);
729 		if (status)
730 			return be_cmd_status(status);
731 		return 1;       /* cycle on/off once per second */
732 
733 	case ETHTOOL_ID_ON:
734 		status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
735 						 0, 0, BEACON_STATE_ENABLED);
736 		break;
737 
738 	case ETHTOOL_ID_OFF:
739 		status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
740 						 0, 0, BEACON_STATE_DISABLED);
741 		break;
742 
743 	case ETHTOOL_ID_INACTIVE:
744 		status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
745 						 0, 0, adapter->beacon_state);
746 	}
747 
748 	return be_cmd_status(status);
749 }
750 
751 static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
752 {
753 	struct be_adapter *adapter = netdev_priv(netdev);
754 	struct device *dev = &adapter->pdev->dev;
755 	int status;
756 
757 	if (!lancer_chip(adapter) ||
758 	    !check_privilege(adapter, MAX_PRIVILEGES))
759 		return -EOPNOTSUPP;
760 
761 	switch (dump->flag) {
762 	case LANCER_INITIATE_FW_DUMP:
763 		status = lancer_initiate_dump(adapter);
764 		if (!status)
765 			dev_info(dev, "FW dump initiated successfully\n");
766 		break;
767 	case LANCER_DELETE_FW_DUMP:
768 		status = lancer_delete_dump(adapter);
769 		if (!status)
770 			dev_info(dev, "FW dump deleted successfully\n");
771 		break;
772 	default:
773 		dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag);
774 		return -EINVAL;
775 	}
776 	return status;
777 }
778 
779 static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
780 {
781 	struct be_adapter *adapter = netdev_priv(netdev);
782 
783 	if (adapter->wol_cap & BE_WOL_CAP) {
784 		wol->supported |= WAKE_MAGIC;
785 		if (adapter->wol_en)
786 			wol->wolopts |= WAKE_MAGIC;
787 	} else {
788 		wol->wolopts = 0;
789 	}
790 	memset(&wol->sopass, 0, sizeof(wol->sopass));
791 }
792 
793 static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
794 {
795 	struct be_adapter *adapter = netdev_priv(netdev);
796 	struct device *dev = &adapter->pdev->dev;
797 	struct be_dma_mem cmd;
798 	u8 mac[ETH_ALEN];
799 	bool enable;
800 	int status;
801 
802 	if (wol->wolopts & ~WAKE_MAGIC)
803 		return -EOPNOTSUPP;
804 
805 	if (!(adapter->wol_cap & BE_WOL_CAP)) {
806 		dev_warn(&adapter->pdev->dev, "WOL not supported\n");
807 		return -EOPNOTSUPP;
808 	}
809 
810 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
811 	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
812 	if (!cmd.va)
813 		return -ENOMEM;
814 
815 	eth_zero_addr(mac);
816 
817 	enable = wol->wolopts & WAKE_MAGIC;
818 	if (enable)
819 		ether_addr_copy(mac, adapter->netdev->dev_addr);
820 
821 	status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
822 	if (status) {
823 		dev_err(dev, "Could not set Wake-on-LAN MAC address\n");
824 		status = be_cmd_status(status);
825 		goto err;
826 	}
827 
828 	pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
829 	pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
830 
831 	adapter->wol_en = enable ? true : false;
832 
833 err:
834 	dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
835 	return status;
836 }
837 
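/* DDR DMA self-test: run the FW DDR-DMA test twice, once with each of two
 * complementary 64-bit patterns, over a 4096-byte DMA buffer.
 */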
838 static int be_test_ddr_dma(struct be_adapter *adapter)
839 {
840 	int ret, i;
841 	struct be_dma_mem ddrdma_cmd;
842 	static const u64 pattern[2] = {
843 		0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
844 	};
845 
846 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
847 	ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
848 					    ddrdma_cmd.size, &ddrdma_cmd.dma,
849 					    GFP_KERNEL);
850 	if (!ddrdma_cmd.va)
851 		return -ENOMEM;
852 
853 	for (i = 0; i < 2; i++) {
854 		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
855 					  4096, &ddrdma_cmd);
856 		if (ret != 0)
857 			goto err;
858 	}
859 
860 err:
861 	dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
862 			  ddrdma_cmd.dma);
863 	return be_cmd_status(ret);
864 }
865 
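/* Run one loopback self-test: enable the requested loopback mode on the
 * port, let the FW run the loopback packet test, then restore the port to
 * BE_NO_LOOPBACK. The FW-reported test result is returned via *status.
 */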
866 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
867 			    u64 *status)
868 {
869 	int ret;
870 
871 	ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
872 				  loopback_type, 1);
873 	if (ret)
874 		return ret;
875 
876 	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
877 				       loopback_type, 1500, 2, 0xabc);
878 
879 	ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
880 				  BE_NO_LOOPBACK, 1);
881 	if (ret)
882 		return ret;
883 
884 	return *status;
885 }
886 
887 static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
888 			 u64 *data)
889 {
890 	struct be_adapter *adapter = netdev_priv(netdev);
891 	int status;
892 	u8 link_status = 0;
893 
894 	if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
895 		dev_err(&adapter->pdev->dev, "Self test not supported\n");
896 		test->flags |= ETH_TEST_FL_FAILED;
897 		return;
898 	}
899 
900 	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
901 
902 	if (test->flags & ETH_TEST_FL_OFFLINE) {
903 		if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
904 			test->flags |= ETH_TEST_FL_FAILED;
905 
906 		if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
907 			test->flags |= ETH_TEST_FL_FAILED;
908 
909 		if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
910 			if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
911 					     &data[2]) != 0)
912 				test->flags |= ETH_TEST_FL_FAILED;
913 			test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
914 		}
915 	}
916 
917 	if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
918 		data[3] = 1;
919 		test->flags |= ETH_TEST_FL_FAILED;
920 	}
921 
922 	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
923 	if (status) {
924 		test->flags |= ETH_TEST_FL_FAILED;
925 		data[4] = -1;
926 	} else if (!link_status) {
927 		test->flags |= ETH_TEST_FL_FAILED;
928 		data[4] = 1;
929 	}
930 }
931 
932 static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
933 {
934 	struct be_adapter *adapter = netdev_priv(netdev);
935 
936 	return be_load_fw(adapter, efl->data);
937 }
938 
939 static int
940 be_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
941 {
942 	struct be_adapter *adapter = netdev_priv(netdev);
943 
944 	if (!check_privilege(adapter, MAX_PRIVILEGES))
945 		return -EOPNOTSUPP;
946 
947 	dump->len = be_get_dump_len(adapter);
948 	dump->version = 1;
949 	dump->flag = 0x1;	/* FW dump is enabled */
950 	return 0;
951 }
952 
953 static int
954 be_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
955 		 void *buf)
956 {
957 	struct be_adapter *adapter = netdev_priv(netdev);
958 	int status;
959 
960 	if (!check_privilege(adapter, MAX_PRIVILEGES))
961 		return -EOPNOTSUPP;
962 
963 	status = be_read_dump_data(adapter, dump->len, buf);
964 	return be_cmd_status(status);
965 }
966 
967 static int be_get_eeprom_len(struct net_device *netdev)
968 {
969 	struct be_adapter *adapter = netdev_priv(netdev);
970 
971 	if (!check_privilege(adapter, MAX_PRIVILEGES))
972 		return 0;
973 
974 	if (lancer_chip(adapter)) {
975 		if (be_physfn(adapter))
976 			return lancer_cmd_get_file_len(adapter,
977 						       LANCER_VPD_PF_FILE);
978 		else
979 			return lancer_cmd_get_file_len(adapter,
980 						       LANCER_VPD_VF_FILE);
981 	} else {
982 		return BE_READ_SEEPROM_LEN;
983 	}
984 }
985 
986 static int be_read_eeprom(struct net_device *netdev,
987 			  struct ethtool_eeprom *eeprom, uint8_t *data)
988 {
989 	struct be_adapter *adapter = netdev_priv(netdev);
990 	struct be_dma_mem eeprom_cmd;
991 	struct be_cmd_resp_seeprom_read *resp;
992 	int status;
993 
994 	if (!eeprom->len)
995 		return -EINVAL;
996 
997 	if (lancer_chip(adapter)) {
998 		if (be_physfn(adapter))
999 			return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
1000 						    eeprom->len, data);
1001 		else
1002 			return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
1003 						    eeprom->len, data);
1004 	}
1005 
1006 	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
1007 
1008 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
1009 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
1010 	eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1011 					    eeprom_cmd.size, &eeprom_cmd.dma,
1012 					    GFP_KERNEL);
1013 
1014 	if (!eeprom_cmd.va)
1015 		return -ENOMEM;
1016 
1017 	status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
1018 
1019 	if (!status) {
1020 		resp = eeprom_cmd.va;
1021 		memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
1022 	}
1023 	dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
1024 			  eeprom_cmd.dma);
1025 
1026 	return be_cmd_status(status);
1027 }
1028 
1029 static u32 be_get_msg_level(struct net_device *netdev)
1030 {
1031 	struct be_adapter *adapter = netdev_priv(netdev);
1032 
1033 	return adapter->msg_enable;
1034 }
1035 
1036 static void be_set_msg_level(struct net_device *netdev, u32 level)
1037 {
1038 	struct be_adapter *adapter = netdev_priv(netdev);
1039 
1040 	if (adapter->msg_enable == level)
1041 		return;
1042 
1043 	if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
1044 		if (BEx_chip(adapter))
1045 			be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
1046 						FW_LOG_LEVEL_DEFAULT :
1047 						FW_LOG_LEVEL_FATAL);
1048 	adapter->msg_enable = level;
1049 }
1050 
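/* Translate the adapter's RSS_ENABLE_* flags into the ethtool RXH_* hash
 * field mask for the given flow type.
 */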
1051 static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
1052 {
1053 	u64 data = 0;
1054 
1055 	switch (flow_type) {
1056 	case TCP_V4_FLOW:
1057 		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
1058 			data |= RXH_IP_DST | RXH_IP_SRC;
1059 		if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
1060 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1061 		break;
1062 	case UDP_V4_FLOW:
1063 		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
1064 			data |= RXH_IP_DST | RXH_IP_SRC;
1065 		if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
1066 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1067 		break;
1068 	case TCP_V6_FLOW:
1069 		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
1070 			data |= RXH_IP_DST | RXH_IP_SRC;
1071 		if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
1072 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1073 		break;
1074 	case UDP_V6_FLOW:
1075 		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
1076 			data |= RXH_IP_DST | RXH_IP_SRC;
1077 		if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
1078 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1079 		break;
1080 	}
1081 
1082 	return data;
1083 }
1084 
1085 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1086 			u32 *rule_locs)
1087 {
1088 	struct be_adapter *adapter = netdev_priv(netdev);
1089 
1090 	if (!be_multi_rxq(adapter)) {
1091 		dev_info(&adapter->pdev->dev,
1092 			 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
1093 		return -EINVAL;
1094 	}
1095 
1096 	switch (cmd->cmd) {
1097 	case ETHTOOL_GRXFH:
1098 		cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
1099 		break;
1100 	case ETHTOOL_GRXRINGS:
1101 		cmd->data = adapter->num_rx_qs - 1;
1102 		break;
1103 	default:
1104 		return -EINVAL;
1105 	}
1106 
1107 	return 0;
1108 }
1109 
1110 static int be_set_rss_hash_opts(struct be_adapter *adapter,
1111 				struct ethtool_rxnfc *cmd)
1112 {
1113 	int status;
1114 	u32 rss_flags = adapter->rss_info.rss_flags;
1115 
1116 	if (cmd->data != L3_RSS_FLAGS &&
1117 	    cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
1118 		return -EINVAL;
1119 
1120 	switch (cmd->flow_type) {
1121 	case TCP_V4_FLOW:
1122 		if (cmd->data == L3_RSS_FLAGS)
1123 			rss_flags &= ~RSS_ENABLE_TCP_IPV4;
1124 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1125 			rss_flags |= RSS_ENABLE_IPV4 |
1126 					RSS_ENABLE_TCP_IPV4;
1127 		break;
1128 	case TCP_V6_FLOW:
1129 		if (cmd->data == L3_RSS_FLAGS)
1130 			rss_flags &= ~RSS_ENABLE_TCP_IPV6;
1131 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1132 			rss_flags |= RSS_ENABLE_IPV6 |
1133 					RSS_ENABLE_TCP_IPV6;
1134 		break;
1135 	case UDP_V4_FLOW:
1136 		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1137 		    BEx_chip(adapter))
1138 			return -EINVAL;
1139 
1140 		if (cmd->data == L3_RSS_FLAGS)
1141 			rss_flags &= ~RSS_ENABLE_UDP_IPV4;
1142 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1143 			rss_flags |= RSS_ENABLE_IPV4 |
1144 					RSS_ENABLE_UDP_IPV4;
1145 		break;
1146 	case UDP_V6_FLOW:
1147 		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1148 		    BEx_chip(adapter))
1149 			return -EINVAL;
1150 
1151 		if (cmd->data == L3_RSS_FLAGS)
1152 			rss_flags &= ~RSS_ENABLE_UDP_IPV6;
1153 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1154 			rss_flags |= RSS_ENABLE_IPV6 |
1155 					RSS_ENABLE_UDP_IPV6;
1156 		break;
1157 	default:
1158 		return -EINVAL;
1159 	}
1160 
1161 	if (rss_flags == adapter->rss_info.rss_flags)
1162 		return 0;
1163 
1164 	status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
1165 				   rss_flags, RSS_INDIR_TABLE_LEN,
1166 				   adapter->rss_info.rss_hkey);
1167 	if (!status)
1168 		adapter->rss_info.rss_flags = rss_flags;
1169 
1170 	return be_cmd_status(status);
1171 }
1172 
1173 static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1174 {
1175 	struct be_adapter *adapter = netdev_priv(netdev);
1176 	int status = 0;
1177 
1178 	if (!be_multi_rxq(adapter)) {
1179 		dev_err(&adapter->pdev->dev,
1180 			"ethtool::set_rxnfc: RX flow hashing is disabled\n");
1181 		return -EINVAL;
1182 	}
1183 
1184 	switch (cmd->cmd) {
1185 	case ETHTOOL_SRXFH:
1186 		status = be_set_rss_hash_opts(adapter, cmd);
1187 		break;
1188 	default:
1189 		return -EINVAL;
1190 	}
1191 
1192 	return status;
1193 }
1194 
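/* Report channel counts: RX/TX queues that share an IRQ are counted as
 * combined channels; any remaining IRQs are reported as RX-only or TX-only.
 */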
1195 static void be_get_channels(struct net_device *netdev,
1196 			    struct ethtool_channels *ch)
1197 {
1198 	struct be_adapter *adapter = netdev_priv(netdev);
1199 	u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1);
1200 
1201 	/* num_tx_qs is always the same as the number of IRQs used for TX */
1202 	ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs);
1203 	ch->rx_count = num_rx_irqs - ch->combined_count;
1204 	ch->tx_count = adapter->num_tx_qs - ch->combined_count;
1205 
1206 	ch->max_combined = be_max_qp_irqs(adapter);
1207 	/* The user must create at least one combined channel */
1208 	ch->max_rx = be_max_rx_irqs(adapter) - 1;
1209 	ch->max_tx = be_max_tx_irqs(adapter) - 1;
1210 }
1211 
1212 static int be_set_channels(struct net_device  *netdev,
1213 			   struct ethtool_channels *ch)
1214 {
1215 	struct be_adapter *adapter = netdev_priv(netdev);
1216 	int status;
1217 
1218 	/* We support either combined channels only, or a combination of
1219 	 * combined channels and either RX-only or TX-only channels.
1220 	 */
1221 	if (ch->other_count || !ch->combined_count ||
1222 	    (ch->rx_count && ch->tx_count))
1223 		return -EINVAL;
1224 
1225 	if (ch->combined_count > be_max_qp_irqs(adapter) ||
1226 	    (ch->rx_count &&
1227 	     (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) ||
1228 	    (ch->tx_count &&
1229 	     (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter)))
1230 		return -EINVAL;
1231 
1232 	adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count;
1233 	adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count;
1234 
1235 	status = be_update_queues(adapter);
1236 	return be_cmd_status(status);
1237 }
1238 
1239 static u32 be_get_rxfh_indir_size(struct net_device *netdev)
1240 {
1241 	return RSS_INDIR_TABLE_LEN;
1242 }
1243 
1244 static u32 be_get_rxfh_key_size(struct net_device *netdev)
1245 {
1246 	return RSS_HASH_KEY_LEN;
1247 }
1248 
1249 static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
1250 		       u8 *hfunc)
1251 {
1252 	struct be_adapter *adapter = netdev_priv(netdev);
1253 	int i;
1254 	struct rss_info *rss = &adapter->rss_info;
1255 
1256 	if (indir) {
1257 		for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
1258 			indir[i] = rss->rss_queue[i];
1259 	}
1260 
1261 	if (hkey)
1262 		memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
1263 
1264 	if (hfunc)
1265 		*hfunc = ETH_RSS_HASH_TOP;
1266 
1267 	return 0;
1268 }
1269 
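/* Program a new RSS indirection table and/or hash key. indir[] entries are
 * RX queue indices; each is translated to the HW rss_id of that queue
 * before the table is written to the FW.
 */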
1270 static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
1271 		       const u8 *hkey, const u8 hfunc)
1272 {
1273 	int rc = 0, i, j;
1274 	struct be_adapter *adapter = netdev_priv(netdev);
1275 	u8 rsstable[RSS_INDIR_TABLE_LEN];
1276 
1277 	/* We do not allow changes to unsupported parameters */
1278 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1279 		return -EOPNOTSUPP;
1280 
1281 	if (indir) {
1282 		struct be_rx_obj *rxo;
1283 
1284 		for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
1285 			j = indir[i];
1286 			rxo = &adapter->rx_obj[j];
1287 			rsstable[i] = rxo->rss_id;
1288 			adapter->rss_info.rss_queue[i] = j;
1289 		}
1290 	} else {
1291 		memcpy(rsstable, adapter->rss_info.rsstable,
1292 		       RSS_INDIR_TABLE_LEN);
1293 	}
1294 
1295 	if (!hkey)
1296 		hkey = adapter->rss_info.rss_hkey;
1297 
1298 	rc = be_cmd_rss_config(adapter, rsstable,
1299 			       adapter->rss_info.rss_flags,
1300 			       RSS_INDIR_TABLE_LEN, hkey);
1301 	if (rc) {
1302 		adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
1303 		return -EIO;
1304 	}
1305 	memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
1306 	memcpy(adapter->rss_info.rsstable, rsstable,
1307 	       RSS_INDIR_TABLE_LEN);
1308 	return 0;
1309 }
1310 
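/* Read page A0 of the transceiver EEPROM; the SFF-8472 compliance byte
 * tells us whether the module also exposes the diagnostic page A2.
 */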
1311 static int be_get_module_info(struct net_device *netdev,
1312 			      struct ethtool_modinfo *modinfo)
1313 {
1314 	struct be_adapter *adapter = netdev_priv(netdev);
1315 	u8 page_data[PAGE_DATA_LEN];
1316 	int status;
1317 
1318 	if (!check_privilege(adapter, MAX_PRIVILEGES))
1319 		return -EOPNOTSUPP;
1320 
1321 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
1322 						   page_data);
1323 	if (!status) {
1324 		if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
1325 			modinfo->type = ETH_MODULE_SFF_8079;
1326 			modinfo->eeprom_len = PAGE_DATA_LEN;
1327 		} else {
1328 			modinfo->type = ETH_MODULE_SFF_8472;
1329 			modinfo->eeprom_len = 2 * PAGE_DATA_LEN;
1330 		}
1331 	}
1332 	return be_cmd_status(status);
1333 }
1334 
1335 static int be_get_module_eeprom(struct net_device *netdev,
1336 				struct ethtool_eeprom *eeprom, u8 *data)
1337 {
1338 	struct be_adapter *adapter = netdev_priv(netdev);
1339 	int status;
1340 
1341 	if (!check_privilege(adapter, MAX_PRIVILEGES))
1342 		return -EOPNOTSUPP;
1343 
1344 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
1345 						   data);
1346 	if (status)
1347 		goto err;
1348 
1349 	if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
1350 		status = be_cmd_read_port_transceiver_data(adapter,
1351 							   TR_PAGE_A2,
1352 							   data +
1353 							   PAGE_DATA_LEN);
1354 		if (status)
1355 			goto err;
1356 	}
1357 	if (eeprom->offset)
1358 		memcpy(data, data + eeprom->offset, eeprom->len);
1359 err:
1360 	return be_cmd_status(status);
1361 }
1362 
1363 const struct ethtool_ops be_ethtool_ops = {
1364 	.get_settings = be_get_settings,
1365 	.get_drvinfo = be_get_drvinfo,
1366 	.get_wol = be_get_wol,
1367 	.set_wol = be_set_wol,
1368 	.get_link = ethtool_op_get_link,
1369 	.get_eeprom_len = be_get_eeprom_len,
1370 	.get_eeprom = be_read_eeprom,
1371 	.get_coalesce = be_get_coalesce,
1372 	.set_coalesce = be_set_coalesce,
1373 	.get_ringparam = be_get_ringparam,
1374 	.get_pauseparam = be_get_pauseparam,
1375 	.set_pauseparam = be_set_pauseparam,
1376 	.get_strings = be_get_stat_strings,
1377 	.set_phys_id = be_set_phys_id,
1378 	.set_dump = be_set_dump,
1379 	.get_msglevel = be_get_msg_level,
1380 	.set_msglevel = be_set_msg_level,
1381 	.get_sset_count = be_get_sset_count,
1382 	.get_ethtool_stats = be_get_ethtool_stats,
1383 	.flash_device = be_do_flash,
1384 	.self_test = be_self_test,
1385 	.get_rxnfc = be_get_rxnfc,
1386 	.set_rxnfc = be_set_rxnfc,
1387 	.get_rxfh_indir_size = be_get_rxfh_indir_size,
1388 	.get_rxfh_key_size = be_get_rxfh_key_size,
1389 	.get_rxfh = be_get_rxfh,
1390 	.set_rxfh = be_set_rxfh,
1391 	.get_dump_flag = be_get_dump_flag,
1392 	.get_dump_data = be_get_dump_data,
1393 	.get_channels = be_get_channels,
1394 	.set_channels = be_set_channels,
1395 	.get_module_info = be_get_module_info,
1396 	.get_module_eeprom = be_get_module_eeprom
1397 };
1398