1 /*
2  * Copyright (C) 2005 - 2013 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <linux/ethtool.h>
21 
22 struct be_ethtool_stat {
23 	char desc[ETH_GSTRING_LEN];
24 	int type;
25 	int size;
26 	int offset;
27 };
28 
29 enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
30 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 					offsetof(_struct, field)
32 #define DRVSTAT_TX_INFO(field)	#field, DRVSTAT_TX,\
33 					FIELDINFO(struct be_tx_stats, field)
34 #define DRVSTAT_RX_INFO(field)	#field, DRVSTAT_RX,\
35 					FIELDINFO(struct be_rx_stats, field)
36 #define	DRVSTAT_INFO(field)	#field, DRVSTAT,\
37 					FIELDINFO(struct be_drv_stats, field)
38 
39 static const struct be_ethtool_stat et_stats[] = {
40 	{DRVSTAT_INFO(rx_crc_errors)},
41 	{DRVSTAT_INFO(rx_alignment_symbol_errors)},
42 	{DRVSTAT_INFO(rx_pause_frames)},
43 	{DRVSTAT_INFO(rx_control_frames)},
44 	/* Received packets dropped when the Ethernet length field
45 	 * is not equal to the actual Ethernet data length.
46 	 */
47 	{DRVSTAT_INFO(rx_in_range_errors)},
48 	/* Received packets dropped when their length field is >= 1501 bytes
49 	 * and <= 1535 bytes.
50 	 */
51 	{DRVSTAT_INFO(rx_out_range_errors)},
52 	/* Received packets dropped when they are longer than 9216 bytes */
53 	{DRVSTAT_INFO(rx_frame_too_long)},
54 	/* Received packets dropped when they don't pass the unicast or
55 	 * multicast address filtering.
56 	 */
57 	{DRVSTAT_INFO(rx_address_filtered)},
58 	/* Received packets dropped when IP packet length field is less than
59 	 * the IP header length field.
60 	 */
61 	{DRVSTAT_INFO(rx_dropped_too_small)},
62 	/* Received packets dropped when IP length field is greater than
63 	 * the actual packet length.
64 	 */
65 	{DRVSTAT_INFO(rx_dropped_too_short)},
66 	/* Received packets dropped when the IP header length field is less
67 	 * than 5.
68 	 */
69 	{DRVSTAT_INFO(rx_dropped_header_too_small)},
70 	/* Received packets dropped when the TCP header length field is less
71 	 * than 5 or the TCP header length + IP header length is more
72 	 * than IP packet length.
73 	 */
74 	{DRVSTAT_INFO(rx_dropped_tcp_length)},
75 	{DRVSTAT_INFO(rx_dropped_runt)},
76 	/* Number of received packets dropped when a fifo for descriptors going
77 	 * into the packet demux block overflows. In normal operation, this
78 	 * fifo must never overflow.
79 	 */
80 	{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
81 	{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
82 	{DRVSTAT_INFO(rx_ip_checksum_errs)},
83 	{DRVSTAT_INFO(rx_tcp_checksum_errs)},
84 	{DRVSTAT_INFO(rx_udp_checksum_errs)},
85 	{DRVSTAT_INFO(tx_pauseframes)},
86 	{DRVSTAT_INFO(tx_controlframes)},
87 	{DRVSTAT_INFO(rx_priority_pause_frames)},
88 	{DRVSTAT_INFO(tx_priority_pauseframes)},
89 	/* Received packets dropped when an internal fifo going into
90 	 * main packet buffer tank (PMEM) overflows.
91 	 */
92 	{DRVSTAT_INFO(pmem_fifo_overflow_drop)},
93 	{DRVSTAT_INFO(jabber_events)},
94 	/* Received packets dropped due to lack of available HW packet buffers
95 	 * used to temporarily hold the received packets.
96 	 */
97 	{DRVSTAT_INFO(rx_drops_no_pbuf)},
98 	/* Received packets dropped due to input receive buffer
99 	 * descriptor fifo overflowing.
100 	 */
101 	{DRVSTAT_INFO(rx_drops_no_erx_descr)},
102 	/* Packets dropped because the internal FIFO to the offloaded TCP
103 	 * receive processing block is full. This could happen only for
	 * offloaded iSCSI or FCoE traffic.
105 	 */
106 	{DRVSTAT_INFO(rx_drops_no_tpre_descr)},
107 	/* Received packets dropped when they need more than 8
108 	 * receive buffers. This cannot happen as the driver configures
109 	 * 2048 byte receive buffers.
110 	 */
111 	{DRVSTAT_INFO(rx_drops_too_many_frags)},
112 	{DRVSTAT_INFO(forwarded_packets)},
113 	/* Received packets dropped when the frame length
114 	 * is more than 9018 bytes
115 	 */
116 	{DRVSTAT_INFO(rx_drops_mtu)},
117 	/* Number of packets dropped due to random early drop function */
118 	{DRVSTAT_INFO(eth_red_drops)},
119 	{DRVSTAT_INFO(be_on_die_temperature)}
120 };
121 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
122 
123 /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
124  * are first and second members respectively.
125  */
126 static const struct be_ethtool_stat et_rx_stats[] = {
	{DRVSTAT_RX_INFO(rx_bytes)}, /* If moving this member see above note */
128 	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
129 	{DRVSTAT_RX_INFO(rx_compl)},
130 	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
131 	/* Number of page allocation failures while posting receive buffers
132 	 * to HW.
133 	 */
134 	{DRVSTAT_RX_INFO(rx_post_fail)},
	/* Received packets dropped due to skb allocation failure */
136 	{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
137 	/* Received packets dropped due to lack of available fetched buffers
138 	 * posted by the driver.
139 	 */
140 	{DRVSTAT_RX_INFO(rx_drops_no_frags)}
141 };
142 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
143 
144 /* Stats related to multi TX queues: get_stats routine assumes compl is the
145  * first member
146  */
147 static const struct be_ethtool_stat et_tx_stats[] = {
148 	{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
149 	{DRVSTAT_TX_INFO(tx_bytes)},
150 	{DRVSTAT_TX_INFO(tx_pkts)},
	/* Number of skbs queued for transmission by the driver */
152 	{DRVSTAT_TX_INFO(tx_reqs)},
153 	/* Number of TX work request blocks DMAed to HW */
154 	{DRVSTAT_TX_INFO(tx_wrbs)},
155 	/* Number of times the TX queue was stopped due to lack
	 * of space in the TXQ.
157 	 */
158 	{DRVSTAT_TX_INFO(tx_stops)}
159 };
160 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
161 
162 static const char et_self_tests[][ETH_GSTRING_LEN] = {
163 	"MAC Loopback test",
164 	"PHY Loopback test",
165 	"External Loopback test",
166 	"DDR DMA test",
167 	"Link test"
168 };
169 
170 #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
171 #define BE_MAC_LOOPBACK 0x0
172 #define BE_PHY_LOOPBACK 0x1
173 #define BE_ONE_PORT_EXT_LOOPBACK 0x2
174 #define BE_NO_LOOPBACK 0xff
175 
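/* Report driver name, driver version and FW version for ethtool -i.
 * When the FW image on flash differs from the running FW, both versions
 * are reported as "running-ver [flash-ver]".
 */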
176 static void be_get_drvinfo(struct net_device *netdev,
177 				struct ethtool_drvinfo *drvinfo)
178 {
179 	struct be_adapter *adapter = netdev_priv(netdev);
180 	char fw_on_flash[FW_VER_LEN];
181 
	memset(fw_on_flash, 0, sizeof(fw_on_flash));
183 	be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
184 
185 	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
186 	strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
187 	if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN))
188 		strlcpy(drvinfo->fw_version, adapter->fw_ver,
189 			sizeof(drvinfo->fw_version));
190 	else
191 		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
192 			 "%s [%s]", adapter->fw_ver, fw_on_flash);
193 
194 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
195 		sizeof(drvinfo->bus_info));
196 	drvinfo->testinfo_len = 0;
197 	drvinfo->regdump_len = 0;
198 	drvinfo->eedump_len = 0;
199 }
200 
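/* Return the length of a file object on Lancer chips. Issuing a read with
 * data_offset and data_size both set to 0 makes the FW report the object
 * length in data_read without transferring any data.
 */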
201 static u32
202 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
203 {
204 	u32 data_read = 0, eof;
205 	u8 addn_status;
206 	struct be_dma_mem data_len_cmd;
207 	int status;
208 
209 	memset(&data_len_cmd, 0, sizeof(data_len_cmd));
	/* data_offset and data_size should be 0 to get the file length */
211 	status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
212 				file_name, &data_read, &eof, &addn_status);
213 
214 	return data_read;
215 }
216 
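/* Read up to buf_len bytes of a Lancer file object into buf, in chunks of
 * at most LANCER_READ_FILE_CHUNK bytes, stopping early on end-of-file or
 * on a read error.
 */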
217 static int
218 lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
219 		u32 buf_len, void *buf)
220 {
221 	struct be_dma_mem read_cmd;
222 	u32 read_len = 0, total_read_len = 0, chunk_size;
223 	u32 eof = 0;
224 	u8 addn_status;
225 	int status = 0;
226 
227 	read_cmd.size = LANCER_READ_FILE_CHUNK;
228 	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
229 			&read_cmd.dma);
230 
231 	if (!read_cmd.va) {
232 		dev_err(&adapter->pdev->dev,
233 				"Memory allocation failure while reading dump\n");
234 		return -ENOMEM;
235 	}
236 
237 	while ((total_read_len < buf_len) && !eof) {
238 		chunk_size = min_t(u32, (buf_len - total_read_len),
239 				LANCER_READ_FILE_CHUNK);
240 		chunk_size = ALIGN(chunk_size, 4);
241 		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
242 				total_read_len, file_name, &read_len,
243 				&eof, &addn_status);
244 		if (!status) {
245 			memcpy(buf + total_read_len, read_cmd.va, read_len);
246 			total_read_len += read_len;
247 			eof &= LANCER_READ_FILE_EOF_MASK;
248 		} else {
249 			status = -EIO;
250 			break;
251 		}
252 	}
253 	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
254 			read_cmd.dma);
255 
256 	return status;
257 }
258 
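/* ethtool get_regs_len handler. Only a privileged PF reports a non-zero
 * length: the FW dump file size on Lancer chips, or the length returned by
 * be_cmd_get_reg_len() otherwise.
 */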
259 static int
260 be_get_reg_len(struct net_device *netdev)
261 {
262 	struct be_adapter *adapter = netdev_priv(netdev);
263 	u32 log_size = 0;
264 
265 	if (!check_privilege(adapter, MAX_PRIVILEGES))
266 		return 0;
267 
268 	if (be_physfn(adapter)) {
269 		if (lancer_chip(adapter))
270 			log_size = lancer_cmd_get_file_len(adapter,
271 					LANCER_FW_DUMP_FILE);
272 		else
273 			be_cmd_get_reg_len(adapter, &log_size);
274 	}
275 	return log_size;
276 }
277 
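/* ethtool get_regs handler: fill buf with the FW dump file contents on
 * Lancer chips, or with the register dump fetched via be_cmd_get_regs()
 * on other chips (PF only).
 */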
278 static void
279 be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
280 {
281 	struct be_adapter *adapter = netdev_priv(netdev);
282 
283 	if (be_physfn(adapter)) {
284 		memset(buf, 0, regs->len);
285 		if (lancer_chip(adapter))
286 			lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
287 					regs->len, buf);
288 		else
289 			be_cmd_get_regs(adapter, regs->len, buf);
290 	}
291 }
292 
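/* Report interrupt coalescing settings from the first event queue;
 * be_set_coalesce() programs the same values into every EQ, so EQ 0 is
 * representative of all of them.
 */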
293 static int be_get_coalesce(struct net_device *netdev,
294 			   struct ethtool_coalesce *et)
295 {
296 	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo = &adapter->eq_obj[0];

	et->rx_coalesce_usecs = eqo->cur_eqd;
301 	et->rx_coalesce_usecs_high = eqo->max_eqd;
302 	et->rx_coalesce_usecs_low = eqo->min_eqd;
303 
304 	et->tx_coalesce_usecs = eqo->cur_eqd;
305 	et->tx_coalesce_usecs_high = eqo->max_eqd;
306 	et->tx_coalesce_usecs_low = eqo->min_eqd;
307 
308 	et->use_adaptive_rx_coalesce = eqo->enable_aic;
309 	et->use_adaptive_tx_coalesce = eqo->enable_aic;
310 
311 	return 0;
312 }
313 
/* TX attributes are ignored. Only RX attributes are considered.
 * The EQ-delay (eqd) command is issued from the worker thread.
316  */
317 static int be_set_coalesce(struct net_device *netdev,
318 			   struct ethtool_coalesce *et)
319 {
320 	struct be_adapter *adapter = netdev_priv(netdev);
321 	struct be_eq_obj *eqo;
322 	int i;
323 
324 	for_all_evt_queues(adapter, eqo, i) {
325 		eqo->enable_aic = et->use_adaptive_rx_coalesce;
326 		eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
327 		eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
328 		eqo->eqd = et->rx_coalesce_usecs;
329 	}
330 
331 	return 0;
332 }
333 
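/* ethtool -S handler: copy the driver-wide stats first, then the per-RX
 * and per-TX queue stats. The 64-bit per-queue counters are read under
 * their u64_stats seqcounts so they are sampled consistently.
 */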
334 static void
335 be_get_ethtool_stats(struct net_device *netdev,
336 		struct ethtool_stats *stats, uint64_t *data)
337 {
338 	struct be_adapter *adapter = netdev_priv(netdev);
339 	struct be_rx_obj *rxo;
340 	struct be_tx_obj *txo;
341 	void *p;
342 	unsigned int i, j, base = 0, start;
343 
344 	for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
345 		p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
346 		data[i] = *(u32 *)p;
347 	}
348 	base += ETHTOOL_STATS_NUM;
349 
350 	for_all_rx_queues(adapter, rxo, j) {
351 		struct be_rx_stats *stats = rx_stats(rxo);
352 
353 		do {
354 			start = u64_stats_fetch_begin_bh(&stats->sync);
355 			data[base] = stats->rx_bytes;
356 			data[base + 1] = stats->rx_pkts;
357 		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
358 
359 		for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
360 			p = (u8 *)stats + et_rx_stats[i].offset;
361 			data[base + i] = *(u32 *)p;
362 		}
363 		base += ETHTOOL_RXSTATS_NUM;
364 	}
365 
366 	for_all_tx_queues(adapter, txo, j) {
367 		struct be_tx_stats *stats = tx_stats(txo);
368 
369 		do {
370 			start = u64_stats_fetch_begin_bh(&stats->sync_compl);
371 			data[base] = stats->tx_compl;
372 		} while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
373 
374 		do {
375 			start = u64_stats_fetch_begin_bh(&stats->sync);
376 			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
377 				p = (u8 *)stats + et_tx_stats[i].offset;
378 				data[base + i] =
379 					(et_tx_stats[i].size == sizeof(u64)) ?
380 						*(u64 *)p : *(u32 *)p;
381 			}
382 		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
383 		base += ETHTOOL_TXSTATS_NUM;
384 	}
385 }
386 
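/* Emit the stat name strings in the same order as be_get_ethtool_stats()
 * fills the data array: driver-wide stats, then "rxqN:" and "txqN:"
 * prefixed per-queue stats. Also emits the self-test names for ETH_SS_TEST.
 */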
387 static void
388 be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
389 		uint8_t *data)
390 {
391 	struct be_adapter *adapter = netdev_priv(netdev);
392 	int i, j;
393 
394 	switch (stringset) {
395 	case ETH_SS_STATS:
396 		for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
397 			memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
398 			data += ETH_GSTRING_LEN;
399 		}
400 		for (i = 0; i < adapter->num_rx_qs; i++) {
401 			for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
402 				sprintf(data, "rxq%d: %s", i,
403 					et_rx_stats[j].desc);
404 				data += ETH_GSTRING_LEN;
405 			}
406 		}
407 		for (i = 0; i < adapter->num_tx_qs; i++) {
408 			for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
409 				sprintf(data, "txq%d: %s", i,
410 					et_tx_stats[j].desc);
411 				data += ETH_GSTRING_LEN;
412 			}
413 		}
414 		break;
415 	case ETH_SS_TEST:
416 		for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
417 			memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
418 			data += ETH_GSTRING_LEN;
419 		}
420 		break;
421 	}
422 }
423 
424 static int be_get_sset_count(struct net_device *netdev, int stringset)
425 {
426 	struct be_adapter *adapter = netdev_priv(netdev);
427 
428 	switch (stringset) {
429 	case ETH_SS_TEST:
430 		return ETHTOOL_TESTS_NUM;
431 	case ETH_SS_STATS:
432 		return ETHTOOL_STATS_NUM +
433 			adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
434 			adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
435 	default:
436 		return -EINVAL;
437 	}
438 }
439 
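/* Map the PHY interface type reported by FW to an ethtool PORT_* value;
 * an SFP+ port with a DAC cable attached is reported as PORT_DA.
 */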
440 static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
441 {
442 	u32 port;
443 
444 	switch (phy_type) {
445 	case PHY_TYPE_BASET_1GB:
446 	case PHY_TYPE_BASEX_1GB:
447 	case PHY_TYPE_SGMII:
448 		port = PORT_TP;
449 		break;
450 	case PHY_TYPE_SFP_PLUS_10GB:
451 		port = dac_cable_len ? PORT_DA : PORT_FIBRE;
452 		break;
453 	case PHY_TYPE_XFP_10GB:
454 	case PHY_TYPE_SFP_1GB:
455 		port = PORT_FIBRE;
456 		break;
457 	case PHY_TYPE_BASET_10GB:
458 		port = PORT_TP;
459 		break;
460 	default:
461 		port = PORT_OTHER;
462 	}
463 
464 	return port;
465 }
466 
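/* Translate a PHY interface type and a bitmap of supported speeds into the
 * corresponding ethtool SUPPORTED_* link mode flags.
 */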
467 static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
468 {
469 	u32 val = 0;
470 
471 	switch (if_type) {
472 	case PHY_TYPE_BASET_1GB:
473 	case PHY_TYPE_BASEX_1GB:
474 	case PHY_TYPE_SGMII:
475 		val |= SUPPORTED_TP;
476 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
477 			val |= SUPPORTED_1000baseT_Full;
478 		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
479 			val |= SUPPORTED_100baseT_Full;
480 		if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
481 			val |= SUPPORTED_10baseT_Full;
482 		break;
483 	case PHY_TYPE_KX4_10GB:
484 		val |= SUPPORTED_Backplane;
485 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
486 			val |= SUPPORTED_1000baseKX_Full;
487 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
488 			val |= SUPPORTED_10000baseKX4_Full;
489 		break;
490 	case PHY_TYPE_KR_10GB:
491 		val |= SUPPORTED_Backplane |
492 				SUPPORTED_10000baseKR_Full;
493 		break;
494 	case PHY_TYPE_SFP_PLUS_10GB:
495 	case PHY_TYPE_XFP_10GB:
496 	case PHY_TYPE_SFP_1GB:
497 		val |= SUPPORTED_FIBRE;
498 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
499 			val |= SUPPORTED_10000baseT_Full;
500 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
501 			val |= SUPPORTED_1000baseT_Full;
502 		break;
503 	case PHY_TYPE_BASET_10GB:
504 		val |= SUPPORTED_TP;
505 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
506 			val |= SUPPORTED_10000baseT_Full;
507 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
508 			val |= SUPPORTED_1000baseT_Full;
509 		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
510 			val |= SUPPORTED_100baseT_Full;
511 		break;
512 	default:
513 		val |= SUPPORTED_TP;
514 	}
515 
516 	return val;
517 }
518 
519 bool be_pause_supported(struct be_adapter *adapter)
520 {
521 	return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
522 		adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
523 		false : true;
524 }
525 
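/* ethtool get_settings handler. PHY information (supported and advertised
 * modes, port and transceiver type) is queried from FW only while
 * phy.link_speed is negative; the results are cached in adapter->phy and
 * reused on subsequent calls.
 */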
526 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
527 {
528 	struct be_adapter *adapter = netdev_priv(netdev);
529 	u8 link_status;
530 	u16 link_speed = 0;
531 	int status;
532 	u32 auto_speeds;
533 	u32 fixed_speeds;
534 	u32 dac_cable_len;
535 	u16 interface_type;
536 
537 	if (adapter->phy.link_speed < 0) {
538 		status = be_cmd_link_status_query(adapter, &link_speed,
539 						  &link_status, 0);
540 		if (!status)
541 			be_link_status_update(adapter, link_status);
542 		ethtool_cmd_speed_set(ecmd, link_speed);
543 
544 		status = be_cmd_get_phy_info(adapter);
545 		if (!status) {
546 			interface_type = adapter->phy.interface_type;
547 			auto_speeds = adapter->phy.auto_speeds_supported;
548 			fixed_speeds = adapter->phy.fixed_speeds_supported;
549 			dac_cable_len = adapter->phy.dac_cable_len;
550 
551 			ecmd->supported =
552 				convert_to_et_setting(interface_type,
553 						      auto_speeds |
554 						      fixed_speeds);
555 			ecmd->advertising =
556 				convert_to_et_setting(interface_type,
557 						      auto_speeds);
558 
559 			ecmd->port = be_get_port_type(interface_type,
560 						      dac_cable_len);
561 
562 			if (adapter->phy.auto_speeds_supported) {
563 				ecmd->supported |= SUPPORTED_Autoneg;
564 				ecmd->autoneg = AUTONEG_ENABLE;
565 				ecmd->advertising |= ADVERTISED_Autoneg;
566 			}
567 
568 			ecmd->supported |= SUPPORTED_Pause;
569 			if (be_pause_supported(adapter))
570 				ecmd->advertising |= ADVERTISED_Pause;
571 
572 			switch (adapter->phy.interface_type) {
573 			case PHY_TYPE_KR_10GB:
574 			case PHY_TYPE_KX4_10GB:
575 				ecmd->transceiver = XCVR_INTERNAL;
576 				break;
577 			default:
578 				ecmd->transceiver = XCVR_EXTERNAL;
579 				break;
580 			}
581 		} else {
582 			ecmd->port = PORT_OTHER;
583 			ecmd->autoneg = AUTONEG_DISABLE;
584 			ecmd->transceiver = XCVR_DUMMY1;
585 		}
586 
587 		/* Save for future use */
588 		adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
589 		adapter->phy.port_type = ecmd->port;
590 		adapter->phy.transceiver = ecmd->transceiver;
591 		adapter->phy.autoneg = ecmd->autoneg;
592 		adapter->phy.advertising = ecmd->advertising;
593 		adapter->phy.supported = ecmd->supported;
594 	} else {
595 		ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
596 		ecmd->port = adapter->phy.port_type;
597 		ecmd->transceiver = adapter->phy.transceiver;
598 		ecmd->autoneg = adapter->phy.autoneg;
599 		ecmd->advertising = adapter->phy.advertising;
600 		ecmd->supported = adapter->phy.supported;
601 	}
602 
603 	ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
604 	ecmd->phy_address = adapter->port_num;
605 
606 	return 0;
607 }
608 
609 static void be_get_ringparam(struct net_device *netdev,
610 			     struct ethtool_ringparam *ring)
611 {
612 	struct be_adapter *adapter = netdev_priv(netdev);
613 
614 	ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
615 	ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
616 }
617 
618 static void
619 be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
620 {
621 	struct be_adapter *adapter = netdev_priv(netdev);
622 
623 	be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
624 	ecmd->autoneg = adapter->phy.fc_autoneg;
625 }
626 
627 static int
628 be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
629 {
630 	struct be_adapter *adapter = netdev_priv(netdev);
631 	int status;
632 
633 	if (ecmd->autoneg != adapter->phy.fc_autoneg)
634 		return -EINVAL;
635 	adapter->tx_fc = ecmd->tx_pause;
636 	adapter->rx_fc = ecmd->rx_pause;
637 
638 	status = be_cmd_set_flow_control(adapter,
639 					adapter->tx_fc, adapter->rx_fc);
640 	if (status)
641 		dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
642 
643 	return status;
644 }
645 
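/* ethtool port-identify handler: save the current beacon state when the
 * blink sequence starts, force the beacon on/off for each blink phase, and
 * restore the saved state when identification ends.
 */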
646 static int
647 be_set_phys_id(struct net_device *netdev,
648 	       enum ethtool_phys_id_state state)
649 {
650 	struct be_adapter *adapter = netdev_priv(netdev);
651 
652 	switch (state) {
653 	case ETHTOOL_ID_ACTIVE:
654 		be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
655 					&adapter->beacon_state);
656 		return 1;	/* cycle on/off once per second */
657 
658 	case ETHTOOL_ID_ON:
659 		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
660 					BEACON_STATE_ENABLED);
661 		break;
662 
663 	case ETHTOOL_ID_OFF:
664 		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
665 					BEACON_STATE_DISABLED);
666 		break;
667 
668 	case ETHTOOL_ID_INACTIVE:
669 		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
670 					adapter->beacon_state);
671 	}
672 
673 	return 0;
674 }
675 
676 
677 static void
678 be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
679 {
680 	struct be_adapter *adapter = netdev_priv(netdev);
681 
682 	if (be_is_wol_supported(adapter)) {
683 		wol->supported |= WAKE_MAGIC;
684 		if (adapter->wol)
685 			wol->wolopts |= WAKE_MAGIC;
	} else {
		wol->wolopts = 0;
	}
688 	memset(&wol->sopass, 0, sizeof(wol->sopass));
689 }
690 
691 static int
692 be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
693 {
694 	struct be_adapter *adapter = netdev_priv(netdev);
695 
696 	if (wol->wolopts & ~WAKE_MAGIC)
697 		return -EOPNOTSUPP;
698 
699 	if (!be_is_wol_supported(adapter)) {
700 		dev_warn(&adapter->pdev->dev, "WOL not supported\n");
701 		return -EOPNOTSUPP;
702 	}
703 
704 	if (wol->wolopts & WAKE_MAGIC)
705 		adapter->wol = true;
706 	else
707 		adapter->wol = false;
708 
709 	return 0;
710 }
711 
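/* DDR DMA self-test: issue the FW DDR DMA test command twice, once with
 * each 64-bit test pattern and a byte count of 4096, and fail on the
 * first error.
 */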
712 static int
713 be_test_ddr_dma(struct be_adapter *adapter)
714 {
715 	int ret, i;
716 	struct be_dma_mem ddrdma_cmd;
717 	static const u64 pattern[2] = {
718 		0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
719 	};
720 
721 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
722 	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
723 					   &ddrdma_cmd.dma, GFP_KERNEL);
724 	if (!ddrdma_cmd.va)
725 		return -ENOMEM;
726 
727 	for (i = 0; i < 2; i++) {
728 		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
729 					4096, &ddrdma_cmd);
730 		if (ret != 0)
731 			goto err;
732 	}
733 
734 err:
735 	dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
736 			  ddrdma_cmd.dma);
737 	return ret;
738 }
739 
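/* Run a single FW loopback test: enable the requested loopback mode on the
 * port, execute the loopback test command, then switch the port back to
 * normal (no loopback) operation. The test status is stored in *status and
 * also returned.
 */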
740 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
741 				u64 *status)
742 {
743 	be_cmd_set_loopback(adapter, adapter->hba_port_num,
744 				loopback_type, 1);
745 	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
746 				loopback_type, 1500,
747 				2, 0xabc);
748 	be_cmd_set_loopback(adapter, adapter->hba_port_num,
749 				BE_NO_LOOPBACK, 1);
750 	return *status;
751 }
752 
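/* ethtool self-test handler. Offline tests run the MAC, PHY and external
 * loopback tests; the DDR DMA test runs on non-Lancer chips; the link test
 * always runs. A non-zero entry in data[] plus ETH_TEST_FL_FAILED marks a
 * failed test. Functions with the SUPER_NIC capability do not support
 * self-test.
 */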
753 static void
754 be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
755 {
756 	struct be_adapter *adapter = netdev_priv(netdev);
757 	int status;
758 	u8 link_status = 0;
759 
760 	if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
761 		dev_err(&adapter->pdev->dev, "Self test not supported\n");
762 		test->flags |= ETH_TEST_FL_FAILED;
763 		return;
764 	}
765 
766 	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
767 
768 	if (test->flags & ETH_TEST_FL_OFFLINE) {
769 		if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
770 						&data[0]) != 0) {
771 			test->flags |= ETH_TEST_FL_FAILED;
772 		}
773 		if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
774 						&data[1]) != 0) {
775 			test->flags |= ETH_TEST_FL_FAILED;
776 		}
777 		if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
778 						&data[2]) != 0) {
779 			test->flags |= ETH_TEST_FL_FAILED;
780 		}
781 	}
782 
783 	if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
784 		data[3] = 1;
785 		test->flags |= ETH_TEST_FL_FAILED;
786 	}
787 
788 	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
789 	if (status) {
790 		test->flags |= ETH_TEST_FL_FAILED;
791 		data[4] = -1;
792 	} else if (!link_status) {
793 		test->flags |= ETH_TEST_FL_FAILED;
794 		data[4] = 1;
795 	}
796 }
797 
798 static int
799 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
800 {
801 	struct be_adapter *adapter = netdev_priv(netdev);
802 
803 	return be_load_fw(adapter, efl->data);
804 }
805 
806 static int
807 be_get_eeprom_len(struct net_device *netdev)
808 {
809 	struct be_adapter *adapter = netdev_priv(netdev);
810 
811 	if (!check_privilege(adapter, MAX_PRIVILEGES))
812 		return 0;
813 
814 	if (lancer_chip(adapter)) {
815 		if (be_physfn(adapter))
816 			return lancer_cmd_get_file_len(adapter,
817 					LANCER_VPD_PF_FILE);
818 		else
819 			return lancer_cmd_get_file_len(adapter,
820 					LANCER_VPD_VF_FILE);
821 	} else {
822 		return BE_READ_SEEPROM_LEN;
823 	}
824 }
825 
826 static int
827 be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
828 			uint8_t *data)
829 {
830 	struct be_adapter *adapter = netdev_priv(netdev);
831 	struct be_dma_mem eeprom_cmd;
832 	struct be_cmd_resp_seeprom_read *resp;
833 	int status;
834 
835 	if (!eeprom->len)
836 		return -EINVAL;
837 
838 	if (lancer_chip(adapter)) {
839 		if (be_physfn(adapter))
840 			return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
841 					eeprom->len, data);
842 		else
843 			return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
844 					eeprom->len, data);
845 	}
846 
	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device << 16);
848 
849 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
850 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
851 	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
852 					   &eeprom_cmd.dma, GFP_KERNEL);
853 
854 	if (!eeprom_cmd.va)
855 		return -ENOMEM;
856 
857 	status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
858 
859 	if (!status) {
860 		resp = eeprom_cmd.va;
861 		memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
862 	}
863 	dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
864 			  eeprom_cmd.dma);
865 
866 	return status;
867 }
868 
869 static u32 be_get_msg_level(struct net_device *netdev)
870 {
871 	struct be_adapter *adapter = netdev_priv(netdev);
872 
873 	if (lancer_chip(adapter)) {
874 		dev_err(&adapter->pdev->dev, "Operation not supported\n");
875 		return -EOPNOTSUPP;
876 	}
877 
878 	return adapter->msg_enable;
879 }
880 
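/* Program the FW logging level: fetch the extended FAT capabilities, set
 * the debug level of the UART trace mode for every module, and write the
 * updated configuration back to the FW.
 */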
881 static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
882 {
883 	struct be_dma_mem extfat_cmd;
884 	struct be_fat_conf_params *cfgs;
885 	int status;
886 	int i, j;
887 
888 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
889 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
890 	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
891 					     &extfat_cmd.dma);
892 	if (!extfat_cmd.va) {
893 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
894 			__func__);
895 		goto err;
896 	}
897 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
898 	if (!status) {
899 		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
900 					sizeof(struct be_cmd_resp_hdr));
901 		for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
902 			u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
903 			for (j = 0; j < num_modes; j++) {
904 				if (cfgs->module[i].trace_lvl[j].mode ==
905 								MODE_UART)
906 					cfgs->module[i].trace_lvl[j].dbg_lvl =
907 							cpu_to_le32(level);
908 			}
909 		}
910 		status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
911 							cfgs);
912 		if (status)
913 			dev_err(&adapter->pdev->dev,
914 				"Message level set failed\n");
915 	} else {
916 		dev_err(&adapter->pdev->dev, "Message level get failed\n");
917 	}
918 
919 	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
920 			    extfat_cmd.dma);
921 err:
922 	return;
923 }
924 
925 static void be_set_msg_level(struct net_device *netdev, u32 level)
926 {
927 	struct be_adapter *adapter = netdev_priv(netdev);
928 
929 	if (lancer_chip(adapter)) {
930 		dev_err(&adapter->pdev->dev, "Operation not supported\n");
931 		return;
932 	}
933 
934 	if (adapter->msg_enable == level)
935 		return;
936 
937 	if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
938 		be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
939 				    FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
	adapter->msg_enable = level;
943 }
944 
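/* Report which header fields (IP addresses and/or L4 ports) are used for
 * RSS hashing of the given flow type, based on the adapter's current
 * rss_flags.
 */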
945 static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
946 {
947 	u64 data = 0;
948 
949 	switch (flow_type) {
950 	case TCP_V4_FLOW:
951 		if (adapter->rss_flags & RSS_ENABLE_IPV4)
952 			data |= RXH_IP_DST | RXH_IP_SRC;
953 		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
954 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
955 		break;
956 	case UDP_V4_FLOW:
957 		if (adapter->rss_flags & RSS_ENABLE_IPV4)
958 			data |= RXH_IP_DST | RXH_IP_SRC;
959 		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
960 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
961 		break;
962 	case TCP_V6_FLOW:
963 		if (adapter->rss_flags & RSS_ENABLE_IPV6)
964 			data |= RXH_IP_DST | RXH_IP_SRC;
965 		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
966 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
967 		break;
968 	case UDP_V6_FLOW:
969 		if (adapter->rss_flags & RSS_ENABLE_IPV6)
970 			data |= RXH_IP_DST | RXH_IP_SRC;
971 		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
972 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
973 		break;
974 	}
975 
976 	return data;
977 }
978 
979 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
980 		      u32 *rule_locs)
981 {
982 	struct be_adapter *adapter = netdev_priv(netdev);
983 
984 	if (!be_multi_rxq(adapter)) {
985 		dev_info(&adapter->pdev->dev,
986 			 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
987 		return -EINVAL;
988 	}
989 
990 	switch (cmd->cmd) {
991 	case ETHTOOL_GRXFH:
992 		cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
993 		break;
994 	case ETHTOOL_GRXRINGS:
995 		cmd->data = adapter->num_rx_qs - 1;
996 		break;
997 	default:
998 		return -EINVAL;
999 	}
1000 
1001 	return 0;
1002 }
1003 
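/* Update rss_flags for the given flow type and reprogram the RSS table.
 * Only L3 or L3+L4 hashing is accepted, and L4 hashing of UDP flows is not
 * supported on BEx chips.
 */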
1004 static int be_set_rss_hash_opts(struct be_adapter *adapter,
1005 				struct ethtool_rxnfc *cmd)
1006 {
1007 	struct be_rx_obj *rxo;
1008 	int status = 0, i, j;
1009 	u8 rsstable[128];
1010 	u32 rss_flags = adapter->rss_flags;
1011 
1012 	if (cmd->data != L3_RSS_FLAGS &&
1013 	    cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
1014 		return -EINVAL;
1015 
1016 	switch (cmd->flow_type) {
1017 	case TCP_V4_FLOW:
1018 		if (cmd->data == L3_RSS_FLAGS)
1019 			rss_flags &= ~RSS_ENABLE_TCP_IPV4;
1020 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1021 			rss_flags |= RSS_ENABLE_IPV4 |
1022 					RSS_ENABLE_TCP_IPV4;
1023 		break;
1024 	case TCP_V6_FLOW:
1025 		if (cmd->data == L3_RSS_FLAGS)
1026 			rss_flags &= ~RSS_ENABLE_TCP_IPV6;
1027 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1028 			rss_flags |= RSS_ENABLE_IPV6 |
1029 					RSS_ENABLE_TCP_IPV6;
1030 		break;
1031 	case UDP_V4_FLOW:
1032 		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1033 		    BEx_chip(adapter))
1034 			return -EINVAL;
1035 
1036 		if (cmd->data == L3_RSS_FLAGS)
1037 			rss_flags &= ~RSS_ENABLE_UDP_IPV4;
1038 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1039 			rss_flags |= RSS_ENABLE_IPV4 |
1040 					RSS_ENABLE_UDP_IPV4;
1041 		break;
1042 	case UDP_V6_FLOW:
1043 		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1044 		    BEx_chip(adapter))
1045 			return -EINVAL;
1046 
1047 		if (cmd->data == L3_RSS_FLAGS)
1048 			rss_flags &= ~RSS_ENABLE_UDP_IPV6;
1049 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1050 			rss_flags |= RSS_ENABLE_IPV6 |
1051 					RSS_ENABLE_UDP_IPV6;
1052 		break;
1053 	default:
1054 		return -EINVAL;
1055 	}
1056 
1057 	if (rss_flags == adapter->rss_flags)
1058 		return status;
1059 
1060 	if (be_multi_rxq(adapter)) {
1061 		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
1062 			for_all_rss_queues(adapter, rxo, i) {
1063 				if ((j + i) >= 128)
1064 					break;
1065 				rsstable[j + i] = rxo->rss_id;
1066 			}
1067 		}
1068 	}
1069 	status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
1070 	if (!status)
1071 		adapter->rss_flags = rss_flags;
1072 
1073 	return status;
1074 }
1075 
1076 static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1077 {
1078 	struct be_adapter *adapter = netdev_priv(netdev);
1079 	int status = 0;
1080 
1081 	if (!be_multi_rxq(adapter)) {
1082 		dev_err(&adapter->pdev->dev,
1083 			"ethtool::set_rxnfc: RX flow hashing is disabled\n");
1084 		return -EINVAL;
1085 	}
1086 
1087 	switch (cmd->cmd) {
1088 	case ETHTOOL_SRXFH:
1089 		status = be_set_rss_hash_opts(adapter, cmd);
1090 		break;
1091 	default:
1092 		return -EINVAL;
1093 	}
1094 
1095 	return status;
1096 }
1097 
1098 const struct ethtool_ops be_ethtool_ops = {
1099 	.get_settings = be_get_settings,
1100 	.get_drvinfo = be_get_drvinfo,
1101 	.get_wol = be_get_wol,
1102 	.set_wol = be_set_wol,
1103 	.get_link = ethtool_op_get_link,
1104 	.get_eeprom_len = be_get_eeprom_len,
1105 	.get_eeprom = be_read_eeprom,
1106 	.get_coalesce = be_get_coalesce,
1107 	.set_coalesce = be_set_coalesce,
1108 	.get_ringparam = be_get_ringparam,
1109 	.get_pauseparam = be_get_pauseparam,
1110 	.set_pauseparam = be_set_pauseparam,
1111 	.get_strings = be_get_stat_strings,
1112 	.set_phys_id = be_set_phys_id,
1113 	.get_msglevel = be_get_msg_level,
1114 	.set_msglevel = be_set_msg_level,
1115 	.get_sset_count = be_get_sset_count,
1116 	.get_ethtool_stats = be_get_ethtool_stats,
1117 	.get_regs_len = be_get_reg_len,
1118 	.get_regs = be_get_regs,
1119 	.flash_device = be_do_flash,
1120 	.self_test = be_self_test,
1121 	.get_rxnfc = be_get_rxnfc,
1122 	.set_rxnfc = be_set_rxnfc,
1123 };
1124