1 /*
2  * Copyright (C) 2005 - 2013 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <linux/ethtool.h>
21 
22 struct be_ethtool_stat {
23 	char desc[ETH_GSTRING_LEN];
24 	int type;
25 	int size;
26 	int offset;
27 };
28 
29 enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
30 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 					offsetof(_struct, field)
32 #define DRVSTAT_TX_INFO(field)	#field, DRVSTAT_TX,\
33 					FIELDINFO(struct be_tx_stats, field)
34 #define DRVSTAT_RX_INFO(field)	#field, DRVSTAT_RX,\
35 					FIELDINFO(struct be_rx_stats, field)
36 #define	DRVSTAT_INFO(field)	#field, DRVSTAT,\
37 					FIELDINFO(struct be_drv_stats, field)
38 
39 static const struct be_ethtool_stat et_stats[] = {
40 	{DRVSTAT_INFO(rx_crc_errors)},
41 	{DRVSTAT_INFO(rx_alignment_symbol_errors)},
42 	{DRVSTAT_INFO(rx_pause_frames)},
43 	{DRVSTAT_INFO(rx_control_frames)},
44 	/* Received packets dropped when the Ethernet length field
45 	 * is not equal to the actual Ethernet data length.
46 	 */
47 	{DRVSTAT_INFO(rx_in_range_errors)},
48 	/* Received packets dropped when their length field is >= 1501 bytes
49 	 * and <= 1535 bytes.
50 	 */
51 	{DRVSTAT_INFO(rx_out_range_errors)},
52 	/* Received packets dropped when they are longer than 9216 bytes */
53 	{DRVSTAT_INFO(rx_frame_too_long)},
54 	/* Received packets dropped when they don't pass the unicast or
55 	 * multicast address filtering.
56 	 */
57 	{DRVSTAT_INFO(rx_address_filtered)},
58 	/* Received packets dropped when IP packet length field is less than
59 	 * the IP header length field.
60 	 */
61 	{DRVSTAT_INFO(rx_dropped_too_small)},
62 	/* Received packets dropped when IP length field is greater than
63 	 * the actual packet length.
64 	 */
65 	{DRVSTAT_INFO(rx_dropped_too_short)},
66 	/* Received packets dropped when the IP header length field is less
67 	 * than 5.
68 	 */
69 	{DRVSTAT_INFO(rx_dropped_header_too_small)},
70 	/* Received packets dropped when the TCP header length field is less
71 	 * than 5 or the TCP header length + IP header length is more
72 	 * than IP packet length.
73 	 */
74 	{DRVSTAT_INFO(rx_dropped_tcp_length)},
75 	{DRVSTAT_INFO(rx_dropped_runt)},
76 	/* Number of received packets dropped when a fifo for descriptors going
77 	 * into the packet demux block overflows. In normal operation, this
78 	 * fifo must never overflow.
79 	 */
80 	{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
81 	{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
82 	{DRVSTAT_INFO(rx_ip_checksum_errs)},
83 	{DRVSTAT_INFO(rx_tcp_checksum_errs)},
84 	{DRVSTAT_INFO(rx_udp_checksum_errs)},
85 	{DRVSTAT_INFO(tx_pauseframes)},
86 	{DRVSTAT_INFO(tx_controlframes)},
87 	{DRVSTAT_INFO(rx_priority_pause_frames)},
88 	{DRVSTAT_INFO(tx_priority_pauseframes)},
89 	/* Received packets dropped when an internal fifo going into
90 	 * main packet buffer tank (PMEM) overflows.
91 	 */
92 	{DRVSTAT_INFO(pmem_fifo_overflow_drop)},
93 	{DRVSTAT_INFO(jabber_events)},
94 	/* Received packets dropped due to lack of available HW packet buffers
95 	 * used to temporarily hold the received packets.
96 	 */
97 	{DRVSTAT_INFO(rx_drops_no_pbuf)},
98 	/* Received packets dropped due to input receive buffer
99 	 * descriptor fifo overflowing.
100 	 */
101 	{DRVSTAT_INFO(rx_drops_no_erx_descr)},
	/* Packets dropped because the internal FIFO to the offloaded TCP
	 * receive processing block is full. This could happen only for
	 * offloaded iSCSI or FCoE traffic.
	 */
106 	{DRVSTAT_INFO(rx_drops_no_tpre_descr)},
	/* Received packets dropped when they need more than 8
	 * receive buffers. This cannot happen as the driver configures
	 * 2048-byte receive buffers.
	 */
111 	{DRVSTAT_INFO(rx_drops_too_many_frags)},
112 	{DRVSTAT_INFO(forwarded_packets)},
	/* Received packets dropped when the frame length
	 * is more than 9018 bytes.
	 */
116 	{DRVSTAT_INFO(rx_drops_mtu)},
117 	/* Number of packets dropped due to random early drop function */
118 	{DRVSTAT_INFO(eth_red_drops)},
119 	{DRVSTAT_INFO(be_on_die_temperature)},
120 	{DRVSTAT_INFO(rx_roce_bytes_lsd)},
121 	{DRVSTAT_INFO(rx_roce_bytes_msd)},
122 	{DRVSTAT_INFO(rx_roce_frames)},
123 	{DRVSTAT_INFO(roce_drops_payload_len)},
124 	{DRVSTAT_INFO(roce_drops_crc)}
125 };
126 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
127 
/* Stats related to multi RX queues: get_stats routine assumes that rx_bytes
 * and rx_pkts are the first and second members, respectively.
 */
131 static const struct be_ethtool_stat et_rx_stats[] = {
132 	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
133 	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
134 	{DRVSTAT_RX_INFO(rx_compl)},
135 	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
136 	/* Number of page allocation failures while posting receive buffers
137 	 * to HW.
138 	 */
139 	{DRVSTAT_RX_INFO(rx_post_fail)},
	/* Received packets dropped due to skb allocation failure */
141 	{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
142 	/* Received packets dropped due to lack of available fetched buffers
143 	 * posted by the driver.
144 	 */
145 	{DRVSTAT_RX_INFO(rx_drops_no_frags)}
146 };
147 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
148 
/* Stats related to multi TX queues: get_stats routine assumes that tx_compl
 * is the first member.
 */
152 static const struct be_ethtool_stat et_tx_stats[] = {
153 	{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
154 	{DRVSTAT_TX_INFO(tx_bytes)},
155 	{DRVSTAT_TX_INFO(tx_pkts)},
	/* Number of skbs queued for transmission by the driver */
157 	{DRVSTAT_TX_INFO(tx_reqs)},
158 	/* Number of TX work request blocks DMAed to HW */
159 	{DRVSTAT_TX_INFO(tx_wrbs)},
	/* Number of times the TX queue was stopped due to lack
	 * of space in the TXQ.
	 */
163 	{DRVSTAT_TX_INFO(tx_stops)},
164 	/* Pkts dropped in the driver's transmit path */
165 	{DRVSTAT_TX_INFO(tx_drv_drops)}
166 };
167 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
168 
169 static const char et_self_tests[][ETH_GSTRING_LEN] = {
170 	"MAC Loopback test",
171 	"PHY Loopback test",
172 	"External Loopback test",
173 	"DDR DMA test",
174 	"Link test"
175 };
176 
177 #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
178 #define BE_MAC_LOOPBACK 0x0
179 #define BE_PHY_LOOPBACK 0x1
180 #define BE_ONE_PORT_EXT_LOOPBACK 0x2
181 #define BE_NO_LOOPBACK 0xff
182 
183 static void be_get_drvinfo(struct net_device *netdev,
184 				struct ethtool_drvinfo *drvinfo)
185 {
186 	struct be_adapter *adapter = netdev_priv(netdev);
187 
188 	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
189 	strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
190 	if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
191 		strlcpy(drvinfo->fw_version, adapter->fw_ver,
192 			sizeof(drvinfo->fw_version));
193 	else
194 		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
195 			 "%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
196 
197 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
198 		sizeof(drvinfo->bus_info));
199 	drvinfo->testinfo_len = 0;
200 	drvinfo->regdump_len = 0;
201 	drvinfo->eedump_len = 0;
202 }
203 
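/* Returns the on-chip length of @file_name by issuing a read_object cmd with
 * zero data_size and data_offset; only the reported length is used.
 */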
204 static u32
205 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
206 {
207 	u32 data_read = 0, eof;
208 	u8 addn_status;
209 	struct be_dma_mem data_len_cmd;
210 	int status;
211 
212 	memset(&data_len_cmd, 0, sizeof(data_len_cmd));
213 	/* data_offset and data_size should be 0 to get reg len */
214 	status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
215 				file_name, &data_read, &eof, &addn_status);
216 
217 	return data_read;
218 }
219 
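/* Reads up to @buf_len bytes of @file_name into @buf through a DMA-coherent
 * bounce buffer, LANCER_READ_FILE_CHUNK bytes at a time, until EOF.
 */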
220 static int
221 lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
222 		u32 buf_len, void *buf)
223 {
224 	struct be_dma_mem read_cmd;
225 	u32 read_len = 0, total_read_len = 0, chunk_size;
226 	u32 eof = 0;
227 	u8 addn_status;
228 	int status = 0;
229 
230 	read_cmd.size = LANCER_READ_FILE_CHUNK;
231 	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
232 			&read_cmd.dma);
233 
234 	if (!read_cmd.va) {
235 		dev_err(&adapter->pdev->dev,
236 				"Memory allocation failure while reading dump\n");
237 		return -ENOMEM;
238 	}
239 
240 	while ((total_read_len < buf_len) && !eof) {
241 		chunk_size = min_t(u32, (buf_len - total_read_len),
242 				LANCER_READ_FILE_CHUNK);
243 		chunk_size = ALIGN(chunk_size, 4);
244 		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
245 				total_read_len, file_name, &read_len,
246 				&eof, &addn_status);
247 		if (!status) {
248 			memcpy(buf + total_read_len, read_cmd.va, read_len);
249 			total_read_len += read_len;
250 			eof &= LANCER_READ_FILE_EOF_MASK;
251 		} else {
252 			status = -EIO;
253 			break;
254 		}
255 	}
256 	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
257 			read_cmd.dma);
258 
259 	return status;
260 }
261 
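/* ethtool get_regs_len(): the register/FW-dump length is reported only for
 * privileged PFs; Lancer chips report the FW dump file length instead.
 */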
262 static int
263 be_get_reg_len(struct net_device *netdev)
264 {
265 	struct be_adapter *adapter = netdev_priv(netdev);
266 	u32 log_size = 0;
267 
268 	if (!check_privilege(adapter, MAX_PRIVILEGES))
269 		return 0;
270 
271 	if (be_physfn(adapter)) {
272 		if (lancer_chip(adapter))
273 			log_size = lancer_cmd_get_file_len(adapter,
274 					LANCER_FW_DUMP_FILE);
275 		else
276 			be_cmd_get_reg_len(adapter, &log_size);
277 	}
278 	return log_size;
279 }
280 
281 static void
282 be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
283 {
284 	struct be_adapter *adapter = netdev_priv(netdev);
285 
286 	if (be_physfn(adapter)) {
287 		memset(buf, 0, regs->len);
288 		if (lancer_chip(adapter))
289 			lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
290 					regs->len, buf);
291 		else
292 			be_cmd_get_regs(adapter, regs->len, buf);
293 	}
294 }
295 
296 static int be_get_coalesce(struct net_device *netdev,
297 			   struct ethtool_coalesce *et)
298 {
299 	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_aic_obj *aic = &adapter->aic_obj[0];

	et->rx_coalesce_usecs = aic->prev_eqd;
304 	et->rx_coalesce_usecs_high = aic->max_eqd;
305 	et->rx_coalesce_usecs_low = aic->min_eqd;
306 
307 	et->tx_coalesce_usecs = aic->prev_eqd;
308 	et->tx_coalesce_usecs_high = aic->max_eqd;
309 	et->tx_coalesce_usecs_low = aic->min_eqd;
310 
311 	et->use_adaptive_rx_coalesce = aic->enable;
312 	et->use_adaptive_tx_coalesce = aic->enable;
313 
314 	return 0;
315 }
316 
/* TX attributes are ignored. Only RX attributes are considered;
 * the eqd cmd is issued in the worker thread.
 */
320 static int be_set_coalesce(struct net_device *netdev,
321 			   struct ethtool_coalesce *et)
322 {
323 	struct be_adapter *adapter = netdev_priv(netdev);
324 	struct be_aic_obj *aic = &adapter->aic_obj[0];
325 	struct be_eq_obj *eqo;
326 	int i;
327 
328 	for_all_evt_queues(adapter, eqo, i) {
329 		aic->enable = et->use_adaptive_rx_coalesce;
330 		aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
331 		aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
332 		aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
333 		aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
334 		aic++;
335 	}
336 
337 	return 0;
338 }
339 
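/* Fills @data in the same order as the strings reported by
 * be_get_stat_strings(): driver-wide stats first, then per-RX-queue and
 * per-TX-queue stats. u64_stats fetch/retry loops are used to get a
 * consistent snapshot of the 64-bit counters.
 */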
340 static void
341 be_get_ethtool_stats(struct net_device *netdev,
342 		struct ethtool_stats *stats, uint64_t *data)
343 {
344 	struct be_adapter *adapter = netdev_priv(netdev);
345 	struct be_rx_obj *rxo;
346 	struct be_tx_obj *txo;
347 	void *p;
348 	unsigned int i, j, base = 0, start;
349 
350 	for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
351 		p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
352 		data[i] = *(u32 *)p;
353 	}
354 	base += ETHTOOL_STATS_NUM;
355 
356 	for_all_rx_queues(adapter, rxo, j) {
357 		struct be_rx_stats *stats = rx_stats(rxo);
358 
359 		do {
360 			start = u64_stats_fetch_begin_bh(&stats->sync);
361 			data[base] = stats->rx_bytes;
362 			data[base + 1] = stats->rx_pkts;
363 		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
364 
365 		for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
366 			p = (u8 *)stats + et_rx_stats[i].offset;
367 			data[base + i] = *(u32 *)p;
368 		}
369 		base += ETHTOOL_RXSTATS_NUM;
370 	}
371 
372 	for_all_tx_queues(adapter, txo, j) {
373 		struct be_tx_stats *stats = tx_stats(txo);
374 
375 		do {
376 			start = u64_stats_fetch_begin_bh(&stats->sync_compl);
377 			data[base] = stats->tx_compl;
378 		} while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
379 
380 		do {
381 			start = u64_stats_fetch_begin_bh(&stats->sync);
382 			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
383 				p = (u8 *)stats + et_tx_stats[i].offset;
384 				data[base + i] =
385 					(et_tx_stats[i].size == sizeof(u64)) ?
386 						*(u64 *)p : *(u32 *)p;
387 			}
388 		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
389 		base += ETHTOOL_TXSTATS_NUM;
390 	}
391 }
392 
393 static void
394 be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
395 		uint8_t *data)
396 {
397 	struct be_adapter *adapter = netdev_priv(netdev);
398 	int i, j;
399 
400 	switch (stringset) {
401 	case ETH_SS_STATS:
402 		for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
403 			memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
404 			data += ETH_GSTRING_LEN;
405 		}
406 		for (i = 0; i < adapter->num_rx_qs; i++) {
407 			for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
408 				sprintf(data, "rxq%d: %s", i,
409 					et_rx_stats[j].desc);
410 				data += ETH_GSTRING_LEN;
411 			}
412 		}
413 		for (i = 0; i < adapter->num_tx_qs; i++) {
414 			for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
415 				sprintf(data, "txq%d: %s", i,
416 					et_tx_stats[j].desc);
417 				data += ETH_GSTRING_LEN;
418 			}
419 		}
420 		break;
421 	case ETH_SS_TEST:
422 		for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
423 			memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
424 			data += ETH_GSTRING_LEN;
425 		}
426 		break;
427 	}
428 }
429 
430 static int be_get_sset_count(struct net_device *netdev, int stringset)
431 {
432 	struct be_adapter *adapter = netdev_priv(netdev);
433 
434 	switch (stringset) {
435 	case ETH_SS_TEST:
436 		return ETHTOOL_TESTS_NUM;
437 	case ETH_SS_STATS:
438 		return ETHTOOL_STATS_NUM +
439 			adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
440 			adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
441 	default:
442 		return -EINVAL;
443 	}
444 }
445 
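/* Maps the FW-reported PHY type (and DAC cable length for SFP+) to an
 * ethtool PORT_* value.
 */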
446 static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
447 {
448 	u32 port;
449 
450 	switch (phy_type) {
451 	case PHY_TYPE_BASET_1GB:
452 	case PHY_TYPE_BASEX_1GB:
453 	case PHY_TYPE_SGMII:
454 		port = PORT_TP;
455 		break;
456 	case PHY_TYPE_SFP_PLUS_10GB:
457 		port = dac_cable_len ? PORT_DA : PORT_FIBRE;
458 		break;
459 	case PHY_TYPE_XFP_10GB:
460 	case PHY_TYPE_SFP_1GB:
461 		port = PORT_FIBRE;
462 		break;
463 	case PHY_TYPE_BASET_10GB:
464 		port = PORT_TP;
465 		break;
466 	default:
467 		port = PORT_OTHER;
468 	}
469 
470 	return port;
471 }
472 
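/* Converts the FW interface type and supported-speed bitmap into ethtool
 * SUPPORTED_* flags.
 */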
473 static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
474 {
475 	u32 val = 0;
476 
477 	switch (if_type) {
478 	case PHY_TYPE_BASET_1GB:
479 	case PHY_TYPE_BASEX_1GB:
480 	case PHY_TYPE_SGMII:
481 		val |= SUPPORTED_TP;
482 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
483 			val |= SUPPORTED_1000baseT_Full;
484 		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
485 			val |= SUPPORTED_100baseT_Full;
486 		if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
487 			val |= SUPPORTED_10baseT_Full;
488 		break;
489 	case PHY_TYPE_KX4_10GB:
490 		val |= SUPPORTED_Backplane;
491 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
492 			val |= SUPPORTED_1000baseKX_Full;
493 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
494 			val |= SUPPORTED_10000baseKX4_Full;
495 		break;
496 	case PHY_TYPE_KR_10GB:
497 		val |= SUPPORTED_Backplane |
498 				SUPPORTED_10000baseKR_Full;
499 		break;
500 	case PHY_TYPE_SFP_PLUS_10GB:
501 	case PHY_TYPE_XFP_10GB:
502 	case PHY_TYPE_SFP_1GB:
503 		val |= SUPPORTED_FIBRE;
504 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
505 			val |= SUPPORTED_10000baseT_Full;
506 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
507 			val |= SUPPORTED_1000baseT_Full;
508 		break;
509 	case PHY_TYPE_BASET_10GB:
510 		val |= SUPPORTED_TP;
511 		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
512 			val |= SUPPORTED_10000baseT_Full;
513 		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
514 			val |= SUPPORTED_1000baseT_Full;
515 		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
516 			val |= SUPPORTED_100baseT_Full;
517 		break;
518 	default:
519 		val |= SUPPORTED_TP;
520 	}
521 
522 	return val;
523 }
524 
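/* Pause frame advertisement is not supported on SFP+ and XFP 10G interfaces */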
525 bool be_pause_supported(struct be_adapter *adapter)
526 {
527 	return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
528 		adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
529 		false : true;
530 }
531 
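/* ethtool get_settings(): PHY info is queried from the FW only while
 * phy.link_speed is negative (i.e., not yet known); afterwards the cached
 * values saved below are returned.
 */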
532 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
533 {
534 	struct be_adapter *adapter = netdev_priv(netdev);
535 	u8 link_status;
536 	u16 link_speed = 0;
537 	int status;
538 	u32 auto_speeds;
539 	u32 fixed_speeds;
540 	u32 dac_cable_len;
541 	u16 interface_type;
542 
543 	if (adapter->phy.link_speed < 0) {
544 		status = be_cmd_link_status_query(adapter, &link_speed,
545 						  &link_status, 0);
546 		if (!status)
547 			be_link_status_update(adapter, link_status);
548 		ethtool_cmd_speed_set(ecmd, link_speed);
549 
550 		status = be_cmd_get_phy_info(adapter);
551 		if (!status) {
552 			interface_type = adapter->phy.interface_type;
553 			auto_speeds = adapter->phy.auto_speeds_supported;
554 			fixed_speeds = adapter->phy.fixed_speeds_supported;
555 			dac_cable_len = adapter->phy.dac_cable_len;
556 
557 			ecmd->supported =
558 				convert_to_et_setting(interface_type,
559 						      auto_speeds |
560 						      fixed_speeds);
561 			ecmd->advertising =
562 				convert_to_et_setting(interface_type,
563 						      auto_speeds);
564 
565 			ecmd->port = be_get_port_type(interface_type,
566 						      dac_cable_len);
567 
568 			if (adapter->phy.auto_speeds_supported) {
569 				ecmd->supported |= SUPPORTED_Autoneg;
570 				ecmd->autoneg = AUTONEG_ENABLE;
571 				ecmd->advertising |= ADVERTISED_Autoneg;
572 			}
573 
574 			ecmd->supported |= SUPPORTED_Pause;
575 			if (be_pause_supported(adapter))
576 				ecmd->advertising |= ADVERTISED_Pause;
577 
578 			switch (adapter->phy.interface_type) {
579 			case PHY_TYPE_KR_10GB:
580 			case PHY_TYPE_KX4_10GB:
581 				ecmd->transceiver = XCVR_INTERNAL;
582 				break;
583 			default:
584 				ecmd->transceiver = XCVR_EXTERNAL;
585 				break;
586 			}
587 		} else {
588 			ecmd->port = PORT_OTHER;
589 			ecmd->autoneg = AUTONEG_DISABLE;
590 			ecmd->transceiver = XCVR_DUMMY1;
591 		}
592 
593 		/* Save for future use */
594 		adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
595 		adapter->phy.port_type = ecmd->port;
596 		adapter->phy.transceiver = ecmd->transceiver;
597 		adapter->phy.autoneg = ecmd->autoneg;
598 		adapter->phy.advertising = ecmd->advertising;
599 		adapter->phy.supported = ecmd->supported;
600 	} else {
601 		ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
602 		ecmd->port = adapter->phy.port_type;
603 		ecmd->transceiver = adapter->phy.transceiver;
604 		ecmd->autoneg = adapter->phy.autoneg;
605 		ecmd->advertising = adapter->phy.advertising;
606 		ecmd->supported = adapter->phy.supported;
607 	}
608 
609 	ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
610 	ecmd->phy_address = adapter->port_num;
611 
612 	return 0;
613 }
614 
615 static void be_get_ringparam(struct net_device *netdev,
616 			     struct ethtool_ringparam *ring)
617 {
618 	struct be_adapter *adapter = netdev_priv(netdev);
619 
620 	ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
621 	ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
622 }
623 
624 static void
625 be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
626 {
627 	struct be_adapter *adapter = netdev_priv(netdev);
628 
629 	be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
630 	ecmd->autoneg = adapter->phy.fc_autoneg;
631 }
632 
633 static int
634 be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
635 {
636 	struct be_adapter *adapter = netdev_priv(netdev);
637 	int status;
638 
639 	if (ecmd->autoneg != adapter->phy.fc_autoneg)
640 		return -EINVAL;
641 	adapter->tx_fc = ecmd->tx_pause;
642 	adapter->rx_fc = ecmd->rx_pause;
643 
644 	status = be_cmd_set_flow_control(adapter,
645 					adapter->tx_fc, adapter->rx_fc);
646 	if (status)
647 		dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
648 
649 	return status;
650 }
651 
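/* ethtool set_phys_id(): the current beacon state is saved on ACTIVE and
 * restored on INACTIVE; ON/OFF enable/disable the port beacon LED.
 */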
652 static int
653 be_set_phys_id(struct net_device *netdev,
654 	       enum ethtool_phys_id_state state)
655 {
656 	struct be_adapter *adapter = netdev_priv(netdev);
657 
658 	switch (state) {
659 	case ETHTOOL_ID_ACTIVE:
660 		be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
661 					&adapter->beacon_state);
662 		return 1;	/* cycle on/off once per second */
663 
664 	case ETHTOOL_ID_ON:
665 		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
666 					BEACON_STATE_ENABLED);
667 		break;
668 
669 	case ETHTOOL_ID_OFF:
670 		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
671 					BEACON_STATE_DISABLED);
672 		break;
673 
674 	case ETHTOOL_ID_INACTIVE:
675 		be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
676 					adapter->beacon_state);
677 	}
678 
679 	return 0;
680 }
681 
682 static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
683 {
684 	struct be_adapter *adapter = netdev_priv(netdev);
685 	struct device *dev = &adapter->pdev->dev;
686 	int status;
687 
688 	if (!lancer_chip(adapter)) {
689 		dev_err(dev, "FW dump not supported\n");
690 		return -EOPNOTSUPP;
691 	}
692 
693 	if (dump_present(adapter)) {
694 		dev_err(dev, "Previous dump not cleared, not forcing dump\n");
695 		return 0;
696 	}
697 
698 	switch (dump->flag) {
699 	case LANCER_INITIATE_FW_DUMP:
700 		status = lancer_initiate_dump(adapter);
701 		if (!status)
702 			dev_info(dev, "F/w dump initiated successfully\n");
703 		break;
704 	default:
705 		dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag);
706 		return -EINVAL;
707 	}
708 	return status;
709 }
710 
711 static void
712 be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
713 {
714 	struct be_adapter *adapter = netdev_priv(netdev);
715 
	if (be_is_wol_supported(adapter)) {
		wol->supported |= WAKE_MAGIC;
		if (adapter->wol)
			wol->wolopts |= WAKE_MAGIC;
	} else {
		wol->wolopts = 0;
	}
722 	memset(&wol->sopass, 0, sizeof(wol->sopass));
723 }
724 
725 static int
726 be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
727 {
728 	struct be_adapter *adapter = netdev_priv(netdev);
729 
730 	if (wol->wolopts & ~WAKE_MAGIC)
731 		return -EOPNOTSUPP;
732 
733 	if (!be_is_wol_supported(adapter)) {
734 		dev_warn(&adapter->pdev->dev, "WOL not supported\n");
735 		return -EOPNOTSUPP;
736 	}
737 
738 	if (wol->wolopts & WAKE_MAGIC)
739 		adapter->wol = true;
740 	else
741 		adapter->wol = false;
742 
743 	return 0;
744 }
745 
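/* DDR DMA self-test: issues the ddr_dma_test FW cmd with two 4096-byte
 * test patterns and fails if either transfer reports an error.
 */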
746 static int
747 be_test_ddr_dma(struct be_adapter *adapter)
748 {
749 	int ret, i;
750 	struct be_dma_mem ddrdma_cmd;
751 	static const u64 pattern[2] = {
752 		0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
753 	};
754 
755 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
756 	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
757 					   &ddrdma_cmd.dma, GFP_KERNEL);
758 	if (!ddrdma_cmd.va)
759 		return -ENOMEM;
760 
761 	for (i = 0; i < 2; i++) {
762 		ret = be_cmd_ddr_dma_test(adapter, pattern[i],
763 					4096, &ddrdma_cmd);
764 		if (ret != 0)
765 			goto err;
766 	}
767 
768 err:
769 	dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
770 			  ddrdma_cmd.dma);
771 	return ret;
772 }
773 
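/* Runs one loopback self-test: enables @loopback_type on the port, runs the
 * FW loopback_test cmd and then switches the port back to BE_NO_LOOPBACK.
 */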
774 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
775 				u64 *status)
776 {
777 	be_cmd_set_loopback(adapter, adapter->hba_port_num,
778 				loopback_type, 1);
779 	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
780 				loopback_type, 1500,
781 				2, 0xabc);
782 	be_cmd_set_loopback(adapter, adapter->hba_port_num,
783 				BE_NO_LOOPBACK, 1);
784 	return *status;
785 }
786 
787 static void
788 be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
789 {
790 	struct be_adapter *adapter = netdev_priv(netdev);
791 	int status;
792 	u8 link_status = 0;
793 
794 	if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
795 		dev_err(&adapter->pdev->dev, "Self test not supported\n");
796 		test->flags |= ETH_TEST_FL_FAILED;
797 		return;
798 	}
799 
800 	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
801 
802 	if (test->flags & ETH_TEST_FL_OFFLINE) {
803 		if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
804 						&data[0]) != 0) {
805 			test->flags |= ETH_TEST_FL_FAILED;
806 		}
807 		if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
808 						&data[1]) != 0) {
809 			test->flags |= ETH_TEST_FL_FAILED;
810 		}
811 		if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
812 						&data[2]) != 0) {
813 			test->flags |= ETH_TEST_FL_FAILED;
814 		}
815 	}
816 
817 	if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
818 		data[3] = 1;
819 		test->flags |= ETH_TEST_FL_FAILED;
820 	}
821 
822 	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
823 	if (status) {
824 		test->flags |= ETH_TEST_FL_FAILED;
825 		data[4] = -1;
826 	} else if (!link_status) {
827 		test->flags |= ETH_TEST_FL_FAILED;
828 		data[4] = 1;
829 	}
830 }
831 
832 static int
833 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
834 {
835 	struct be_adapter *adapter = netdev_priv(netdev);
836 
837 	return be_load_fw(adapter, efl->data);
838 }
839 
840 static int
841 be_get_eeprom_len(struct net_device *netdev)
842 {
843 	struct be_adapter *adapter = netdev_priv(netdev);
844 
845 	if (!check_privilege(adapter, MAX_PRIVILEGES))
846 		return 0;
847 
848 	if (lancer_chip(adapter)) {
849 		if (be_physfn(adapter))
850 			return lancer_cmd_get_file_len(adapter,
851 					LANCER_VPD_PF_FILE);
852 		else
853 			return lancer_cmd_get_file_len(adapter,
854 					LANCER_VPD_VF_FILE);
855 	} else {
856 		return BE_READ_SEEPROM_LEN;
857 	}
858 }
859 
860 static int
861 be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
862 			uint8_t *data)
863 {
864 	struct be_adapter *adapter = netdev_priv(netdev);
865 	struct be_dma_mem eeprom_cmd;
866 	struct be_cmd_resp_seeprom_read *resp;
867 	int status;
868 
869 	if (!eeprom->len)
870 		return -EINVAL;
871 
872 	if (lancer_chip(adapter)) {
873 		if (be_physfn(adapter))
874 			return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
875 					eeprom->len, data);
876 		else
877 			return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
878 					eeprom->len, data);
879 	}
880 
881 	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
882 
883 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
884 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
885 	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
886 					   &eeprom_cmd.dma, GFP_KERNEL);
887 
888 	if (!eeprom_cmd.va)
889 		return -ENOMEM;
890 
891 	status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
892 
893 	if (!status) {
894 		resp = eeprom_cmd.va;
895 		memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
896 	}
897 	dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
898 			  eeprom_cmd.dma);
899 
900 	return status;
901 }
902 
903 static u32 be_get_msg_level(struct net_device *netdev)
904 {
905 	struct be_adapter *adapter = netdev_priv(netdev);
906 
907 	if (lancer_chip(adapter)) {
908 		dev_err(&adapter->pdev->dev, "Operation not supported\n");
909 		return -EOPNOTSUPP;
910 	}
911 
912 	return adapter->msg_enable;
913 }
914 
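/* Sets the FW UART trace level for all modules via the extended FAT
 * capabilities cmds; used to mirror the NETIF_MSG_HW msglevel bit.
 */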
915 static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
916 {
917 	struct be_dma_mem extfat_cmd;
918 	struct be_fat_conf_params *cfgs;
919 	int status;
920 	int i, j;
921 
922 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
923 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
924 	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
925 					     &extfat_cmd.dma);
926 	if (!extfat_cmd.va) {
927 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
928 			__func__);
929 		goto err;
930 	}
931 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
932 	if (!status) {
933 		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
934 					sizeof(struct be_cmd_resp_hdr));
935 		for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
936 			u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
937 			for (j = 0; j < num_modes; j++) {
938 				if (cfgs->module[i].trace_lvl[j].mode ==
939 								MODE_UART)
940 					cfgs->module[i].trace_lvl[j].dbg_lvl =
941 							cpu_to_le32(level);
942 			}
943 		}
944 		status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
945 							cfgs);
946 		if (status)
947 			dev_err(&adapter->pdev->dev,
948 				"Message level set failed\n");
949 	} else {
950 		dev_err(&adapter->pdev->dev, "Message level get failed\n");
951 	}
952 
953 	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
954 			    extfat_cmd.dma);
955 err:
956 	return;
957 }
958 
959 static void be_set_msg_level(struct net_device *netdev, u32 level)
960 {
961 	struct be_adapter *adapter = netdev_priv(netdev);
962 
963 	if (lancer_chip(adapter)) {
964 		dev_err(&adapter->pdev->dev, "Operation not supported\n");
965 		return;
966 	}
967 
968 	if (adapter->msg_enable == level)
969 		return;
970 
971 	if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
972 		be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
973 				    FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
974 	adapter->msg_enable = level;
975 
976 	return;
977 }
978 
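/* Translates the adapter's RSS_ENABLE_* flags into ethtool RXH_* bits for
 * the given @flow_type.
 */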
979 static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
980 {
981 	u64 data = 0;
982 
983 	switch (flow_type) {
984 	case TCP_V4_FLOW:
985 		if (adapter->rss_flags & RSS_ENABLE_IPV4)
986 			data |= RXH_IP_DST | RXH_IP_SRC;
987 		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
988 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
989 		break;
990 	case UDP_V4_FLOW:
991 		if (adapter->rss_flags & RSS_ENABLE_IPV4)
992 			data |= RXH_IP_DST | RXH_IP_SRC;
993 		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
994 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
995 		break;
996 	case TCP_V6_FLOW:
997 		if (adapter->rss_flags & RSS_ENABLE_IPV6)
998 			data |= RXH_IP_DST | RXH_IP_SRC;
999 		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
1000 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1001 		break;
1002 	case UDP_V6_FLOW:
1003 		if (adapter->rss_flags & RSS_ENABLE_IPV6)
1004 			data |= RXH_IP_DST | RXH_IP_SRC;
1005 		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
1006 			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1007 		break;
1008 	}
1009 
1010 	return data;
1011 }
1012 
1013 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1014 		      u32 *rule_locs)
1015 {
1016 	struct be_adapter *adapter = netdev_priv(netdev);
1017 
1018 	if (!be_multi_rxq(adapter)) {
1019 		dev_info(&adapter->pdev->dev,
1020 			 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
1021 		return -EINVAL;
1022 	}
1023 
1024 	switch (cmd->cmd) {
1025 	case ETHTOOL_GRXFH:
1026 		cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
1027 		break;
1028 	case ETHTOOL_GRXRINGS:
1029 		cmd->data = adapter->num_rx_qs - 1;
1030 		break;
1031 	default:
1032 		return -EINVAL;
1033 	}
1034 
1035 	return 0;
1036 }
1037 
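/* ETHTOOL_SRXFH handler: only L3 or L3+L4 hashing can be selected per flow
 * type. When the flags change, the RSS table is rebuilt and reprogrammed
 * via be_cmd_rss_config().
 */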
1038 static int be_set_rss_hash_opts(struct be_adapter *adapter,
1039 				struct ethtool_rxnfc *cmd)
1040 {
1041 	struct be_rx_obj *rxo;
1042 	int status = 0, i, j;
1043 	u8 rsstable[128];
1044 	u32 rss_flags = adapter->rss_flags;
1045 
1046 	if (cmd->data != L3_RSS_FLAGS &&
1047 	    cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
1048 		return -EINVAL;
1049 
1050 	switch (cmd->flow_type) {
1051 	case TCP_V4_FLOW:
1052 		if (cmd->data == L3_RSS_FLAGS)
1053 			rss_flags &= ~RSS_ENABLE_TCP_IPV4;
1054 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1055 			rss_flags |= RSS_ENABLE_IPV4 |
1056 					RSS_ENABLE_TCP_IPV4;
1057 		break;
1058 	case TCP_V6_FLOW:
1059 		if (cmd->data == L3_RSS_FLAGS)
1060 			rss_flags &= ~RSS_ENABLE_TCP_IPV6;
1061 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1062 			rss_flags |= RSS_ENABLE_IPV6 |
1063 					RSS_ENABLE_TCP_IPV6;
1064 		break;
1065 	case UDP_V4_FLOW:
1066 		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1067 		    BEx_chip(adapter))
1068 			return -EINVAL;
1069 
1070 		if (cmd->data == L3_RSS_FLAGS)
1071 			rss_flags &= ~RSS_ENABLE_UDP_IPV4;
1072 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1073 			rss_flags |= RSS_ENABLE_IPV4 |
1074 					RSS_ENABLE_UDP_IPV4;
1075 		break;
1076 	case UDP_V6_FLOW:
1077 		if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1078 		    BEx_chip(adapter))
1079 			return -EINVAL;
1080 
1081 		if (cmd->data == L3_RSS_FLAGS)
1082 			rss_flags &= ~RSS_ENABLE_UDP_IPV6;
1083 		else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1084 			rss_flags |= RSS_ENABLE_IPV6 |
1085 					RSS_ENABLE_UDP_IPV6;
1086 		break;
1087 	default:
1088 		return -EINVAL;
1089 	}
1090 
1091 	if (rss_flags == adapter->rss_flags)
1092 		return status;
1093 
1094 	if (be_multi_rxq(adapter)) {
1095 		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
1096 			for_all_rss_queues(adapter, rxo, i) {
1097 				if ((j + i) >= 128)
1098 					break;
1099 				rsstable[j + i] = rxo->rss_id;
1100 			}
1101 		}
1102 	}
1103 	status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
1104 	if (!status)
1105 		adapter->rss_flags = rss_flags;
1106 
1107 	return status;
1108 }
1109 
1110 static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1111 {
1112 	struct be_adapter *adapter = netdev_priv(netdev);
1113 	int status = 0;
1114 
1115 	if (!be_multi_rxq(adapter)) {
1116 		dev_err(&adapter->pdev->dev,
1117 			"ethtool::set_rxnfc: RX flow hashing is disabled\n");
1118 		return -EINVAL;
1119 	}
1120 
1121 	switch (cmd->cmd) {
1122 	case ETHTOOL_SRXFH:
1123 		status = be_set_rss_hash_opts(adapter, cmd);
1124 		break;
1125 	default:
1126 		return -EINVAL;
1127 	}
1128 
1129 	return status;
1130 }
1131 
1132 static void be_get_channels(struct net_device *netdev,
1133 			    struct ethtool_channels *ch)
1134 {
1135 	struct be_adapter *adapter = netdev_priv(netdev);
1136 
1137 	ch->combined_count = adapter->num_evt_qs;
1138 	ch->max_combined = be_max_qs(adapter);
1139 }
1140 
1141 static int be_set_channels(struct net_device  *netdev,
1142 			   struct ethtool_channels *ch)
1143 {
1144 	struct be_adapter *adapter = netdev_priv(netdev);
1145 
1146 	if (ch->rx_count || ch->tx_count || ch->other_count ||
1147 	    !ch->combined_count || ch->combined_count > be_max_qs(adapter))
1148 		return -EINVAL;
1149 
1150 	adapter->cfg_num_qs = ch->combined_count;
1151 
1152 	return be_update_queues(adapter);
1153 }
1154 
1155 const struct ethtool_ops be_ethtool_ops = {
1156 	.get_settings = be_get_settings,
1157 	.get_drvinfo = be_get_drvinfo,
1158 	.get_wol = be_get_wol,
1159 	.set_wol = be_set_wol,
1160 	.get_link = ethtool_op_get_link,
1161 	.get_eeprom_len = be_get_eeprom_len,
1162 	.get_eeprom = be_read_eeprom,
1163 	.get_coalesce = be_get_coalesce,
1164 	.set_coalesce = be_set_coalesce,
1165 	.get_ringparam = be_get_ringparam,
1166 	.get_pauseparam = be_get_pauseparam,
1167 	.set_pauseparam = be_set_pauseparam,
1168 	.get_strings = be_get_stat_strings,
1169 	.set_phys_id = be_set_phys_id,
1170 	.set_dump = be_set_dump,
1171 	.get_msglevel = be_get_msg_level,
1172 	.set_msglevel = be_set_msg_level,
1173 	.get_sset_count = be_get_sset_count,
1174 	.get_ethtool_stats = be_get_ethtool_stats,
1175 	.get_regs_len = be_get_reg_len,
1176 	.get_regs = be_get_regs,
1177 	.flash_device = be_do_flash,
1178 	.self_test = be_self_test,
1179 	.get_rxnfc = be_get_rxnfc,
1180 	.set_rxnfc = be_set_rxnfc,
1181 	.get_channels = be_get_channels,
1182 	.set_channels = be_set_channels
1183 };
1184