1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 2013-2015 Chelsio Communications.  All rights reserved.
4  */
5 
6 #include <linux/firmware.h>
7 #include <linux/mdio.h>
8 
9 #include "cxgb4.h"
10 #include "t4_regs.h"
11 #include "t4fw_api.h"
12 #include "cxgb4_cudbg.h"
13 #include "cxgb4_filter.h"
14 #include "cxgb4_tc_flower.h"
15 
16 #define EEPROM_MAGIC 0x38E2F10C
17 
18 static u32 get_msglevel(struct net_device *dev)
19 {
20 	return netdev2adap(dev)->msg_enable;
21 }
22 
23 static void set_msglevel(struct net_device *dev, u32 val)
24 {
25 	netdev2adap(dev)->msg_enable = val;
26 }
27 
28 enum cxgb4_ethtool_tests {
29 	CXGB4_ETHTOOL_LB_TEST,
30 	CXGB4_ETHTOOL_MAX_TEST,
31 };
32 
33 static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = {
34 	"Loop back test (offline)",
35 };
36 
37 static const char * const flash_region_strings[] = {
38 	"All",
39 	"Firmware",
40 	"PHY Firmware",
41 	"Boot",
42 	"Boot CFG",
43 };
44 
45 static const char stats_strings[][ETH_GSTRING_LEN] = {
46 	"tx_octets_ok           ",
47 	"tx_frames_ok           ",
48 	"tx_broadcast_frames    ",
49 	"tx_multicast_frames    ",
50 	"tx_unicast_frames      ",
51 	"tx_error_frames        ",
52 
53 	"tx_frames_64           ",
54 	"tx_frames_65_to_127    ",
55 	"tx_frames_128_to_255   ",
56 	"tx_frames_256_to_511   ",
57 	"tx_frames_512_to_1023  ",
58 	"tx_frames_1024_to_1518 ",
59 	"tx_frames_1519_to_max  ",
60 
61 	"tx_frames_dropped      ",
62 	"tx_pause_frames        ",
63 	"tx_ppp0_frames         ",
64 	"tx_ppp1_frames         ",
65 	"tx_ppp2_frames         ",
66 	"tx_ppp3_frames         ",
67 	"tx_ppp4_frames         ",
68 	"tx_ppp5_frames         ",
69 	"tx_ppp6_frames         ",
70 	"tx_ppp7_frames         ",
71 
72 	"rx_octets_ok           ",
73 	"rx_frames_ok           ",
74 	"rx_broadcast_frames    ",
75 	"rx_multicast_frames    ",
76 	"rx_unicast_frames      ",
77 
78 	"rx_frames_too_long     ",
79 	"rx_jabber_errors       ",
80 	"rx_fcs_errors          ",
81 	"rx_length_errors       ",
82 	"rx_symbol_errors       ",
83 	"rx_runt_frames         ",
84 
85 	"rx_frames_64           ",
86 	"rx_frames_65_to_127    ",
87 	"rx_frames_128_to_255   ",
88 	"rx_frames_256_to_511   ",
89 	"rx_frames_512_to_1023  ",
90 	"rx_frames_1024_to_1518 ",
91 	"rx_frames_1519_to_max  ",
92 
93 	"rx_pause_frames        ",
94 	"rx_ppp0_frames         ",
95 	"rx_ppp1_frames         ",
96 	"rx_ppp2_frames         ",
97 	"rx_ppp3_frames         ",
98 	"rx_ppp4_frames         ",
99 	"rx_ppp5_frames         ",
100 	"rx_ppp6_frames         ",
101 	"rx_ppp7_frames         ",
102 
103 	"rx_bg0_frames_dropped  ",
104 	"rx_bg1_frames_dropped  ",
105 	"rx_bg2_frames_dropped  ",
106 	"rx_bg3_frames_dropped  ",
107 	"rx_bg0_frames_trunc    ",
108 	"rx_bg1_frames_trunc    ",
109 	"rx_bg2_frames_trunc    ",
110 	"rx_bg3_frames_trunc    ",
111 
112 	"tso                    ",
113 	"uso                    ",
114 	"tx_csum_offload        ",
115 	"rx_csum_good           ",
116 	"vlan_extractions       ",
117 	"vlan_insertions        ",
118 	"gro_packets            ",
119 	"gro_merged             ",
120 };
121 
122 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
123 	"db_drop                ",
124 	"db_full                ",
125 	"db_empty               ",
126 	"write_coal_success     ",
127 	"write_coal_fail        ",
128 #ifdef CONFIG_CHELSIO_TLS_DEVICE
129 	"tx_tls_encrypted_packets",
130 	"tx_tls_encrypted_bytes  ",
131 	"tx_tls_ctx              ",
132 	"tx_tls_ooo              ",
133 	"tx_tls_skip_no_sync_data",
134 	"tx_tls_drop_no_sync_data",
135 	"tx_tls_drop_bypass_req  ",
136 #endif
137 };
138 
139 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
140 	"-------Loopback----------- ",
141 	"octets_ok              ",
142 	"frames_ok              ",
143 	"bcast_frames           ",
144 	"mcast_frames           ",
145 	"ucast_frames           ",
146 	"error_frames           ",
147 	"frames_64              ",
148 	"frames_65_to_127       ",
149 	"frames_128_to_255      ",
150 	"frames_256_to_511      ",
151 	"frames_512_to_1023     ",
152 	"frames_1024_to_1518    ",
153 	"frames_1519_to_max     ",
154 	"frames_dropped         ",
155 	"bg0_frames_dropped     ",
156 	"bg1_frames_dropped     ",
157 	"bg2_frames_dropped     ",
158 	"bg3_frames_dropped     ",
159 	"bg0_frames_trunc       ",
160 	"bg1_frames_trunc       ",
161 	"bg2_frames_trunc       ",
162 	"bg3_frames_trunc       ",
163 };
164 
165 static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
166 	[PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
167 };
168 
169 static int get_sset_count(struct net_device *dev, int sset)
170 {
171 	switch (sset) {
172 	case ETH_SS_STATS:
173 		return ARRAY_SIZE(stats_strings) +
174 		       ARRAY_SIZE(adapter_stats_strings) +
175 		       ARRAY_SIZE(loopback_stats_strings);
176 	case ETH_SS_PRIV_FLAGS:
177 		return ARRAY_SIZE(cxgb4_priv_flags_strings);
178 	case ETH_SS_TEST:
179 		return ARRAY_SIZE(cxgb4_selftest_strings);
180 	default:
181 		return -EOPNOTSUPP;
182 	}
183 }
184 
185 static int get_regs_len(struct net_device *dev)
186 {
187 	struct adapter *adap = netdev2adap(dev);
188 
189 	return t4_get_regs_len(adap);
190 }
191 
192 static int get_eeprom_len(struct net_device *dev)
193 {
194 	return EEPROMSIZE;
195 }
196 
197 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
198 {
199 	struct adapter *adapter = netdev2adap(dev);
200 	u32 exprom_vers;
201 
202 	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
203 	strlcpy(info->bus_info, pci_name(adapter->pdev),
204 		sizeof(info->bus_info));
205 	info->regdump_len = get_regs_len(dev);
206 
207 	if (adapter->params.fw_vers)
208 		snprintf(info->fw_version, sizeof(info->fw_version),
209 			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
210 			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
211 			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
212 			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
213 			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
214 			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
215 			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
216 			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
217 			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
218 
219 	if (!t4_get_exprom_version(adapter, &exprom_vers))
220 		snprintf(info->erom_version, sizeof(info->erom_version),
221 			 "%u.%u.%u.%u",
222 			 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
223 			 FW_HDR_FW_VER_MINOR_G(exprom_vers),
224 			 FW_HDR_FW_VER_MICRO_G(exprom_vers),
225 			 FW_HDR_FW_VER_BUILD_G(exprom_vers));
226 	info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
227 }
228 
229 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
230 {
231 	if (stringset == ETH_SS_STATS) {
232 		memcpy(data, stats_strings, sizeof(stats_strings));
233 		data += sizeof(stats_strings);
234 		memcpy(data, adapter_stats_strings,
235 		       sizeof(adapter_stats_strings));
236 		data += sizeof(adapter_stats_strings);
237 		memcpy(data, loopback_stats_strings,
238 		       sizeof(loopback_stats_strings));
239 	} else if (stringset == ETH_SS_PRIV_FLAGS) {
240 		memcpy(data, cxgb4_priv_flags_strings,
241 		       sizeof(cxgb4_priv_flags_strings));
242 	} else if (stringset == ETH_SS_TEST) {
243 		memcpy(data, cxgb4_selftest_strings,
244 		       sizeof(cxgb4_selftest_strings));
245 	}
246 }
247 
248 /* port stats maintained per queue of the port. They should be in the same
249  * order as in stats_strings above.
250  */
251 struct queue_port_stats {
252 	u64 tso;
253 	u64 uso;
254 	u64 tx_csum;
255 	u64 rx_csum;
256 	u64 vlan_ex;
257 	u64 vlan_ins;
258 	u64 gro_pkts;
259 	u64 gro_merged;
260 };
261 
262 struct adapter_stats {
263 	u64 db_drop;
264 	u64 db_full;
265 	u64 db_empty;
266 	u64 wc_success;
267 	u64 wc_fail;
268 #ifdef CONFIG_CHELSIO_TLS_DEVICE
269 	u64 tx_tls_encrypted_packets;
270 	u64 tx_tls_encrypted_bytes;
271 	u64 tx_tls_ctx;
272 	u64 tx_tls_ooo;
273 	u64 tx_tls_skip_no_sync_data;
274 	u64 tx_tls_drop_no_sync_data;
275 	u64 tx_tls_drop_bypass_req;
276 #endif
277 };
278 
279 static void collect_sge_port_stats(const struct adapter *adap,
280 				   const struct port_info *p,
281 				   struct queue_port_stats *s)
282 {
283 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
284 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
285 	struct sge_eohw_txq *eohw_tx;
286 	unsigned int i;
287 
288 	memset(s, 0, sizeof(*s));
289 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
290 		s->tso += tx->tso;
291 		s->uso += tx->uso;
292 		s->tx_csum += tx->tx_cso;
293 		s->rx_csum += rx->stats.rx_cso;
294 		s->vlan_ex += rx->stats.vlan_ex;
295 		s->vlan_ins += tx->vlan_ins;
296 		s->gro_pkts += rx->stats.lro_pkts;
297 		s->gro_merged += rx->stats.lro_merged;
298 	}
299 
300 	if (adap->sge.eohw_txq) {
301 		eohw_tx = &adap->sge.eohw_txq[p->first_qset];
302 		for (i = 0; i < p->nqsets; i++, eohw_tx++) {
303 			s->tso += eohw_tx->tso;
304 			s->uso += eohw_tx->uso;
305 			s->tx_csum += eohw_tx->tx_cso;
306 			s->vlan_ins += eohw_tx->vlan_ins;
307 		}
308 	}
309 }
310 
311 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
312 {
313 	u64 val1, val2;
314 
315 	memset(s, 0, sizeof(*s));
316 
317 	s->db_drop = adap->db_stats.db_drop;
318 	s->db_full = adap->db_stats.db_full;
319 	s->db_empty = adap->db_stats.db_empty;
320 
321 	if (!is_t4(adap->params.chip)) {
322 		int v;
323 
324 		v = t4_read_reg(adap, SGE_STAT_CFG_A);
325 		if (STATSOURCE_T5_G(v) == 7) {
326 			val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
327 			val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
328 			s->wc_success = val1 - val2;
329 			s->wc_fail = val2;
330 		}
331 	}
332 }
333 
334 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
335 		      u64 *data)
336 {
337 	struct port_info *pi = netdev_priv(dev);
338 	struct adapter *adapter = pi->adapter;
339 	struct lb_port_stats s;
340 	int i;
341 	u64 *p0;
342 
343 	t4_get_port_stats_offset(adapter, pi->tx_chan,
344 				 (struct port_stats *)data,
345 				 &pi->stats_base);
346 
347 	data += sizeof(struct port_stats) / sizeof(u64);
348 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
349 	data += sizeof(struct queue_port_stats) / sizeof(u64);
350 	collect_adapter_stats(adapter, (struct adapter_stats *)data);
351 	data += sizeof(struct adapter_stats) / sizeof(u64);
352 
353 	*data++ = (u64)pi->port_id;
354 	memset(&s, 0, sizeof(s));
355 	t4_get_lb_stats(adapter, pi->port_id, &s);
356 
357 	p0 = &s.octets;
358 	for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
359 		*data++ = (unsigned long long)*p0++;
360 }
361 
362 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
363 		     void *buf)
364 {
365 	struct adapter *adap = netdev2adap(dev);
366 	size_t buf_size;
367 
368 	buf_size = t4_get_regs_len(adap);
369 	regs->version = mk_adap_vers(adap);
370 	t4_get_regs(adap, buf, buf_size);
371 }
372 
373 static int restart_autoneg(struct net_device *dev)
374 {
375 	struct port_info *p = netdev_priv(dev);
376 
377 	if (!netif_running(dev))
378 		return -EAGAIN;
379 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
380 		return -EINVAL;
381 	t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
382 	return 0;
383 }
384 
385 static int identify_port(struct net_device *dev,
386 			 enum ethtool_phys_id_state state)
387 {
388 	unsigned int val;
389 	struct adapter *adap = netdev2adap(dev);
390 
391 	if (state == ETHTOOL_ID_ACTIVE)
392 		val = 0xffff;
393 	else if (state == ETHTOOL_ID_INACTIVE)
394 		val = 0;
395 	else
396 		return -EINVAL;
397 
398 	return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
399 }
400 
401 /**
402  *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
403  *	@port_type: Firmware Port Type
404  *	@mod_type: Firmware Module Type
405  *
406  *	Translate Firmware Port/Module type to Ethtool Port Type.
407  */
408 static int from_fw_port_mod_type(enum fw_port_type port_type,
409 				 enum fw_port_module_type mod_type)
410 {
411 	if (port_type == FW_PORT_TYPE_BT_SGMII ||
412 	    port_type == FW_PORT_TYPE_BT_XFI ||
413 	    port_type == FW_PORT_TYPE_BT_XAUI) {
414 		return PORT_TP;
415 	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
416 		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
417 		return PORT_FIBRE;
418 	} else if (port_type == FW_PORT_TYPE_SFP ||
419 		   port_type == FW_PORT_TYPE_QSFP_10G ||
420 		   port_type == FW_PORT_TYPE_QSA ||
421 		   port_type == FW_PORT_TYPE_QSFP ||
422 		   port_type == FW_PORT_TYPE_CR4_QSFP ||
423 		   port_type == FW_PORT_TYPE_CR_QSFP ||
424 		   port_type == FW_PORT_TYPE_CR2_QSFP ||
425 		   port_type == FW_PORT_TYPE_SFP28) {
426 		if (mod_type == FW_PORT_MOD_TYPE_LR ||
427 		    mod_type == FW_PORT_MOD_TYPE_SR ||
428 		    mod_type == FW_PORT_MOD_TYPE_ER ||
429 		    mod_type == FW_PORT_MOD_TYPE_LRM)
430 			return PORT_FIBRE;
431 		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
432 			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
433 			return PORT_DA;
434 		else
435 			return PORT_OTHER;
436 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
437 		   port_type == FW_PORT_TYPE_KR_SFP28 ||
438 		   port_type == FW_PORT_TYPE_KR_XLAUI) {
439 		return PORT_NONE;
440 	}
441 
442 	return PORT_OTHER;
443 }
444 
445 /**
446  *	speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities
447  *	@speed: speed in Kb/s
448  *
449  *	Translates a specific Port Speed into a Firmware Port Capabilities
450  *	value.
451  */
452 static unsigned int speed_to_fw_caps(int speed)
453 {
454 	if (speed == 100)
455 		return FW_PORT_CAP32_SPEED_100M;
456 	if (speed == 1000)
457 		return FW_PORT_CAP32_SPEED_1G;
458 	if (speed == 10000)
459 		return FW_PORT_CAP32_SPEED_10G;
460 	if (speed == 25000)
461 		return FW_PORT_CAP32_SPEED_25G;
462 	if (speed == 40000)
463 		return FW_PORT_CAP32_SPEED_40G;
464 	if (speed == 50000)
465 		return FW_PORT_CAP32_SPEED_50G;
466 	if (speed == 100000)
467 		return FW_PORT_CAP32_SPEED_100G;
468 	if (speed == 200000)
469 		return FW_PORT_CAP32_SPEED_200G;
470 	if (speed == 400000)
471 		return FW_PORT_CAP32_SPEED_400G;
472 	return 0;
473 }
474 
475 /**
476  *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
477  *	@port_type: Firmware Port Type
478  *	@fw_caps: Firmware Port Capabilities
479  *	@link_mode_mask: ethtool Link Mode Mask
480  *
481  *	Translate a Firmware Port Capabilities specification to an ethtool
482  *	Link Mode Mask.
483  */
484 static void fw_caps_to_lmm(enum fw_port_type port_type,
485 			   fw_port_cap32_t fw_caps,
486 			   unsigned long *link_mode_mask)
487 {
488 	#define SET_LMM(__lmm_name) \
489 		do { \
490 			__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
491 				  link_mode_mask); \
492 		} while (0)
493 
494 	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
495 		do { \
496 			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
497 				SET_LMM(__lmm_name); \
498 		} while (0)
499 
500 	switch (port_type) {
501 	case FW_PORT_TYPE_BT_SGMII:
502 	case FW_PORT_TYPE_BT_XFI:
503 	case FW_PORT_TYPE_BT_XAUI:
504 		SET_LMM(TP);
505 		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
506 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
507 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
508 		break;
509 
510 	case FW_PORT_TYPE_KX4:
511 	case FW_PORT_TYPE_KX:
512 		SET_LMM(Backplane);
513 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
514 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
515 		break;
516 
517 	case FW_PORT_TYPE_KR:
518 		SET_LMM(Backplane);
519 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
520 		break;
521 
522 	case FW_PORT_TYPE_BP_AP:
523 		SET_LMM(Backplane);
524 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
525 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
526 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
527 		break;
528 
529 	case FW_PORT_TYPE_BP4_AP:
530 		SET_LMM(Backplane);
531 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
532 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
533 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
534 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
535 		break;
536 
537 	case FW_PORT_TYPE_FIBER_XFI:
538 	case FW_PORT_TYPE_FIBER_XAUI:
539 	case FW_PORT_TYPE_SFP:
540 	case FW_PORT_TYPE_QSFP_10G:
541 	case FW_PORT_TYPE_QSA:
542 		SET_LMM(FIBRE);
543 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
544 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
545 		break;
546 
547 	case FW_PORT_TYPE_BP40_BA:
548 	case FW_PORT_TYPE_QSFP:
549 		SET_LMM(FIBRE);
550 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
551 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
552 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
553 		break;
554 
555 	case FW_PORT_TYPE_CR_QSFP:
556 	case FW_PORT_TYPE_SFP28:
557 		SET_LMM(FIBRE);
558 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
559 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
560 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
561 		break;
562 
563 	case FW_PORT_TYPE_KR_SFP28:
564 		SET_LMM(Backplane);
565 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
566 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
567 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
568 		break;
569 
570 	case FW_PORT_TYPE_KR_XLAUI:
571 		SET_LMM(Backplane);
572 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
573 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
574 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
575 		break;
576 
577 	case FW_PORT_TYPE_CR2_QSFP:
578 		SET_LMM(FIBRE);
579 		FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
580 		break;
581 
582 	case FW_PORT_TYPE_KR4_100G:
583 	case FW_PORT_TYPE_CR4_QSFP:
584 		SET_LMM(FIBRE);
585 		FW_CAPS_TO_LMM(SPEED_1G,  1000baseT_Full);
586 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
587 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
588 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
589 		FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
590 		FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
591 		break;
592 
593 	default:
594 		break;
595 	}
596 
597 	if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
598 		FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
599 		FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
600 	} else {
601 		SET_LMM(FEC_NONE);
602 	}
603 
604 	FW_CAPS_TO_LMM(ANEG, Autoneg);
605 	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
606 	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
607 
608 	#undef FW_CAPS_TO_LMM
609 	#undef SET_LMM
610 }
611 
612 /**
613  *	lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
614  *	capabilities
615  *	@link_mode_mask: ethtool Link Mode Mask
616  *
617  *	Translate ethtool Link Mode Mask into a Firmware Port capabilities
618  *	value.
619  */
620 static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
621 {
622 	unsigned int fw_caps = 0;
623 
624 	#define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
625 		do { \
626 			if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
627 				     link_mode_mask)) \
628 				fw_caps |= FW_PORT_CAP32_ ## __fw_name; \
629 		} while (0)
630 
631 	LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
632 	LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
633 	LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
634 	LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
635 	LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
636 	LMM_TO_FW_CAPS(50000baseCR2_Full, SPEED_50G);
637 	LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);
638 
639 	#undef LMM_TO_FW_CAPS
640 
641 	return fw_caps;
642 }
643 
644 static int get_link_ksettings(struct net_device *dev,
645 			      struct ethtool_link_ksettings *link_ksettings)
646 {
647 	struct port_info *pi = netdev_priv(dev);
648 	struct ethtool_link_settings *base = &link_ksettings->base;
649 
650 	/* For the nonce, the Firmware doesn't send up Port State changes
651 	 * when the Virtual Interface attached to the Port is down.  So
652 	 * if it's down, let's grab any changes.
653 	 */
654 	if (!netif_running(dev))
655 		(void)t4_update_port_info(pi);
656 
657 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
658 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
659 	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
660 
661 	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
662 
663 	if (pi->mdio_addr >= 0) {
664 		base->phy_address = pi->mdio_addr;
665 		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
666 				      ? ETH_MDIO_SUPPORTS_C22
667 				      : ETH_MDIO_SUPPORTS_C45);
668 	} else {
669 		base->phy_address = 255;
670 		base->mdio_support = 0;
671 	}
672 
673 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
674 		       link_ksettings->link_modes.supported);
675 	fw_caps_to_lmm(pi->port_type,
676 		       t4_link_acaps(pi->adapter,
677 				     pi->lport,
678 				     &pi->link_cfg),
679 		       link_ksettings->link_modes.advertising);
680 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
681 		       link_ksettings->link_modes.lp_advertising);
682 
683 	base->speed = (netif_carrier_ok(dev)
684 		       ? pi->link_cfg.speed
685 		       : SPEED_UNKNOWN);
686 	base->duplex = DUPLEX_FULL;
687 
688 	base->autoneg = pi->link_cfg.autoneg;
689 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
690 		ethtool_link_ksettings_add_link_mode(link_ksettings,
691 						     supported, Autoneg);
692 	if (pi->link_cfg.autoneg)
693 		ethtool_link_ksettings_add_link_mode(link_ksettings,
694 						     advertising, Autoneg);
695 
696 	return 0;
697 }
698 
699 static int set_link_ksettings(struct net_device *dev,
700 			    const struct ethtool_link_ksettings *link_ksettings)
701 {
702 	struct port_info *pi = netdev_priv(dev);
703 	struct link_config *lc = &pi->link_cfg;
704 	const struct ethtool_link_settings *base = &link_ksettings->base;
705 	struct link_config old_lc;
706 	unsigned int fw_caps;
707 	int ret = 0;
708 
709 	/* only full-duplex supported */
710 	if (base->duplex != DUPLEX_FULL)
711 		return -EINVAL;
712 
713 	old_lc = *lc;
714 	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) ||
715 	    base->autoneg == AUTONEG_DISABLE) {
716 		fw_caps = speed_to_fw_caps(base->speed);
717 
718 		/* Speed must be supported by Physical Port Capabilities. */
719 		if (!(lc->pcaps & fw_caps))
720 			return -EINVAL;
721 
722 		lc->speed_caps = fw_caps;
723 		lc->acaps = fw_caps;
724 	} else {
725 		fw_caps =
726 			lmm_to_fw_caps(link_ksettings->link_modes.advertising);
727 		if (!(lc->pcaps & fw_caps))
728 			return -EINVAL;
729 		lc->speed_caps = 0;
730 		lc->acaps = fw_caps | FW_PORT_CAP32_ANEG;
731 	}
732 	lc->autoneg = base->autoneg;
733 
734 	/* If the firmware rejects the Link Configuration request, back out
735 	 * the changes and report the error.
736 	 */
737 	ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
738 	if (ret)
739 		*lc = old_lc;
740 
741 	return ret;
742 }
743 
744 /* Translate the Firmware FEC value into the ethtool value. */
745 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
746 {
747 	unsigned int eth_fec = 0;
748 
749 	if (fw_fec & FW_PORT_CAP32_FEC_RS)
750 		eth_fec |= ETHTOOL_FEC_RS;
751 	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
752 		eth_fec |= ETHTOOL_FEC_BASER;
753 
754 	/* if nothing is set, then FEC is off */
755 	if (!eth_fec)
756 		eth_fec = ETHTOOL_FEC_OFF;
757 
758 	return eth_fec;
759 }
760 
761 /* Translate Common Code FEC value into ethtool value. */
762 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
763 {
764 	unsigned int eth_fec = 0;
765 
766 	if (cc_fec & FEC_AUTO)
767 		eth_fec |= ETHTOOL_FEC_AUTO;
768 	if (cc_fec & FEC_RS)
769 		eth_fec |= ETHTOOL_FEC_RS;
770 	if (cc_fec & FEC_BASER_RS)
771 		eth_fec |= ETHTOOL_FEC_BASER;
772 
773 	/* if nothing is set, then FEC is off */
774 	if (!eth_fec)
775 		eth_fec = ETHTOOL_FEC_OFF;
776 
777 	return eth_fec;
778 }
779 
780 /* Translate ethtool FEC value into Common Code value. */
781 static inline unsigned int eth_to_cc_fec(unsigned int eth_fec)
782 {
783 	unsigned int cc_fec = 0;
784 
785 	if (eth_fec & ETHTOOL_FEC_OFF)
786 		return cc_fec;
787 
788 	if (eth_fec & ETHTOOL_FEC_AUTO)
789 		cc_fec |= FEC_AUTO;
790 	if (eth_fec & ETHTOOL_FEC_RS)
791 		cc_fec |= FEC_RS;
792 	if (eth_fec & ETHTOOL_FEC_BASER)
793 		cc_fec |= FEC_BASER_RS;
794 
795 	return cc_fec;
796 }
797 
798 static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
799 {
800 	const struct port_info *pi = netdev_priv(dev);
801 	const struct link_config *lc = &pi->link_cfg;
802 
803 	/* Translate the Firmware FEC Support into the ethtool value.  We
804 	 * always support IEEE 802.3 "automatic" selection of Link FEC type if
805 	 * any FEC is supported.
806 	 */
807 	fec->fec = fwcap_to_eth_fec(lc->pcaps);
808 	if (fec->fec != ETHTOOL_FEC_OFF)
809 		fec->fec |= ETHTOOL_FEC_AUTO;
810 
811 	/* Translate the current internal FEC parameters into the
812 	 * ethtool values.
813 	 */
814 	fec->active_fec = cc_to_eth_fec(lc->fec);
815 
816 	return 0;
817 }
818 
819 static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
820 {
821 	struct port_info *pi = netdev_priv(dev);
822 	struct link_config *lc = &pi->link_cfg;
823 	struct link_config old_lc;
824 	int ret;
825 
826 	/* Save old Link Configuration in case the L1 Configure below
827 	 * fails.
828 	 */
829 	old_lc = *lc;
830 
831 	/* Try to perform the L1 Configure and return the result of that
832 	 * effort.  If it fails, revert the attempted change.
833 	 */
834 	lc->requested_fec = eth_to_cc_fec(fec->fec);
835 	ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox,
836 			    pi->tx_chan, lc);
837 	if (ret)
838 		*lc = old_lc;
839 	return ret;
840 }
841 
842 static void get_pauseparam(struct net_device *dev,
843 			   struct ethtool_pauseparam *epause)
844 {
845 	struct port_info *p = netdev_priv(dev);
846 
847 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
848 	epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
849 	epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
850 }
851 
852 static int set_pauseparam(struct net_device *dev,
853 			  struct ethtool_pauseparam *epause)
854 {
855 	struct port_info *p = netdev_priv(dev);
856 	struct link_config *lc = &p->link_cfg;
857 
858 	if (epause->autoneg == AUTONEG_DISABLE)
859 		lc->requested_fc = 0;
860 	else if (lc->pcaps & FW_PORT_CAP32_ANEG)
861 		lc->requested_fc = PAUSE_AUTONEG;
862 	else
863 		return -EINVAL;
864 
865 	if (epause->rx_pause)
866 		lc->requested_fc |= PAUSE_RX;
867 	if (epause->tx_pause)
868 		lc->requested_fc |= PAUSE_TX;
869 	if (netif_running(dev))
870 		return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
871 				     lc);
872 	return 0;
873 }
874 
875 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
876 {
877 	const struct port_info *pi = netdev_priv(dev);
878 	const struct sge *s = &pi->adapter->sge;
879 
880 	e->rx_max_pending = MAX_RX_BUFFERS;
881 	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
882 	e->rx_jumbo_max_pending = 0;
883 	e->tx_max_pending = MAX_TXQ_ENTRIES;
884 
885 	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
886 	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
887 	e->rx_jumbo_pending = 0;
888 	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
889 }
890 
891 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
892 {
893 	int i;
894 	const struct port_info *pi = netdev_priv(dev);
895 	struct adapter *adapter = pi->adapter;
896 	struct sge *s = &adapter->sge;
897 
898 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
899 	    e->tx_pending > MAX_TXQ_ENTRIES ||
900 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
901 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
902 	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
903 		return -EINVAL;
904 
905 	if (adapter->flags & CXGB4_FULL_INIT_DONE)
906 		return -EBUSY;
907 
908 	for (i = 0; i < pi->nqsets; ++i) {
909 		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
910 		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
911 		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
912 	}
913 	return 0;
914 }
915 
916 /**
917  * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
918  * @dev: the network device
919  * @us: the hold-off time in us, or 0 to disable timer
920  * @cnt: the hold-off packet count, or 0 to disable counter
921  *
922  * Set the RX interrupt hold-off parameters for a network device.
923  */
924 static int set_rx_intr_params(struct net_device *dev,
925 			      unsigned int us, unsigned int cnt)
926 {
927 	int i, err;
928 	struct port_info *pi = netdev_priv(dev);
929 	struct adapter *adap = pi->adapter;
930 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
931 
932 	for (i = 0; i < pi->nqsets; i++, q++) {
933 		err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
934 		if (err)
935 			return err;
936 	}
937 	return 0;
938 }
939 
940 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
941 {
942 	int i;
943 	struct port_info *pi = netdev_priv(dev);
944 	struct adapter *adap = pi->adapter;
945 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
946 
947 	for (i = 0; i < pi->nqsets; i++, q++)
948 		q->rspq.adaptive_rx = adaptive_rx;
949 
950 	return 0;
951 }
952 
953 static int get_adaptive_rx_setting(struct net_device *dev)
954 {
955 	struct port_info *pi = netdev_priv(dev);
956 	struct adapter *adap = pi->adapter;
957 	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
958 
959 	return q->rspq.adaptive_rx;
960 }
961 
962 /* Return the current global Adapter SGE Doorbell Queue Timer Tick for all
963  * Ethernet TX Queues.
964  */
965 static int get_dbqtimer_tick(struct net_device *dev)
966 {
967 	struct port_info *pi = netdev_priv(dev);
968 	struct adapter *adap = pi->adapter;
969 
970 	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
971 		return 0;
972 
973 	return adap->sge.dbqtimer_tick;
974 }
975 
976 /* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
977  * associated with a Network Device.
978  */
979 static int get_dbqtimer(struct net_device *dev)
980 {
981 	struct port_info *pi = netdev_priv(dev);
982 	struct adapter *adap = pi->adapter;
983 	struct sge_eth_txq *txq;
984 
985 	txq = &adap->sge.ethtxq[pi->first_qset];
986 
987 	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
988 		return 0;
989 
990 	/* all of the TX Queues use the same Timer Index */
991 	return adap->sge.dbqtimer_val[txq->dbqtimerix];
992 }
993 
994 /* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
995  * Queues.  This is the fundamental "Tick" that sets the scale of values which
996  * can be used.  Individual Ethernet TX Queues index into a relatively small
997  * array of Tick Multipliers.  Changing the base Tick will thus change all of
998  * the resulting Timer Values associated with those multipliers for all
999  * Ethernet TX Queues.
1000  */
1001 static int set_dbqtimer_tick(struct net_device *dev, int usecs)
1002 {
1003 	struct port_info *pi = netdev_priv(dev);
1004 	struct adapter *adap = pi->adapter;
1005 	struct sge *s = &adap->sge;
1006 	u32 param, val;
1007 	int ret;
1008 
1009 	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1010 		return 0;
1011 
1012 	/* return early if it's the same Timer Tick we're already using */
1013 	if (s->dbqtimer_tick == usecs)
1014 		return 0;
1015 
1016 	/* attempt to set the new Timer Tick value */
1017 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1018 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
1019 	val = usecs;
1020 	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
1021 	if (ret)
1022 		return ret;
1023 	s->dbqtimer_tick = usecs;
1024 
1025 	/* if successful, reread resulting dependent Timer values */
1026 	ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
1027 				    s->dbqtimer_val);
1028 	return ret;
1029 }
1030 
1031 /* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
1032  * associated with a Network Device.  There is a relatively small array of
1033  * possible Timer Values so we need to pick the closest value available.
1034  */
1035 static int set_dbqtimer(struct net_device *dev, int usecs)
1036 {
1037 	int qix, timerix, min_timerix, delta, min_delta;
1038 	struct port_info *pi = netdev_priv(dev);
1039 	struct adapter *adap = pi->adapter;
1040 	struct sge *s = &adap->sge;
1041 	struct sge_eth_txq *txq;
1042 	u32 param, val;
1043 	int ret;
1044 
1045 	if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1046 		return 0;
1047 
1048 	/* Find the SGE Doorbell Timer Value that's closest to the requested
1049 	 * value.
1050 	 */
1051 	min_delta = INT_MAX;
1052 	min_timerix = 0;
1053 	for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
1054 		delta = s->dbqtimer_val[timerix] - usecs;
1055 		if (delta < 0)
1056 			delta = -delta;
1057 		if (delta < min_delta) {
1058 			min_delta = delta;
1059 			min_timerix = timerix;
1060 		}
1061 	}
1062 
1063 	/* Return early if it's the same Timer Index we're already using.
1064 	 * We use the same Timer Index for all of the TX Queues for an
1065 	 * interface so it's only necessary to check the first one.
1066 	 */
1067 	txq = &s->ethtxq[pi->first_qset];
1068 	if (txq->dbqtimerix == min_timerix)
1069 		return 0;
1070 
1071 	for (qix = 0; qix < pi->nqsets; qix++, txq++) {
1072 		if (adap->flags & CXGB4_FULL_INIT_DONE) {
1073 			param =
1074 			 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1075 			  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
1076 			  FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
1077 			val = min_timerix;
1078 			ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
1079 					    1, &param, &val);
1080 			if (ret)
1081 				return ret;
1082 		}
1083 		txq->dbqtimerix = min_timerix;
1084 	}
1085 	return 0;
1086 }
1087 
1088 /* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
1089  * Queues and the Timer Value for the Ethernet TX Queues associated with a
1090  * Network Device.  Since changing the global Tick changes all of the
1091  * available Timer Values, we need to do this first before selecting the
1092  * resulting closest Timer Value.  Moreover, since the Tick is global,
1093  * changing it affects the Timer Values for all Network Devices on the
1094  * adapter.  So, before changing the Tick, we grab all of the current Timer
1095  * Values for other Network Devices on this Adapter and then attempt to select
1096  * new Timer Values which are close to the old values ...
1097  */
1098 static int set_dbqtimer_tickval(struct net_device *dev,
1099 				int tick_usecs, int timer_usecs)
1100 {
1101 	struct port_info *pi = netdev_priv(dev);
1102 	struct adapter *adap = pi->adapter;
1103 	int timer[MAX_NPORTS];
1104 	unsigned int port;
1105 	int ret;
1106 
1107 	/* Grab the other adapter Network Interface current timers and fill in
1108 	 * the new one for this Network Interface.
1109 	 */
1110 	for_each_port(adap, port)
1111 		if (port == pi->port_id)
1112 			timer[port] = timer_usecs;
1113 		else
1114 			timer[port] = get_dbqtimer(adap->port[port]);
1115 
1116 	/* Change the global Tick first ... */
1117 	ret = set_dbqtimer_tick(dev, tick_usecs);
1118 	if (ret)
1119 		return ret;
1120 
1121 	/* ... and then set all of the Network Interface Timer Values ... */
1122 	for_each_port(adap, port) {
1123 		ret = set_dbqtimer(adap->port[port], timer[port]);
1124 		if (ret)
1125 			return ret;
1126 	}
1127 
1128 	return 0;
1129 }
1130 
1131 static int set_coalesce(struct net_device *dev,
1132 			struct ethtool_coalesce *coalesce)
1133 {
1134 	int ret;
1135 
1136 	set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);
1137 
1138 	ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
1139 				 coalesce->rx_max_coalesced_frames);
1140 	if (ret)
1141 		return ret;
1142 
1143 	return set_dbqtimer_tickval(dev,
1144 				    coalesce->tx_coalesce_usecs_irq,
1145 				    coalesce->tx_coalesce_usecs);
1146 }
1147 
1148 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1149 {
1150 	const struct port_info *pi = netdev_priv(dev);
1151 	const struct adapter *adap = pi->adapter;
1152 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1153 
1154 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
1155 	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
1156 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
1157 	c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
1158 	c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
1159 	c->tx_coalesce_usecs = get_dbqtimer(dev);
1160 	return 0;
1161 }
1162 
1163 /* The next two routines implement eeprom read/write from physical addresses.
1164  */
1165 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1166 {
1167 	int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1168 
1169 	if (vaddr >= 0)
1170 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1171 	return vaddr < 0 ? vaddr : 0;
1172 }
1173 
1174 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1175 {
1176 	int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1177 
1178 	if (vaddr >= 0)
1179 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1180 	return vaddr < 0 ? vaddr : 0;
1181 }
1182 
1183 #define EEPROM_MAGIC 0x38E2F10C
1184 
1185 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1186 		      u8 *data)
1187 {
1188 	int i, err = 0;
1189 	struct adapter *adapter = netdev2adap(dev);
1190 	u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);
1191 
1192 	if (!buf)
1193 		return -ENOMEM;
1194 
1195 	e->magic = EEPROM_MAGIC;
1196 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1197 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1198 
1199 	if (!err)
1200 		memcpy(data, buf + e->offset, e->len);
1201 	kvfree(buf);
1202 	return err;
1203 }
1204 
1205 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1206 		      u8 *data)
1207 {
1208 	u8 *buf;
1209 	int err = 0;
1210 	u32 aligned_offset, aligned_len, *p;
1211 	struct adapter *adapter = netdev2adap(dev);
1212 
1213 	if (eeprom->magic != EEPROM_MAGIC)
1214 		return -EINVAL;
1215 
1216 	aligned_offset = eeprom->offset & ~3;
1217 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1218 
1219 	if (adapter->pf > 0) {
1220 		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
1221 
1222 		if (aligned_offset < start ||
1223 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
1224 			return -EPERM;
1225 	}
1226 
1227 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1228 		/* RMW possibly needed for first or last words.
1229 		 */
1230 		buf = kvzalloc(aligned_len, GFP_KERNEL);
1231 		if (!buf)
1232 			return -ENOMEM;
1233 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1234 		if (!err && aligned_len > 4)
1235 			err = eeprom_rd_phys(adapter,
1236 					     aligned_offset + aligned_len - 4,
1237 					     (u32 *)&buf[aligned_len - 4]);
1238 		if (err)
1239 			goto out;
1240 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1241 	} else {
1242 		buf = data;
1243 	}
1244 
1245 	err = t4_seeprom_wp(adapter, false);
1246 	if (err)
1247 		goto out;
1248 
1249 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1250 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
1251 		aligned_offset += 4;
1252 	}
1253 
1254 	if (!err)
1255 		err = t4_seeprom_wp(adapter, true);
1256 out:
1257 	if (buf != data)
1258 		kvfree(buf);
1259 	return err;
1260 }
1261 
1262 static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev,
1263 				       const u8 *data, u32 size)
1264 {
1265 	struct adapter *adap = netdev2adap(netdev);
1266 	int ret;
1267 
1268 	ret = t4_load_bootcfg(adap, data, size);
1269 	if (ret)
1270 		dev_err(adap->pdev_dev, "Failed to load boot cfg image\n");
1271 
1272 	return ret;
1273 }
1274 
1275 static int cxgb4_ethtool_flash_boot(struct net_device *netdev,
1276 				    const u8 *bdata, u32 size)
1277 {
1278 	struct adapter *adap = netdev2adap(netdev);
1279 	unsigned int offset;
1280 	u8 *data;
1281 	int ret;
1282 
1283 	data = kmemdup(bdata, size, GFP_KERNEL);
1284 	if (!data)
1285 		return -ENOMEM;
1286 
1287 	offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A)));
1288 
1289 	ret = t4_load_boot(adap, data, offset, size);
1290 	if (ret)
1291 		dev_err(adap->pdev_dev, "Failed to load boot image\n");
1292 
1293 	kfree(data);
1294 	return ret;
1295 }
1296 
1297 #define CXGB4_PHY_SIG 0x130000ea
1298 
1299 static int cxgb4_validate_phy_image(const u8 *data, u32 *size)
1300 {
1301 	struct cxgb4_fw_data *header;
1302 
1303 	header = (struct cxgb4_fw_data *)data;
1304 	if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG)
1305 		return -EINVAL;
1306 
1307 	return 0;
1308 }
1309 
1310 static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
1311 				   const u8 *data, u32 size)
1312 {
1313 	struct adapter *adap = netdev2adap(netdev);
1314 	int ret;
1315 
1316 	ret = cxgb4_validate_phy_image(data, NULL);
1317 	if (ret) {
1318 		dev_err(adap->pdev_dev, "PHY signature mismatch\n");
1319 		return ret;
1320 	}
1321 
1322 	spin_lock_bh(&adap->win0_lock);
1323 	ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
1324 	spin_unlock_bh(&adap->win0_lock);
1325 	if (ret)
1326 		dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
1327 
1328 	return ret;
1329 }
1330 
1331 static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
1332 				  const u8 *data, u32 size)
1333 {
1334 	struct adapter *adap = netdev2adap(netdev);
1335 	unsigned int mbox = PCIE_FW_MASTER_M + 1;
1336 	int ret;
1337 
1338 	/* If the adapter has been fully initialized then we'll go ahead and
1339 	 * try to get the firmware's cooperation in upgrading to the new
1340 	 * firmware image otherwise we'll try to do the entire job from the
1341 	 * host ... and we always "force" the operation in this path.
1342 	 */
1343 	if (adap->flags & CXGB4_FULL_INIT_DONE)
1344 		mbox = adap->mbox;
1345 
1346 	ret = t4_fw_upgrade(adap, mbox, data, size, 1);
1347 	if (ret)
1348 		dev_err(adap->pdev_dev,
1349 			"Failed to flash firmware\n");
1350 
1351 	return ret;
1352 }
1353 
1354 static int cxgb4_ethtool_flash_region(struct net_device *netdev,
1355 				      const u8 *data, u32 size, u32 region)
1356 {
1357 	struct adapter *adap = netdev2adap(netdev);
1358 	int ret;
1359 
1360 	switch (region) {
1361 	case CXGB4_ETHTOOL_FLASH_FW:
1362 		ret = cxgb4_ethtool_flash_fw(netdev, data, size);
1363 		break;
1364 	case CXGB4_ETHTOOL_FLASH_PHY:
1365 		ret = cxgb4_ethtool_flash_phy(netdev, data, size);
1366 		break;
1367 	case CXGB4_ETHTOOL_FLASH_BOOT:
1368 		ret = cxgb4_ethtool_flash_boot(netdev, data, size);
1369 		break;
1370 	case CXGB4_ETHTOOL_FLASH_BOOTCFG:
1371 		ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size);
1372 		break;
1373 	default:
1374 		ret = -EOPNOTSUPP;
1375 		break;
1376 	}
1377 
1378 	if (!ret)
1379 		dev_info(adap->pdev_dev,
1380 			 "loading %s successful, reload cxgb4 driver\n",
1381 			 flash_region_strings[region]);
1382 	return ret;
1383 }
1384 
1385 #define CXGB4_FW_SIG 0x4368656c
1386 #define CXGB4_FW_SIG_OFFSET 0x160
1387 
1388 static int cxgb4_validate_fw_image(const u8 *data, u32 *size)
1389 {
1390 	struct cxgb4_fw_data *header;
1391 
1392 	header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET];
1393 	if (be32_to_cpu(header->signature) != CXGB4_FW_SIG)
1394 		return -EINVAL;
1395 
1396 	if (size)
1397 		*size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512;
1398 
1399 	return 0;
1400 }
1401 
1402 static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size)
1403 {
1404 	struct cxgb4_bootcfg_data *header;
1405 
1406 	header = (struct cxgb4_bootcfg_data *)data;
1407 	if (le16_to_cpu(header->signature) != BOOT_CFG_SIG)
1408 		return -EINVAL;
1409 
1410 	return 0;
1411 }
1412 
1413 static int cxgb4_validate_boot_image(const u8 *data, u32 *size)
1414 {
1415 	struct cxgb4_pci_exp_rom_header *exp_header;
1416 	struct cxgb4_pcir_data *pcir_header;
1417 	struct legacy_pci_rom_hdr *header;
1418 	const u8 *cur_header = data;
1419 	u16 pcir_offset;
1420 
1421 	exp_header = (struct cxgb4_pci_exp_rom_header *)data;
1422 
1423 	if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE)
1424 		return -EINVAL;
1425 
1426 	if (size) {
1427 		do {
1428 			header = (struct legacy_pci_rom_hdr *)cur_header;
1429 			pcir_offset = le16_to_cpu(header->pcir_offset);
1430 			pcir_header = (struct cxgb4_pcir_data *)(cur_header +
1431 				      pcir_offset);
1432 
1433 			*size += header->size512 * 512;
1434 			cur_header += header->size512 * 512;
1435 		} while (!(pcir_header->indicator & CXGB4_HDR_INDI));
1436 	}
1437 
1438 	return 0;
1439 }
1440 
1441 static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size)
1442 {
1443 	if (!cxgb4_validate_fw_image(data, size))
1444 		return CXGB4_ETHTOOL_FLASH_FW;
1445 	if (!cxgb4_validate_boot_image(data, size))
1446 		return CXGB4_ETHTOOL_FLASH_BOOT;
1447 	if (!cxgb4_validate_phy_image(data, size))
1448 		return CXGB4_ETHTOOL_FLASH_PHY;
1449 	if (!cxgb4_validate_bootcfg_image(data, size))
1450 		return CXGB4_ETHTOOL_FLASH_BOOTCFG;
1451 
1452 	return -EOPNOTSUPP;
1453 }
1454 
1455 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1456 {
1457 	struct adapter *adap = netdev2adap(netdev);
1458 	const struct firmware *fw;
1459 	unsigned int master;
1460 	u8 master_vld = 0;
1461 	const u8 *fw_data;
1462 	size_t fw_size;
1463 	u32 size = 0;
1464 	u32 pcie_fw;
1465 	int region;
1466 	int ret;
1467 
1468 	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
1469 	master = PCIE_FW_MASTER_G(pcie_fw);
1470 	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
1471 		master_vld = 1;
1472 	/* if csiostor is the master return */
1473 	if (master_vld && (master != adap->pf)) {
1474 		dev_warn(adap->pdev_dev,
1475 			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
1476 		return -EOPNOTSUPP;
1477 	}
1478 
1479 	ef->data[sizeof(ef->data) - 1] = '\0';
1480 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1481 	if (ret < 0)
1482 		return ret;
1483 
1484 	fw_data = fw->data;
1485 	fw_size = fw->size;
1486 	if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) {
1487 		while (fw_size > 0) {
1488 			size = 0;
1489 			region = cxgb4_ethtool_get_flash_region(fw_data, &size);
1490 			if (region < 0 || !size) {
1491 				ret = region;
1492 				goto out_free_fw;
1493 			}
1494 
1495 			ret = cxgb4_ethtool_flash_region(netdev, fw_data, size,
1496 							 region);
1497 			if (ret)
1498 				goto out_free_fw;
1499 
1500 			fw_data += size;
1501 			fw_size -= size;
1502 		}
1503 	} else {
1504 		ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size,
1505 						 ef->region);
1506 	}
1507 
1508 out_free_fw:
1509 	release_firmware(fw);
1510 	return ret;
1511 }
1512 
1513 static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
1514 {
1515 	struct port_info *pi = netdev_priv(dev);
1516 	struct  adapter *adapter = pi->adapter;
1517 
1518 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1519 				   SOF_TIMESTAMPING_RX_SOFTWARE |
1520 				   SOF_TIMESTAMPING_SOFTWARE;
1521 
1522 	ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
1523 				    SOF_TIMESTAMPING_TX_HARDWARE |
1524 				    SOF_TIMESTAMPING_RAW_HARDWARE;
1525 
1526 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1527 			    (1 << HWTSTAMP_TX_ON);
1528 
1529 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1530 			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1531 			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1532 			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1533 			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1534 			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1535 
1536 	if (adapter->ptp_clock)
1537 		ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
1538 	else
1539 		ts_info->phc_index = -1;
1540 
1541 	return 0;
1542 }
1543 
1544 static u32 get_rss_table_size(struct net_device *dev)
1545 {
1546 	const struct port_info *pi = netdev_priv(dev);
1547 
1548 	return pi->rss_size;
1549 }
1550 
1551 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
1552 {
1553 	const struct port_info *pi = netdev_priv(dev);
1554 	unsigned int n = pi->rss_size;
1555 
1556 	if (hfunc)
1557 		*hfunc = ETH_RSS_HASH_TOP;
1558 	if (!p)
1559 		return 0;
1560 	while (n--)
1561 		p[n] = pi->rss[n];
1562 	return 0;
1563 }
1564 
1565 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
1566 			 const u8 hfunc)
1567 {
1568 	unsigned int i;
1569 	struct port_info *pi = netdev_priv(dev);
1570 
1571 	/* We require at least one supported parameter to be changed and no
1572 	 * change in any of the unsupported parameters
1573 	 */
1574 	if (key ||
1575 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
1576 		return -EOPNOTSUPP;
1577 	if (!p)
1578 		return 0;
1579 
1580 	/* Interface must be brought up atleast once */
1581 	if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
1582 		for (i = 0; i < pi->rss_size; i++)
1583 			pi->rss[i] = p[i];
1584 
1585 		return cxgb4_write_rss(pi, pi->rss);
1586 	}
1587 
1588 	return -EPERM;
1589 }
1590 
1591 static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
1592 						   u32 ftid)
1593 {
1594 	struct tid_info *t = &adap->tids;
1595 	struct filter_entry *f;
1596 
1597 	if (ftid < t->nhpftids)
1598 		f = &adap->tids.hpftid_tab[ftid];
1599 	else if (ftid < t->nftids)
1600 		f = &adap->tids.ftid_tab[ftid - t->nhpftids];
1601 	else
1602 		f = lookup_tid(&adap->tids, ftid);
1603 
1604 	return f;
1605 }
1606 
1607 static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
1608 				   struct ch_filter_specification *dfs)
1609 {
1610 	switch (dfs->val.proto) {
1611 	case IPPROTO_TCP:
1612 		if (dfs->type)
1613 			fs->flow_type = TCP_V6_FLOW;
1614 		else
1615 			fs->flow_type = TCP_V4_FLOW;
1616 		break;
1617 	case IPPROTO_UDP:
1618 		if (dfs->type)
1619 			fs->flow_type = UDP_V6_FLOW;
1620 		else
1621 			fs->flow_type = UDP_V4_FLOW;
1622 		break;
1623 	}
1624 
1625 	if (dfs->type) {
1626 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport);
1627 		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport);
1628 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport);
1629 		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport);
1630 		memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0],
1631 		       sizeof(fs->h_u.tcp_ip6_spec.ip6src));
1632 		memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0],
1633 		       sizeof(fs->m_u.tcp_ip6_spec.ip6src));
1634 		memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0],
1635 		       sizeof(fs->h_u.tcp_ip6_spec.ip6dst));
1636 		memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0],
1637 		       sizeof(fs->m_u.tcp_ip6_spec.ip6dst));
1638 		fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos;
1639 		fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos;
1640 	} else {
1641 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport);
1642 		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport);
1643 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport);
1644 		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport);
1645 		memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0],
1646 		       sizeof(fs->h_u.tcp_ip4_spec.ip4src));
1647 		memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0],
1648 		       sizeof(fs->m_u.tcp_ip4_spec.ip4src));
1649 		memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0],
1650 		       sizeof(fs->h_u.tcp_ip4_spec.ip4dst));
1651 		memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0],
1652 		       sizeof(fs->m_u.tcp_ip4_spec.ip4dst));
1653 		fs->h_u.tcp_ip4_spec.tos = dfs->val.tos;
1654 		fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos;
1655 	}
1656 	fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan);
1657 	fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan);
1658 	fs->flow_type |= FLOW_EXT;
1659 
1660 	if (dfs->action == FILTER_DROP)
1661 		fs->ring_cookie = RX_CLS_FLOW_DISC;
1662 	else
1663 		fs->ring_cookie = dfs->iq;
1664 }
1665 
1666 static int cxgb4_ntuple_get_filter(struct net_device *dev,
1667 				   struct ethtool_rxnfc *cmd,
1668 				   unsigned int loc)
1669 {
1670 	const struct port_info *pi = netdev_priv(dev);
1671 	struct adapter *adap = netdev2adap(dev);
1672 	struct filter_entry *f;
1673 	int ftid;
1674 
1675 	if (!(adap->flags & CXGB4_FULL_INIT_DONE))
1676 		return -EAGAIN;
1677 
1678 	/* Check for maximum filter range */
1679 	if (!adap->ethtool_filters)
1680 		return -EOPNOTSUPP;
1681 
1682 	if (loc >= adap->ethtool_filters->nentries)
1683 		return -ERANGE;
1684 
1685 	if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap))
1686 		return -ENOENT;
1687 
1688 	ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc];
1689 
1690 	/* Fetch filter_entry */
1691 	f = cxgb4_get_filter_entry(adap, ftid);
1692 
1693 	cxgb4_fill_filter_rule(&cmd->fs, &f->fs);
1694 
1695 	return 0;
1696 }
1697 
1698 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1699 		     u32 *rules)
1700 {
1701 	const struct port_info *pi = netdev_priv(dev);
1702 	struct adapter *adap = netdev2adap(dev);
1703 	unsigned int count = 0, index = 0;
1704 	int ret = 0;
1705 
1706 	switch (info->cmd) {
1707 	case ETHTOOL_GRXFH: {
1708 		unsigned int v = pi->rss_mode;
1709 
1710 		info->data = 0;
1711 		switch (info->flow_type) {
1712 		case TCP_V4_FLOW:
1713 			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
1714 				info->data = RXH_IP_SRC | RXH_IP_DST |
1715 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1716 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1717 				info->data = RXH_IP_SRC | RXH_IP_DST;
1718 			break;
1719 		case UDP_V4_FLOW:
1720 			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
1721 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1722 				info->data = RXH_IP_SRC | RXH_IP_DST |
1723 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1724 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1725 				info->data = RXH_IP_SRC | RXH_IP_DST;
1726 			break;
1727 		case SCTP_V4_FLOW:
1728 		case AH_ESP_V4_FLOW:
1729 		case IPV4_FLOW:
1730 			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1731 				info->data = RXH_IP_SRC | RXH_IP_DST;
1732 			break;
1733 		case TCP_V6_FLOW:
1734 			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
1735 				info->data = RXH_IP_SRC | RXH_IP_DST |
1736 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1737 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1738 				info->data = RXH_IP_SRC | RXH_IP_DST;
1739 			break;
1740 		case UDP_V6_FLOW:
1741 			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
1742 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1743 				info->data = RXH_IP_SRC | RXH_IP_DST |
1744 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1745 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1746 				info->data = RXH_IP_SRC | RXH_IP_DST;
1747 			break;
1748 		case SCTP_V6_FLOW:
1749 		case AH_ESP_V6_FLOW:
1750 		case IPV6_FLOW:
1751 			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1752 				info->data = RXH_IP_SRC | RXH_IP_DST;
1753 			break;
1754 		}
1755 		return 0;
1756 	}
1757 	case ETHTOOL_GRXRINGS:
1758 		info->data = pi->nqsets;
1759 		return 0;
1760 	case ETHTOOL_GRXCLSRLCNT:
1761 		info->rule_cnt =
1762 		       adap->ethtool_filters->port[pi->port_id].in_use;
1763 		return 0;
1764 	case ETHTOOL_GRXCLSRULE:
1765 		return cxgb4_ntuple_get_filter(dev, info, info->fs.location);
1766 	case ETHTOOL_GRXCLSRLALL:
1767 		info->data = adap->ethtool_filters->nentries;
1768 		while (count < info->rule_cnt) {
1769 			ret = cxgb4_ntuple_get_filter(dev, info, index);
1770 			if (!ret)
1771 				rules[count++] = index;
1772 			index++;
1773 		}
1774 		return 0;
1775 	}
1776 
1777 	return -EOPNOTSUPP;
1778 }
1779 
1780 static int cxgb4_ntuple_del_filter(struct net_device *dev,
1781 				   struct ethtool_rxnfc *cmd)
1782 {
1783 	struct cxgb4_ethtool_filter_info *filter_info;
1784 	struct adapter *adapter = netdev2adap(dev);
1785 	struct port_info *pi = netdev_priv(dev);
1786 	struct filter_entry *f;
1787 	u32 filter_id;
1788 	int ret;
1789 
1790 	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1791 		return -EAGAIN;  /* can still change nfilters */
1792 
1793 	if (!adapter->ethtool_filters)
1794 		return -EOPNOTSUPP;
1795 
1796 	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1797 		dev_err(adapter->pdev_dev,
1798 			"Location must be < %u",
1799 			adapter->ethtool_filters->nentries);
1800 		return -ERANGE;
1801 	}
1802 
1803 	filter_info = &adapter->ethtool_filters->port[pi->port_id];
1804 
1805 	if (!test_bit(cmd->fs.location, filter_info->bmap))
1806 		return -ENOENT;
1807 
1808 	filter_id = filter_info->loc_array[cmd->fs.location];
1809 	f = cxgb4_get_filter_entry(adapter, filter_id);
1810 
1811 	ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
1812 	if (ret)
1813 		goto err;
1814 
1815 	clear_bit(cmd->fs.location, filter_info->bmap);
1816 	filter_info->in_use--;
1817 
1818 err:
1819 	return ret;
1820 }
1821 
1822 /* Add an ethtool n-tuple filter at the location given in cmd->fs. */
1823 static int cxgb4_ntuple_set_filter(struct net_device *netdev,
1824 				   struct ethtool_rxnfc *cmd)
1825 {
1826 	struct ethtool_rx_flow_spec_input input = {};
1827 	struct cxgb4_ethtool_filter_info *filter_info;
1828 	struct adapter *adapter = netdev2adap(netdev);
1829 	struct port_info *pi = netdev_priv(netdev);
1830 	struct ch_filter_specification fs;
1831 	struct ethtool_rx_flow_rule *flow;
1832 	u32 tid;
1833 	int ret;
1834 
1835 	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
1836 		return -EAGAIN;  /* can still change nfilters */
1837 
1838 	if (!adapter->ethtool_filters)
1839 		return -EOPNOTSUPP;
1840 
1841 	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
1842 		dev_err(adapter->pdev_dev,
1843 			"Location must be < %u",
1844 			adapter->ethtool_filters->nentries);
1845 		return -ERANGE;
1846 	}
1847 
1848 	if (test_bit(cmd->fs.location,
1849 		     adapter->ethtool_filters->port[pi->port_id].bmap))
1850 		return -EEXIST;
1851 
1852 	memset(&fs, 0, sizeof(fs));
1853 
1854 	input.fs = &cmd->fs;
1855 	flow = ethtool_rx_flow_rule_create(&input);
1856 	if (IS_ERR(flow)) {
1857 		ret = PTR_ERR(flow);
1858 		goto exit;
1859 	}
1860 
1861 	fs.hitcnts = 1;
1862 
1863 	ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location,
1864 				      NULL, &fs, &tid);
1865 	if (ret)
1866 		goto free;
1867 
1868 	filter_info = &adapter->ethtool_filters->port[pi->port_id];
1869 
1870 	filter_info->loc_array[cmd->fs.location] = tid;
1871 	set_bit(cmd->fs.location, filter_info->bmap);
1872 	filter_info->in_use++;
1873 
1874 free:
1875 	ethtool_rx_flow_rule_destroy(flow);
1876 exit:
1877 	return ret;
1878 }
1879 
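/* ethtool -N/-U entry point: insert (ETHTOOL_SRXCLSRLINS) or delete
 * (ETHTOOL_SRXCLSRLDEL) an n-tuple filter, e.g.
 * "ethtool -N eth0 flow-type tcp4 dst-port 80 action 1 loc 0".
 */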
1880 static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1881 {
1882 	int ret = -EOPNOTSUPP;
1883 
1884 	switch (cmd->cmd) {
1885 	case ETHTOOL_SRXCLSRLINS:
1886 		ret = cxgb4_ntuple_set_filter(dev, cmd);
1887 		break;
1888 	case ETHTOOL_SRXCLSRLDEL:
1889 		ret = cxgb4_ntuple_del_filter(dev, cmd);
1890 		break;
1891 	default:
1892 		break;
1893 	}
1894 
1895 	return ret;
1896 }
1897 
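/* ethtool -W handler: select which cudbg entities to collect and
 * precompute the dump length reported by get_dump_flag()/get_dump_data().
 */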
1898 static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
1899 {
1900 	struct adapter *adapter = netdev2adap(dev);
1901 	u32 len = 0;
1902 
1903 	len = sizeof(struct cudbg_hdr) +
1904 	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1905 	len += cxgb4_get_dump_length(adapter, eth_dump->flag);
1906 
1907 	adapter->eth_dump.flag = eth_dump->flag;
1908 	adapter->eth_dump.len = len;
1909 	return 0;
1910 }
1911 
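/* Report the currently configured dump flag, length and version. */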
1912 static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump)
1913 {
1914 	struct adapter *adapter = netdev2adap(dev);
1915 
1916 	eth_dump->flag = adapter->eth_dump.flag;
1917 	eth_dump->len = adapter->eth_dump.len;
1918 	eth_dump->version = adapter->eth_dump.version;
1919 	return 0;
1920 }
1921 
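/* ethtool -w handler: collect the debug dump selected via set_dump()
 * into the caller-supplied buffer.
 */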
1922 static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
1923 			 void *buf)
1924 {
1925 	struct adapter *adapter = netdev2adap(dev);
1926 	u32 len = 0;
1927 	int ret = 0;
1928 
1929 	if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE)
1930 		return -ENOENT;
1931 
1932 	len = sizeof(struct cudbg_hdr) +
1933 	      sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1934 	len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag);
1935 	if (eth_dump->len < len)
1936 		return -ENOMEM;
1937 
1938 	ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag);
1939 	if (ret)
1940 		return ret;
1941 
1942 	eth_dump->flag = adapter->eth_dump.flag;
1943 	eth_dump->len = len;
1944 	eth_dump->version = adapter->eth_dump.version;
1945 	return 0;
1946 }
1947 
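/* ethtool -m support: identify the EEPROM layout (SFF-8079/8472 for
 * SFP+, SFF-8436/8636 for QSFP) of the module plugged into this port.
 */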
1948 static int cxgb4_get_module_info(struct net_device *dev,
1949 				 struct ethtool_modinfo *modinfo)
1950 {
1951 	struct port_info *pi = netdev_priv(dev);
1952 	u8 sff8472_comp, sff_diag_type, sff_rev;
1953 	struct adapter *adapter = pi->adapter;
1954 	int ret;
1955 
1956 	if (!t4_is_inserted_mod_type(pi->mod_type))
1957 		return -EINVAL;
1958 
1959 	switch (pi->port_type) {
1960 	case FW_PORT_TYPE_SFP:
1961 	case FW_PORT_TYPE_QSA:
1962 	case FW_PORT_TYPE_SFP28:
1963 		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
1964 				I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
1965 				SFF_8472_COMP_LEN, &sff8472_comp);
1966 		if (ret)
1967 			return ret;
1968 		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
1969 				I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
1970 				SFP_DIAG_TYPE_LEN, &sff_diag_type);
1971 		if (ret)
1972 			return ret;
1973 
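		/* Modules that are not SFF-8472 compliant, or that require
		 * the address-change sequence to access the A2h page (diag
		 * monitoring type bit 2), only expose the base A0h page.
		 */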
1974 		if (!sff8472_comp || (sff_diag_type & 4)) {
1975 			modinfo->type = ETH_MODULE_SFF_8079;
1976 			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1977 		} else {
1978 			modinfo->type = ETH_MODULE_SFF_8472;
1979 			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1980 		}
1981 		break;
1982 
1983 	case FW_PORT_TYPE_QSFP:
1984 	case FW_PORT_TYPE_QSFP_10G:
1985 	case FW_PORT_TYPE_CR_QSFP:
1986 	case FW_PORT_TYPE_CR2_QSFP:
1987 	case FW_PORT_TYPE_CR4_QSFP:
1988 		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
1989 				I2C_DEV_ADDR_A0, SFF_REV_ADDR,
1990 				SFF_REV_LEN, &sff_rev);
1991 		/* For QSFP type ports, a revision value >= 3
1992 		 * means the module is SFF-8636 compliant.
1993 		 */
1994 		if (ret)
1995 			return ret;
1996 		if (sff_rev >= 0x3) {
1997 			modinfo->type = ETH_MODULE_SFF_8636;
1998 			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
1999 		} else {
2000 			modinfo->type = ETH_MODULE_SFF_8436;
2001 			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2002 		}
2003 		break;
2004 
2005 	default:
2006 		return -EINVAL;
2007 	}
2008 
2009 	return 0;
2010 }
2011 
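/* Read the module EEPROM contents over I2C, splitting the request across
 * the 0xa0 and 0xa2 device addresses when it crosses the first page.
 */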
2012 static int cxgb4_get_module_eeprom(struct net_device *dev,
2013 				   struct ethtool_eeprom *eprom, u8 *data)
2014 {
2015 	int ret = 0, offset = eprom->offset, len = eprom->len;
2016 	struct port_info *pi = netdev_priv(dev);
2017 	struct adapter *adapter = pi->adapter;
2018 
2019 	memset(data, 0, eprom->len);
2020 	if (offset + len <= I2C_PAGE_SIZE)
2021 		return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2022 				 I2C_DEV_ADDR_A0, offset, len, data);
2023 
2024 	/* offset + len spans the 0xa0 and 0xa2 pages */
2025 	if (offset <= I2C_PAGE_SIZE) {
2026 		/* read 0xa0 page */
2027 		len = I2C_PAGE_SIZE - offset;
2028 		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
2029 				 I2C_DEV_ADDR_A0, offset, len, data);
2030 		if (ret)
2031 			return ret;
2032 		offset = I2C_PAGE_SIZE;
2033 		/* Remaining bytes to be read from second page =
2034 		 * Total length - bytes read from first page
2035 		 */
2036 		len = eprom->len - len;
2037 	}
2038 	/* Read additional optical diagnostics from page 0xa2 if supported */
2039 	return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
2040 			 offset, len, &data[eprom->len - len]);
2041 }
2042 
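/* Report the adapter-wide and per-port private flags as a single bitmap
 * (ethtool --show-priv-flags).
 */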
2043 static u32 cxgb4_get_priv_flags(struct net_device *netdev)
2044 {
2045 	struct port_info *pi = netdev_priv(netdev);
2046 	struct adapter *adapter = pi->adapter;
2047 
2048 	return (adapter->eth_flags | pi->eth_flags);
2049 }
2050 
2051 /**
2052  *	set_flags - update the flag bits selected by @flags to match @new_flags
2053  *	@cur_flags: pointer to current flags
2054  *	@new_flags: new incoming flags
2055  *	@flags: mask of flag bits to update
2056  */
2057 static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
2058 {
2059 	*cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
2060 }
2061 
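/* Apply ethtool --set-priv-flags changes to the adapter-wide and
 * per-port private flag bits.
 */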
2062 static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
2063 {
2064 	struct port_info *pi = netdev_priv(netdev);
2065 	struct adapter *adapter = pi->adapter;
2066 
2067 	set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
2068 	set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
2069 
2070 	return 0;
2071 }
2072 
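/* Run the loopback packet self-test with the Tx path quiesced, restoring
 * queue and carrier state afterwards.
 */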
2073 static void cxgb4_lb_test(struct net_device *netdev, u64 *lb_status)
2074 {
2075 	int dev_state = netif_running(netdev);
2076 
2077 	if (dev_state) {
2078 		netif_tx_stop_all_queues(netdev);
2079 		netif_carrier_off(netdev);
2080 	}
2081 
2082 	*lb_status = cxgb4_selftest_lb_pkt(netdev);
2083 
2084 	if (dev_state) {
2085 		netif_tx_start_all_queues(netdev);
2086 		netif_carrier_on(netdev);
2087 	}
2088 }
2089 
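/* ethtool -t handler; only the offline loopback test is currently
 * implemented.
 */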
2090 static void cxgb4_self_test(struct net_device *netdev,
2091 			    struct ethtool_test *eth_test, u64 *data)
2092 {
2093 	struct port_info *pi = netdev_priv(netdev);
2094 	struct adapter *adap = pi->adapter;
2095 
2096 	memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST);
2097 
2098 	if (!(adap->flags & CXGB4_FULL_INIT_DONE) ||
2099 	    !(adap->flags & CXGB4_FW_OK)) {
2100 		eth_test->flags |= ETH_TEST_FL_FAILED;
2101 		return;
2102 	}
2103 
2104 	if (eth_test->flags & ETH_TEST_FL_OFFLINE)
2105 		cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]);
2106 
2107 	if (data[CXGB4_ETHTOOL_LB_TEST])
2108 		eth_test->flags |= ETH_TEST_FL_FAILED;
2109 }
2110 
2111 static const struct ethtool_ops cxgb_ethtool_ops = {
2112 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2113 				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
2114 				     ETHTOOL_COALESCE_TX_USECS_IRQ |
2115 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2116 	.get_link_ksettings = get_link_ksettings,
2117 	.set_link_ksettings = set_link_ksettings,
2118 	.get_fecparam      = get_fecparam,
2119 	.set_fecparam      = set_fecparam,
2120 	.get_drvinfo       = get_drvinfo,
2121 	.get_msglevel      = get_msglevel,
2122 	.set_msglevel      = set_msglevel,
2123 	.get_ringparam     = get_sge_param,
2124 	.set_ringparam     = set_sge_param,
2125 	.get_coalesce      = get_coalesce,
2126 	.set_coalesce      = set_coalesce,
2127 	.get_eeprom_len    = get_eeprom_len,
2128 	.get_eeprom        = get_eeprom,
2129 	.set_eeprom        = set_eeprom,
2130 	.get_pauseparam    = get_pauseparam,
2131 	.set_pauseparam    = set_pauseparam,
2132 	.get_link          = ethtool_op_get_link,
2133 	.get_strings       = get_strings,
2134 	.set_phys_id       = identify_port,
2135 	.nway_reset        = restart_autoneg,
2136 	.get_sset_count    = get_sset_count,
2137 	.get_ethtool_stats = get_stats,
2138 	.get_regs_len      = get_regs_len,
2139 	.get_regs          = get_regs,
2140 	.get_rxnfc         = get_rxnfc,
2141 	.set_rxnfc         = set_rxnfc,
2142 	.get_rxfh_indir_size = get_rss_table_size,
2143 	.get_rxfh	   = get_rss_table,
2144 	.set_rxfh	   = set_rss_table,
2145 	.self_test	   = cxgb4_self_test,
2146 	.flash_device      = set_flash,
2147 	.get_ts_info       = get_ts_info,
2148 	.set_dump          = set_dump,
2149 	.get_dump_flag     = get_dump_flag,
2150 	.get_dump_data     = get_dump_data,
2151 	.get_module_info   = cxgb4_get_module_info,
2152 	.get_module_eeprom = cxgb4_get_module_eeprom,
2153 	.get_priv_flags    = cxgb4_get_priv_flags,
2154 	.set_priv_flags    = cxgb4_set_priv_flags,
2155 };
2156 
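/* Free the per-port location arrays and bitmaps allocated by
 * cxgb4_init_ethtool_filters().
 */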
2157 void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
2158 {
2159 	struct cxgb4_ethtool_filter_info *eth_filter_info;
2160 	u8 i;
2161 
2162 	if (!adap->ethtool_filters)
2163 		return;
2164 
2165 	eth_filter_info = adap->ethtool_filters->port;
2166 
2167 	if (eth_filter_info) {
2168 		for (i = 0; i < adap->params.nports; i++) {
2169 			kvfree(eth_filter_info[i].loc_array);
2170 			kfree(eth_filter_info[i].bmap);
2171 		}
2172 		kfree(eth_filter_info);
2173 	}
2174 
2175 	kfree(adap->ethtool_filters);
2176 }
2177 
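/* Allocate the per-port arrays and bitmaps used to map ethtool filter
 * locations to hardware TIDs, sized to the adapter's filter capacity.
 */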
2178 int cxgb4_init_ethtool_filters(struct adapter *adap)
2179 {
2180 	struct cxgb4_ethtool_filter_info *eth_filter_info;
2181 	struct cxgb4_ethtool_filter *eth_filter;
2182 	struct tid_info *tids = &adap->tids;
2183 	u32 nentries, i;
2184 	int ret;
2185 
2186 	eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL);
2187 	if (!eth_filter)
2188 		return -ENOMEM;
2189 
2190 	eth_filter_info = kcalloc(adap->params.nports,
2191 				  sizeof(*eth_filter_info),
2192 				  GFP_KERNEL);
2193 	if (!eth_filter_info) {
2194 		ret = -ENOMEM;
2195 		goto free_eth_filter;
2196 	}
2197 
2198 	eth_filter->port = eth_filter_info;
2199 
2200 	nentries = tids->nhpftids + tids->nftids;
2201 	if (is_hashfilter(adap))
2202 		nentries += tids->nhash +
2203 			    (adap->tids.stid_base - adap->tids.tid_base);
2204 	eth_filter->nentries = nentries;
2205 
2206 	for (i = 0; i < adap->params.nports; i++) {
2207 		eth_filter->port[i].loc_array = kvcalloc(nentries, sizeof(u32), GFP_KERNEL);
2208 		if (!eth_filter->port[i].loc_array) {
2209 			ret = -ENOMEM;
2210 			goto free_eth_finfo;
2211 		}
2212 
2213 		eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
2214 						   sizeof(unsigned long),
2215 						   GFP_KERNEL);
2216 		if (!eth_filter->port[i].bmap) {
2217 			ret = -ENOMEM;
2218 			goto free_eth_finfo;
2219 		}
2220 	}
2221 
2222 	adap->ethtool_filters = eth_filter;
2223 	return 0;
2224 
2225 free_eth_finfo:
2226 	while (i-- > 0) {
2227 		kfree(eth_filter->port[i].bmap);
2228 		kvfree(eth_filter->port[i].loc_array);
2229 	}
2230 	kfree(eth_filter_info);
2231 
2232 free_eth_filter:
2233 	kfree(eth_filter);
2234 
2235 	return ret;
2236 }
2237 
2238 void cxgb4_set_ethtool_ops(struct net_device *netdev)
2239 {
2240 	netdev->ethtool_ops = &cxgb_ethtool_ops;
2241 }
2242