1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2017 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #include <linux/ctype.h>
12 #include <linux/stringify.h>
13 #include <linux/ethtool.h>
14 #include <linux/ethtool_netlink.h>
15 #include <linux/linkmode.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/etherdevice.h>
19 #include <linux/crc32.h>
20 #include <linux/firmware.h>
21 #include <linux/utsname.h>
22 #include <linux/time.h>
23 #include <linux/ptp_clock_kernel.h>
24 #include <linux/net_tstamp.h>
25 #include <linux/timecounter.h>
26 #include <net/netlink.h>
27 #include "bnxt_hsi.h"
28 #include "bnxt.h"
29 #include "bnxt_hwrm.h"
30 #include "bnxt_ulp.h"
31 #include "bnxt_xdp.h"
32 #include "bnxt_ptp.h"
33 #include "bnxt_ethtool.h"
34 #include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
35 #include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
36 #include "bnxt_coredump.h"
37 
/* Report an NVM/flash error both to the kernel log and, when a netlink
 * extended ack is available, back to the ethtool caller via extack.
 * "msg" is passed to NL_SET_ERR_MSG_MOD, so it must be a constant string.
 */
#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
	do {							\
		if (extack)					\
			NL_SET_ERR_MSG_MOD(extack, msg);	\
		netdev_err(dev, "%s\n", msg);			\
	} while (0)
44 
45 static u32 bnxt_get_msglevel(struct net_device *dev)
46 {
47 	struct bnxt *bp = netdev_priv(dev);
48 
49 	return bp->msg_enable;
50 }
51 
52 static void bnxt_set_msglevel(struct net_device *dev, u32 value)
53 {
54 	struct bnxt *bp = netdev_priv(dev);
55 
56 	bp->msg_enable = value;
57 }
58 
59 static int bnxt_get_coalesce(struct net_device *dev,
60 			     struct ethtool_coalesce *coal,
61 			     struct kernel_ethtool_coalesce *kernel_coal,
62 			     struct netlink_ext_ack *extack)
63 {
64 	struct bnxt *bp = netdev_priv(dev);
65 	struct bnxt_coal *hw_coal;
66 	u16 mult;
67 
68 	memset(coal, 0, sizeof(*coal));
69 
70 	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
71 
72 	hw_coal = &bp->rx_coal;
73 	mult = hw_coal->bufs_per_record;
74 	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
75 	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
76 	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
77 	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
78 	if (hw_coal->flags &
79 	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
80 		kernel_coal->use_cqe_mode_rx = true;
81 
82 	hw_coal = &bp->tx_coal;
83 	mult = hw_coal->bufs_per_record;
84 	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
85 	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
86 	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
87 	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
88 	if (hw_coal->flags &
89 	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
90 		kernel_coal->use_cqe_mode_tx = true;
91 
92 	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
93 
94 	return 0;
95 }
96 
/* ethtool ->set_coalesce: apply new RX/TX interrupt coalescing parameters
 * and, optionally, a new statistics block update interval.
 *
 * Note the early "goto reset_coalesce" taken when adaptive RX (DIM) is
 * being turned off: in that case the manual coalescing values are left
 * untouched and only the rings are re-programmed.
 */
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			/* DIM just turned off: skip the manual parameter
			 * update and re-program the rings directly.
			 */
			goto reset_coalesce;
		}
	}

	/* CQE (timer reset) mode needs the TIMER_RESET capability. */
	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
	    !(bp->coal_cap.cmpl_params &
	      RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
		return -EOPNOTSUPP;

	/* Ethtool counts frames; hardware counts buffers, with
	 * bufs_per_record buffers per frame.
	 */
	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_rx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_tx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		/* A new stats interval requires a full close/open below. */
		update_stats = true;
	}

reset_coalesce:
	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
		if (update_stats) {
			rc = bnxt_close_nic(bp, true, false);
			if (!rc)
				rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}
177 
/* Per-ring RX hardware counter names.  The order must match the layout of
 * the hardware stats block read through cpr->stats.sw_stats in
 * bnxt_get_ethtool_stats().
 */
static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

/* Per-ring TX hardware counter names; follow the RX counters in the
 * same stats block.
 */
static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

/* TPA counter names for chips without max_tpa_v2. */
static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

/* TPA counter names for max_tpa_v2-capable chips. */
static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

/* Driver-maintained per-ring RX software counters; order must match
 * the fields of cpr->sw_stats.rx (walked as a u64 array).
 */
static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

/* Driver-maintained per-ring common software counters; order must match
 * the fields of cpr->sw_stats.cmn.
 */
static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};
225 
/* Helpers that expand to one { offset, name } table entry, where the
 * offset (in u64 units) comes from the BNXT_*_STATS*_OFFSET() macros.
 */
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

/* Per-priority PFC duration/transition entry pairs (priorities 0-7). */
#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

/* Per-CoS-queue byte/packet entry pairs (queues 0-7). */
#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

/* Per-priority entries store the cos0 base offset; the per-priority CoS
 * index (bp->pri2cos_idx) is added at read time in bnxt_get_ethtool_stats().
 */
#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

/* Identifiers for aggregate discard counters; not referenced in this
 * portion of the file.
 */
enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

/* Names of the driver-wide error/discard totals.  The order must match
 * the field order of struct bnxt_total_ring_err_stats starting at
 * rx_total_l4_csum_errors (bnxt_get_ethtool_stats walks the struct with
 * a flat u64 pointer).
 */
static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

#define NUM_RING_RX_SW_STATS		ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS		ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS		ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS		ARRAY_SIZE(bnxt_ring_tx_stats_str)
358 
/* Port-level statistics table: each entry pairs a u64 offset into
 * bp->port_stats.sw_stats with its ethtool display name.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};
444 
/* Extended RX port statistics: offsets into bp->rx_port_stats_ext.sw_stats.
 * Only the first bp->fw_rx_stats_ext_size entries are reported (the
 * firmware advertises how much of this block it supports).
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
};

/* Extended TX port statistics: offsets into bp->tx_port_stats_ext.sw_stats,
 * capped at bp->fw_tx_stats_ext_size entries.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

/* Per-priority tables: base_off is the cos0 offset; the priority's CoS
 * index (bp->pri2cos_idx) is added at read time.
 */
static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
508 
509 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
510 {
511 	if (BNXT_SUPPORTS_TPA(bp)) {
512 		if (bp->max_tpa_v2) {
513 			if (BNXT_CHIP_P5_THOR(bp))
514 				return BNXT_NUM_TPA_RING_STATS_P5;
515 			return BNXT_NUM_TPA_RING_STATS_P5_SR2;
516 		}
517 		return BNXT_NUM_TPA_RING_STATS;
518 	}
519 	return 0;
520 }
521 
522 static int bnxt_get_num_ring_stats(struct bnxt *bp)
523 {
524 	int rx, tx, cmn;
525 
526 	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
527 	     bnxt_get_num_tpa_ring_stats(bp);
528 	tx = NUM_RING_TX_HW_STATS;
529 	cmn = NUM_RING_CMN_SW_STATS;
530 	return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
531 	       cmn * bp->cp_nr_rings;
532 }
533 
534 static int bnxt_get_num_stats(struct bnxt *bp)
535 {
536 	int num_stats = bnxt_get_num_ring_stats(bp);
537 
538 	num_stats += BNXT_NUM_RING_ERR_STATS;
539 
540 	if (bp->flags & BNXT_FLAG_PORT_STATS)
541 		num_stats += BNXT_NUM_PORT_STATS;
542 
543 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
544 		num_stats += bp->fw_rx_stats_ext_size +
545 			     bp->fw_tx_stats_ext_size;
546 		if (bp->pri2cos_valid)
547 			num_stats += BNXT_NUM_STATS_PRI;
548 	}
549 
550 	return num_stats;
551 }
552 
553 static int bnxt_get_sset_count(struct net_device *dev, int sset)
554 {
555 	struct bnxt *bp = netdev_priv(dev);
556 
557 	switch (sset) {
558 	case ETH_SS_STATS:
559 		return bnxt_get_num_stats(bp);
560 	case ETH_SS_TEST:
561 		if (!bp->num_tests)
562 			return -EOPNOTSUPP;
563 		return bp->num_tests;
564 	default:
565 		return -EOPNOTSUPP;
566 	}
567 }
568 
569 static bool is_rx_ring(struct bnxt *bp, int ring_num)
570 {
571 	return ring_num < bp->rx_nr_rings;
572 }
573 
574 static bool is_tx_ring(struct bnxt *bp, int ring_num)
575 {
576 	int tx_base = 0;
577 
578 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
579 		tx_base = bp->rx_nr_rings;
580 
581 	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
582 		return true;
583 	return false;
584 }
585 
/* ethtool ->get_ethtool_stats: fill "buf" with every counter, in exactly
 * the order the names are emitted by bnxt_get_strings(ETH_SS_STATS).
 * "j" tracks the current slot in buf throughout.
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	/* No rings allocated yet: leave the per-ring slots untouched and
	 * skip ahead to the global counters.
	 */
	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		/* Hardware stats block layout: RX counters, then TX
		 * counters, then TPA counters ("k" indexes the block).
		 */
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			       j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		/* Driver-maintained software counters, walked as flat
		 * u64 arrays over cpr->sw_stats.rx / .cmn.
		 */
		sw = (u64 *)&cpr->sw_stats.rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats.cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];
	}

	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	/* Walk struct bnxt_total_ring_err_stats as a flat u64 array and
	 * add the "prev" snapshot (presumably preserved across ring
	 * resets so the totals stay monotonic — confirm in bnxt.c).
	 */
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;

		for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		/* Per-priority counters: read the CoS counter selected by
		 * pri2cos_idx for each of the 8 priorities.
		 */
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}
692 
/* ethtool ->get_strings: emit counter/test names, ETH_GSTRING_LEN bytes
 * per entry.  For ETH_SS_STATS the emission order must exactly match the
 * value order produced by bnxt_get_ethtool_stats().
 */
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	u32 i, j, num_str;

	switch (stringset) {
	case ETH_SS_STATS:
		/* Per-ring names, prefixed with "[ring]: ". */
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_rx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			if (is_tx_ring(bp, i)) {
				num_str = NUM_RING_TX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_tx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			/* TPA name set depends on the chip generation. */
			if (bp->max_tpa_v2)
				str = bnxt_ring_tpa2_stats_str;
			else
				str = bnxt_ring_tpa_stats_str;

			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i, str[j]);
				buf += ETH_GSTRING_LEN;
			}
skip_tpa_stats:
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_SW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_rx_sw_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = NUM_RING_CMN_SW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_cmn_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
		}
		/* Driver-wide error/discard totals. */
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
			strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			/* Capped at the firmware-supported sizes, matching
			 * bnxt_get_num_stats().
			 */
			for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}
803 
804 static void bnxt_get_ringparam(struct net_device *dev,
805 			       struct ethtool_ringparam *ering,
806 			       struct kernel_ethtool_ringparam *kernel_ering,
807 			       struct netlink_ext_ack *extack)
808 {
809 	struct bnxt *bp = netdev_priv(dev);
810 
811 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
812 		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
813 		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
814 		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
815 	} else {
816 		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
817 		ering->rx_jumbo_max_pending = 0;
818 		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
819 	}
820 	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
821 
822 	ering->rx_pending = bp->rx_ring_size;
823 	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
824 	ering->tx_pending = bp->tx_ring_size;
825 }
826 
827 static int bnxt_set_ringparam(struct net_device *dev,
828 			      struct ethtool_ringparam *ering,
829 			      struct kernel_ethtool_ringparam *kernel_ering,
830 			      struct netlink_ext_ack *extack)
831 {
832 	struct bnxt *bp = netdev_priv(dev);
833 
834 	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
835 	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
836 	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
837 		return -EINVAL;
838 
839 	if (netif_running(dev))
840 		bnxt_close_nic(bp, false, false);
841 
842 	bp->rx_ring_size = ering->rx_pending;
843 	bp->tx_ring_size = ering->tx_pending;
844 	bnxt_set_ring_params(bp);
845 
846 	if (netif_running(dev))
847 		return bnxt_open_nic(bp, false, false);
848 
849 	return 0;
850 }
851 
/* ethtool ->get_channels: report the maximum and currently configured
 * ring (channel) counts for combined and dedicated RX/TX modes.
 */
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	/* Limits for combined (shared) channels. */
	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	/* TX rings are consumed per traffic class, with one extra group
	 * when XDP rings are allocated, so scale the TX max accordingly.
	 */
	tcs = netdev_get_num_tc(dev);
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	/* Limits for dedicated (non-shared) RX/TX channels. */
	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}
900 
/* ethtool ->set_channels: change the number of combined or dedicated
 * RX/TX rings.  Validates the request, closes the NIC if running,
 * updates the ring bookkeeping, then reopens (or just reserves rings).
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	/* Either combined OR dedicated rx+tx counts must be given, not
	 * both and not neither.
	 */
	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	/* Nitro A0 only supports combined (shared) rings. */
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = netdev_get_num_tc(dev);

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		/* XDP uses one dedicated TX ring per RX ring. */
		tx_xdp = req_rx_rings;
	}
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	/* A different ring count may need a different RSS table size,
	 * which would invalidate a user-configured indirection table;
	 * refuse rather than silently reset it.
	 */
	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc :%x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	/* Shared mode: one completion ring serves an RX/TX pair. */
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}
998 
999 #ifdef CONFIG_RFS_ACCEL
/* ETHTOOL_GRXCLSRLALL handler: collect the sw_id of every installed
 * ntuple filter into @rule_locs, up to the caller-supplied cmd->rule_cnt
 * limit.  On return cmd->rule_cnt holds the number of IDs written and
 * cmd->data the total filter count.  Always returns 0.
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		/* Walk each hash bucket under RCU so filters may be added
		 * or removed concurrently without extra locking here.
		 */
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		if (j == cmd->rule_cnt)
			break;
	}
	cmd->rule_cnt = j;
	return 0;
}
1024 
/* ETHTOOL_GRXCLSRULE handler: look up the ntuple filter whose sw_id
 * matches fs->location and translate its flow keys into the ethtool
 * rx_flow_spec format.  Returns 0 on success, -EINVAL if the location
 * is out of range, not found, or the filter's protocol is not TCP/UDP.
 *
 * Locking: the matching bucket's rcu_read_lock() is deliberately kept
 * held from the fltr_found jump until the fltr_err label, so that the
 * filter cannot be freed while its fields are being copied out.
 */
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;	/* keeps RCU lock held */
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		/* ntuple filters always match exact addresses/ports, so
		 * every mask field is reported as all-ones.
		 */
		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
	} else {
		int i;

		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
		}
		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
	}

	/* Report the RX queue this filter steers traffic to. */
	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
1103 #endif
1104 
1105 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1106 {
1107 	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1108 		return RXH_IP_SRC | RXH_IP_DST;
1109 	return 0;
1110 }
1111 
1112 static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1113 {
1114 	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1115 		return RXH_IP_SRC | RXH_IP_DST;
1116 	return 0;
1117 }
1118 
/* ETHTOOL_GRXFH handler: report which header fields are hashed for the
 * given flow type, translating the device rss_hash_cfg bits into ethtool
 * RXH_* bits in cmd->data.  Always returns 0.
 */
static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		/* 4-tuple hashing if TCP/IPv4 hash type is on, plus any
		 * 2-tuple bits from the generic IPv4 hash type.
		 */
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* UDP falls through to pick up the generic IPv4 bits. */
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		/* Non-TCP/UDP IPv4 flows only get 2-tuple hashing. */
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}
1163 
/* ethtool RXH masks accepted by bnxt_srxfh(): 4-tuple hashes IP addresses
 * plus L4 ports, 2-tuple hashes IP addresses only.
 */
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1166 
/* ETHTOOL_SRXFH handler: set the hashed fields for a flow type.  Only
 * exact 4-tuple, exact 2-tuple, or no hashing are accepted; 4-tuple is
 * limited to TCP/UDP (UDP additionally requires firmware UDP RSS
 * capability).  On any change the device is bounced (close/open) to
 * apply the new rss_hash_cfg.  Returns 0 or a negative errno.
 */
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	/* Only the three exact field combinations are supported. */
	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	/* 4-tuple enable/disable is per L4 protocol and family. */
	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		/* 4-tuple hashing is only valid for TCP/UDP flow types. */
		return -EINVAL;
	}

	/* 2-tuple enable/disable is shared per address family. */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	/* No hardware reconfiguration needed if nothing changed. */
	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
1245 
1246 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1247 			  u32 *rule_locs)
1248 {
1249 	struct bnxt *bp = netdev_priv(dev);
1250 	int rc = 0;
1251 
1252 	switch (cmd->cmd) {
1253 #ifdef CONFIG_RFS_ACCEL
1254 	case ETHTOOL_GRXRINGS:
1255 		cmd->data = bp->rx_nr_rings;
1256 		break;
1257 
1258 	case ETHTOOL_GRXCLSRLCNT:
1259 		cmd->rule_cnt = bp->ntp_fltr_count;
1260 		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
1261 		break;
1262 
1263 	case ETHTOOL_GRXCLSRLALL:
1264 		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1265 		break;
1266 
1267 	case ETHTOOL_GRXCLSRULE:
1268 		rc = bnxt_grxclsrule(bp, cmd);
1269 		break;
1270 #endif
1271 
1272 	case ETHTOOL_GRXFH:
1273 		rc = bnxt_grxfh(bp, cmd);
1274 		break;
1275 
1276 	default:
1277 		rc = -EOPNOTSUPP;
1278 		break;
1279 	}
1280 
1281 	return rc;
1282 }
1283 
1284 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1285 {
1286 	struct bnxt *bp = netdev_priv(dev);
1287 	int rc;
1288 
1289 	switch (cmd->cmd) {
1290 	case ETHTOOL_SRXFH:
1291 		rc = bnxt_srxfh(bp, cmd);
1292 		break;
1293 
1294 	default:
1295 		rc = -EOPNOTSUPP;
1296 		break;
1297 	}
1298 	return rc;
1299 }
1300 
1301 u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1302 {
1303 	struct bnxt *bp = netdev_priv(dev);
1304 
1305 	if (bp->flags & BNXT_FLAG_CHIP_P5)
1306 		return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
1307 	return HW_HASH_INDEX_SIZE;
1308 }
1309 
1310 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
1311 {
1312 	return HW_HASH_KEY_SIZE;
1313 }
1314 
1315 static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1316 			 u8 *hfunc)
1317 {
1318 	struct bnxt *bp = netdev_priv(dev);
1319 	struct bnxt_vnic_info *vnic;
1320 	u32 i, tbl_size;
1321 
1322 	if (hfunc)
1323 		*hfunc = ETH_RSS_HASH_TOP;
1324 
1325 	if (!bp->vnic_info)
1326 		return 0;
1327 
1328 	vnic = &bp->vnic_info[0];
1329 	if (indir && bp->rss_indir_tbl) {
1330 		tbl_size = bnxt_get_rxfh_indir_size(dev);
1331 		for (i = 0; i < tbl_size; i++)
1332 			indir[i] = bp->rss_indir_tbl[i];
1333 	}
1334 
1335 	if (key && vnic->rss_hash_key)
1336 		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1337 
1338 	return 0;
1339 }
1340 
1341 static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
1342 			 const u8 *key, const u8 hfunc)
1343 {
1344 	struct bnxt *bp = netdev_priv(dev);
1345 	int rc = 0;
1346 
1347 	if (hfunc && hfunc != ETH_RSS_HASH_TOP)
1348 		return -EOPNOTSUPP;
1349 
1350 	if (key)
1351 		return -EOPNOTSUPP;
1352 
1353 	if (indir) {
1354 		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
1355 
1356 		for (i = 0; i < tbl_size; i++)
1357 			bp->rss_indir_tbl[i] = indir[i];
1358 		pad = bp->rss_indir_tbl_entries - tbl_size;
1359 		if (pad)
1360 			memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
1361 	}
1362 
1363 	if (netif_running(bp->dev)) {
1364 		bnxt_close_nic(bp, false, false);
1365 		rc = bnxt_open_nic(bp, false, false);
1366 	}
1367 	return rc;
1368 }
1369 
1370 static void bnxt_get_drvinfo(struct net_device *dev,
1371 			     struct ethtool_drvinfo *info)
1372 {
1373 	struct bnxt *bp = netdev_priv(dev);
1374 
1375 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1376 	strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
1377 	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
1378 	info->n_stats = bnxt_get_num_stats(bp);
1379 	info->testinfo_len = bp->num_tests;
1380 	/* TODO CHIMP_FW: eeprom dump details */
1381 	info->eedump_len = 0;
1382 	/* TODO CHIMP FW: reg dump details */
1383 	info->regdump_len = 0;
1384 }
1385 
1386 static int bnxt_get_regs_len(struct net_device *dev)
1387 {
1388 	struct bnxt *bp = netdev_priv(dev);
1389 	int reg_len;
1390 
1391 	if (!BNXT_PF(bp))
1392 		return -EOPNOTSUPP;
1393 
1394 	reg_len = BNXT_PXP_REG_LEN;
1395 
1396 	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
1397 		reg_len += sizeof(struct pcie_ctx_hw_stats);
1398 
1399 	return reg_len;
1400 }
1401 
/* ethtool_ops->get_regs: dump the PXP register window into @_p, followed
 * by PCIe statistics (fetched via HWRM into a DMA slice) when supported.
 * regs->version is 1 when the PCIe stats section is present, 0 otherwise.
 */
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct pcie_ctx_hw_stats *hw_pcie_stats;
	struct hwrm_pcie_qstats_input *req;
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	regs->version = 0;
	/* Always dump the fixed PXP register window first. */
	bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	/* DMA-able buffer the firmware fills with the PCIe stats. */
	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr);
	if (!hw_pcie_stats) {
		hwrm_req_drop(bp, req);
		return;
	}

	regs->version = 1;
	hwrm_req_hold(bp, req); /* hold on to slice */
	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		__le64 *src = (__le64 *)hw_pcie_stats;
		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
		int i;

		/* Convert each 64-bit counter to host byte order while
		 * copying it into the dump buffer after the registers.
		 */
		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
			dst[i] = le64_to_cpu(src[i]);
	}
	hwrm_req_drop(bp, req);
}
1442 
1443 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1444 {
1445 	struct bnxt *bp = netdev_priv(dev);
1446 
1447 	wol->supported = 0;
1448 	wol->wolopts = 0;
1449 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1450 	if (bp->flags & BNXT_FLAG_WOL_CAP) {
1451 		wol->supported = WAKE_MAGIC;
1452 		if (bp->wol)
1453 			wol->wolopts = WAKE_MAGIC;
1454 	}
1455 }
1456 
1457 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1458 {
1459 	struct bnxt *bp = netdev_priv(dev);
1460 
1461 	if (wol->wolopts & ~WAKE_MAGIC)
1462 		return -EINVAL;
1463 
1464 	if (wol->wolopts & WAKE_MAGIC) {
1465 		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1466 			return -EINVAL;
1467 		if (!bp->wol) {
1468 			if (bnxt_hwrm_alloc_wol_fltr(bp))
1469 				return -EBUSY;
1470 			bp->wol = 1;
1471 		}
1472 	} else {
1473 		if (bp->wol) {
1474 			if (bnxt_hwrm_free_wol_fltr(bp))
1475 				return -EBUSY;
1476 			bp->wol = 0;
1477 		}
1478 	}
1479 	return 0;
1480 }
1481 
1482 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1483 {
1484 	u32 speed_mask = 0;
1485 
1486 	/* TODO: support 25GB, 40GB, 50GB with different cable type */
1487 	/* set the advertised speeds */
1488 	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1489 		speed_mask |= ADVERTISED_100baseT_Full;
1490 	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1491 		speed_mask |= ADVERTISED_1000baseT_Full;
1492 	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1493 		speed_mask |= ADVERTISED_2500baseX_Full;
1494 	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1495 		speed_mask |= ADVERTISED_10000baseT_Full;
1496 	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1497 		speed_mask |= ADVERTISED_40000baseCR4_Full;
1498 
1499 	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1500 		speed_mask |= ADVERTISED_Pause;
1501 	else if (fw_pause & BNXT_LINK_PAUSE_TX)
1502 		speed_mask |= ADVERTISED_Asym_Pause;
1503 	else if (fw_pause & BNXT_LINK_PAUSE_RX)
1504 		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1505 
1506 	return speed_mask;
1507 }
1508 
/* Set the link modes in lk_ksettings->link_modes.<name> that correspond to
 * the firmware NRZ speed bits in fw_speeds, plus Pause/Asym_Pause modes
 * derived from fw_pause.
 */
#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR4_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}

/* Accumulate into fw_speeds the firmware NRZ speed bits corresponding to
 * the link modes set in lk_ksettings->link_modes.<name>.  Half-duplex
 * 100M/1G modes map to the same firmware speed bits as full duplex.
 */
#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;		\
}

/* Set the link modes in lk_ksettings->link_modes.<name> that correspond to
 * the firmware PAM4 speed bits in fw_speeds.
 */
#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name)	\
{									\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB)		\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     200000baseCR4_Full);\
}

/* Accumulate into fw_speeds the firmware PAM4 speed bits corresponding to
 * the link modes set in lk_ksettings->link_modes.<name>.
 */
#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name)	\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  200000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB;		\
}
1598 
1599 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
1600 				struct ethtool_link_ksettings *lk_ksettings)
1601 {
1602 	u16 fec_cfg = link_info->fec_cfg;
1603 
1604 	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
1605 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1606 				 lk_ksettings->link_modes.advertising);
1607 		return;
1608 	}
1609 	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
1610 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1611 				 lk_ksettings->link_modes.advertising);
1612 	if (fec_cfg & BNXT_FEC_ENC_RS)
1613 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1614 				 lk_ksettings->link_modes.advertising);
1615 	if (fec_cfg & BNXT_FEC_ENC_LLRS)
1616 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
1617 				 lk_ksettings->link_modes.advertising);
1618 }
1619 
1620 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
1621 				struct ethtool_link_ksettings *lk_ksettings)
1622 {
1623 	u16 fw_speeds = link_info->advertising;
1624 	u8 fw_pause = 0;
1625 
1626 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1627 		fw_pause = link_info->auto_pause_setting;
1628 
1629 	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
1630 	fw_speeds = link_info->advertising_pam4;
1631 	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
1632 	bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
1633 }
1634 
1635 static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
1636 				struct ethtool_link_ksettings *lk_ksettings)
1637 {
1638 	u16 fw_speeds = link_info->lp_auto_link_speeds;
1639 	u8 fw_pause = 0;
1640 
1641 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1642 		fw_pause = link_info->lp_pause;
1643 
1644 	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
1645 				lp_advertising);
1646 	fw_speeds = link_info->lp_auto_pam4_link_speeds;
1647 	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
1648 }
1649 
1650 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
1651 				struct ethtool_link_ksettings *lk_ksettings)
1652 {
1653 	u16 fec_cfg = link_info->fec_cfg;
1654 
1655 	if (fec_cfg & BNXT_FEC_NONE) {
1656 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1657 				 lk_ksettings->link_modes.supported);
1658 		return;
1659 	}
1660 	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
1661 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1662 				 lk_ksettings->link_modes.supported);
1663 	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
1664 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1665 				 lk_ksettings->link_modes.supported);
1666 	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
1667 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
1668 				 lk_ksettings->link_modes.supported);
1669 }
1670 
1671 static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
1672 				struct ethtool_link_ksettings *lk_ksettings)
1673 {
1674 	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
1675 	u16 fw_speeds = link_info->support_speeds;
1676 
1677 	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
1678 	fw_speeds = link_info->support_pam4_speeds;
1679 	BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
1680 
1681 	if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
1682 		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1683 						     Pause);
1684 		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1685 						     Asym_Pause);
1686 	}
1687 
1688 	if (link_info->support_auto_speeds ||
1689 	    link_info->support_pam4_auto_speeds)
1690 		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1691 						     Autoneg);
1692 	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
1693 }
1694 
1695 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
1696 {
1697 	switch (fw_link_speed) {
1698 	case BNXT_LINK_SPEED_100MB:
1699 		return SPEED_100;
1700 	case BNXT_LINK_SPEED_1GB:
1701 		return SPEED_1000;
1702 	case BNXT_LINK_SPEED_2_5GB:
1703 		return SPEED_2500;
1704 	case BNXT_LINK_SPEED_10GB:
1705 		return SPEED_10000;
1706 	case BNXT_LINK_SPEED_20GB:
1707 		return SPEED_20000;
1708 	case BNXT_LINK_SPEED_25GB:
1709 		return SPEED_25000;
1710 	case BNXT_LINK_SPEED_40GB:
1711 		return SPEED_40000;
1712 	case BNXT_LINK_SPEED_50GB:
1713 		return SPEED_50000;
1714 	case BNXT_LINK_SPEED_100GB:
1715 		return SPEED_100000;
1716 	case BNXT_LINK_SPEED_200GB:
1717 		return SPEED_200000;
1718 	default:
1719 		return SPEED_UNKNOWN;
1720 	}
1721 }
1722 
/* ethtool_ops->get_link_settings: report supported/advertised/partner
 * link modes, autoneg state, speed, duplex and port type.  The whole
 * snapshot is taken under bp->link_lock so the fields are consistent
 * with each other.  Always returns 0.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	mutex_lock(&bp->link_lock);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		base->duplex = DUPLEX_UNKNOWN;
		/* Partner capabilities and duplex are only meaningful
		 * when the link is actually up.
		 */
		if (link_info->phy_link_status == BNXT_LINK_LINK) {
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
			if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
				base->duplex = DUPLEX_FULL;
			else
				base->duplex = DUPLEX_HALF;
		}
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
	} else {
		/* Forced mode: report the requested speed/duplex. */
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	/* Derive the port type (and TP/FIBRE modes) from the media type. */
	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}
1784 
/* Program a forced (non-autoneg) link speed.  Maps the ethtool SPEED_*
 * value to a firmware forced-speed code, preferring NRZ signaling and
 * falling back to PAM4 for 50G/100G/200G when only PAM4 is supported.
 * Returns 0 on success, -EINVAL if the speed is unsupported, or
 * -EALREADY if the requested forced speed/signaling is already set.
 */
static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
		break;
	case SPEED_25000:
		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
		break;
	case SPEED_50000:
		/* 50G and 100G exist in both NRZ and PAM4 variants; prefer
		 * NRZ when the PHY supports it.
		 */
		if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_100000:
		if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_200000:
		/* 200G is PAM4-only. */
		if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	}

	if (!fw_speed) {
		netdev_err(dev, "unsupported speed!\n");
		return -EINVAL;
	}

	/* Avoid a needless link flap if nothing would change. */
	if (link_info->req_link_speed == fw_speed &&
	    link_info->req_signal_mode == sig_mode &&
	    link_info->autoneg == 0)
		return -EALREADY;

	link_info->req_link_speed = fw_speed;
	link_info->req_signal_mode = sig_mode;
	link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
	link_info->autoneg = 0;
	link_info->advertising = 0;
	link_info->advertising_pam4 = 0;

	return 0;
}
1866 
1867 u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
1868 {
1869 	u16 fw_speed_mask = 0;
1870 
1871 	/* only support autoneg at speed 100, 1000, and 10000 */
1872 	if (advertising & (ADVERTISED_100baseT_Full |
1873 			   ADVERTISED_100baseT_Half)) {
1874 		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1875 	}
1876 	if (advertising & (ADVERTISED_1000baseT_Full |
1877 			   ADVERTISED_1000baseT_Half)) {
1878 		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1879 	}
1880 	if (advertising & ADVERTISED_10000baseT_Full)
1881 		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1882 
1883 	if (advertising & ADVERTISED_40000baseCR4_Full)
1884 		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1885 
1886 	return fw_speed_mask;
1887 }
1888 
/* ethtool_ops->set_link_settings: configure autoneg advertisement or a
 * forced speed.  All link state is modified under bp->link_lock, and the
 * firmware is only told to apply the change while the interface is up.
 * Returns 0 or a negative errno.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		/* Rebuild the advertised NRZ and PAM4 masks from the
		 * requested link modes.
		 */
		link_info->advertising = 0;
		link_info->advertising_pam4 = 0;
		BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
					advertising);
		BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
					     lk_ksettings, advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* Advertising nothing means advertise everything we can. */
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
			set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		/* Forced speed is not allowed on copper/TP media. */
		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		rc = bnxt_force_link_speed(dev, speed);
		if (rc) {
			/* -EALREADY: requested speed is already forced;
			 * succeed without touching the firmware.
			 */
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
1952 
1953 static int bnxt_get_fecparam(struct net_device *dev,
1954 			     struct ethtool_fecparam *fec)
1955 {
1956 	struct bnxt *bp = netdev_priv(dev);
1957 	struct bnxt_link_info *link_info;
1958 	u8 active_fec;
1959 	u16 fec_cfg;
1960 
1961 	link_info = &bp->link_info;
1962 	fec_cfg = link_info->fec_cfg;
1963 	active_fec = link_info->active_fec_sig_mode &
1964 		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
1965 	if (fec_cfg & BNXT_FEC_NONE) {
1966 		fec->fec = ETHTOOL_FEC_NONE;
1967 		fec->active_fec = ETHTOOL_FEC_NONE;
1968 		return 0;
1969 	}
1970 	if (fec_cfg & BNXT_FEC_AUTONEG)
1971 		fec->fec |= ETHTOOL_FEC_AUTO;
1972 	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
1973 		fec->fec |= ETHTOOL_FEC_BASER;
1974 	if (fec_cfg & BNXT_FEC_ENC_RS)
1975 		fec->fec |= ETHTOOL_FEC_RS;
1976 	if (fec_cfg & BNXT_FEC_ENC_LLRS)
1977 		fec->fec |= ETHTOOL_FEC_LLRS;
1978 
1979 	switch (active_fec) {
1980 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
1981 		fec->active_fec |= ETHTOOL_FEC_BASER;
1982 		break;
1983 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
1984 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
1985 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
1986 		fec->active_fec |= ETHTOOL_FEC_RS;
1987 		break;
1988 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
1989 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
1990 		fec->active_fec |= ETHTOOL_FEC_LLRS;
1991 		break;
1992 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
1993 		fec->active_fec |= ETHTOOL_FEC_OFF;
1994 		break;
1995 	}
1996 	return 0;
1997 }
1998 
1999 static void bnxt_get_fec_stats(struct net_device *dev,
2000 			       struct ethtool_fec_stats *fec_stats)
2001 {
2002 	struct bnxt *bp = netdev_priv(dev);
2003 	u64 *rx;
2004 
2005 	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
2006 		return;
2007 
2008 	rx = bp->rx_port_stats_ext.sw_stats;
2009 	fec_stats->corrected_bits.total =
2010 		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
2011 
2012 	if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
2013 		return;
2014 
2015 	fec_stats->corrected_blocks.total =
2016 		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
2017 	fec_stats->uncorrectable_blocks.total =
2018 		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
2019 }
2020 
2021 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
2022 					 u32 fec)
2023 {
2024 	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
2025 
2026 	if (fec & ETHTOOL_FEC_BASER)
2027 		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
2028 	else if (fec & ETHTOOL_FEC_RS)
2029 		fw_fec |= BNXT_FEC_RS_ON(link_info);
2030 	else if (fec & ETHTOOL_FEC_LLRS)
2031 		fw_fec |= BNXT_FEC_LLRS_ON;
2032 	return fw_fec;
2033 }
2034 
/* ethtool ->set_fecparam handler.
 *
 * Validates the requested FEC modes against the capability bits cached
 * in link_info->fec_cfg, then sends a PORT_PHY_CFG with RESET_PHY set
 * so the new FEC selection takes effect immediately.  On success the
 * cached link state is refreshed under link_lock.
 *
 * Returns 0 on success, -EOPNOTSUPP when the link has no FEC support,
 * -EINVAL for unsupported mode combinations, or an HWRM error.
 */
static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct hwrm_port_phy_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;
	u16 fec_cfg;
	int rc;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	if (fec_cfg & BNXT_FEC_NONE)
		return -EOPNOTSUPP;

	/* OFF disables FEC autoneg and every encoding at once */
	if (fec & ETHTOOL_FEC_OFF) {
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
		goto apply_fec;
	}
	/* Reject any requested mode the hardware cannot do */
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
		return -EINVAL;

	if (fec & ETHTOOL_FEC_AUTO) {
		/* FEC autoneg requires link autoneg to be enabled */
		if (!link_info->autoneg)
			return -EINVAL;
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
	} else {
		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
	}

apply_fec:
	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;
	/* RESET_PHY forces the PHY to re-train with the new FEC config */
	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* update current settings */
	if (!rc) {
		mutex_lock(&bp->link_lock);
		bnxt_update_link(bp, false);
		mutex_unlock(&bp->link_lock);
	}
	return rc;
}
2083 
2084 static void bnxt_get_pauseparam(struct net_device *dev,
2085 				struct ethtool_pauseparam *epause)
2086 {
2087 	struct bnxt *bp = netdev_priv(dev);
2088 	struct bnxt_link_info *link_info = &bp->link_info;
2089 
2090 	if (BNXT_VF(bp))
2091 		return;
2092 	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
2093 	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
2094 	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
2095 }
2096 
2097 static void bnxt_get_pause_stats(struct net_device *dev,
2098 				 struct ethtool_pause_stats *epstat)
2099 {
2100 	struct bnxt *bp = netdev_priv(dev);
2101 	u64 *rx, *tx;
2102 
2103 	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
2104 		return;
2105 
2106 	rx = bp->port_stats.sw_stats;
2107 	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
2108 
2109 	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
2110 	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
2111 }
2112 
/* ethtool ->set_pauseparam handler.
 *
 * With pause autoneg, flow control is left to negotiation (this
 * requires speed autoneg to already be enabled); otherwise the
 * requested RX/TX pause bits are forced.  The hardware is only
 * reconfigured while the interface is running.
 *
 * Returns 0 on success, -EOPNOTSUPP when pause configuration is not
 * allowed, -EINVAL for invalid combinations, or an HWRM error.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EOPNOTSUPP;

	/* link_lock serializes all PHY configuration updates */
	mutex_lock(&bp->link_lock);
	if (epause->autoneg) {
		/* Pause autoneg is meaningless without speed autoneg */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			rc = -EINVAL;
			goto pause_exit;
		}

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	/* req_flow_ctrl was cleared above; rebuild it from the request */
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);

pause_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
2154 
2155 static u32 bnxt_get_link(struct net_device *dev)
2156 {
2157 	struct bnxt *bp = netdev_priv(dev);
2158 
2159 	/* TODO: handle MF, VF, driver close case */
2160 	return BNXT_LINK_IS_UP(bp);
2161 }
2162 
/* Query the NVM device information from the firmware and copy the
 * whole response into @nvm_dev_info on success.
 *
 * Not available on VFs (returns -EOPNOTSUPP); otherwise returns 0 or
 * an HWRM error code.
 */
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	if (BNXT_VF(bp))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	/* hold keeps the response buffer valid until hwrm_req_drop() */
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(nvm_dev_info, resp, sizeof(*resp));
	hwrm_req_drop(bp, req);
	return rc;
}
2184 
/* Log the common explanation for an -EACCES result from a flash or
 * reset HWRM command.
 */
static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
2189 
/* Forward declaration: defined later in this file and used by the
 * NVRAM flashing helpers above its definition.
 */
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length);
2193 
/* Write (or create/resize) an NVM directory entry via HWRM_NVM_WRITE.
 *
 * When @data is NULL or @data_len is zero, no payload is transferred
 * and only the directory attributes (type/ordinal/ext/attr and
 * @dir_item_len) are sent -- callers use this to create or resize an
 * entry without writing content.  Otherwise the payload is copied
 * into a DMA slice attached to the request.
 *
 * Returns 0 on success or a negative errno; -EACCES additionally logs
 * the admin-privilege hint.
 */
int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
		     u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
		     u32 dir_item_len, const u8 *data,
		     size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_write_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
	if (rc)
		return rc;

	if (data_len && data) {
		dma_addr_t dma_handle;
		u8 *kmem;

		/* DMA slice lives as long as the request itself */
		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
		if (!kmem) {
			hwrm_req_drop(bp, req);
			return -ENOMEM;
		}

		req->dir_data_length = cpu_to_le32(data_len);

		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);
	}

	/* NVRAM writes can be slow; use the maximum command timeout */
	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->dir_type = cpu_to_le16(dir_type);
	req->dir_ordinal = cpu_to_le16(dir_ordinal);
	req->dir_ext = cpu_to_le16(dir_ext);
	req->dir_attr = cpu_to_le16(dir_attr);
	req->dir_item_length = cpu_to_le32(dir_item_len);
	rc = hwrm_req_send(bp, req);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
2235 
/* Issue an HWRM_FW_RESET for the given embedded processor.
 *
 * @proc_type:  which embedded processor to reset (FW_RESET_REQ_* value)
 * @self_reset: requested self-reset behavior
 * @flags:      extra FW_RESET flags (e.g. graceful reset)
 *
 * AP resets are sent silently since some firmware may not support
 * them.  Returns -EPERM when the firmware currently denies resets,
 * otherwise 0 or an HWRM error.
 */
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
			     u8 self_reset, u8 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input *req;
	int rc;

	if (!bnxt_hwrm_reset_permitted(bp)) {
		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
		return -EPERM;
	}

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
	if (rc)
		return rc;

	req->embedded_proc_type = proc_type;
	req->selfrst_status = self_reset;
	req->flags = flags;

	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
		/* Silent: avoid log noise if the AP reset is unsupported */
		rc = hwrm_req_send_silent(bp, req);
	} else {
		rc = hwrm_req_send(bp, req);
		if (rc == -EACCES)
			bnxt_print_admin_err(bp);
	}
	return rc;
}
2265 
/* Map an NVM directory entry type to the embedded processor that must
 * be reset for newly flashed firmware to take effect, then request
 * that reset from the firmware.
 *
 * Returns 0 on success, -EINVAL for directory types with no associated
 * processor, or an error from bnxt_hwrm_firmware_reset().
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       enum bnxt_nvm_directory_type dir_type)
{
	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
	u8 proc_type, flags = 0;

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/*       (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
}
2302 
2303 static int bnxt_firmware_reset_chip(struct net_device *dev)
2304 {
2305 	struct bnxt *bp = netdev_priv(dev);
2306 	u8 flags = 0;
2307 
2308 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
2309 		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
2310 
2311 	return bnxt_hwrm_firmware_reset(dev,
2312 					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
2313 					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
2314 					flags);
2315 }
2316 
/* Request a reset of the embedded Application Processor only, with no
 * self-reset and no extra flags.
 */
static int bnxt_firmware_reset_ap(struct net_device *dev)
{
	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
					0);
}
2323 
/* Validate and flash a self-describing firmware image (bnxt_fw_header
 * format) into NVRAM, then reset the matching embedded processor so
 * the new image takes effect.
 *
 * Validation order: supported directory type, minimum file size,
 * header signature, code type, device family, and finally the CRC32
 * stored little-endian in the last 4 bytes of the file.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int	rc = 0;
	u16	code_type;
	u32	stored_crc;
	u32	calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	/* Map the directory type to the code type the header must carry */
	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}
2403 
/* Validate and flash a microcode image that carries a bnxt_ucode_trailer
 * at the end of the file (signature, directory type, trailer length),
 * followed by a CRC32 check, then write it to NVRAM.  Unlike
 * bnxt_flash_firmware(), no processor reset is issued afterwards.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	/* The trailer occupies the last bytes of the image */
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);

	return rc;
}
2454 
2455 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
2456 {
2457 	switch (dir_type) {
2458 	case BNX_DIR_TYPE_CHIMP_PATCH:
2459 	case BNX_DIR_TYPE_BOOTCODE:
2460 	case BNX_DIR_TYPE_BOOTCODE_2:
2461 	case BNX_DIR_TYPE_APE_FW:
2462 	case BNX_DIR_TYPE_APE_PATCH:
2463 	case BNX_DIR_TYPE_KONG_FW:
2464 	case BNX_DIR_TYPE_KONG_PATCH:
2465 	case BNX_DIR_TYPE_BONO_FW:
2466 	case BNX_DIR_TYPE_BONO_PATCH:
2467 		return true;
2468 	}
2469 
2470 	return false;
2471 }
2472 
2473 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
2474 {
2475 	switch (dir_type) {
2476 	case BNX_DIR_TYPE_AVS:
2477 	case BNX_DIR_TYPE_EXP_ROM_MBA:
2478 	case BNX_DIR_TYPE_PCIE:
2479 	case BNX_DIR_TYPE_TSCF_UCODE:
2480 	case BNX_DIR_TYPE_EXT_PHY:
2481 	case BNX_DIR_TYPE_CCM:
2482 	case BNX_DIR_TYPE_ISCSI_BOOT:
2483 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2484 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2485 		return true;
2486 	}
2487 
2488 	return false;
2489 }
2490 
2491 static bool bnxt_dir_type_is_executable(u16 dir_type)
2492 {
2493 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2494 		bnxt_dir_type_is_other_exec_format(dir_type);
2495 }
2496 
/* Load a firmware file via the kernel firmware loader and flash it
 * with the handler matching the directory type: APE-format images go
 * through bnxt_flash_firmware(), other executable formats through
 * bnxt_flash_microcode(), and everything else is written raw.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware  *fw;
	int			rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}
2520 
/* Error strings reported via extack and the kernel log when an NVM
 * package install fails (see nvm_update_err_to_stderr() and
 * bnxt_flash_package_from_fw_obj()).
 */
#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
#define MSG_INVALID_PKG "PKG install error : Invalid package"
#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
#define MSG_INVALID_DEV "PKG install error : Invalid device"
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
2531 
/* Translate an NVM_INSTALL_UPDATE result code into a negative errno,
 * logging a descriptive message via extack (when provided) and the
 * kernel log.  Unknown results map to -EIO.
 */
static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
				    struct netlink_ext_ack *extack)
{
	switch (result) {
	/* Data integrity problems on the NVM itself */
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
		return -EINVAL;
	/* Malformed or inconsistent package contents */
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
		return -ENOPKG;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
		return -EPERM;
	/* Package does not match this chip/board */
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
		return -EOPNOTSUPP;
	default:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
		return -EIO;
	}
}
2574 
/* DMA chunk size cap and NVM_MODIFY batch-mode flags used when
 * streaming a firmware package to the device.
 */
#define BNXT_PKG_DMA_SIZE	0x40000
#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
2578 
/* Ensure the NVRAM UPDATE directory entry can hold @fw_size bytes.
 * If the existing entry is too small, rewrite it (no payload) with a
 * length rounded up to 4K.  Errors are reported through extack and
 * the kernel log.
 */
static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
				    struct netlink_ext_ack *extack)
{
	u32 item_len;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
				  &item_len, NULL);
	if (rc) {
		BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
		return rc;
	}

	if (fw_size > item_len) {
		/* data == NULL: resize the entry without writing content */
		rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
				      BNX_DIR_ORDINAL_FIRST, 0, 1,
				      round_up(fw_size, 4096), NULL, 0);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
			return rc;
		}
	}
	return 0;
}
2604 
/* Stream a firmware package into the NVRAM UPDATE area and ask the
 * firmware to install it.
 *
 * The image is copied in chunks through a DMA buffer using
 * HWRM_NVM_MODIFY (batch mode when more than one chunk is needed),
 * then committed with HWRM_NVM_INSTALL_UPDATE.  If the install fails
 * due to fragmentation, a defragmenting install is retried; if the
 * firmware then reports no space, the UPDATE entry is re-created and
 * the whole sequence is attempted one more time.
 *
 * Returns 0 on success or a negative errno; firmware-reported install
 * failures are decoded via nvm_update_err_to_stderr().
 */
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
				   u32 install_type, struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_install_update_input *install;
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_modify_input *modify;
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;
	u8 *kmem = NULL;
	u32 modify_len;
	u32 item_len;
	u8 cmd_err;
	u16 index;
	int rc;

	/* resize before flashing larger image than available space */
	rc = bnxt_resize_update_entry(dev, fw->size, extack);
	if (rc)
		return rc;

	bnxt_hwrm_fw_set_time(bp);

	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
	if (rc)
		return rc;

	/* Try allocating a large DMA buffer first.  Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
	modify_len = roundup_pow_of_two(fw->size);
	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
	while (1) {
		kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
		if (!kmem && modify_len > PAGE_SIZE)
			modify_len /= 2;
		else
			break;
	}
	if (!kmem) {
		hwrm_req_drop(bp, modify);
		return -ENOMEM;
	}

	rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
	if (rc) {
		hwrm_req_drop(bp, modify);
		return rc;
	}

	/* Both commands can take a long time on the device side */
	hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
	hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);

	hwrm_req_hold(bp, modify);
	modify->host_src_addr = cpu_to_le64(dma_handle);

	resp = hwrm_req_hold(bp, install);
	/* install_type may arrive packed in the upper 16 bits
	 * (see bnxt_flash_device()); normalize it here.
	 */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	install->install_type = cpu_to_le32(install_type);

	do {
		u32 copied = 0, len = modify_len;

		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
					  BNX_DIR_ORDINAL_FIRST,
					  BNX_DIR_EXT_NONE,
					  &index, &item_len, NULL);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
			break;
		}
		if (fw->size > item_len) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
			rc = -EFBIG;
			break;
		}

		modify->dir_idx = cpu_to_le16(index);

		/* Multiple chunks: use batch mode, marking the last one */
		if (fw->size > modify_len)
			modify->flags = BNXT_NVM_MORE_FLAG;
		while (copied < fw->size) {
			u32 balance = fw->size - copied;

			if (balance <= modify_len) {
				len = balance;
				if (copied)
					modify->flags |= BNXT_NVM_LAST_FLAG;
			}
			memcpy(kmem, fw->data + copied, len);
			modify->len = cpu_to_le32(len);
			modify->offset = cpu_to_le32(copied);
			rc = hwrm_req_send(bp, modify);
			if (rc)
				goto pkg_abort;
			copied += len;
		}

		rc = hwrm_req_send_silent(bp, install);
		if (!rc)
			break;

		if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration. Return with the result for INSTALL_UPDATE
			 */
			break;
		}

		cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

		switch (cmd_err) {
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
			rc = -EALREADY;
			break;
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			/* Retry the install allowing defragmentation */
			install->flags =
				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);

			rc = hwrm_req_send_silent(bp, install);
			if (!rc)
				break;

			cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

			if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
				/* FW has cleared NVM area, driver will create
				 * UPDATE directory and try the flash again
				 */
				defrag_attempted = true;
				install->flags = 0;
				rc = bnxt_flash_nvram(bp->dev,
						      BNX_DIR_TYPE_UPDATE,
						      BNX_DIR_ORDINAL_FIRST,
						      0, 0, item_len, NULL, 0);
				if (!rc)
					break;
			}
			fallthrough;
		default:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
		}
	} while (defrag_attempted && !rc);

pkg_abort:
	hwrm_req_drop(bp, modify);
	hwrm_req_drop(bp, install);

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = nvm_update_err_to_stderr(dev, resp->result, extack);
	}
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
2764 
/* Load a firmware package file via the kernel firmware loader and
 * install it with bnxt_flash_package_from_fw_obj().
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
					u32 install_type, struct netlink_ext_ack *extack)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);

	release_firmware(fw);

	return rc;
}
2784 
2785 static int bnxt_flash_device(struct net_device *dev,
2786 			     struct ethtool_flash *flash)
2787 {
2788 	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
2789 		netdev_err(dev, "flashdev not supported from a virtual function\n");
2790 		return -EINVAL;
2791 	}
2792 
2793 	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
2794 	    flash->region > 0xffff)
2795 		return bnxt_flash_package_from_file(dev, flash->data,
2796 						    flash->region, NULL);
2797 
2798 	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
2799 }
2800 
/* Query the NVM directory geometry: number of directory entries and
 * the size of each entry, via HWRM_NVM_GET_DIR_INFO.
 *
 * Returns 0 on success (with *entries and *length filled in) or an
 * HWRM error.
 */
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct hwrm_nvm_get_dir_info_output *output;
	struct hwrm_nvm_get_dir_info_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
	if (rc)
		return rc;

	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}
2821 
/* ethtool ->get_eeprom_len handler.  VFs have no NVRAM access and
 * report zero.
 */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}
2834 
/* Read the NVM directory into @data: the first two bytes carry the
 * entry count and per-entry size (note: each is truncated to one byte
 * since @data is u8), followed by the raw directory entries DMA'd
 * from the device, zero-padded/truncated to fit @len.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input *req;

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	if (!dir_entries || !entry_length)
		return -EIO;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	/* Pre-fill so short reads are visibly 0xff-padded */
	memset(data, 0xff, len);

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
	if (rc)
		return rc;

	/* mul_u32_u32 avoids 32-bit overflow of entries * entry size */
	buflen = mul_u32_u32(dir_entries, entry_length);
	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	hwrm_req_drop(bp, req);
	return rc;
}
2881 
/* Read @length bytes starting at @offset from the NVM directory entry
 * at @index into @data, using a DMA slice and HWRM_NVM_READ.
 *
 * Returns 0 on success, -EINVAL for a zero length, -ENOMEM when the
 * DMA slice cannot be allocated, or an HWRM error.
 */
int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input *req;

	if (!length)
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->host_dest_addr = cpu_to_le64(dma_handle);
	req->dir_idx = cpu_to_le16(index);
	req->offset = cpu_to_le32(offset);
	req->len = cpu_to_le32(length);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, length);
	hwrm_req_drop(bp, req);
	return rc;
}
2916 
/* Locate an NVM directory entry by @type/@ordinal/@ext via
 * HWRM_NVM_FIND_DIR_ENTRY.  Any of @index, @item_length and
 * @data_length may be NULL when the caller does not need that value.
 *
 * Sent silently since a missing entry is an expected outcome for some
 * callers.  Returns 0 when found or an HWRM error.
 */
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length)
{
	struct hwrm_nvm_find_dir_entry_output *output;
	struct hwrm_nvm_find_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
	if (rc)
		return rc;

	req->enables = 0;
	req->dir_idx = 0;
	req->dir_type = cpu_to_le16(type);
	req->dir_ordinal = cpu_to_le16(ordinal);
	req->dir_ext = cpu_to_le16(ext);
	/* Match the ordinal exactly rather than >= / > */
	req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}
2949 
/* Scan the NVM package log for tab-separated field number @desired_field
 * on each newline-terminated record.  @data is modified in place: field
 * and record separators are overwritten with NUL so the returned pointer
 * is a usable C string.  Returns a pointer into @data to the requested
 * field of the last record scanned, or NULL if not present.
 */
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char	*retval = NULL;
	char	*p;
	char	*value;
	int	field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		/* start of a new record: reset field counter and result */
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			/* advance to the end of the current field */
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			/* no tab separator: this record has no more fields */
			if (*p != '\t')
				break;
			*p = 0;	/* terminate the field value in place */
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;	/* terminate the last field of this record */
	}
	return retval;
}
2982 
2983 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
2984 {
2985 	struct bnxt *bp = netdev_priv(dev);
2986 	u16 index = 0;
2987 	char *pkgver;
2988 	u32 pkglen;
2989 	u8 *pkgbuf;
2990 	int rc;
2991 
2992 	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
2993 				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
2994 				  &index, NULL, &pkglen);
2995 	if (rc)
2996 		return rc;
2997 
2998 	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
2999 	if (!pkgbuf) {
3000 		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
3001 			pkglen);
3002 		return -ENOMEM;
3003 	}
3004 
3005 	rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
3006 	if (rc)
3007 		goto err;
3008 
3009 	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
3010 				   pkglen);
3011 	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
3012 		strscpy(ver, pkgver, size);
3013 	else
3014 		rc = -ENOENT;
3015 
3016 err:
3017 	kfree(pkgbuf);
3018 
3019 	return rc;
3020 }
3021 
3022 static void bnxt_get_pkgver(struct net_device *dev)
3023 {
3024 	struct bnxt *bp = netdev_priv(dev);
3025 	char buf[FW_VER_STR_LEN];
3026 	int len;
3027 
3028 	if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
3029 		len = strlen(bp->fw_ver_str);
3030 		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
3031 			 "/pkg %s", buf);
3032 	}
3033 }
3034 
3035 static int bnxt_get_eeprom(struct net_device *dev,
3036 			   struct ethtool_eeprom *eeprom,
3037 			   u8 *data)
3038 {
3039 	u32 index;
3040 	u32 offset;
3041 
3042 	if (eeprom->offset == 0) /* special offset value to get directory */
3043 		return bnxt_get_nvram_directory(dev, eeprom->len, data);
3044 
3045 	index = eeprom->offset >> 24;
3046 	offset = eeprom->offset & 0xffffff;
3047 
3048 	if (index == 0) {
3049 		netdev_err(dev, "unsupported index value: %d\n", index);
3050 		return -EINVAL;
3051 	}
3052 
3053 	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
3054 }
3055 
3056 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
3057 {
3058 	struct hwrm_nvm_erase_dir_entry_input *req;
3059 	struct bnxt *bp = netdev_priv(dev);
3060 	int rc;
3061 
3062 	rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
3063 	if (rc)
3064 		return rc;
3065 
3066 	req->dir_idx = cpu_to_le16(index);
3067 	return hwrm_req_send(bp, req);
3068 }
3069 
/* ethtool -E handler.  eeprom->magic encodes the operation:
 *  - magic 0xffffXXXX selects a directory operation where the low byte
 *    is a 1-based directory index and the next byte is the op code
 *    (0x0e == erase);
 *  - any other magic encodes the NVM item type (high 16 bits) and ext
 *    (low 16 bits) of an item to create or rewrite, with eeprom->offset
 *    carrying ordinal (high 16) and attr (low 16).
 * NVM writes are PF-only.
 */
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			/* offset must be the bitwise complement of magic,
			 * as a confirmation token for this destructive op
			 */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
		return -EOPNOTSUPP;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
				eeprom->len);
}
3110 
/* ethtool --set-eee handler.  Validates the requested EEE settings,
 * stores them in bp->eee, and pushes them to firmware when the interface
 * is running.  Requires a configurable PHY and EEE capability; enabling
 * EEE additionally requires autoneg.  Serialized by bp->link_lock.
 */
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	if (!edata->eee_enabled)
		goto eee_ok;	/* disabling: skip all parameter validation */

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		rc = -EINVAL;
		goto eee_exit;
	}
	if (edata->tx_lpi_enabled) {
		/* Range-check the LPI timer only when FW reported limits */
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			rc = -EINVAL;
			goto eee_exit;
		} else if (!bp->lpi_tmr_hi) {
			/* no FW limits: keep the previously stored timer */
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		/* default: everything supported that autoneg advertises */
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		rc = -EINVAL;
		goto eee_exit;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

eee_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
3168 
3169 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
3170 {
3171 	struct bnxt *bp = netdev_priv(dev);
3172 
3173 	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
3174 		return -EOPNOTSUPP;
3175 
3176 	*edata = bp->eee;
3177 	if (!bp->eee.eee_enabled) {
3178 		/* Preserve tx_lpi_timer so that the last value will be used
3179 		 * by default when it is re-enabled.
3180 		 */
3181 		edata->advertised = 0;
3182 		edata->tx_lpi_enabled = 0;
3183 	}
3184 
3185 	if (!bp->eee.eee_active)
3186 		edata->lp_advertised = 0;
3187 
3188 	return 0;
3189 }
3190 
3191 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
3192 					    u16 page_number, u8 bank,
3193 					    u16 start_addr, u16 data_length,
3194 					    u8 *buf)
3195 {
3196 	struct hwrm_port_phy_i2c_read_output *output;
3197 	struct hwrm_port_phy_i2c_read_input *req;
3198 	int rc, byte_offset = 0;
3199 
3200 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
3201 	if (rc)
3202 		return rc;
3203 
3204 	output = hwrm_req_hold(bp, req);
3205 	req->i2c_slave_addr = i2c_addr;
3206 	req->page_number = cpu_to_le16(page_number);
3207 	req->port_id = cpu_to_le16(bp->pf.port_id);
3208 	do {
3209 		u16 xfer_size;
3210 
3211 		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
3212 		data_length -= xfer_size;
3213 		req->page_offset = cpu_to_le16(start_addr + byte_offset);
3214 		req->data_length = xfer_size;
3215 		req->enables =
3216 			cpu_to_le32((start_addr + byte_offset ?
3217 				     PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
3218 				     0) |
3219 				    (bank ?
3220 				     PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
3221 				     0));
3222 		rc = hwrm_req_send(bp, req);
3223 		if (!rc)
3224 			memcpy(buf + byte_offset, output->data, xfer_size);
3225 		byte_offset += xfer_size;
3226 	} while (!rc && data_length > 0);
3227 	hwrm_req_drop(bp, req);
3228 
3229 	return rc;
3230 }
3231 
/* ethtool -m type probe: read the module identifier byte (offset 0) and
 * the diagnostics-support byte from the A0 page, and map them to an
 * ethtool EEPROM format/length.
 */
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
		PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			/* without diagnostics there is no A2 page to read */
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}
3281 
/* ethtool -m data read.  The first ETH_MODULE_SFF_8436_LEN bytes of the
 * flat ethtool offset space map to I2C address A0; anything beyond that
 * maps (rebased to 0) to address A2.  A request may straddle both.
 */
static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16  start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		/* clamp to the end of A0; remainder comes from A2 below */
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
						      start, length, data);
	}
	return rc;
}
3313 
3314 static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
3315 {
3316 	if (bp->link_info.module_status <=
3317 	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
3318 		return 0;
3319 
3320 	switch (bp->link_info.module_status) {
3321 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
3322 		NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
3323 		break;
3324 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
3325 		NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
3326 		break;
3327 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
3328 		NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
3329 		break;
3330 	default:
3331 		NL_SET_ERR_MSG_MOD(extack, "Unknown error");
3332 		break;
3333 	}
3334 	return -EINVAL;
3335 }
3336 
3337 static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
3338 					  const struct ethtool_module_eeprom *page_data,
3339 					  struct netlink_ext_ack *extack)
3340 {
3341 	struct bnxt *bp = netdev_priv(dev);
3342 	int rc;
3343 
3344 	rc = bnxt_get_module_status(bp, extack);
3345 	if (rc)
3346 		return rc;
3347 
3348 	if (bp->hwrm_spec_code < 0x10202) {
3349 		NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
3350 		return -EINVAL;
3351 	}
3352 
3353 	if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
3354 		NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
3355 		return -EINVAL;
3356 	}
3357 
3358 	rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
3359 					      page_data->page, page_data->bank,
3360 					      page_data->offset,
3361 					      page_data->length,
3362 					      page_data->data);
3363 	if (rc) {
3364 		NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
3365 		return rc;
3366 	}
3367 	return page_data->length;
3368 }
3369 
3370 static int bnxt_nway_reset(struct net_device *dev)
3371 {
3372 	int rc = 0;
3373 
3374 	struct bnxt *bp = netdev_priv(dev);
3375 	struct bnxt_link_info *link_info = &bp->link_info;
3376 
3377 	if (!BNXT_PHY_CFG_ABLE(bp))
3378 		return -EOPNOTSUPP;
3379 
3380 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
3381 		return -EINVAL;
3382 
3383 	if (netif_running(dev))
3384 		rc = bnxt_hwrm_set_link_setting(bp, true, false);
3385 
3386 	return rc;
3387 }
3388 
/* ethtool -p (identify) handler: blink all port LEDs at a 500 ms
 * alternate-blink cadence while active, and restore their default state
 * when done.  PF-only; requires firmware-reported LEDs.
 */
static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		/* NOTE(review): LED1_STATE_DEFAULT is applied to every LED;
		 * presumably the DEFAULT value is identical across the
		 * per-LED state enums — confirm against bnxt_hsi.h.
		 */
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(pf->port_id);
	req->num_leds = bp->num_leds;
	/* led0_id starts a packed array of per-LED config structures */
	led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req->enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	return hwrm_req_send(bp, req);
}
3429 
3430 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
3431 {
3432 	struct hwrm_selftest_irq_input *req;
3433 	int rc;
3434 
3435 	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
3436 	if (rc)
3437 		return rc;
3438 
3439 	req->cmpl_ring = cpu_to_le16(cmpl_ring);
3440 	return hwrm_req_send(bp, req);
3441 }
3442 
3443 static int bnxt_test_irq(struct bnxt *bp)
3444 {
3445 	int i;
3446 
3447 	for (i = 0; i < bp->cp_nr_rings; i++) {
3448 		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
3449 		int rc;
3450 
3451 		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
3452 		if (rc)
3453 			return rc;
3454 	}
3455 	return 0;
3456 }
3457 
3458 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
3459 {
3460 	struct hwrm_port_mac_cfg_input *req;
3461 	int rc;
3462 
3463 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
3464 	if (rc)
3465 		return rc;
3466 
3467 	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
3468 	if (enable)
3469 		req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
3470 	else
3471 		req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
3472 	return hwrm_req_send(bp, req);
3473 }
3474 
3475 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
3476 {
3477 	struct hwrm_port_phy_qcaps_output *resp;
3478 	struct hwrm_port_phy_qcaps_input *req;
3479 	int rc;
3480 
3481 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
3482 	if (rc)
3483 		return rc;
3484 
3485 	resp = hwrm_req_hold(bp, req);
3486 	rc = hwrm_req_send(bp, req);
3487 	if (!rc)
3488 		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
3489 
3490 	hwrm_req_drop(bp, req);
3491 	return rc;
3492 }
3493 
/* PHY loopback needs a forced link speed.  If autoneg is enabled (and
 * firmware cannot do autoneg PHY loopback itself), temporarily force the
 * link: use the current speed when the link is up, otherwise the highest
 * force-capable speed among 10/25/40/50G, defaulting to 1G.  Reuses the
 * caller's PORT_PHY_CFG request and clears the force fields afterwards
 * so the caller can send its own loopback config with the same request.
 */
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	if (!link_info->autoneg ||
	    (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (BNXT_LINK_IS_UP(bp))
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* restore the request for the caller's subsequent send */
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}
3530 
/* Enable or disable PHY loopback; @ext selects external loopback when
 * enabling.  When enabling with autoneg active, the link is first forced
 * to a fixed speed (see bnxt_disable_an_for_lpbk()), which sends an
 * intermediate request using the same held request buffer.
 */
static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	/* prevent bnxt_disable_an_for_lpbk() from consuming the request */
	hwrm_req_hold(bp, req);

	if (enable) {
		bnxt_disable_an_for_lpbk(bp, req);
		if (ext)
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
		else
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	} else {
		req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
	}
	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	rc = hwrm_req_send(bp, req);
	hwrm_req_drop(bp, req);
	return rc;
}
3557 
/* Validate the looped-back test frame described by the RX completion at
 * @raw_cons: the received length must equal @pkt_size, the source MAC
 * (bytes 6..11) must be the device's own address, and the payload must
 * be the (i & 0xff) pattern written by bnxt_run_loopback().
 * Returns 0 on match, -EIO on any mismatch.
 */
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	/* opaque carries the RX buffer ring index for this packet */
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	/* skip the destination MAC, verify the source MAC */
	i = ETH_ALEN;
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	for (  ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}
3590 
/* Busy-poll the completion ring (up to 200 iterations, 5 us apart) for
 * the RX completion of the loopback frame, then validate the frame.
 * Returns 0 if a matching frame was received, -EIO on timeout or
 * content mismatch.  Updates cpr->cp_raw_cons past consumed entries.
 */
static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			/* advance twice: presumably the RX L2 completion
			 * occupies two ring entries — confirm against the
			 * completion ring layout in bnxt.h
			 */
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	cpr->cp_raw_cons = raw_cons;
	return rc;
}
3625 
/* Transmit one self-addressed test frame on TX ring 0 and poll RX ring 0
 * for it to come back (loopback must already be enabled).  The frame is
 * dst MAC + src MAC (both the device address) followed by an (i & 0xff)
 * byte pattern.  Returns 0 when the frame is received intact.
 */
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	cpr = &rxr->bnapi->cp_ring;
	/* P5 chips use a dedicated RX completion sub-ring */
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
	/* keep the frame small enough for the RX copy path */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
	dev_kfree_skb(skb);
	return rc;
}
3670 
3671 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
3672 {
3673 	struct hwrm_selftest_exec_output *resp;
3674 	struct hwrm_selftest_exec_input *req;
3675 	int rc;
3676 
3677 	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
3678 	if (rc)
3679 		return rc;
3680 
3681 	hwrm_req_timeout(bp, req, bp->test_info->timeout);
3682 	req->flags = test_mask;
3683 
3684 	resp = hwrm_req_hold(bp, req);
3685 	rc = hwrm_req_send(bp, req);
3686 	*test_results = resp->test_success;
3687 	hwrm_req_drop(bp, req);
3688 	return rc;
3689 }
3690 
3691 #define BNXT_DRV_TESTS			4
3692 #define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
3693 #define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
3694 #define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
3695 #define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)
3696 
/* ethtool -t handler.  Runs the firmware's online tests always; when
 * ETH_TEST_FL_OFFLINE is requested (and no VFs are active on a single
 * PF), additionally closes the NIC and runs the offline firmware tests
 * plus the driver's MAC, PHY and (optionally) external loopback tests,
 * then reopens the NIC.  The IRQ test runs last in both modes.
 * buf[i] == 1 marks test i as failed.
 */
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc = 0, i;

	if (!bp->num_tests || !BNXT_PF(bp))
		return;
	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
			return;
		}
		offline = true;
	}

	/* Build the firmware test mask: online tests always, offline
	 * tests only when offline mode was requested.
	 */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		bnxt_ulp_stop(bp);
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			etest->flags |= ETH_TEST_FL_FAILED;
			bnxt_ulp_start(bp, rc);
			return;
		}
		bnxt_run_fw_tests(bp, test_mask, &test_results);

		/* MAC loopback: presume failure until the frame comes back */
		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			bnxt_hwrm_mac_loopback(bp, false);
			etest->flags |= ETH_TEST_FL_FAILED;
			bnxt_ulp_start(bp, rc);
			return;
		}
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
		/* PHY loopback: give the PHY time to settle before testing */
		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);
		if (bnxt_run_loopback(bp)) {
			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp)) {
				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
				etest->flags |= ETH_TEST_FL_FAILED;
			}
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		rc = bnxt_open_nic(bp, true, true);
		bnxt_ulp_start(bp, rc);
	}
	if (rc || bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* Translate the firmware pass bitmap into per-test results */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}
3797 
/* ethtool --reset handler.  Supports full chip reset and application
 * processor (AP) reset, PF-only, on firmware spec >= 0x10803.  Bits that
 * were successfully handled are cleared from *flags; a reset type that
 * was the sole request but is unsupported fails with -EOPNOTSUPP.
 */
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
	struct bnxt *bp = netdev_priv(dev);
	bool reload = false;
	u32 req = *flags;

	if (!req)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "Reset is not supported from a VF\n");
		return -EOPNOTSUPP;
	}

	/* Without hot-reset support a chip reset would yank the device
	 * out from under VMs that have VFs assigned.
	 */
	if (pci_vfs_assigned(bp->pdev) &&
	    !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
		netdev_err(dev,
			   "Reset not allowed when VFs are assigned to VMs\n");
		return -EBUSY;
	}

	if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_chip(dev)) {
				netdev_info(dev, "Firmware reset request successful.\n");
				if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
					reload = true;
				*flags &= ~BNXT_FW_RESET_CHIP;
			}
		} else if (req == BNXT_FW_RESET_CHIP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_ap(dev)) {
				netdev_info(dev, "Reset application processor successful.\n");
				reload = true;
				*flags &= ~BNXT_FW_RESET_AP;
			}
		} else if (req == BNXT_FW_RESET_AP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (reload)
		netdev_info(dev, "Reload driver to complete reset\n");

	return 0;
}
3851 
3852 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
3853 {
3854 	struct bnxt *bp = netdev_priv(dev);
3855 
3856 	if (dump->flag > BNXT_DUMP_CRASH) {
3857 		netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
3858 		return -EINVAL;
3859 	}
3860 
3861 	if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
3862 		netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
3863 		return -EOPNOTSUPP;
3864 	}
3865 
3866 	bp->dump_flag = dump->flag;
3867 	return 0;
3868 }
3869 
3870 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
3871 {
3872 	struct bnxt *bp = netdev_priv(dev);
3873 
3874 	if (bp->hwrm_spec_code < 0x10801)
3875 		return -EOPNOTSUPP;
3876 
3877 	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
3878 			bp->ver_resp.hwrm_fw_min_8b << 16 |
3879 			bp->ver_resp.hwrm_fw_bld_8b << 8 |
3880 			bp->ver_resp.hwrm_fw_rsvd_8b;
3881 
3882 	dump->flag = bp->dump_flag;
3883 	dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
3884 	return 0;
3885 }
3886 
3887 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
3888 			      void *buf)
3889 {
3890 	struct bnxt *bp = netdev_priv(dev);
3891 
3892 	if (bp->hwrm_spec_code < 0x10801)
3893 		return -EOPNOTSUPP;
3894 
3895 	memset(buf, 0, dump->len);
3896 
3897 	dump->flag = bp->dump_flag;
3898 	return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
3899 }
3900 
/* ethtool -T / SIOCETHTOOL timestamping-info handler.  Software
 * timestamping is always reported; hardware timestamping (and the PHC
 * index) is added only when PTP is configured on this device.
 */
static int bnxt_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ptp_cfg *ptp;

	ptp = bp->ptp_cfg;
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	info->phc_index = -1;
	if (!ptp)
		return 0;

	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
				 SOF_TIMESTAMPING_RX_HARDWARE |
				 SOF_TIMESTAMPING_RAW_HARDWARE;
	if (ptp->ptp_clock)
		info->phc_index = ptp_clock_index(ptp->ptp_clock);

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	/* newer firmware can timestamp every received packet */
	if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
		info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}
3932 
/* One-time ethtool setup at probe/firmware-init time: append the NVM
 * package version to the firmware version string (when firmware does not
 * already report it), then query the firmware self-test list and build
 * bp->test_info with the test count, timeout, offline mask and the
 * display strings for ethtool -t (firmware tests plus the four
 * driver-implemented tests).
 */
void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp;
	struct hwrm_selftest_qlist_input *req;
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
		bnxt_get_pkgver(dev);

	bp->num_tests = 0;
	/* Self-tests need newer firmware and are PF-only */
	if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
		return;

	/* Reuse test_info across firmware resets if already allocated */
	test_info = bp->test_info;
	if (!test_info) {
		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
		if (!test_info)
			return;
		bp->test_info = test_info;
	}

	if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
		return;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc)
		goto ethtool_init_exit;

	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test_name[i];

		/* The last BNXT_DRV_TESTS entries are driver tests with
		 * fixed names; the rest are named by firmware.
		 */
		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt_test (offline)");
		} else {
			snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
				 fw_str, test_info->offline_mask & (1 << i) ?
					"offline" : "online");
		}
	}

ethtool_init_exit:
	hwrm_req_drop(bp, req);
}
3994 
3995 static void bnxt_get_eth_phy_stats(struct net_device *dev,
3996 				   struct ethtool_eth_phy_stats *phy_stats)
3997 {
3998 	struct bnxt *bp = netdev_priv(dev);
3999 	u64 *rx;
4000 
4001 	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4002 		return;
4003 
4004 	rx = bp->rx_port_stats_ext.sw_stats;
4005 	phy_stats->SymbolErrorDuringCarrier =
4006 		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
4007 }
4008 
4009 static void bnxt_get_eth_mac_stats(struct net_device *dev,
4010 				   struct ethtool_eth_mac_stats *mac_stats)
4011 {
4012 	struct bnxt *bp = netdev_priv(dev);
4013 	u64 *rx, *tx;
4014 
4015 	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4016 		return;
4017 
4018 	rx = bp->port_stats.sw_stats;
4019 	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4020 
4021 	mac_stats->FramesReceivedOK =
4022 		BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
4023 	mac_stats->FramesTransmittedOK =
4024 		BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
4025 	mac_stats->FrameCheckSequenceErrors =
4026 		BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
4027 	mac_stats->AlignmentErrors =
4028 		BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
4029 	mac_stats->OutOfRangeLengthField =
4030 		BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
4031 }
4032 
4033 static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
4034 				    struct ethtool_eth_ctrl_stats *ctrl_stats)
4035 {
4036 	struct bnxt *bp = netdev_priv(dev);
4037 	u64 *rx;
4038 
4039 	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4040 		return;
4041 
4042 	rx = bp->port_stats.sw_stats;
4043 	ctrl_stats->MACControlFramesReceived =
4044 		BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
4045 }
4046 
/* Packet-size buckets for the RMON histograms reported by
 * bnxt_get_rmon_stats(); entry i corresponds to hist[i]/hist_tx[i].
 * The zeroed entry is the end-of-table sentinel.
 */
static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  9216 },
	{ 9217, 16383 },
	{}
};
4060 
4061 static void bnxt_get_rmon_stats(struct net_device *dev,
4062 				struct ethtool_rmon_stats *rmon_stats,
4063 				const struct ethtool_rmon_hist_range **ranges)
4064 {
4065 	struct bnxt *bp = netdev_priv(dev);
4066 	u64 *rx, *tx;
4067 
4068 	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4069 		return;
4070 
4071 	rx = bp->port_stats.sw_stats;
4072 	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4073 
4074 	rmon_stats->jabbers =
4075 		BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
4076 	rmon_stats->oversize_pkts =
4077 		BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
4078 	rmon_stats->undersize_pkts =
4079 		BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);
4080 
4081 	rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
4082 	rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
4083 	rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
4084 	rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
4085 	rmon_stats->hist[4] =
4086 		BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
4087 	rmon_stats->hist[5] =
4088 		BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
4089 	rmon_stats->hist[6] =
4090 		BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
4091 	rmon_stats->hist[7] =
4092 		BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
4093 	rmon_stats->hist[8] =
4094 		BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
4095 	rmon_stats->hist[9] =
4096 		BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);
4097 
4098 	rmon_stats->hist_tx[0] =
4099 		BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
4100 	rmon_stats->hist_tx[1] =
4101 		BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
4102 	rmon_stats->hist_tx[2] =
4103 		BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
4104 	rmon_stats->hist_tx[3] =
4105 		BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
4106 	rmon_stats->hist_tx[4] =
4107 		BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
4108 	rmon_stats->hist_tx[5] =
4109 		BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
4110 	rmon_stats->hist_tx[6] =
4111 		BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
4112 	rmon_stats->hist_tx[7] =
4113 		BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
4114 	rmon_stats->hist_tx[8] =
4115 		BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
4116 	rmon_stats->hist_tx[9] =
4117 		BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);
4118 
4119 	*ranges = bnxt_rmon_ranges;
4120 }
4121 
4122 static void bnxt_get_link_ext_stats(struct net_device *dev,
4123 				    struct ethtool_link_ext_stats *stats)
4124 {
4125 	struct bnxt *bp = netdev_priv(dev);
4126 	u64 *rx;
4127 
4128 	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4129 		return;
4130 
4131 	rx = bp->rx_port_stats_ext.sw_stats;
4132 	stats->link_down_events =
4133 		*(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
4134 }
4135 
4136 void bnxt_ethtool_free(struct bnxt *bp)
4137 {
4138 	kfree(bp->test_info);
4139 	bp->test_info = NULL;
4140 }
4141 
/* ethtool entry points for bnxt PF/VF netdevs.  Handlers not listed here
 * fall back to the ethtool core defaults.
 */
const struct ethtool_ops bnxt_ethtool_ops = {
	/* Coalesce parameters the driver accepts; the core rejects others. */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_USE_CQE,
	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_fec_stats		= bnxt_get_fec_stats,
	.get_fecparam		= bnxt_get_fecparam,
	.set_fecparam		= bnxt_set_fecparam,
	.get_pause_stats	= bnxt_get_pause_stats,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_regs_len		= bnxt_get_regs_len,
	.get_regs		= bnxt_get_regs,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
	.get_rxfh               = bnxt_get_rxfh,
	.set_rxfh		= bnxt_set_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len         = bnxt_get_eeprom_len,
	.get_eeprom             = bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,
	.get_link_ext_stats	= bnxt_get_link_ext_stats,
	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,
	.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
	.self_test		= bnxt_self_test,
	.get_ts_info		= bnxt_get_ts_info,
	.reset			= bnxt_reset,
	.set_dump		= bnxt_set_dump,
	.get_dump_flag		= bnxt_get_dump_flag,
	.get_dump_data		= bnxt_get_dump_data,
	/* Standard (IEEE/RMON) statistics groups. */
	.get_eth_phy_stats	= bnxt_get_eth_phy_stats,
	.get_eth_mac_stats	= bnxt_get_eth_mac_stats,
	.get_eth_ctrl_stats	= bnxt_get_eth_ctrl_stats,
	.get_rmon_stats		= bnxt_get_rmon_stats,
};
4204