xref: /openbmc/linux/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c (revision c51d39010a1bccc9c1294e2d7c00005aefeb2b5c)
1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/netdevice.h>
19 #include <linux/net_tstamp.h>
20 #include <linux/pci.h>
21 #include "liquidio_common.h"
22 #include "octeon_droq.h"
23 #include "octeon_iq.h"
24 #include "response_manager.h"
25 #include "octeon_device.h"
26 #include "octeon_nic.h"
27 #include "octeon_main.h"
28 #include "octeon_network.h"
29 #include "cn66xx_regs.h"
30 #include "cn66xx_device.h"
31 #include "cn23xx_pf_device.h"
32 
33 static int octnet_get_link_stats(struct net_device *netdev);
34 
/* Completion context for an asynchronous MDIO soft command: the issuer
 * sleeps on @wc until octnet_mdio_resp_callback() publishes a result in
 * @cond and wakes it.
 */
struct oct_mdio_cmd_context {
	int octeon_id;		/* device id used to re-find the octeon_device in the callback */
	wait_queue_head_t wc;	/* waitqueue the requester sleeps on */
	int cond;		/* 0 = pending, 1 = response OK, -1 = firmware reported failure */
};
40 
/* Layout of the response buffer the firmware fills for an MDIO soft
 * command (see octnet_mdio45_access()).
 */
struct oct_mdio_cmd_resp {
	u64 rh;				/* rh: raw response header word from firmware */
	struct oct_mdio_cmd resp;	/* echoed command; resp.value1 carries the read result */
	u64 status;			/* completion status; 0 on success, non-zero on failure */
};
46 
47 #define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
48 
/* Octeon's interface mode of operation.
 * Note: of these, only XAUI, RXAUI and XFI are recognized by
 * lio_get_settings(); all other modes are reported as unknown.
 */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};
72 
73 #define OCT_ETHTOOL_REGDUMP_LEN  4096
74 #define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
75 #define OCT_ETHTOOL_REGSVER  1
76 
/* statistics of PF
 * Names reported for "ethtool -S".  The order of entries here MUST
 * match the order in which lio_get_ethtool_stats() writes values into
 * the data array.
 */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/*jabber_err+l2_err+frame_err */
	"tx_errors",	/*fw_err_pko+fw_err_link+fw_err_drop */
	"rx_dropped",   /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
			 *st->fromwire.dmac_drop + st->fromwire.fw_err_drop
			 */
	"tx_dropped",

	/* firmware tx-path counters */
	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",
	"tx_vxlan",

	/* MAC-level tx counters */
	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",	/*oct->link_stats.fromhost.ctl_sent */
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collison",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	/* firmware rx-path counters */
	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_vxlan",
	"rx_vxlan_err",

	/* LRO counters */
	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",

	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"rx_fwd_rate",

	/* MAC-level rx counters */
	"mac_rx_total_rcvd",
	"mac_rx_bytes",
	"mac_rx_total_bcst",
	"mac_rx_total_mcst",
	"mac_rx_runts",
	"mac_rx_ctl_packets",
	"mac_rx_fifo_err",
	"mac_rx_dma_drop",
	"mac_rx_fcs_err",

	"link_state_changes",
};
149 
/* statistics of host tx queue
 * One copy of these names is emitted per instruction queue as
 * "tx-<q>-<name>"; order must match the per-IQ fill loop in
 * lio_get_ethtool_stats().
 */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->instr_queue[iq_no]->stats.tx_done*/
	"bytes",		/*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
	"dropped",
	"iq_busy",
	"sgentry_sent",

	"fw_instr_posted",
	"fw_instr_processed",
	"fw_instr_dropped",
	"fw_bytes_sent",

	"tso",
	"vxlan",
	"txq_restart",
};
167 
/* statistics of host rx queue
 * One copy of these names is emitted per output (DROQ) queue; order
 * must match the per-OQ fill loop in lio_get_ethtool_stats().
 */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->droq[oq_no]->stats.rx_pkts_received */
	"bytes",		/*oct->droq[oq_no]->stats.rx_bytes_received */
	"dropped",		/*oct->droq[oq_no]->stats.rx_dropped+
				 *oct->droq[oq_no]->stats.dropped_nodispatch+
				 *oct->droq[oq_no]->stats.dropped_toomany+
				 *oct->droq[oq_no]->stats.dropped_nomem
				 */
	"dropped_nomem",
	"dropped_toomany",
	"fw_dropped",
	"fw_pkts_received",
	"fw_bytes_received",
	"fw_dropped_nodispatch",

	"vxlan",
	"buffer_alloc_failure",
};
187 
/* LiquidIO driver private flags (none defined at present; the empty
 * table keeps lio_get_priv_flags_strings() generic).
 */
static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
};
191 
192 #define OCTNIC_NCMD_AUTONEG_ON  0x1
193 #define OCTNIC_NCMD_PHY_ON      0x2
194 
195 static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
196 {
197 	struct lio *lio = GET_LIO(netdev);
198 	struct octeon_device *oct = lio->oct_dev;
199 	struct oct_link_info *linfo;
200 
201 	linfo = &lio->linfo;
202 
203 	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
204 	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
205 	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
206 		ecmd->port = PORT_FIBRE;
207 		ecmd->supported =
208 			(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
209 			 SUPPORTED_Pause);
210 		ecmd->advertising =
211 			(ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
212 		ecmd->transceiver = XCVR_EXTERNAL;
213 		ecmd->autoneg = AUTONEG_DISABLE;
214 
215 	} else {
216 		dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
217 			linfo->link.s.if_mode);
218 	}
219 
220 	if (linfo->link.s.link_up) {
221 		ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
222 		ecmd->duplex = linfo->link.s.duplex;
223 	} else {
224 		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
225 		ecmd->duplex = DUPLEX_UNKNOWN;
226 	}
227 
228 	return 0;
229 }
230 
231 static void
232 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
233 {
234 	struct lio *lio;
235 	struct octeon_device *oct;
236 
237 	lio = GET_LIO(netdev);
238 	oct = lio->oct_dev;
239 
240 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
241 	strcpy(drvinfo->driver, "liquidio");
242 	strcpy(drvinfo->version, LIQUIDIO_VERSION);
243 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
244 		ETHTOOL_FWVERS_LEN);
245 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
246 }
247 
248 static void
249 lio_ethtool_get_channels(struct net_device *dev,
250 			 struct ethtool_channels *channel)
251 {
252 	struct lio *lio = GET_LIO(dev);
253 	struct octeon_device *oct = lio->oct_dev;
254 	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
255 
256 	if (OCTEON_CN6XXX(oct)) {
257 		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
258 
259 		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
260 		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
261 		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
262 		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
263 	} else if (OCTEON_CN23XX_PF(oct)) {
264 		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
265 
266 		max_rx = CFG_GET_OQ_MAX_Q(conf23);
267 		max_tx = CFG_GET_IQ_MAX_Q(conf23);
268 		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx);
269 		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx);
270 	}
271 
272 	channel->max_rx = max_rx;
273 	channel->max_tx = max_tx;
274 	channel->rx_count = rx_count;
275 	channel->tx_count = tx_count;
276 }
277 
278 static int lio_get_eeprom_len(struct net_device *netdev)
279 {
280 	u8 buf[128];
281 	struct lio *lio = GET_LIO(netdev);
282 	struct octeon_device *oct_dev = lio->oct_dev;
283 	struct octeon_board_info *board_info;
284 	int len;
285 
286 	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
287 	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
288 		      board_info->name, board_info->serial_number,
289 		      board_info->major, board_info->minor);
290 
291 	return len;
292 }
293 
294 static int
295 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
296 	       u8 *bytes)
297 {
298 	struct lio *lio = GET_LIO(netdev);
299 	struct octeon_device *oct_dev = lio->oct_dev;
300 	struct octeon_board_info *board_info;
301 
302 	if (eeprom->offset)
303 		return -EINVAL;
304 
305 	eeprom->magic = oct_dev->pci_dev->vendor;
306 	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
307 	sprintf((char *)bytes,
308 		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
309 		board_info->name, board_info->serial_number,
310 		board_info->major, board_info->minor);
311 
312 	return 0;
313 }
314 
315 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
316 {
317 	struct lio *lio = GET_LIO(netdev);
318 	struct octeon_device *oct = lio->oct_dev;
319 	struct octnic_ctrl_pkt nctrl;
320 	int ret = 0;
321 
322 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
323 
324 	nctrl.ncmd.u64 = 0;
325 	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
326 	nctrl.ncmd.s.param1 = addr;
327 	nctrl.ncmd.s.param2 = val;
328 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
329 	nctrl.wait_time = 100;
330 	nctrl.netpndev = (u64)netdev;
331 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
332 
333 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
334 	if (ret < 0) {
335 		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
336 		return -EINVAL;
337 	}
338 
339 	return 0;
340 }
341 
342 static int octnet_id_active(struct net_device *netdev, int val)
343 {
344 	struct lio *lio = GET_LIO(netdev);
345 	struct octeon_device *oct = lio->oct_dev;
346 	struct octnic_ctrl_pkt nctrl;
347 	int ret = 0;
348 
349 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
350 
351 	nctrl.ncmd.u64 = 0;
352 	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
353 	nctrl.ncmd.s.param1 = val;
354 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
355 	nctrl.wait_time = 100;
356 	nctrl.netpndev = (u64)netdev;
357 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
358 
359 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
360 	if (ret < 0) {
361 		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
362 		return -EINVAL;
363 	}
364 
365 	return 0;
366 }
367 
368 /* Callback for when mdio command response arrives
369  */
370 static void octnet_mdio_resp_callback(struct octeon_device *oct,
371 				      u32 status,
372 				      void *buf)
373 {
374 	struct oct_mdio_cmd_context *mdio_cmd_ctx;
375 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
376 
377 	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
378 
379 	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
380 	if (status) {
381 		dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
382 			CVM_CAST64(status));
383 		WRITE_ONCE(mdio_cmd_ctx->cond, -1);
384 	} else {
385 		WRITE_ONCE(mdio_cmd_ctx->cond, 1);
386 	}
387 	wake_up_interruptible(&mdio_cmd_ctx->wc);
388 }
389 
/* This routine provides PHY access routines for
 * mdio  clause45 .
 *
 * @lio:   per-interface state
 * @op:    0 = read, non-zero = write
 * @loc:   MDIO register address
 * @value: in: value to write (when op != 0); out: value read (when op == 0)
 *
 * Builds an MDIO soft command, sends it to the firmware and sleeps until
 * octnet_mdio_resp_callback() signals completion.  Returns 0 on success,
 * -ENOMEM if the soft command cannot be allocated, -EBUSY on send or
 * firmware failure, -EINVAL if the callback reported an error.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	/* One allocation carries the command, response and context areas */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	WRITE_ONCE(mdio_cmd_ctx->cond, 0);
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	/* Command payload is byte-swapped for the firmware */
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			/* Response is byte-swapped back before use */
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			/* cond == 1 means the callback saw a clean completion;
			 * for reads, hand the fetched register value back
			 */
			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}
468 
469 static int lio_set_phys_id(struct net_device *netdev,
470 			   enum ethtool_phys_id_state state)
471 {
472 	struct lio *lio = GET_LIO(netdev);
473 	struct octeon_device *oct = lio->oct_dev;
474 	int value, ret;
475 
476 	switch (state) {
477 	case ETHTOOL_ID_ACTIVE:
478 		if (oct->chip_id == OCTEON_CN66XX) {
479 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
480 					   VITESSE_PHY_GPIO_DRIVEON);
481 			return 2;
482 
483 		} else if (oct->chip_id == OCTEON_CN68XX) {
484 			/* Save the current LED settings */
485 			ret = octnet_mdio45_access(lio, 0,
486 						   LIO68XX_LED_BEACON_ADDR,
487 						   &lio->phy_beacon_val);
488 			if (ret)
489 				return ret;
490 
491 			ret = octnet_mdio45_access(lio, 0,
492 						   LIO68XX_LED_CTRL_ADDR,
493 						   &lio->led_ctrl_val);
494 			if (ret)
495 				return ret;
496 
497 			/* Configure Beacon values */
498 			value = LIO68XX_LED_BEACON_CFGON;
499 			ret = octnet_mdio45_access(lio, 1,
500 						   LIO68XX_LED_BEACON_ADDR,
501 						   &value);
502 			if (ret)
503 				return ret;
504 
505 			value = LIO68XX_LED_CTRL_CFGON;
506 			ret = octnet_mdio45_access(lio, 1,
507 						   LIO68XX_LED_CTRL_ADDR,
508 						   &value);
509 			if (ret)
510 				return ret;
511 		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
512 			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
513 
514 			/* returns 0 since updates are asynchronous */
515 			return 0;
516 		} else {
517 			return -EINVAL;
518 		}
519 		break;
520 
521 	case ETHTOOL_ID_ON:
522 		if (oct->chip_id == OCTEON_CN66XX) {
523 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
524 					   VITESSE_PHY_GPIO_HIGH);
525 
526 		} else if (oct->chip_id == OCTEON_CN68XX) {
527 			return -EINVAL;
528 		} else {
529 			return -EINVAL;
530 		}
531 		break;
532 
533 	case ETHTOOL_ID_OFF:
534 		if (oct->chip_id == OCTEON_CN66XX)
535 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
536 					   VITESSE_PHY_GPIO_LOW);
537 		else if (oct->chip_id == OCTEON_CN68XX)
538 			return -EINVAL;
539 		else
540 			return -EINVAL;
541 
542 		break;
543 
544 	case ETHTOOL_ID_INACTIVE:
545 		if (oct->chip_id == OCTEON_CN66XX) {
546 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
547 					   VITESSE_PHY_GPIO_DRIVEOFF);
548 		} else if (oct->chip_id == OCTEON_CN68XX) {
549 			/* Restore LED settings */
550 			ret = octnet_mdio45_access(lio, 1,
551 						   LIO68XX_LED_CTRL_ADDR,
552 						   &lio->led_ctrl_val);
553 			if (ret)
554 				return ret;
555 
556 			ret = octnet_mdio45_access(lio, 1,
557 						   LIO68XX_LED_BEACON_ADDR,
558 						   &lio->phy_beacon_val);
559 			if (ret)
560 				return ret;
561 		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
562 			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
563 
564 			return 0;
565 		} else {
566 			return -EINVAL;
567 		}
568 		break;
569 
570 	default:
571 		return -EINVAL;
572 	}
573 
574 	return 0;
575 }
576 
577 static void
578 lio_ethtool_get_ringparam(struct net_device *netdev,
579 			  struct ethtool_ringparam *ering)
580 {
581 	struct lio *lio = GET_LIO(netdev);
582 	struct octeon_device *oct = lio->oct_dev;
583 	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
584 	    rx_pending = 0;
585 
586 	if (OCTEON_CN6XXX(oct)) {
587 		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
588 
589 		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
590 		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
591 		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
592 		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
593 	} else if (OCTEON_CN23XX_PF(oct)) {
594 		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
595 
596 		tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
597 		rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
598 		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
599 		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
600 	}
601 
602 	if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
603 		ering->rx_pending = 0;
604 		ering->rx_max_pending = 0;
605 		ering->rx_mini_pending = 0;
606 		ering->rx_jumbo_pending = rx_pending;
607 		ering->rx_mini_max_pending = 0;
608 		ering->rx_jumbo_max_pending = rx_max_pending;
609 	} else {
610 		ering->rx_pending = rx_pending;
611 		ering->rx_max_pending = rx_max_pending;
612 		ering->rx_mini_pending = 0;
613 		ering->rx_jumbo_pending = 0;
614 		ering->rx_mini_max_pending = 0;
615 		ering->rx_jumbo_max_pending = 0;
616 	}
617 
618 	ering->tx_pending = tx_pending;
619 	ering->tx_max_pending = tx_max_pending;
620 }
621 
622 static u32 lio_get_msglevel(struct net_device *netdev)
623 {
624 	struct lio *lio = GET_LIO(netdev);
625 
626 	return lio->msg_enable;
627 }
628 
629 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
630 {
631 	struct lio *lio = GET_LIO(netdev);
632 
633 	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
634 		if (msglvl & NETIF_MSG_HW)
635 			liquidio_set_feature(netdev,
636 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
637 		else
638 			liquidio_set_feature(netdev,
639 					     OCTNET_CMD_VERBOSE_DISABLE, 0);
640 	}
641 
642 	lio->msg_enable = msglvl;
643 }
644 
645 static void
646 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
647 {
648 	/* Notes: Not supporting any auto negotiation in these
649 	 * drivers. Just report pause frame support.
650 	 */
651 	struct lio *lio = GET_LIO(netdev);
652 	struct octeon_device *oct = lio->oct_dev;
653 
654 	pause->autoneg = 0;
655 
656 	pause->tx_pause = oct->tx_pause;
657 	pause->rx_pause = oct->rx_pause;
658 }
659 
660 static int
661 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
662 {
663 	/* Notes: Not supporting any auto negotiation in these
664 	 * drivers.
665 	 */
666 	struct lio *lio = GET_LIO(netdev);
667 	struct octeon_device *oct = lio->oct_dev;
668 	struct octnic_ctrl_pkt nctrl;
669 	struct oct_link_info *linfo = &lio->linfo;
670 
671 	int ret = 0;
672 
673 	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
674 		return -EINVAL;
675 
676 	if (linfo->link.s.duplex == 0) {
677 		/*no flow control for half duplex*/
678 		if (pause->rx_pause || pause->tx_pause)
679 			return -EINVAL;
680 	}
681 
682 	/*do not support autoneg of link flow control*/
683 	if (pause->autoneg == AUTONEG_ENABLE)
684 		return -EINVAL;
685 
686 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
687 
688 	nctrl.ncmd.u64 = 0;
689 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
690 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
691 	nctrl.wait_time = 100;
692 	nctrl.netpndev = (u64)netdev;
693 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
694 
695 	if (pause->rx_pause) {
696 		/*enable rx pause*/
697 		nctrl.ncmd.s.param1 = 1;
698 	} else {
699 		/*disable rx pause*/
700 		nctrl.ncmd.s.param1 = 0;
701 	}
702 
703 	if (pause->tx_pause) {
704 		/*enable tx pause*/
705 		nctrl.ncmd.s.param2 = 1;
706 	} else {
707 		/*disable tx pause*/
708 		nctrl.ncmd.s.param2 = 0;
709 	}
710 
711 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
712 	if (ret < 0) {
713 		dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
714 		return -EINVAL;
715 	}
716 
717 	oct->rx_pause = pause->rx_pause;
718 	oct->tx_pause = pause->tx_pause;
719 
720 	return 0;
721 }
722 
/* ethtool -S: fill @data with every counter named in oct_stats_strings,
 * followed by per-IQ counters (oct_iq_stats_strings, one set per active
 * instruction queue) and per-OQ counters (oct_droq_stats_strings, one
 * set per active output queue).  The write order here MUST match those
 * string tables exactly, entry for entry.
 */
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats  __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	/* Refresh both the netdev counters and the firmware link stats
	 * before snapshotting them below.
	 */
	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/*sum of oct->droq[oq_no]->stats->rx_dropped +
	 *oct->droq[oq_no]->stats->dropped_nodispatch +
	 *oct->droq[oq_no]->stats->dropped_toomany +
	 *oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);

	/* firmware tx stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 *fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* mac tx statistics */
	/*CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/*CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/*CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/*CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/*CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/*CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan_err
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_timer
	 */
	/* intrmod: packet forward rate */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/*CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/*CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/*CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/*lio->link_changes*/
	data[i++] = CVM_CAST64(lio->link_changes);

	/* per-IQ counters; skip queues not present in the iq mask */
	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
			continue;
		/*packets to network port*/
		/*# of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/*# of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/*# of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/*# of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/*XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/*instruction to firmware: data and control */
		/*# of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/*# of instructions processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_processed);
		/*# of instructions could not be processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_dropped);
		/*bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/*tso request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/*vxlan request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/*txq restart*/
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	/* per-OQ counters; skip queues not present in the oq mask */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
			continue;

		/*packets send to TCP/IP network stack */
		/*# of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/*# of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/*# of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/*control and data path*/
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
984 
985 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
986 {
987 	struct octeon_device *oct_dev = lio->oct_dev;
988 	int i;
989 
990 	switch (oct_dev->chip_id) {
991 	case OCTEON_CN23XX_PF_VID:
992 		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
993 			sprintf(data, "%s", oct_priv_flags_strings[i]);
994 			data += ETH_GSTRING_LEN;
995 		}
996 		break;
997 	case OCTEON_CN68XX:
998 	case OCTEON_CN66XX:
999 		break;
1000 	default:
1001 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1002 		break;
1003 	}
1004 }
1005 
1006 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1007 {
1008 	struct lio *lio = GET_LIO(netdev);
1009 	struct octeon_device *oct_dev = lio->oct_dev;
1010 	int num_iq_stats, num_oq_stats, i, j;
1011 	int num_stats;
1012 
1013 	switch (stringset) {
1014 	case ETH_SS_STATS:
1015 		num_stats = ARRAY_SIZE(oct_stats_strings);
1016 		for (j = 0; j < num_stats; j++) {
1017 			sprintf(data, "%s", oct_stats_strings[j]);
1018 			data += ETH_GSTRING_LEN;
1019 		}
1020 
1021 		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1022 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1023 			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1024 				continue;
1025 			for (j = 0; j < num_iq_stats; j++) {
1026 				sprintf(data, "tx-%d-%s", i,
1027 					oct_iq_stats_strings[j]);
1028 				data += ETH_GSTRING_LEN;
1029 			}
1030 		}
1031 
1032 		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1033 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1034 			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1035 				continue;
1036 			for (j = 0; j < num_oq_stats; j++) {
1037 				sprintf(data, "rx-%d-%s", i,
1038 					oct_droq_stats_strings[j]);
1039 				data += ETH_GSTRING_LEN;
1040 			}
1041 		}
1042 		break;
1043 
1044 	case ETH_SS_PRIV_FLAGS:
1045 		lio_get_priv_flags_strings(lio, data);
1046 		break;
1047 	default:
1048 		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1049 		break;
1050 	}
1051 }
1052 
1053 static int lio_get_priv_flags_ss_count(struct lio *lio)
1054 {
1055 	struct octeon_device *oct_dev = lio->oct_dev;
1056 
1057 	switch (oct_dev->chip_id) {
1058 	case OCTEON_CN23XX_PF_VID:
1059 		return ARRAY_SIZE(oct_priv_flags_strings);
1060 	case OCTEON_CN68XX:
1061 	case OCTEON_CN66XX:
1062 		return -EOPNOTSUPP;
1063 	default:
1064 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1065 		return -EOPNOTSUPP;
1066 	}
1067 }
1068 
1069 static int lio_get_sset_count(struct net_device *netdev, int sset)
1070 {
1071 	struct lio *lio = GET_LIO(netdev);
1072 	struct octeon_device *oct_dev = lio->oct_dev;
1073 
1074 	switch (sset) {
1075 	case ETH_SS_STATS:
1076 		return (ARRAY_SIZE(oct_stats_strings) +
1077 			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1078 			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1079 	case ETH_SS_PRIV_FLAGS:
1080 		return lio_get_priv_flags_ss_count(lio);
1081 	default:
1082 		return -EOPNOTSUPP;
1083 	}
1084 }
1085 
1086 static int lio_get_intr_coalesce(struct net_device *netdev,
1087 				 struct ethtool_coalesce *intr_coal)
1088 {
1089 	struct lio *lio = GET_LIO(netdev);
1090 	struct octeon_device *oct = lio->oct_dev;
1091 	struct octeon_instr_queue *iq;
1092 	struct oct_intrmod_cfg *intrmod_cfg;
1093 
1094 	intrmod_cfg = &oct->intrmod;
1095 
1096 	switch (oct->chip_id) {
1097 	case OCTEON_CN23XX_PF_VID:
1098 		if (!intrmod_cfg->rx_enable) {
1099 			intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
1100 			intr_coal->rx_max_coalesced_frames =
1101 				intrmod_cfg->rx_frames;
1102 		}
1103 		if (!intrmod_cfg->tx_enable)
1104 			intr_coal->tx_max_coalesced_frames =
1105 				intrmod_cfg->tx_frames;
1106 		break;
1107 	case OCTEON_CN68XX:
1108 	case OCTEON_CN66XX: {
1109 		struct octeon_cn6xxx *cn6xxx =
1110 			(struct octeon_cn6xxx *)oct->chip;
1111 
1112 		if (!intrmod_cfg->rx_enable) {
1113 			intr_coal->rx_coalesce_usecs =
1114 				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
1115 			intr_coal->rx_max_coalesced_frames =
1116 				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
1117 		}
1118 		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
1119 		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
1120 		break;
1121 	}
1122 	default:
1123 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1124 		return -EINVAL;
1125 	}
1126 	if (intrmod_cfg->rx_enable) {
1127 		intr_coal->use_adaptive_rx_coalesce =
1128 			intrmod_cfg->rx_enable;
1129 		intr_coal->rate_sample_interval =
1130 			intrmod_cfg->check_intrvl;
1131 		intr_coal->pkt_rate_high =
1132 			intrmod_cfg->maxpkt_ratethr;
1133 		intr_coal->pkt_rate_low =
1134 			intrmod_cfg->minpkt_ratethr;
1135 		intr_coal->rx_max_coalesced_frames_high =
1136 			intrmod_cfg->rx_maxcnt_trigger;
1137 		intr_coal->rx_coalesce_usecs_high =
1138 			intrmod_cfg->rx_maxtmr_trigger;
1139 		intr_coal->rx_coalesce_usecs_low =
1140 			intrmod_cfg->rx_mintmr_trigger;
1141 		intr_coal->rx_max_coalesced_frames_low =
1142 		    intrmod_cfg->rx_mincnt_trigger;
1143 	}
1144 	if (OCTEON_CN23XX_PF(oct) &&
1145 	    (intrmod_cfg->tx_enable)) {
1146 		intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
1147 		intr_coal->tx_max_coalesced_frames_high =
1148 		    intrmod_cfg->tx_maxcnt_trigger;
1149 		intr_coal->tx_max_coalesced_frames_low =
1150 		    intrmod_cfg->tx_mincnt_trigger;
1151 	}
1152 	return 0;
1153 }
1154 
1155 /* Callback function for intrmod */
1156 static void octnet_intrmod_callback(struct octeon_device *oct_dev,
1157 				    u32 status,
1158 				    void *ptr)
1159 {
1160 	struct oct_intrmod_cmd *cmd = ptr;
1161 	struct octeon_soft_command *sc = cmd->sc;
1162 
1163 	oct_dev = cmd->oct_dev;
1164 
1165 	if (status)
1166 		dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
1167 			CVM_CAST64(status));
1168 	else
1169 		dev_info(&oct_dev->pci_dev->dev,
1170 			 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
1171 			 oct_dev->intrmod.rx_enable);
1172 
1173 	octeon_free_soft_command(oct_dev, sc);
1174 }
1175 
1176 /*  Configure interrupt moderation parameters */
1177 static int octnet_set_intrmod_cfg(struct lio *lio,
1178 				  struct oct_intrmod_cfg *intr_cfg)
1179 {
1180 	struct octeon_soft_command *sc;
1181 	struct oct_intrmod_cmd *cmd;
1182 	struct oct_intrmod_cfg *cfg;
1183 	int retval;
1184 	struct octeon_device *oct_dev = lio->oct_dev;
1185 
1186 	/* Alloc soft command */
1187 	sc = (struct octeon_soft_command *)
1188 		octeon_alloc_soft_command(oct_dev,
1189 					  sizeof(struct oct_intrmod_cfg),
1190 					  0,
1191 					  sizeof(struct oct_intrmod_cmd));
1192 
1193 	if (!sc)
1194 		return -ENOMEM;
1195 
1196 	cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
1197 	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
1198 
1199 	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
1200 	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
1201 	cmd->sc = sc;
1202 	cmd->cfg = cfg;
1203 	cmd->oct_dev = oct_dev;
1204 
1205 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1206 
1207 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1208 				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
1209 
1210 	sc->callback = octnet_intrmod_callback;
1211 	sc->callback_arg = cmd;
1212 	sc->wait_time = 1000;
1213 
1214 	retval = octeon_send_soft_command(oct_dev, sc);
1215 	if (retval == IQ_SEND_FAILED) {
1216 		octeon_free_soft_command(oct_dev, sc);
1217 		return -EINVAL;
1218 	}
1219 
1220 	return 0;
1221 }
1222 
/* Completion callback for the OPCODE_NIC_PORT_STATS soft command.
 * On success (no timeout and firmware reported resp->status == 0), it
 * byte-swaps the response payload and copies the RX/TX link-level and
 * firmware statistics into oct_dev->link_stats, then sets resp->status
 * to 1; on failure it sets resp->status to -1.  In both cases it wakes
 * the waiter in octnet_get_link_stats() via ctrl->complete.  The soft
 * command itself is freed by the caller, not here.
 */
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
	    (struct oct_nic_stats_resp *)sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl =
	    (struct oct_nic_stats_ctrl *)sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;

	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
		/* response is in the firmware's 64-bit word order */
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts      = rsp_rstats->runts;
		rstats->ctl_rcvd   = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err  = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err   = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err    = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed      */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed       */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed          */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times lRO of packet aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision*/
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collision*/
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		resp->status = -1;
	}
	complete(&ctrl->complete);
}
1318 
1319 /*  Configure interrupt moderation parameters */
1320 static int octnet_get_link_stats(struct net_device *netdev)
1321 {
1322 	struct lio *lio = GET_LIO(netdev);
1323 	struct octeon_device *oct_dev = lio->oct_dev;
1324 
1325 	struct octeon_soft_command *sc;
1326 	struct oct_nic_stats_ctrl *ctrl;
1327 	struct oct_nic_stats_resp *resp;
1328 
1329 	int retval;
1330 
1331 	/* Alloc soft command */
1332 	sc = (struct octeon_soft_command *)
1333 		octeon_alloc_soft_command(oct_dev,
1334 					  0,
1335 					  sizeof(struct oct_nic_stats_resp),
1336 					  sizeof(struct octnic_ctrl_pkt));
1337 
1338 	if (!sc)
1339 		return -ENOMEM;
1340 
1341 	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1342 	memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1343 
1344 	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1345 	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1346 	ctrl->netdev = netdev;
1347 	init_completion(&ctrl->complete);
1348 
1349 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1350 
1351 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1352 				    OPCODE_NIC_PORT_STATS, 0, 0, 0);
1353 
1354 	sc->callback = octnet_nic_stats_callback;
1355 	sc->callback_arg = sc;
1356 	sc->wait_time = 500;	/*in milli seconds*/
1357 
1358 	retval = octeon_send_soft_command(oct_dev, sc);
1359 	if (retval == IQ_SEND_FAILED) {
1360 		octeon_free_soft_command(oct_dev, sc);
1361 		return -EINVAL;
1362 	}
1363 
1364 	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1365 
1366 	if (resp->status != 1) {
1367 		octeon_free_soft_command(oct_dev, sc);
1368 
1369 		return -EINVAL;
1370 	}
1371 
1372 	octeon_free_soft_command(oct_dev, sc);
1373 
1374 	return 0;
1375 }
1376 
1377 /* Enable/Disable auto interrupt Moderation */
1378 static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
1379 				 *intr_coal)
1380 {
1381 	int ret = 0;
1382 	struct octeon_device *oct = lio->oct_dev;
1383 	struct oct_intrmod_cfg *intrmod_cfg;
1384 
1385 	intrmod_cfg = &oct->intrmod;
1386 
1387 	if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
1388 		if (intr_coal->rate_sample_interval)
1389 			intrmod_cfg->check_intrvl =
1390 				intr_coal->rate_sample_interval;
1391 		else
1392 			intrmod_cfg->check_intrvl =
1393 				LIO_INTRMOD_CHECK_INTERVAL;
1394 
1395 		if (intr_coal->pkt_rate_high)
1396 			intrmod_cfg->maxpkt_ratethr =
1397 				intr_coal->pkt_rate_high;
1398 		else
1399 			intrmod_cfg->maxpkt_ratethr =
1400 				LIO_INTRMOD_MAXPKT_RATETHR;
1401 
1402 		if (intr_coal->pkt_rate_low)
1403 			intrmod_cfg->minpkt_ratethr =
1404 				intr_coal->pkt_rate_low;
1405 		else
1406 			intrmod_cfg->minpkt_ratethr =
1407 				LIO_INTRMOD_MINPKT_RATETHR;
1408 	}
1409 	if (oct->intrmod.rx_enable) {
1410 		if (intr_coal->rx_max_coalesced_frames_high)
1411 			intrmod_cfg->rx_maxcnt_trigger =
1412 				intr_coal->rx_max_coalesced_frames_high;
1413 		else
1414 			intrmod_cfg->rx_maxcnt_trigger =
1415 				LIO_INTRMOD_RXMAXCNT_TRIGGER;
1416 
1417 		if (intr_coal->rx_coalesce_usecs_high)
1418 			intrmod_cfg->rx_maxtmr_trigger =
1419 				intr_coal->rx_coalesce_usecs_high;
1420 		else
1421 			intrmod_cfg->rx_maxtmr_trigger =
1422 				LIO_INTRMOD_RXMAXTMR_TRIGGER;
1423 
1424 		if (intr_coal->rx_coalesce_usecs_low)
1425 			intrmod_cfg->rx_mintmr_trigger =
1426 				intr_coal->rx_coalesce_usecs_low;
1427 		else
1428 			intrmod_cfg->rx_mintmr_trigger =
1429 				LIO_INTRMOD_RXMINTMR_TRIGGER;
1430 
1431 		if (intr_coal->rx_max_coalesced_frames_low)
1432 			intrmod_cfg->rx_mincnt_trigger =
1433 				intr_coal->rx_max_coalesced_frames_low;
1434 		else
1435 			intrmod_cfg->rx_mincnt_trigger =
1436 				LIO_INTRMOD_RXMINCNT_TRIGGER;
1437 	}
1438 	if (oct->intrmod.tx_enable) {
1439 		if (intr_coal->tx_max_coalesced_frames_high)
1440 			intrmod_cfg->tx_maxcnt_trigger =
1441 				intr_coal->tx_max_coalesced_frames_high;
1442 		else
1443 			intrmod_cfg->tx_maxcnt_trigger =
1444 				LIO_INTRMOD_TXMAXCNT_TRIGGER;
1445 		if (intr_coal->tx_max_coalesced_frames_low)
1446 			intrmod_cfg->tx_mincnt_trigger =
1447 				intr_coal->tx_max_coalesced_frames_low;
1448 		else
1449 			intrmod_cfg->tx_mincnt_trigger =
1450 				LIO_INTRMOD_TXMINCNT_TRIGGER;
1451 	}
1452 
1453 	ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
1454 
1455 	return ret;
1456 }
1457 
1458 static int
1459 oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
1460 {
1461 	struct octeon_device *oct = lio->oct_dev;
1462 	u32 rx_max_coalesced_frames;
1463 
1464 	/* Config Cnt based interrupt values */
1465 	switch (oct->chip_id) {
1466 	case OCTEON_CN68XX:
1467 	case OCTEON_CN66XX: {
1468 		struct octeon_cn6xxx *cn6xxx =
1469 			(struct octeon_cn6xxx *)oct->chip;
1470 
1471 		if (!intr_coal->rx_max_coalesced_frames)
1472 			rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1473 		else
1474 			rx_max_coalesced_frames =
1475 				intr_coal->rx_max_coalesced_frames;
1476 		octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1477 				 rx_max_coalesced_frames);
1478 		CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1479 		break;
1480 	}
1481 	case OCTEON_CN23XX_PF_VID: {
1482 		int q_no;
1483 
1484 		if (!intr_coal->rx_max_coalesced_frames)
1485 			rx_max_coalesced_frames = oct->intrmod.rx_frames;
1486 		else
1487 			rx_max_coalesced_frames =
1488 			    intr_coal->rx_max_coalesced_frames;
1489 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1490 			q_no += oct->sriov_info.pf_srn;
1491 			octeon_write_csr64(
1492 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
1493 			    (octeon_read_csr64(
1494 				 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
1495 			     (0x3fffff00000000UL)) |
1496 				rx_max_coalesced_frames);
1497 			/*consider setting resend bit*/
1498 		}
1499 		oct->intrmod.rx_frames = rx_max_coalesced_frames;
1500 		break;
1501 	}
1502 	default:
1503 		return -EINVAL;
1504 	}
1505 	return 0;
1506 }
1507 
1508 static int oct_cfg_rx_intrtime(struct lio *lio,
1509 			       struct ethtool_coalesce *intr_coal)
1510 {
1511 	struct octeon_device *oct = lio->oct_dev;
1512 	u32 time_threshold, rx_coalesce_usecs;
1513 
1514 	/* Config Time based interrupt values */
1515 	switch (oct->chip_id) {
1516 	case OCTEON_CN68XX:
1517 	case OCTEON_CN66XX: {
1518 		struct octeon_cn6xxx *cn6xxx =
1519 			(struct octeon_cn6xxx *)oct->chip;
1520 		if (!intr_coal->rx_coalesce_usecs)
1521 			rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1522 		else
1523 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1524 
1525 		time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1526 							 rx_coalesce_usecs);
1527 		octeon_write_csr(oct,
1528 				 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1529 				 time_threshold);
1530 
1531 		CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1532 		break;
1533 	}
1534 	case OCTEON_CN23XX_PF_VID: {
1535 		u64 time_threshold;
1536 		int q_no;
1537 
1538 		if (!intr_coal->rx_coalesce_usecs)
1539 			rx_coalesce_usecs = oct->intrmod.rx_usecs;
1540 		else
1541 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1542 		time_threshold =
1543 		    cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
1544 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1545 			q_no += oct->sriov_info.pf_srn;
1546 			octeon_write_csr64(oct,
1547 					   CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
1548 					   (oct->intrmod.rx_frames |
1549 					    (time_threshold << 32)));
1550 			/*consider writing to resend bit here*/
1551 		}
1552 		oct->intrmod.rx_usecs = rx_coalesce_usecs;
1553 		break;
1554 	}
1555 	default:
1556 		return -EINVAL;
1557 	}
1558 
1559 	return 0;
1560 }
1561 
1562 static int
1563 oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
1564 		   __attribute__((unused)))
1565 {
1566 	struct octeon_device *oct = lio->oct_dev;
1567 	u32 iq_intr_pkt;
1568 	void __iomem *inst_cnt_reg;
1569 	u64 val;
1570 
1571 	/* Config Cnt based interrupt values */
1572 	switch (oct->chip_id) {
1573 	case OCTEON_CN68XX:
1574 	case OCTEON_CN66XX:
1575 		break;
1576 	case OCTEON_CN23XX_PF_VID: {
1577 		int q_no;
1578 
1579 		if (!intr_coal->tx_max_coalesced_frames)
1580 			iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
1581 				      CN23XX_PKT_IN_DONE_WMARK_MASK;
1582 		else
1583 			iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
1584 				      CN23XX_PKT_IN_DONE_WMARK_MASK;
1585 		for (q_no = 0; q_no < oct->num_iqs; q_no++) {
1586 			inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
1587 			val = readq(inst_cnt_reg);
1588 			/*clear wmark and count.dont want to write count back*/
1589 			val = (val & 0xFFFF000000000000ULL) |
1590 			      ((u64)iq_intr_pkt
1591 			       << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
1592 			writeq(val, inst_cnt_reg);
1593 			/*consider setting resend bit*/
1594 		}
1595 		oct->intrmod.tx_frames = iq_intr_pkt;
1596 		break;
1597 	}
1598 	default:
1599 		return -EINVAL;
1600 	}
1601 	return 0;
1602 }
1603 
1604 static int lio_set_intr_coalesce(struct net_device *netdev,
1605 				 struct ethtool_coalesce *intr_coal)
1606 {
1607 	struct lio *lio = GET_LIO(netdev);
1608 	int ret;
1609 	struct octeon_device *oct = lio->oct_dev;
1610 	u32 j, q_no;
1611 	int db_max, db_min;
1612 
1613 	switch (oct->chip_id) {
1614 	case OCTEON_CN68XX:
1615 	case OCTEON_CN66XX:
1616 		db_min = CN6XXX_DB_MIN;
1617 		db_max = CN6XXX_DB_MAX;
1618 		if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1619 		    (intr_coal->tx_max_coalesced_frames <= db_max)) {
1620 			for (j = 0; j < lio->linfo.num_txpciq; j++) {
1621 				q_no = lio->linfo.txpciq[j].s.q_no;
1622 				oct->instr_queue[q_no]->fill_threshold =
1623 					intr_coal->tx_max_coalesced_frames;
1624 			}
1625 		} else {
1626 			dev_err(&oct->pci_dev->dev,
1627 				"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1628 				intr_coal->tx_max_coalesced_frames, db_min,
1629 				db_max);
1630 			return -EINVAL;
1631 		}
1632 		break;
1633 	case OCTEON_CN23XX_PF_VID:
1634 		break;
1635 	default:
1636 		return -EINVAL;
1637 	}
1638 
1639 	oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1640 	oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
1641 
1642 	ret = oct_cfg_adaptive_intr(lio, intr_coal);
1643 
1644 	if (!intr_coal->use_adaptive_rx_coalesce) {
1645 		ret = oct_cfg_rx_intrtime(lio, intr_coal);
1646 		if (ret)
1647 			goto ret_intrmod;
1648 
1649 		ret = oct_cfg_rx_intrcnt(lio, intr_coal);
1650 		if (ret)
1651 			goto ret_intrmod;
1652 	}
1653 	if (!intr_coal->use_adaptive_tx_coalesce) {
1654 		ret = oct_cfg_tx_intrcnt(lio, intr_coal);
1655 		if (ret)
1656 			goto ret_intrmod;
1657 	}
1658 
1659 	return 0;
1660 ret_intrmod:
1661 	return ret;
1662 }
1663 
1664 static int lio_get_ts_info(struct net_device *netdev,
1665 			   struct ethtool_ts_info *info)
1666 {
1667 	struct lio *lio = GET_LIO(netdev);
1668 
1669 	info->so_timestamping =
1670 #ifdef PTP_HARDWARE_TIMESTAMPING
1671 		SOF_TIMESTAMPING_TX_HARDWARE |
1672 		SOF_TIMESTAMPING_RX_HARDWARE |
1673 		SOF_TIMESTAMPING_RAW_HARDWARE |
1674 		SOF_TIMESTAMPING_TX_SOFTWARE |
1675 #endif
1676 		SOF_TIMESTAMPING_RX_SOFTWARE |
1677 		SOF_TIMESTAMPING_SOFTWARE;
1678 
1679 	if (lio->ptp_clock)
1680 		info->phc_index = ptp_clock_index(lio->ptp_clock);
1681 	else
1682 		info->phc_index = -1;
1683 
1684 #ifdef PTP_HARDWARE_TIMESTAMPING
1685 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1686 
1687 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1688 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1689 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1690 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
1691 #endif
1692 
1693 	return 0;
1694 }
1695 
1696 static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
1697 {
1698 	struct lio *lio = GET_LIO(netdev);
1699 	struct octeon_device *oct = lio->oct_dev;
1700 	struct oct_link_info *linfo;
1701 	struct octnic_ctrl_pkt nctrl;
1702 	int ret = 0;
1703 
1704 	/* get the link info */
1705 	linfo = &lio->linfo;
1706 
1707 	if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
1708 		return -EINVAL;
1709 
1710 	if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
1711 						  ecmd->speed != SPEED_10) ||
1712 						 (ecmd->duplex != DUPLEX_HALF &&
1713 						  ecmd->duplex != DUPLEX_FULL)))
1714 		return -EINVAL;
1715 
1716 	/* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
1717 	 * as they operate at fixed Speed and Duplex settings
1718 	 */
1719 	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
1720 	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
1721 	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
1722 		dev_info(&oct->pci_dev->dev,
1723 			 "Autonegotiation, duplex and speed settings cannot be modified.\n");
1724 		return -EINVAL;
1725 	}
1726 
1727 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1728 
1729 	nctrl.ncmd.u64 = 0;
1730 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
1731 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1732 	nctrl.wait_time = 1000;
1733 	nctrl.netpndev = (u64)netdev;
1734 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1735 
1736 	/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
1737 	 * to SE core application using ncmd.s.more & ncmd.s.param
1738 	 */
1739 	if (ecmd->autoneg == AUTONEG_ENABLE) {
1740 		/* Autoneg ON */
1741 		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
1742 				     OCTNIC_NCMD_AUTONEG_ON;
1743 		nctrl.ncmd.s.param1 = ecmd->advertising;
1744 	} else {
1745 		/* Autoneg OFF */
1746 		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
1747 
1748 		nctrl.ncmd.s.param2 = ecmd->duplex;
1749 
1750 		nctrl.ncmd.s.param1 = ecmd->speed;
1751 	}
1752 
1753 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1754 	if (ret < 0) {
1755 		dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
1756 		return -1;
1757 	}
1758 
1759 	return 0;
1760 }
1761 
1762 static int lio_nway_reset(struct net_device *netdev)
1763 {
1764 	if (netif_running(netdev)) {
1765 		struct ethtool_cmd ecmd;
1766 
1767 		memset(&ecmd, 0, sizeof(struct ethtool_cmd));
1768 		ecmd.autoneg = 0;
1769 		ecmd.speed = 0;
1770 		ecmd.duplex = 0;
1771 		lio_set_settings(netdev, &ecmd);
1772 	}
1773 	return 0;
1774 }
1775 
1776 /* Return register dump len. */
1777 static int lio_get_regs_len(struct net_device *dev)
1778 {
1779 	struct lio *lio = GET_LIO(dev);
1780 	struct octeon_device *oct = lio->oct_dev;
1781 
1782 	switch (oct->chip_id) {
1783 	case OCTEON_CN23XX_PF_VID:
1784 		return OCT_ETHTOOL_REGDUMP_LEN_23XX;
1785 	default:
1786 		return OCT_ETHTOOL_REGDUMP_LEN;
1787 	}
1788 }
1789 
1790 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
1791 {
1792 	u32 reg;
1793 	u8 pf_num = oct->pf_num;
1794 	int len = 0;
1795 	int i;
1796 
1797 	/* PCI  Window Registers */
1798 
1799 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
1800 
1801 	/*0x29030 or 0x29040*/
1802 	reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
1803 	len += sprintf(s + len,
1804 		       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
1805 		       reg, oct->pcie_port, oct->pf_num,
1806 		       (u64)octeon_read_csr64(oct, reg));
1807 
1808 	/*0x27080 or 0x27090*/
1809 	reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
1810 	len +=
1811 	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
1812 		    reg, oct->pcie_port, oct->pf_num,
1813 		    (u64)octeon_read_csr64(oct, reg));
1814 
1815 	/*0x27000 or 0x27010*/
1816 	reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
1817 	len +=
1818 	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
1819 		    reg, oct->pcie_port, oct->pf_num,
1820 		    (u64)octeon_read_csr64(oct, reg));
1821 
1822 	/*0x29120*/
1823 	reg = 0x29120;
1824 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
1825 		       (u64)octeon_read_csr64(oct, reg));
1826 
1827 	/*0x27300*/
1828 	reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
1829 	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
1830 	len += sprintf(
1831 	    s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
1832 	    oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
1833 
1834 	/*0x27200*/
1835 	reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
1836 	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
1837 	len += sprintf(s + len,
1838 		       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
1839 		       reg, oct->pcie_port, oct->pf_num,
1840 		       (u64)octeon_read_csr64(oct, reg));
1841 
1842 	/*29130*/
1843 	reg = CN23XX_SLI_PKT_CNT_INT;
1844 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
1845 		       (u64)octeon_read_csr64(oct, reg));
1846 
1847 	/*0x29140*/
1848 	reg = CN23XX_SLI_PKT_TIME_INT;
1849 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
1850 		       (u64)octeon_read_csr64(oct, reg));
1851 
1852 	/*0x29160*/
1853 	reg = 0x29160;
1854 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
1855 		       (u64)octeon_read_csr64(oct, reg));
1856 
1857 	/*0x29180*/
1858 	reg = CN23XX_SLI_OQ_WMARK;
1859 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
1860 		       reg, (u64)octeon_read_csr64(oct, reg));
1861 
1862 	/*0x291E0*/
1863 	reg = CN23XX_SLI_PKT_IOQ_RING_RST;
1864 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
1865 		       (u64)octeon_read_csr64(oct, reg));
1866 
1867 	/*0x29210*/
1868 	reg = CN23XX_SLI_GBL_CONTROL;
1869 	len += sprintf(s + len,
1870 		       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
1871 		       (u64)octeon_read_csr64(oct, reg));
1872 
1873 	/*0x29220*/
1874 	reg = 0x29220;
1875 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
1876 		       reg, (u64)octeon_read_csr64(oct, reg));
1877 
1878 	/*PF only*/
1879 	if (pf_num == 0) {
1880 		/*0x29260*/
1881 		reg = CN23XX_SLI_OUT_BP_EN_W1S;
1882 		len += sprintf(s + len,
1883 			       "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
1884 			       reg, (u64)octeon_read_csr64(oct, reg));
1885 	} else if (pf_num == 1) {
1886 		/*0x29270*/
1887 		reg = CN23XX_SLI_OUT_BP_EN2_W1S;
1888 		len += sprintf(s + len,
1889 			       "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
1890 			       reg, (u64)octeon_read_csr64(oct, reg));
1891 	}
1892 
1893 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1894 		reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
1895 		len +=
1896 		    sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
1897 			    reg, i, (u64)octeon_read_csr64(oct, reg));
1898 	}
1899 
1900 	/*0x10040*/
1901 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
1902 		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
1903 		len += sprintf(s + len,
1904 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
1905 			       reg, i, (u64)octeon_read_csr64(oct, reg));
1906 	}
1907 
1908 	/*0x10080*/
1909 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1910 		reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
1911 		len += sprintf(s + len,
1912 			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
1913 			       reg, i, (u64)octeon_read_csr64(oct, reg));
1914 	}
1915 
1916 	/*0x10090*/
1917 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1918 		reg = CN23XX_SLI_OQ_SIZE(i);
1919 		len += sprintf(
1920 		    s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
1921 		    reg, i, (u64)octeon_read_csr64(oct, reg));
1922 	}
1923 
1924 	/*0x10050*/
1925 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1926 		reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
1927 		len += sprintf(
1928 			s + len,
1929 			"\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
1930 			reg, i, (u64)octeon_read_csr64(oct, reg));
1931 	}
1932 
1933 	/*0x10070*/
1934 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1935 		reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
1936 		len += sprintf(s + len,
1937 			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
1938 			       reg, i, (u64)octeon_read_csr64(oct, reg));
1939 	}
1940 
1941 	/*0x100a0*/
1942 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1943 		reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
1944 		len += sprintf(s + len,
1945 			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
1946 			       reg, i, (u64)octeon_read_csr64(oct, reg));
1947 	}
1948 
1949 	/*0x100b0*/
1950 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1951 		reg = CN23XX_SLI_OQ_PKTS_SENT(i);
1952 		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
1953 			       reg, i, (u64)octeon_read_csr64(oct, reg));
1954 	}
1955 
1956 	/*0x100c0*/
1957 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
1958 		reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
1959 		len += sprintf(s + len,
1960 			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
1961 			       reg, i, (u64)octeon_read_csr64(oct, reg));
1962 
1963 		/*0x10000*/
1964 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
1965 			reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
1966 			len += sprintf(
1967 				s + len,
1968 				"\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
1969 				reg, i, (u64)octeon_read_csr64(oct, reg));
1970 		}
1971 
1972 		/*0x10010*/
1973 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
1974 			reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
1975 			len += sprintf(
1976 			    s + len,
1977 			    "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
1978 			    i, (u64)octeon_read_csr64(oct, reg));
1979 		}
1980 
1981 		/*0x10020*/
1982 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
1983 			reg = CN23XX_SLI_IQ_DOORBELL(i);
1984 			len += sprintf(
1985 			    s + len,
1986 			    "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
1987 			    reg, i, (u64)octeon_read_csr64(oct, reg));
1988 		}
1989 
1990 		/*0x10030*/
1991 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
1992 			reg = CN23XX_SLI_IQ_SIZE(i);
1993 			len += sprintf(
1994 			    s + len,
1995 			    "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
1996 			    reg, i, (u64)octeon_read_csr64(oct, reg));
1997 		}
1998 
1999 		/*0x10040*/
2000 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
2001 			reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2002 		len += sprintf(s + len,
2003 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2004 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2005 	}
2006 
2007 	return len;
2008 }
2009 
2010 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2011 {
2012 	u32 reg;
2013 	int i, len = 0;
2014 
2015 	/* PCI  Window Registers */
2016 
2017 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2018 	reg = CN6XXX_WIN_WR_ADDR_LO;
2019 	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2020 		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2021 	reg = CN6XXX_WIN_WR_ADDR_HI;
2022 	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2023 		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2024 	reg = CN6XXX_WIN_RD_ADDR_LO;
2025 	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2026 		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2027 	reg = CN6XXX_WIN_RD_ADDR_HI;
2028 	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2029 		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2030 	reg = CN6XXX_WIN_WR_DATA_LO;
2031 	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2032 		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2033 	reg = CN6XXX_WIN_WR_DATA_HI;
2034 	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2035 		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2036 	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2037 		       CN6XXX_WIN_WR_MASK_REG,
2038 		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2039 
2040 	/* PCI  Interrupt Register */
2041 	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2042 		       CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2043 						CN6XXX_SLI_INT_ENB64_PORT0));
2044 	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2045 		       CN6XXX_SLI_INT_ENB64_PORT1,
2046 		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2047 	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2048 		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2049 
2050 	/* PCI  Output queue registers */
2051 	for (i = 0; i < oct->num_oqs; i++) {
2052 		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2053 		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2054 			       reg, i, octeon_read_csr(oct, reg));
2055 		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2056 		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2057 			       reg, i, octeon_read_csr(oct, reg));
2058 	}
2059 	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2060 	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2061 		       reg, octeon_read_csr(oct, reg));
2062 	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2063 	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2064 		       reg, octeon_read_csr(oct, reg));
2065 
2066 	/* PCI  Input queue registers */
2067 	for (i = 0; i <= 3; i++) {
2068 		u32 reg;
2069 
2070 		reg = CN6XXX_SLI_IQ_DOORBELL(i);
2071 		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2072 			       reg, i, octeon_read_csr(oct, reg));
2073 		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2074 		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2075 			       reg, i, octeon_read_csr(oct, reg));
2076 	}
2077 
2078 	/* PCI  DMA registers */
2079 
2080 	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2081 		       CN6XXX_DMA_CNT(0),
2082 		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2083 	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2084 	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2085 		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2086 	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2087 	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2088 		       CN6XXX_DMA_TIME_INT_LEVEL(0),
2089 		       octeon_read_csr(oct, reg));
2090 
2091 	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2092 		       CN6XXX_DMA_CNT(1),
2093 		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2094 	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2095 	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2096 		       CN6XXX_DMA_PKT_INT_LEVEL(1),
2097 		       octeon_read_csr(oct, reg));
2098 	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2099 	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2100 		       CN6XXX_DMA_TIME_INT_LEVEL(1),
2101 		       octeon_read_csr(oct, reg));
2102 
2103 	/* PCI  Index registers */
2104 
2105 	len += sprintf(s + len, "\n");
2106 
2107 	for (i = 0; i < 16; i++) {
2108 		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2109 		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2110 			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2111 	}
2112 
2113 	return len;
2114 }
2115 
2116 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2117 {
2118 	u32 val;
2119 	int i, len = 0;
2120 
2121 	/* PCI CONFIG Registers */
2122 
2123 	len += sprintf(s + len,
2124 		       "\n\t Octeon Config space Registers\n\n");
2125 
2126 	for (i = 0; i <= 13; i++) {
2127 		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2128 		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2129 			       (i * 4), i, val);
2130 	}
2131 
2132 	for (i = 30; i <= 34; i++) {
2133 		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2134 		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2135 			       (i * 4), i, val);
2136 	}
2137 
2138 	return len;
2139 }
2140 
2141 /*  Return register dump user app.  */
2142 static void lio_get_regs(struct net_device *dev,
2143 			 struct ethtool_regs *regs, void *regbuf)
2144 {
2145 	struct lio *lio = GET_LIO(dev);
2146 	int len = 0;
2147 	struct octeon_device *oct = lio->oct_dev;
2148 
2149 	regs->version = OCT_ETHTOOL_REGSVER;
2150 
2151 	switch (oct->chip_id) {
2152 	case OCTEON_CN23XX_PF_VID:
2153 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
2154 		len += cn23xx_read_csr_reg(regbuf + len, oct);
2155 		break;
2156 	case OCTEON_CN68XX:
2157 	case OCTEON_CN66XX:
2158 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
2159 		len += cn6xxx_read_csr_reg(regbuf + len, oct);
2160 		len += cn6xxx_read_config_reg(regbuf + len, oct);
2161 		break;
2162 	default:
2163 		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
2164 			__func__, oct->chip_id);
2165 	}
2166 }
2167 
2168 static u32 lio_get_priv_flags(struct net_device *netdev)
2169 {
2170 	struct lio *lio = GET_LIO(netdev);
2171 
2172 	return lio->oct_dev->priv_flags;
2173 }
2174 
2175 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
2176 {
2177 	struct lio *lio = GET_LIO(netdev);
2178 	bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
2179 
2180 	lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
2181 			  intr_by_tx_bytes);
2182 	return 0;
2183 }
2184 
/* ethtool callback table for LiquidIO netdevs; all handlers are static
 * functions defined in this file.
 */
static const struct ethtool_ops lio_ethtool_ops = {
	.get_settings		= lio_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= lio_get_drvinfo,
	.get_ringparam		= lio_ethtool_get_ringparam,
	.get_channels		= lio_ethtool_get_channels,
	.set_phys_id		= lio_set_phys_id,
	.get_eeprom_len		= lio_get_eeprom_len,
	.get_eeprom		= lio_get_eeprom,
	.get_strings		= lio_get_strings,
	.get_ethtool_stats	= lio_get_ethtool_stats,
	.get_pauseparam		= lio_get_pauseparam,
	.set_pauseparam		= lio_set_pauseparam,
	.get_regs_len		= lio_get_regs_len,
	.get_regs		= lio_get_regs,
	.get_msglevel		= lio_get_msglevel,
	.set_msglevel		= lio_set_msglevel,
	.get_sset_count		= lio_get_sset_count,
	.nway_reset		= lio_nway_reset,
	.set_settings		= lio_set_settings,
	.get_coalesce		= lio_get_intr_coalesce,
	.set_coalesce		= lio_set_intr_coalesce,
	.get_priv_flags		= lio_get_priv_flags,
	.set_priv_flags		= lio_set_priv_flags,
	.get_ts_info		= lio_get_ts_info,
};
2211 
/* Install the driver's ethtool_ops table on @netdev. */
void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &lio_ethtool_ops;
}
2216