1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/netdevice.h>
19 #include <linux/net_tstamp.h>
20 #include <linux/pci.h>
21 #include "liquidio_common.h"
22 #include "octeon_droq.h"
23 #include "octeon_iq.h"
24 #include "response_manager.h"
25 #include "octeon_device.h"
26 #include "octeon_nic.h"
27 #include "octeon_main.h"
28 #include "octeon_network.h"
29 #include "cn66xx_regs.h"
30 #include "cn66xx_device.h"
31 #include "cn23xx_pf_device.h"
32 #include "cn23xx_vf_device.h"
33 
34 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
35 static int octnet_get_link_stats(struct net_device *netdev);
36 
37 struct oct_intrmod_context {
38 	int octeon_id;
39 	wait_queue_head_t wc;
40 	int cond;
41 	int status;
42 };
43 
44 struct oct_intrmod_resp {
45 	u64     rh;
46 	struct oct_intrmod_cfg intrmod;
47 	u64     status;
48 };
49 
50 struct oct_mdio_cmd_context {
51 	int octeon_id;
52 	wait_queue_head_t wc;
53 	int cond;
54 };
55 
56 struct oct_mdio_cmd_resp {
57 	u64 rh;
58 	struct oct_mdio_cmd resp;
59 	u64 status;
60 };
61 
62 #define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
63 
64 /* Octeon's interface mode of operation */
65 enum {
66 	INTERFACE_MODE_DISABLED,
67 	INTERFACE_MODE_RGMII,
68 	INTERFACE_MODE_GMII,
69 	INTERFACE_MODE_SPI,
70 	INTERFACE_MODE_PCIE,
71 	INTERFACE_MODE_XAUI,
72 	INTERFACE_MODE_SGMII,
73 	INTERFACE_MODE_PICMG,
74 	INTERFACE_MODE_NPI,
75 	INTERFACE_MODE_LOOP,
76 	INTERFACE_MODE_SRIO,
77 	INTERFACE_MODE_ILK,
78 	INTERFACE_MODE_RXAUI,
79 	INTERFACE_MODE_QSGMII,
80 	INTERFACE_MODE_AGL,
81 	INTERFACE_MODE_XLAUI,
82 	INTERFACE_MODE_XFI,
83 	INTERFACE_MODE_10G_KR,
84 	INTERFACE_MODE_40G_KR4,
85 	INTERFACE_MODE_MIXED,
86 };
87 
88 #define OCT_ETHTOOL_REGDUMP_LEN  4096
89 #define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
90 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF  (4096 * 2)
91 #define OCT_ETHTOOL_REGSVER  1
92 
93 /* statistics of PF */
94 static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
95 	"rx_packets",
96 	"tx_packets",
97 	"rx_bytes",
98 	"tx_bytes",
99 	"rx_errors",	/*jabber_err+l2_err+frame_err */
100 	"tx_errors",	/*fw_err_pko+fw_err_link+fw_err_drop */
101 	"rx_dropped",   /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
102 			 *st->fromwire.dmac_drop + st->fromwire.fw_err_drop
103 			 */
104 	"tx_dropped",
105 
106 	"tx_total_sent",
107 	"tx_total_fwd",
108 	"tx_err_pko",
109 	"tx_err_pki",
110 	"tx_err_link",
111 	"tx_err_drop",
112 
113 	"tx_tso",
114 	"tx_tso_packets",
115 	"tx_tso_err",
116 	"tx_vxlan",
117 
118 	"mac_tx_total_pkts",
119 	"mac_tx_total_bytes",
120 	"mac_tx_mcast_pkts",
121 	"mac_tx_bcast_pkts",
122 	"mac_tx_ctl_packets",	/*oct->link_stats.fromhost.ctl_sent */
123 	"mac_tx_total_collisions",
124 	"mac_tx_one_collision",
125 	"mac_tx_multi_collison",
126 	"mac_tx_max_collision_fail",
127 	"mac_tx_max_deferal_fail",
128 	"mac_tx_fifo_err",
129 	"mac_tx_runts",
130 
131 	"rx_total_rcvd",
132 	"rx_total_fwd",
133 	"rx_jabber_err",
134 	"rx_l2_err",
135 	"rx_frame_err",
136 	"rx_err_pko",
137 	"rx_err_link",
138 	"rx_err_drop",
139 
140 	"rx_vxlan",
141 	"rx_vxlan_err",
142 
143 	"rx_lro_pkts",
144 	"rx_lro_bytes",
145 	"rx_total_lro",
146 
147 	"rx_lro_aborts",
148 	"rx_lro_aborts_port",
149 	"rx_lro_aborts_seq",
150 	"rx_lro_aborts_tsval",
151 	"rx_lro_aborts_timer",
152 	"rx_fwd_rate",
153 
154 	"mac_rx_total_rcvd",
155 	"mac_rx_bytes",
156 	"mac_rx_total_bcst",
157 	"mac_rx_total_mcst",
158 	"mac_rx_runts",
159 	"mac_rx_ctl_packets",
160 	"mac_rx_fifo_err",
161 	"mac_rx_dma_drop",
162 	"mac_rx_fcs_err",
163 
164 	"link_state_changes",
165 };
166 
167 /* statistics of VF */
168 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
169 	"rx_packets",
170 	"tx_packets",
171 	"rx_bytes",
172 	"tx_bytes",
173 	"rx_errors", /* jabber_err + l2_err+frame_err */
174 	"tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */
175 	"rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
176 	"tx_dropped",
177 	"link_state_changes",
178 };
179 
180 /* statistics of host tx queue */
181 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
182 	"packets",		/*oct->instr_queue[iq_no]->stats.tx_done*/
183 	"bytes",		/*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
184 	"dropped",
185 	"iq_busy",
186 	"sgentry_sent",
187 
188 	"fw_instr_posted",
189 	"fw_instr_processed",
190 	"fw_instr_dropped",
191 	"fw_bytes_sent",
192 
193 	"tso",
194 	"vxlan",
195 	"txq_restart",
196 };
197 
198 /* statistics of host rx queue */
199 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
200 	"packets",		/*oct->droq[oq_no]->stats.rx_pkts_received */
201 	"bytes",		/*oct->droq[oq_no]->stats.rx_bytes_received */
202 	"dropped",		/*oct->droq[oq_no]->stats.rx_dropped+
203 				 *oct->droq[oq_no]->stats.dropped_nodispatch+
204 				 *oct->droq[oq_no]->stats.dropped_toomany+
205 				 *oct->droq[oq_no]->stats.dropped_nomem
206 				 */
207 	"dropped_nomem",
208 	"dropped_toomany",
209 	"fw_dropped",
210 	"fw_pkts_received",
211 	"fw_bytes_received",
212 	"fw_dropped_nodispatch",
213 
214 	"vxlan",
215 	"buffer_alloc_failure",
216 };
217 
218 /* LiquidIO driver private flags */
219 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
220 };
221 
222 #define OCTNIC_NCMD_AUTONEG_ON  0x1
223 #define OCTNIC_NCMD_PHY_ON      0x2
224 
225 static int lio_get_link_ksettings(struct net_device *netdev,
226 				  struct ethtool_link_ksettings *ecmd)
227 {
228 	struct lio *lio = GET_LIO(netdev);
229 	struct octeon_device *oct = lio->oct_dev;
230 	struct oct_link_info *linfo;
231 	u32 supported = 0, advertising = 0;
232 
233 	linfo = &lio->linfo;
234 
235 	switch (linfo->link.s.phy_type) {
236 	case LIO_PHY_PORT_TP:
237 		ecmd->base.port = PORT_TP;
238 		supported = (SUPPORTED_10000baseT_Full |
239 			     SUPPORTED_TP | SUPPORTED_Pause);
240 		advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
241 		ecmd->base.autoneg = AUTONEG_DISABLE;
242 		break;
243 
244 	case LIO_PHY_PORT_FIBRE:
245 		ecmd->base.port = PORT_FIBRE;
246 
247 		if (linfo->link.s.speed == SPEED_10000) {
248 			supported = SUPPORTED_10000baseT_Full;
249 			advertising = ADVERTISED_10000baseT_Full;
250 		}
251 
252 		supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
253 		advertising |= ADVERTISED_Pause;
254 		ecmd->base.autoneg = AUTONEG_DISABLE;
255 		break;
256 	}
257 
258 	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
259 	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
260 	    linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
261 	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
262 		ethtool_convert_legacy_u32_to_link_mode(
263 			ecmd->link_modes.supported, supported);
264 		ethtool_convert_legacy_u32_to_link_mode(
265 			ecmd->link_modes.advertising, advertising);
266 	} else {
267 		dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
268 			linfo->link.s.if_mode);
269 	}
270 
271 	if (linfo->link.s.link_up) {
272 		ecmd->base.speed = linfo->link.s.speed;
273 		ecmd->base.duplex = linfo->link.s.duplex;
274 	} else {
275 		ecmd->base.speed = SPEED_UNKNOWN;
276 		ecmd->base.duplex = DUPLEX_UNKNOWN;
277 	}
278 
279 	return 0;
280 }
281 
282 static void
283 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
284 {
285 	struct lio *lio;
286 	struct octeon_device *oct;
287 
288 	lio = GET_LIO(netdev);
289 	oct = lio->oct_dev;
290 
291 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
292 	strcpy(drvinfo->driver, "liquidio");
293 	strcpy(drvinfo->version, LIQUIDIO_VERSION);
294 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
295 		ETHTOOL_FWVERS_LEN);
296 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
297 }
298 
299 static void
300 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
301 {
302 	struct octeon_device *oct;
303 	struct lio *lio;
304 
305 	lio = GET_LIO(netdev);
306 	oct = lio->oct_dev;
307 
308 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
309 	strcpy(drvinfo->driver, "liquidio_vf");
310 	strcpy(drvinfo->version, LIQUIDIO_VERSION);
311 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
312 		ETHTOOL_FWVERS_LEN);
313 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
314 }
315 
316 static int
317 lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
318 {
319 	struct lio *lio = GET_LIO(netdev);
320 	struct octeon_device *oct = lio->oct_dev;
321 	struct octnic_ctrl_pkt nctrl;
322 	int ret = 0;
323 
324 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
325 
326 	nctrl.ncmd.u64 = 0;
327 	nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
328 	nctrl.ncmd.s.param1 = num_queues;
329 	nctrl.ncmd.s.param2 = num_queues;
330 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
331 	nctrl.wait_time = 100;
332 	nctrl.netpndev = (u64)netdev;
333 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
334 
335 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
336 	if (ret < 0) {
337 		dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
338 			ret);
339 		return -1;
340 	}
341 
342 	return 0;
343 }
344 
345 static void
346 lio_ethtool_get_channels(struct net_device *dev,
347 			 struct ethtool_channels *channel)
348 {
349 	struct lio *lio = GET_LIO(dev);
350 	struct octeon_device *oct = lio->oct_dev;
351 	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
352 	u32 combined_count = 0, max_combined = 0;
353 
354 	if (OCTEON_CN6XXX(oct)) {
355 		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
356 
357 		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
358 		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
359 		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
360 		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
361 	} else if (OCTEON_CN23XX_PF(oct)) {
362 		max_combined = lio->linfo.num_txpciq;
363 		combined_count = oct->num_iqs;
364 	} else if (OCTEON_CN23XX_VF(oct)) {
365 		u64 reg_val = 0ULL;
366 		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
367 
368 		reg_val = octeon_read_csr64(oct, ctrl);
369 		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
370 		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
371 		combined_count = oct->num_iqs;
372 	}
373 
374 	channel->max_rx = max_rx;
375 	channel->max_tx = max_tx;
376 	channel->max_combined = max_combined;
377 	channel->rx_count = rx_count;
378 	channel->tx_count = tx_count;
379 	channel->combined_count = combined_count;
380 }
381 
382 static int
383 lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
384 {
385 	struct msix_entry *msix_entries;
386 	int num_msix_irqs = 0;
387 	int i;
388 
389 	if (!oct->msix_on)
390 		return 0;
391 
392 	/* Disable the input and output queues now. No more packets will
393 	 * arrive from Octeon.
394 	 */
395 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
396 
397 	if (oct->msix_on) {
398 		if (OCTEON_CN23XX_PF(oct))
399 			num_msix_irqs = oct->num_msix_irqs - 1;
400 		else if (OCTEON_CN23XX_VF(oct))
401 			num_msix_irqs = oct->num_msix_irqs;
402 
403 		msix_entries = (struct msix_entry *)oct->msix_entries;
404 		for (i = 0; i < num_msix_irqs; i++) {
405 			if (oct->ioq_vector[i].vector) {
406 				/* clear the affinity_cpumask */
407 				irq_set_affinity_hint(msix_entries[i].vector,
408 						      NULL);
409 				free_irq(msix_entries[i].vector,
410 					 &oct->ioq_vector[i]);
411 				oct->ioq_vector[i].vector = 0;
412 			}
413 		}
414 
415 		/* non-iov vector's argument is oct struct */
416 		if (OCTEON_CN23XX_PF(oct))
417 			free_irq(msix_entries[i].vector, oct);
418 
419 		pci_disable_msix(oct->pci_dev);
420 		kfree(oct->msix_entries);
421 		oct->msix_entries = NULL;
422 	}
423 
424 	kfree(oct->irq_name_storage);
425 	oct->irq_name_storage = NULL;
426 	if (octeon_setup_interrupt(oct, num_ioqs)) {
427 		dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
428 		return 1;
429 	}
430 
431 	/* Enable Octeon device interrupts */
432 	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
433 
434 	return 0;
435 }
436 
437 static int
438 lio_ethtool_set_channels(struct net_device *dev,
439 			 struct ethtool_channels *channel)
440 {
441 	u32 combined_count, max_combined;
442 	struct lio *lio = GET_LIO(dev);
443 	struct octeon_device *oct = lio->oct_dev;
444 	int stopped = 0;
445 
446 	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
447 		dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
448 		return -EINVAL;
449 	}
450 
451 	if (!channel->combined_count || channel->other_count ||
452 	    channel->rx_count || channel->tx_count)
453 		return -EINVAL;
454 
455 	combined_count = channel->combined_count;
456 
457 	if (OCTEON_CN23XX_PF(oct)) {
458 		max_combined = channel->max_combined;
459 	} else if (OCTEON_CN23XX_VF(oct)) {
460 		u64 reg_val = 0ULL;
461 		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
462 
463 		reg_val = octeon_read_csr64(oct, ctrl);
464 		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
465 		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
466 	} else {
467 		return -EINVAL;
468 	}
469 
470 	if (combined_count > max_combined || combined_count < 1)
471 		return -EINVAL;
472 
473 	if (combined_count == oct->num_iqs)
474 		return 0;
475 
476 	ifstate_set(lio, LIO_IFSTATE_RESETTING);
477 
478 	if (netif_running(dev)) {
479 		dev->netdev_ops->ndo_stop(dev);
480 		stopped = 1;
481 	}
482 
483 	if (lio_reset_queues(dev, combined_count))
484 		return -EINVAL;
485 
486 	lio_irq_reallocate_irqs(oct, combined_count);
487 	if (stopped)
488 		dev->netdev_ops->ndo_open(dev);
489 
490 	ifstate_reset(lio, LIO_IFSTATE_RESETTING);
491 
492 	return 0;
493 }
494 
495 static int lio_get_eeprom_len(struct net_device *netdev)
496 {
497 	u8 buf[192];
498 	struct lio *lio = GET_LIO(netdev);
499 	struct octeon_device *oct_dev = lio->oct_dev;
500 	struct octeon_board_info *board_info;
501 	int len;
502 
503 	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
504 	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
505 		      board_info->name, board_info->serial_number,
506 		      board_info->major, board_info->minor);
507 
508 	return len;
509 }
510 
511 static int
512 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
513 	       u8 *bytes)
514 {
515 	struct lio *lio = GET_LIO(netdev);
516 	struct octeon_device *oct_dev = lio->oct_dev;
517 	struct octeon_board_info *board_info;
518 
519 	if (eeprom->offset)
520 		return -EINVAL;
521 
522 	eeprom->magic = oct_dev->pci_dev->vendor;
523 	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
524 	sprintf((char *)bytes,
525 		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
526 		board_info->name, board_info->serial_number,
527 		board_info->major, board_info->minor);
528 
529 	return 0;
530 }
531 
532 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
533 {
534 	struct lio *lio = GET_LIO(netdev);
535 	struct octeon_device *oct = lio->oct_dev;
536 	struct octnic_ctrl_pkt nctrl;
537 	int ret = 0;
538 
539 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
540 
541 	nctrl.ncmd.u64 = 0;
542 	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
543 	nctrl.ncmd.s.param1 = addr;
544 	nctrl.ncmd.s.param2 = val;
545 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
546 	nctrl.wait_time = 100;
547 	nctrl.netpndev = (u64)netdev;
548 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
549 
550 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
551 	if (ret < 0) {
552 		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
553 		return -EINVAL;
554 	}
555 
556 	return 0;
557 }
558 
559 static int octnet_id_active(struct net_device *netdev, int val)
560 {
561 	struct lio *lio = GET_LIO(netdev);
562 	struct octeon_device *oct = lio->oct_dev;
563 	struct octnic_ctrl_pkt nctrl;
564 	int ret = 0;
565 
566 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
567 
568 	nctrl.ncmd.u64 = 0;
569 	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
570 	nctrl.ncmd.s.param1 = val;
571 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
572 	nctrl.wait_time = 100;
573 	nctrl.netpndev = (u64)netdev;
574 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
575 
576 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
577 	if (ret < 0) {
578 		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
579 		return -EINVAL;
580 	}
581 
582 	return 0;
583 }
584 
585 /* Callback for when mdio command response arrives
586  */
587 static void octnet_mdio_resp_callback(struct octeon_device *oct,
588 				      u32 status,
589 				      void *buf)
590 {
591 	struct oct_mdio_cmd_context *mdio_cmd_ctx;
592 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
593 
594 	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
595 
596 	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
597 	if (status) {
598 		dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
599 			CVM_CAST64(status));
600 		WRITE_ONCE(mdio_cmd_ctx->cond, -1);
601 	} else {
602 		WRITE_ONCE(mdio_cmd_ctx->cond, 1);
603 	}
604 	wake_up_interruptible(&mdio_cmd_ctx->wc);
605 }
606 
607 /* This routine provides PHY access routines for
608  * mdio  clause45 .
609  */
610 static int
611 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
612 {
613 	struct octeon_device *oct_dev = lio->oct_dev;
614 	struct octeon_soft_command *sc;
615 	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
616 	struct oct_mdio_cmd_context *mdio_cmd_ctx;
617 	struct oct_mdio_cmd *mdio_cmd;
618 	int retval = 0;
619 
620 	sc = (struct octeon_soft_command *)
621 		octeon_alloc_soft_command(oct_dev,
622 					  sizeof(struct oct_mdio_cmd),
623 					  sizeof(struct oct_mdio_cmd_resp),
624 					  sizeof(struct oct_mdio_cmd_context));
625 
626 	if (!sc)
627 		return -ENOMEM;
628 
629 	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
630 	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
631 	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
632 
633 	WRITE_ONCE(mdio_cmd_ctx->cond, 0);
634 	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
635 	mdio_cmd->op = op;
636 	mdio_cmd->mdio_addr = loc;
637 	if (op)
638 		mdio_cmd->value1 = *value;
639 	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
640 
641 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
642 
643 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
644 				    0, 0, 0);
645 
646 	sc->wait_time = 1000;
647 	sc->callback = octnet_mdio_resp_callback;
648 	sc->callback_arg = sc;
649 
650 	init_waitqueue_head(&mdio_cmd_ctx->wc);
651 
652 	retval = octeon_send_soft_command(oct_dev, sc);
653 
654 	if (retval == IQ_SEND_FAILED) {
655 		dev_err(&oct_dev->pci_dev->dev,
656 			"octnet_mdio45_access instruction failed status: %x\n",
657 			retval);
658 		retval = -EBUSY;
659 	} else {
660 		/* Sleep on a wait queue till the cond flag indicates that the
661 		 * response arrived
662 		 */
663 		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
664 		retval = mdio_cmd_rsp->status;
665 		if (retval) {
666 			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
667 			retval = -EBUSY;
668 		} else {
669 			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
670 					    sizeof(struct oct_mdio_cmd) / 8);
671 
672 			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
673 				if (!op)
674 					*value = mdio_cmd_rsp->resp.value1;
675 			} else {
676 				retval = -EINVAL;
677 			}
678 		}
679 	}
680 
681 	octeon_free_soft_command(oct_dev, sc);
682 
683 	return retval;
684 }
685 
686 static int lio_set_phys_id(struct net_device *netdev,
687 			   enum ethtool_phys_id_state state)
688 {
689 	struct lio *lio = GET_LIO(netdev);
690 	struct octeon_device *oct = lio->oct_dev;
691 	int value, ret;
692 
693 	switch (state) {
694 	case ETHTOOL_ID_ACTIVE:
695 		if (oct->chip_id == OCTEON_CN66XX) {
696 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
697 					   VITESSE_PHY_GPIO_DRIVEON);
698 			return 2;
699 
700 		} else if (oct->chip_id == OCTEON_CN68XX) {
701 			/* Save the current LED settings */
702 			ret = octnet_mdio45_access(lio, 0,
703 						   LIO68XX_LED_BEACON_ADDR,
704 						   &lio->phy_beacon_val);
705 			if (ret)
706 				return ret;
707 
708 			ret = octnet_mdio45_access(lio, 0,
709 						   LIO68XX_LED_CTRL_ADDR,
710 						   &lio->led_ctrl_val);
711 			if (ret)
712 				return ret;
713 
714 			/* Configure Beacon values */
715 			value = LIO68XX_LED_BEACON_CFGON;
716 			ret = octnet_mdio45_access(lio, 1,
717 						   LIO68XX_LED_BEACON_ADDR,
718 						   &value);
719 			if (ret)
720 				return ret;
721 
722 			value = LIO68XX_LED_CTRL_CFGON;
723 			ret = octnet_mdio45_access(lio, 1,
724 						   LIO68XX_LED_CTRL_ADDR,
725 						   &value);
726 			if (ret)
727 				return ret;
728 		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
729 			octnet_id_active(netdev, LED_IDENTIFICATION_ON);
730 
731 			/* returns 0 since updates are asynchronous */
732 			return 0;
733 		} else {
734 			return -EINVAL;
735 		}
736 		break;
737 
738 	case ETHTOOL_ID_ON:
739 		if (oct->chip_id == OCTEON_CN66XX)
740 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
741 					   VITESSE_PHY_GPIO_HIGH);
742 		else
743 			return -EINVAL;
744 
745 		break;
746 
747 	case ETHTOOL_ID_OFF:
748 		if (oct->chip_id == OCTEON_CN66XX)
749 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
750 					   VITESSE_PHY_GPIO_LOW);
751 		else
752 			return -EINVAL;
753 
754 		break;
755 
756 	case ETHTOOL_ID_INACTIVE:
757 		if (oct->chip_id == OCTEON_CN66XX) {
758 			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
759 					   VITESSE_PHY_GPIO_DRIVEOFF);
760 		} else if (oct->chip_id == OCTEON_CN68XX) {
761 			/* Restore LED settings */
762 			ret = octnet_mdio45_access(lio, 1,
763 						   LIO68XX_LED_CTRL_ADDR,
764 						   &lio->led_ctrl_val);
765 			if (ret)
766 				return ret;
767 
768 			ret = octnet_mdio45_access(lio, 1,
769 						   LIO68XX_LED_BEACON_ADDR,
770 						   &lio->phy_beacon_val);
771 			if (ret)
772 				return ret;
773 		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
774 			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
775 
776 			return 0;
777 		} else {
778 			return -EINVAL;
779 		}
780 		break;
781 
782 	default:
783 		return -EINVAL;
784 	}
785 
786 	return 0;
787 }
788 
789 static void
790 lio_ethtool_get_ringparam(struct net_device *netdev,
791 			  struct ethtool_ringparam *ering)
792 {
793 	struct lio *lio = GET_LIO(netdev);
794 	struct octeon_device *oct = lio->oct_dev;
795 	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
796 	    rx_pending = 0;
797 
798 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
799 		return;
800 
801 	if (OCTEON_CN6XXX(oct)) {
802 		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
803 
804 		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
805 		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
806 		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
807 		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
808 	} else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
809 		tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
810 		rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
811 		rx_pending = oct->droq[0]->max_count;
812 		tx_pending = oct->instr_queue[0]->max_count;
813 	}
814 
815 	ering->tx_pending = tx_pending;
816 	ering->tx_max_pending = tx_max_pending;
817 	ering->rx_pending = rx_pending;
818 	ering->rx_max_pending = rx_max_pending;
819 	ering->rx_mini_pending = 0;
820 	ering->rx_jumbo_pending = 0;
821 	ering->rx_mini_max_pending = 0;
822 	ering->rx_jumbo_max_pending = 0;
823 }
824 
825 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
826 {
827 	struct lio *lio = GET_LIO(netdev);
828 	struct octeon_device *oct = lio->oct_dev;
829 	struct napi_struct *napi, *n;
830 	int i, update = 0;
831 
832 	if (wait_for_pending_requests(oct))
833 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
834 
835 	if (lio_wait_for_instr_fetch(oct))
836 		dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
837 
838 	if (octeon_set_io_queues_off(oct)) {
839 		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
840 		return -1;
841 	}
842 
843 	/* Disable the input and output queues now. No more packets will
844 	 * arrive from Octeon.
845 	 */
846 	oct->fn_list.disable_io_queues(oct);
847 	/* Delete NAPI */
848 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
849 		netif_napi_del(napi);
850 
851 	if (num_qs != oct->num_iqs) {
852 		netif_set_real_num_rx_queues(netdev, num_qs);
853 		netif_set_real_num_tx_queues(netdev, num_qs);
854 		update = 1;
855 	}
856 
857 	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
858 		if (!(oct->io_qmask.oq & BIT_ULL(i)))
859 			continue;
860 		octeon_delete_droq(oct, i);
861 	}
862 
863 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
864 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
865 			continue;
866 		octeon_delete_instr_queue(oct, i);
867 	}
868 
869 	if (oct->fn_list.setup_device_regs(oct)) {
870 		dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
871 		return -1;
872 	}
873 
874 	if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
875 		dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n");
876 		return -1;
877 	}
878 
879 	/* Enable the input and output queues for this Octeon device */
880 	if (oct->fn_list.enable_io_queues(oct)) {
881 		dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues");
882 		return -1;
883 	}
884 
885 	if (update && lio_send_queue_count_update(netdev, num_qs))
886 		return -1;
887 
888 	return 0;
889 }
890 
891 static int lio_ethtool_set_ringparam(struct net_device *netdev,
892 				     struct ethtool_ringparam *ering)
893 {
894 	u32 rx_count, tx_count, rx_count_old, tx_count_old;
895 	struct lio *lio = GET_LIO(netdev);
896 	struct octeon_device *oct = lio->oct_dev;
897 	int stopped = 0;
898 
899 	if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
900 		return -EINVAL;
901 
902 	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
903 		return -EINVAL;
904 
905 	rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
906 			   CN23XX_MAX_OQ_DESCRIPTORS);
907 	tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
908 			   CN23XX_MAX_IQ_DESCRIPTORS);
909 
910 	rx_count_old = oct->droq[0]->max_count;
911 	tx_count_old = oct->instr_queue[0]->max_count;
912 
913 	if (rx_count == rx_count_old && tx_count == tx_count_old)
914 		return 0;
915 
916 	ifstate_set(lio, LIO_IFSTATE_RESETTING);
917 
918 	if (netif_running(netdev)) {
919 		netdev->netdev_ops->ndo_stop(netdev);
920 		stopped = 1;
921 	}
922 
923 	/* Change RX/TX DESCS  count */
924 	if (tx_count != tx_count_old)
925 		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
926 					    tx_count);
927 	if (rx_count != rx_count_old)
928 		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
929 					    rx_count);
930 
931 	if (lio_reset_queues(netdev, lio->linfo.num_txpciq))
932 		goto err_lio_reset_queues;
933 
934 	if (stopped)
935 		netdev->netdev_ops->ndo_open(netdev);
936 
937 	ifstate_reset(lio, LIO_IFSTATE_RESETTING);
938 
939 	return 0;
940 
941 err_lio_reset_queues:
942 	if (tx_count != tx_count_old)
943 		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
944 					    tx_count_old);
945 	if (rx_count != rx_count_old)
946 		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
947 					    rx_count_old);
948 	return -EINVAL;
949 }
950 
951 static u32 lio_get_msglevel(struct net_device *netdev)
952 {
953 	struct lio *lio = GET_LIO(netdev);
954 
955 	return lio->msg_enable;
956 }
957 
958 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
959 {
960 	struct lio *lio = GET_LIO(netdev);
961 
962 	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
963 		if (msglvl & NETIF_MSG_HW)
964 			liquidio_set_feature(netdev,
965 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
966 		else
967 			liquidio_set_feature(netdev,
968 					     OCTNET_CMD_VERBOSE_DISABLE, 0);
969 	}
970 
971 	lio->msg_enable = msglvl;
972 }
973 
974 static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
975 {
976 	struct lio *lio = GET_LIO(netdev);
977 
978 	lio->msg_enable = msglvl;
979 }
980 
981 static void
982 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
983 {
984 	/* Notes: Not supporting any auto negotiation in these
985 	 * drivers. Just report pause frame support.
986 	 */
987 	struct lio *lio = GET_LIO(netdev);
988 	struct octeon_device *oct = lio->oct_dev;
989 
990 	pause->autoneg = 0;
991 
992 	pause->tx_pause = oct->tx_pause;
993 	pause->rx_pause = oct->rx_pause;
994 }
995 
996 static int
997 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
998 {
999 	/* Notes: Not supporting any auto negotiation in these
1000 	 * drivers.
1001 	 */
1002 	struct lio *lio = GET_LIO(netdev);
1003 	struct octeon_device *oct = lio->oct_dev;
1004 	struct octnic_ctrl_pkt nctrl;
1005 	struct oct_link_info *linfo = &lio->linfo;
1006 
1007 	int ret = 0;
1008 
1009 	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
1010 		return -EINVAL;
1011 
1012 	if (linfo->link.s.duplex == 0) {
1013 		/*no flow control for half duplex*/
1014 		if (pause->rx_pause || pause->tx_pause)
1015 			return -EINVAL;
1016 	}
1017 
1018 	/*do not support autoneg of link flow control*/
1019 	if (pause->autoneg == AUTONEG_ENABLE)
1020 		return -EINVAL;
1021 
1022 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1023 
1024 	nctrl.ncmd.u64 = 0;
1025 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
1026 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1027 	nctrl.wait_time = 100;
1028 	nctrl.netpndev = (u64)netdev;
1029 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1030 
1031 	if (pause->rx_pause) {
1032 		/*enable rx pause*/
1033 		nctrl.ncmd.s.param1 = 1;
1034 	} else {
1035 		/*disable rx pause*/
1036 		nctrl.ncmd.s.param1 = 0;
1037 	}
1038 
1039 	if (pause->tx_pause) {
1040 		/*enable tx pause*/
1041 		nctrl.ncmd.s.param2 = 1;
1042 	} else {
1043 		/*disable tx pause*/
1044 		nctrl.ncmd.s.param2 = 0;
1045 	}
1046 
1047 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1048 	if (ret < 0) {
1049 		dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
1050 		return -EINVAL;
1051 	}
1052 
1053 	oct->rx_pause = pause->rx_pause;
1054 	oct->tx_pause = pause->tx_pause;
1055 
1056 	return 0;
1057 }
1058 
1059 static void
1060 lio_get_ethtool_stats(struct net_device *netdev,
1061 		      struct ethtool_stats *stats  __attribute__((unused)),
1062 		      u64 *data)
1063 {
1064 	struct lio *lio = GET_LIO(netdev);
1065 	struct octeon_device *oct_dev = lio->oct_dev;
1066 	struct net_device_stats *netstats = &netdev->stats;
1067 	int i = 0, j;
1068 
1069 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1070 		return;
1071 
1072 	netdev->netdev_ops->ndo_get_stats(netdev);
1073 	octnet_get_link_stats(netdev);
1074 
1075 	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
1076 	data[i++] = CVM_CAST64(netstats->rx_packets);
1077 	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
1078 	data[i++] = CVM_CAST64(netstats->tx_packets);
1079 	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
1080 	data[i++] = CVM_CAST64(netstats->rx_bytes);
1081 	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1082 	data[i++] = CVM_CAST64(netstats->tx_bytes);
1083 	data[i++] = CVM_CAST64(netstats->rx_errors);
1084 	data[i++] = CVM_CAST64(netstats->tx_errors);
1085 	/*sum of oct->droq[oq_no]->stats->rx_dropped +
1086 	 *oct->droq[oq_no]->stats->dropped_nodispatch +
1087 	 *oct->droq[oq_no]->stats->dropped_toomany +
1088 	 *oct->droq[oq_no]->stats->dropped_nomem
1089 	 */
1090 	data[i++] = CVM_CAST64(netstats->rx_dropped);
1091 	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1092 	data[i++] = CVM_CAST64(netstats->tx_dropped);
1093 
1094 	/* firmware tx stats */
1095 	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
1096 	 *fromhost.fw_total_sent
1097 	 */
1098 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
1099 	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
1100 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
1101 	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
1102 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
1103 	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
1104 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
1105 	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
1106 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
1107 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1108 	 *fw_err_drop
1109 	 */
1110 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
1111 
1112 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
1113 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
1114 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1115 	 *fw_tso_fwd
1116 	 */
1117 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
1118 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1119 	 *fw_err_tso
1120 	 */
1121 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
1122 	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1123 	 *fw_tx_vxlan
1124 	 */
1125 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
1126 
1127 	/* mac tx statistics */
1128 	/*CVMX_BGXX_CMRX_TX_STAT5 */
1129 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
1130 	/*CVMX_BGXX_CMRX_TX_STAT4 */
1131 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
1132 	/*CVMX_BGXX_CMRX_TX_STAT15 */
1133 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
1134 	/*CVMX_BGXX_CMRX_TX_STAT14 */
1135 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
1136 	/*CVMX_BGXX_CMRX_TX_STAT17 */
1137 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
1138 	/*CVMX_BGXX_CMRX_TX_STAT0 */
1139 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
1140 	/*CVMX_BGXX_CMRX_TX_STAT3 */
1141 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
1142 	/*CVMX_BGXX_CMRX_TX_STAT2 */
1143 	data[i++] =
1144 		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
1145 	/*CVMX_BGXX_CMRX_TX_STAT0 */
1146 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
1147 	/*CVMX_BGXX_CMRX_TX_STAT1 */
1148 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
1149 	/*CVMX_BGXX_CMRX_TX_STAT16 */
1150 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
1151 	/*CVMX_BGXX_CMRX_TX_STAT6 */
1152 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
1153 
1154 	/* RX firmware stats */
1155 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1156 	 *fw_total_rcvd
1157 	 */
1158 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
1159 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1160 	 *fw_total_fwd
1161 	 */
1162 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
1163 	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
1164 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
1165 	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
1166 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
1167 	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
1168 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
1169 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1170 	 *fw_err_pko
1171 	 */
1172 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
1173 	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
1174 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
1175 	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1176 	 *fromwire.fw_err_drop
1177 	 */
1178 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
1179 
1180 	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1181 	 *fromwire.fw_rx_vxlan
1182 	 */
1183 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
1184 	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1185 	 *fromwire.fw_rx_vxlan_err
1186 	 */
1187 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
1188 
1189 	/* LRO */
1190 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1191 	 *fw_lro_pkts
1192 	 */
1193 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
1194 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1195 	 *fw_lro_octs
1196 	 */
1197 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
1198 	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
1199 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
1200 	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
1201 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
1202 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1203 	 *fw_lro_aborts_port
1204 	 */
1205 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
1206 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1207 	 *fw_lro_aborts_seq
1208 	 */
1209 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
1210 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1211 	 *fw_lro_aborts_tsval
1212 	 */
1213 	data[i++] =
1214 		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
1215 	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1216 	 *fw_lro_aborts_timer
1217 	 */
1218 	/* intrmod: packet forward rate */
1219 	data[i++] =
1220 		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
1221 	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
1222 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
1223 
1224 	/* mac: link-level stats */
1225 	/*CVMX_BGXX_CMRX_RX_STAT0 */
1226 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
1227 	/*CVMX_BGXX_CMRX_RX_STAT1 */
1228 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
1229 	/*CVMX_PKI_STATX_STAT5 */
1230 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
1231 	/*CVMX_PKI_STATX_STAT5 */
1232 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
1233 	/*wqe->word2.err_code or wqe->word2.err_level */
1234 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
1235 	/*CVMX_BGXX_CMRX_RX_STAT2 */
1236 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
1237 	/*CVMX_BGXX_CMRX_RX_STAT6 */
1238 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
1239 	/*CVMX_BGXX_CMRX_RX_STAT4 */
1240 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
1241 	/*wqe->word2.err_code or wqe->word2.err_level */
1242 	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
1243 	/*lio->link_changes*/
1244 	data[i++] = CVM_CAST64(lio->link_changes);
1245 
1246 	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
1247 		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
1248 			continue;
1249 		/*packets to network port*/
1250 		/*# of packets tx to network */
1251 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1252 		/*# of bytes tx to network */
1253 		data[i++] =
1254 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1255 		/*# of packets dropped */
1256 		data[i++] =
1257 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
1258 		/*# of tx fails due to queue full */
1259 		data[i++] =
1260 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
1261 		/*XXX gather entries sent */
1262 		data[i++] =
1263 			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
1264 
1265 		/*instruction to firmware: data and control */
1266 		/*# of instructions to the queue */
1267 		data[i++] =
1268 			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
1269 		/*# of instructions processed */
1270 		data[i++] = CVM_CAST64(
1271 				oct_dev->instr_queue[j]->stats.instr_processed);
1272 		/*# of instructions could not be processed */
1273 		data[i++] = CVM_CAST64(
1274 				oct_dev->instr_queue[j]->stats.instr_dropped);
1275 		/*bytes sent through the queue */
1276 		data[i++] =
1277 			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
1278 
1279 		/*tso request*/
1280 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1281 		/*vxlan request*/
1282 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1283 		/*txq restart*/
1284 		data[i++] =
1285 			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
1286 	}
1287 
1288 	/* RX */
1289 	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
1290 		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
1291 			continue;
1292 
1293 		/*packets send to TCP/IP network stack */
1294 		/*# of packets to network stack */
1295 		data[i++] =
1296 			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
1297 		/*# of bytes to network stack */
1298 		data[i++] =
1299 			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
1300 		/*# of packets dropped */
1301 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1302 				       oct_dev->droq[j]->stats.dropped_toomany +
1303 				       oct_dev->droq[j]->stats.rx_dropped);
1304 		data[i++] =
1305 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1306 		data[i++] =
1307 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1308 		data[i++] =
1309 			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1310 
1311 		/*control and data path*/
1312 		data[i++] =
1313 			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1314 		data[i++] =
1315 			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1316 		data[i++] =
1317 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1318 
1319 		data[i++] =
1320 			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1321 		data[i++] =
1322 			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1323 	}
1324 }
1325 
1326 static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1327 				     struct ethtool_stats *stats
1328 				     __attribute__((unused)),
1329 				     u64 *data)
1330 {
1331 	struct net_device_stats *netstats = &netdev->stats;
1332 	struct lio *lio = GET_LIO(netdev);
1333 	struct octeon_device *oct_dev = lio->oct_dev;
1334 	int i = 0, j, vj;
1335 
1336 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1337 		return;
1338 
1339 	netdev->netdev_ops->ndo_get_stats(netdev);
1340 	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
1341 	data[i++] = CVM_CAST64(netstats->rx_packets);
1342 	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
1343 	data[i++] = CVM_CAST64(netstats->tx_packets);
1344 	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
1345 	data[i++] = CVM_CAST64(netstats->rx_bytes);
1346 	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1347 	data[i++] = CVM_CAST64(netstats->tx_bytes);
1348 	data[i++] = CVM_CAST64(netstats->rx_errors);
1349 	data[i++] = CVM_CAST64(netstats->tx_errors);
1350 	 /* sum of oct->droq[oq_no]->stats->rx_dropped +
1351 	  * oct->droq[oq_no]->stats->dropped_nodispatch +
1352 	  * oct->droq[oq_no]->stats->dropped_toomany +
1353 	  * oct->droq[oq_no]->stats->dropped_nomem
1354 	  */
1355 	data[i++] = CVM_CAST64(netstats->rx_dropped);
1356 	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1357 	data[i++] = CVM_CAST64(netstats->tx_dropped);
1358 	/* lio->link_changes */
1359 	data[i++] = CVM_CAST64(lio->link_changes);
1360 
1361 	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
1362 		j = lio->linfo.txpciq[vj].s.q_no;
1363 
1364 		/* packets to network port */
1365 		/* # of packets tx to network */
1366 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1367 		 /* # of bytes tx to network */
1368 		data[i++] = CVM_CAST64(
1369 				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1370 		/* # of packets dropped */
1371 		data[i++] = CVM_CAST64(
1372 				oct_dev->instr_queue[j]->stats.tx_dropped);
1373 		/* # of tx fails due to queue full */
1374 		data[i++] = CVM_CAST64(
1375 				oct_dev->instr_queue[j]->stats.tx_iq_busy);
1376 		/* XXX gather entries sent */
1377 		data[i++] = CVM_CAST64(
1378 				oct_dev->instr_queue[j]->stats.sgentry_sent);
1379 
1380 		/* instruction to firmware: data and control */
1381 		/* # of instructions to the queue */
1382 		data[i++] = CVM_CAST64(
1383 				oct_dev->instr_queue[j]->stats.instr_posted);
1384 		/* # of instructions processed */
1385 		data[i++] =
1386 		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
1387 		/* # of instructions could not be processed */
1388 		data[i++] =
1389 		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
1390 		/* bytes sent through the queue */
1391 		data[i++] = CVM_CAST64(
1392 				oct_dev->instr_queue[j]->stats.bytes_sent);
1393 		/* tso request */
1394 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1395 		/* vxlan request */
1396 		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1397 		/* txq restart */
1398 		data[i++] = CVM_CAST64(
1399 				oct_dev->instr_queue[j]->stats.tx_restart);
1400 	}
1401 
1402 	/* RX */
1403 	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
1404 		j = lio->linfo.rxpciq[vj].s.q_no;
1405 
1406 		/* packets send to TCP/IP network stack */
1407 		/* # of packets to network stack */
1408 		data[i++] = CVM_CAST64(
1409 				oct_dev->droq[j]->stats.rx_pkts_received);
1410 		/* # of bytes to network stack */
1411 		data[i++] = CVM_CAST64(
1412 				oct_dev->droq[j]->stats.rx_bytes_received);
1413 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1414 				       oct_dev->droq[j]->stats.dropped_toomany +
1415 				       oct_dev->droq[j]->stats.rx_dropped);
1416 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1417 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1418 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1419 
1420 		/* control and data path */
1421 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1422 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1423 		data[i++] =
1424 			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1425 
1426 		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1427 		data[i++] =
1428 		    CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1429 	}
1430 }
1431 
1432 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
1433 {
1434 	struct octeon_device *oct_dev = lio->oct_dev;
1435 	int i;
1436 
1437 	switch (oct_dev->chip_id) {
1438 	case OCTEON_CN23XX_PF_VID:
1439 	case OCTEON_CN23XX_VF_VID:
1440 		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1441 			sprintf(data, "%s", oct_priv_flags_strings[i]);
1442 			data += ETH_GSTRING_LEN;
1443 		}
1444 		break;
1445 	case OCTEON_CN68XX:
1446 	case OCTEON_CN66XX:
1447 		break;
1448 	default:
1449 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1450 		break;
1451 	}
1452 }
1453 
1454 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1455 {
1456 	struct lio *lio = GET_LIO(netdev);
1457 	struct octeon_device *oct_dev = lio->oct_dev;
1458 	int num_iq_stats, num_oq_stats, i, j;
1459 	int num_stats;
1460 
1461 	switch (stringset) {
1462 	case ETH_SS_STATS:
1463 		num_stats = ARRAY_SIZE(oct_stats_strings);
1464 		for (j = 0; j < num_stats; j++) {
1465 			sprintf(data, "%s", oct_stats_strings[j]);
1466 			data += ETH_GSTRING_LEN;
1467 		}
1468 
1469 		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1470 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1471 			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1472 				continue;
1473 			for (j = 0; j < num_iq_stats; j++) {
1474 				sprintf(data, "tx-%d-%s", i,
1475 					oct_iq_stats_strings[j]);
1476 				data += ETH_GSTRING_LEN;
1477 			}
1478 		}
1479 
1480 		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1481 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1482 			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1483 				continue;
1484 			for (j = 0; j < num_oq_stats; j++) {
1485 				sprintf(data, "rx-%d-%s", i,
1486 					oct_droq_stats_strings[j]);
1487 				data += ETH_GSTRING_LEN;
1488 			}
1489 		}
1490 		break;
1491 
1492 	case ETH_SS_PRIV_FLAGS:
1493 		lio_get_priv_flags_strings(lio, data);
1494 		break;
1495 	default:
1496 		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1497 		break;
1498 	}
1499 }
1500 
1501 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
1502 			       u8 *data)
1503 {
1504 	int num_iq_stats, num_oq_stats, i, j;
1505 	struct lio *lio = GET_LIO(netdev);
1506 	struct octeon_device *oct_dev = lio->oct_dev;
1507 	int num_stats;
1508 
1509 	switch (stringset) {
1510 	case ETH_SS_STATS:
1511 		num_stats = ARRAY_SIZE(oct_vf_stats_strings);
1512 		for (j = 0; j < num_stats; j++) {
1513 			sprintf(data, "%s", oct_vf_stats_strings[j]);
1514 			data += ETH_GSTRING_LEN;
1515 		}
1516 
1517 		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1518 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1519 			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1520 				continue;
1521 			for (j = 0; j < num_iq_stats; j++) {
1522 				sprintf(data, "tx-%d-%s", i,
1523 					oct_iq_stats_strings[j]);
1524 				data += ETH_GSTRING_LEN;
1525 			}
1526 		}
1527 
1528 		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1529 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1530 			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1531 				continue;
1532 			for (j = 0; j < num_oq_stats; j++) {
1533 				sprintf(data, "rx-%d-%s", i,
1534 					oct_droq_stats_strings[j]);
1535 				data += ETH_GSTRING_LEN;
1536 			}
1537 		}
1538 		break;
1539 
1540 	case ETH_SS_PRIV_FLAGS:
1541 		lio_get_priv_flags_strings(lio, data);
1542 		break;
1543 	default:
1544 		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1545 		break;
1546 	}
1547 }
1548 
1549 static int lio_get_priv_flags_ss_count(struct lio *lio)
1550 {
1551 	struct octeon_device *oct_dev = lio->oct_dev;
1552 
1553 	switch (oct_dev->chip_id) {
1554 	case OCTEON_CN23XX_PF_VID:
1555 	case OCTEON_CN23XX_VF_VID:
1556 		return ARRAY_SIZE(oct_priv_flags_strings);
1557 	case OCTEON_CN68XX:
1558 	case OCTEON_CN66XX:
1559 		return -EOPNOTSUPP;
1560 	default:
1561 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1562 		return -EOPNOTSUPP;
1563 	}
1564 }
1565 
1566 static int lio_get_sset_count(struct net_device *netdev, int sset)
1567 {
1568 	struct lio *lio = GET_LIO(netdev);
1569 	struct octeon_device *oct_dev = lio->oct_dev;
1570 
1571 	switch (sset) {
1572 	case ETH_SS_STATS:
1573 		return (ARRAY_SIZE(oct_stats_strings) +
1574 			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1575 			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1576 	case ETH_SS_PRIV_FLAGS:
1577 		return lio_get_priv_flags_ss_count(lio);
1578 	default:
1579 		return -EOPNOTSUPP;
1580 	}
1581 }
1582 
1583 static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
1584 {
1585 	struct lio *lio = GET_LIO(netdev);
1586 	struct octeon_device *oct_dev = lio->oct_dev;
1587 
1588 	switch (sset) {
1589 	case ETH_SS_STATS:
1590 		return (ARRAY_SIZE(oct_vf_stats_strings) +
1591 			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1592 			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1593 	case ETH_SS_PRIV_FLAGS:
1594 		return lio_get_priv_flags_ss_count(lio);
1595 	default:
1596 		return -EOPNOTSUPP;
1597 	}
1598 }
1599 
1600 /* Callback function for intrmod */
1601 static void octnet_intrmod_callback(struct octeon_device *oct_dev,
1602 				    u32 status,
1603 				    void *ptr)
1604 {
1605 	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1606 	struct oct_intrmod_context *ctx;
1607 
1608 	ctx  = (struct oct_intrmod_context *)sc->ctxptr;
1609 
1610 	ctx->status = status;
1611 
1612 	WRITE_ONCE(ctx->cond, 1);
1613 
1614 	/* This barrier is required to be sure that the response has been
1615 	 * written fully before waking up the handler
1616 	 */
1617 	wmb();
1618 
1619 	wake_up_interruptible(&ctx->wc);
1620 }
1621 
1622 /*  get interrupt moderation parameters */
1623 static int octnet_get_intrmod_cfg(struct lio *lio,
1624 				  struct oct_intrmod_cfg *intr_cfg)
1625 {
1626 	struct octeon_soft_command *sc;
1627 	struct oct_intrmod_context *ctx;
1628 	struct oct_intrmod_resp *resp;
1629 	int retval;
1630 	struct octeon_device *oct_dev = lio->oct_dev;
1631 
1632 	/* Alloc soft command */
1633 	sc = (struct octeon_soft_command *)
1634 		octeon_alloc_soft_command(oct_dev,
1635 					  0,
1636 					  sizeof(struct oct_intrmod_resp),
1637 					  sizeof(struct oct_intrmod_context));
1638 
1639 	if (!sc)
1640 		return -ENOMEM;
1641 
1642 	resp = (struct oct_intrmod_resp *)sc->virtrptr;
1643 	memset(resp, 0, sizeof(struct oct_intrmod_resp));
1644 
1645 	ctx = (struct oct_intrmod_context *)sc->ctxptr;
1646 	memset(ctx, 0, sizeof(struct oct_intrmod_context));
1647 	WRITE_ONCE(ctx->cond, 0);
1648 	ctx->octeon_id = lio_get_device_id(oct_dev);
1649 	init_waitqueue_head(&ctx->wc);
1650 
1651 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1652 
1653 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1654 				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
1655 
1656 	sc->callback = octnet_intrmod_callback;
1657 	sc->callback_arg = sc;
1658 	sc->wait_time = 1000;
1659 
1660 	retval = octeon_send_soft_command(oct_dev, sc);
1661 	if (retval == IQ_SEND_FAILED) {
1662 		octeon_free_soft_command(oct_dev, sc);
1663 		return -EINVAL;
1664 	}
1665 
1666 	/* Sleep on a wait queue till the cond flag indicates that the
1667 	 * response arrived or timed-out.
1668 	 */
1669 	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
1670 		dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
1671 		goto intrmod_info_wait_intr;
1672 	}
1673 
1674 	retval = ctx->status || resp->status;
1675 	if (retval) {
1676 		dev_err(&oct_dev->pci_dev->dev,
1677 			"Get interrupt moderation parameters failed\n");
1678 		goto intrmod_info_wait_fail;
1679 	}
1680 
1681 	octeon_swap_8B_data((u64 *)&resp->intrmod,
1682 			    (sizeof(struct oct_intrmod_cfg)) / 8);
1683 	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
1684 	octeon_free_soft_command(oct_dev, sc);
1685 
1686 	return 0;
1687 
1688 intrmod_info_wait_fail:
1689 
1690 	octeon_free_soft_command(oct_dev, sc);
1691 
1692 intrmod_info_wait_intr:
1693 
1694 	return -ENODEV;
1695 }
1696 
1697 /*  Configure interrupt moderation parameters */
1698 static int octnet_set_intrmod_cfg(struct lio *lio,
1699 				  struct oct_intrmod_cfg *intr_cfg)
1700 {
1701 	struct octeon_soft_command *sc;
1702 	struct oct_intrmod_context *ctx;
1703 	struct oct_intrmod_cfg *cfg;
1704 	int retval;
1705 	struct octeon_device *oct_dev = lio->oct_dev;
1706 
1707 	/* Alloc soft command */
1708 	sc = (struct octeon_soft_command *)
1709 		octeon_alloc_soft_command(oct_dev,
1710 					  sizeof(struct oct_intrmod_cfg),
1711 					  0,
1712 					  sizeof(struct oct_intrmod_context));
1713 
1714 	if (!sc)
1715 		return -ENOMEM;
1716 
1717 	ctx = (struct oct_intrmod_context *)sc->ctxptr;
1718 
1719 	WRITE_ONCE(ctx->cond, 0);
1720 	ctx->octeon_id = lio_get_device_id(oct_dev);
1721 	init_waitqueue_head(&ctx->wc);
1722 
1723 	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
1724 
1725 	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
1726 	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
1727 
1728 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1729 
1730 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1731 				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
1732 
1733 	sc->callback = octnet_intrmod_callback;
1734 	sc->callback_arg = sc;
1735 	sc->wait_time = 1000;
1736 
1737 	retval = octeon_send_soft_command(oct_dev, sc);
1738 	if (retval == IQ_SEND_FAILED) {
1739 		octeon_free_soft_command(oct_dev, sc);
1740 		return -EINVAL;
1741 	}
1742 
1743 	/* Sleep on a wait queue till the cond flag indicates that the
1744 	 * response arrived or timed-out.
1745 	 */
1746 	if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
1747 		retval = ctx->status;
1748 		if (retval)
1749 			dev_err(&oct_dev->pci_dev->dev,
1750 				"intrmod config failed. Status: %llx\n",
1751 				CVM_CAST64(retval));
1752 		else
1753 			dev_info(&oct_dev->pci_dev->dev,
1754 				 "Rx-Adaptive Interrupt moderation %s\n",
1755 				 (intr_cfg->rx_enable) ?
1756 				 "enabled" : "disabled");
1757 
1758 		octeon_free_soft_command(oct_dev, sc);
1759 
1760 		return ((retval) ? -ENODEV : 0);
1761 	}
1762 
1763 	dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");
1764 
1765 	return -EINTR;
1766 }
1767 
1768 static void
1769 octnet_nic_stats_callback(struct octeon_device *oct_dev,
1770 			  u32 status, void *ptr)
1771 {
1772 	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1773 	struct oct_nic_stats_resp *resp =
1774 	    (struct oct_nic_stats_resp *)sc->virtrptr;
1775 	struct oct_nic_stats_ctrl *ctrl =
1776 	    (struct oct_nic_stats_ctrl *)sc->ctxptr;
1777 	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1778 	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1779 
1780 	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1781 	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1782 
1783 	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1784 		octeon_swap_8B_data((u64 *)&resp->stats,
1785 				    (sizeof(struct oct_link_stats)) >> 3);
1786 
1787 		/* RX link-level stats */
1788 		rstats->total_rcvd = rsp_rstats->total_rcvd;
1789 		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1790 		rstats->total_bcst = rsp_rstats->total_bcst;
1791 		rstats->total_mcst = rsp_rstats->total_mcst;
1792 		rstats->runts      = rsp_rstats->runts;
1793 		rstats->ctl_rcvd   = rsp_rstats->ctl_rcvd;
1794 		/* Accounts for over/under-run of buffers */
1795 		rstats->fifo_err  = rsp_rstats->fifo_err;
1796 		rstats->dmac_drop = rsp_rstats->dmac_drop;
1797 		rstats->fcs_err   = rsp_rstats->fcs_err;
1798 		rstats->jabber_err = rsp_rstats->jabber_err;
1799 		rstats->l2_err    = rsp_rstats->l2_err;
1800 		rstats->frame_err = rsp_rstats->frame_err;
1801 
1802 		/* RX firmware stats */
1803 		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1804 		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1805 		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1806 		rstats->fw_err_link = rsp_rstats->fw_err_link;
1807 		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1808 		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1809 		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1810 
1811 		/* Number of packets that are LROed      */
1812 		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1813 		/* Number of octets that are LROed       */
1814 		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1815 		/* Number of LRO packets formed          */
1816 		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1817 		/* Number of times lRO of packet aborted */
1818 		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1819 		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1820 		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1821 		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1822 		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1823 		/* intrmod: packet forward rate */
1824 		rstats->fwd_rate = rsp_rstats->fwd_rate;
1825 
1826 		/* TX link-level stats */
1827 		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1828 		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1829 		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1830 		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1831 		tstats->ctl_sent = rsp_tstats->ctl_sent;
1832 		/* Packets sent after one collision*/
1833 		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1834 		/* Packets sent after multiple collision*/
1835 		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1836 		/* Packets not sent due to max collisions */
1837 		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1838 		/* Packets not sent due to max deferrals */
1839 		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1840 		/* Accounts for over/under-run of buffers */
1841 		tstats->fifo_err = rsp_tstats->fifo_err;
1842 		tstats->runts = rsp_tstats->runts;
1843 		/* Total number of collisions detected */
1844 		tstats->total_collisions = rsp_tstats->total_collisions;
1845 
1846 		/* firmware stats */
1847 		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1848 		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1849 		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1850 		tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1851 		tstats->fw_err_link = rsp_tstats->fw_err_link;
1852 		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1853 		tstats->fw_tso = rsp_tstats->fw_tso;
1854 		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1855 		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1856 		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1857 
1858 		resp->status = 1;
1859 	} else {
1860 		resp->status = -1;
1861 	}
1862 	complete(&ctrl->complete);
1863 }
1864 
1865 /*  Configure interrupt moderation parameters */
1866 static int octnet_get_link_stats(struct net_device *netdev)
1867 {
1868 	struct lio *lio = GET_LIO(netdev);
1869 	struct octeon_device *oct_dev = lio->oct_dev;
1870 
1871 	struct octeon_soft_command *sc;
1872 	struct oct_nic_stats_ctrl *ctrl;
1873 	struct oct_nic_stats_resp *resp;
1874 
1875 	int retval;
1876 
1877 	/* Alloc soft command */
1878 	sc = (struct octeon_soft_command *)
1879 		octeon_alloc_soft_command(oct_dev,
1880 					  0,
1881 					  sizeof(struct oct_nic_stats_resp),
1882 					  sizeof(struct octnic_ctrl_pkt));
1883 
1884 	if (!sc)
1885 		return -ENOMEM;
1886 
1887 	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1888 	memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1889 
1890 	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1891 	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1892 	ctrl->netdev = netdev;
1893 	init_completion(&ctrl->complete);
1894 
1895 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1896 
1897 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1898 				    OPCODE_NIC_PORT_STATS, 0, 0, 0);
1899 
1900 	sc->callback = octnet_nic_stats_callback;
1901 	sc->callback_arg = sc;
1902 	sc->wait_time = 500;	/*in milli seconds*/
1903 
1904 	retval = octeon_send_soft_command(oct_dev, sc);
1905 	if (retval == IQ_SEND_FAILED) {
1906 		octeon_free_soft_command(oct_dev, sc);
1907 		return -EINVAL;
1908 	}
1909 
1910 	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1911 
1912 	if (resp->status != 1) {
1913 		octeon_free_soft_command(oct_dev, sc);
1914 
1915 		return -EINVAL;
1916 	}
1917 
1918 	octeon_free_soft_command(oct_dev, sc);
1919 
1920 	return 0;
1921 }
1922 
1923 static int lio_get_intr_coalesce(struct net_device *netdev,
1924 				 struct ethtool_coalesce *intr_coal)
1925 {
1926 	struct lio *lio = GET_LIO(netdev);
1927 	struct octeon_device *oct = lio->oct_dev;
1928 	struct octeon_instr_queue *iq;
1929 	struct oct_intrmod_cfg intrmod_cfg;
1930 
1931 	if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
1932 		return -ENODEV;
1933 
1934 	switch (oct->chip_id) {
1935 	case OCTEON_CN23XX_PF_VID:
1936 	case OCTEON_CN23XX_VF_VID: {
1937 		if (!intrmod_cfg.rx_enable) {
1938 			intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
1939 			intr_coal->rx_max_coalesced_frames =
1940 				oct->rx_max_coalesced_frames;
1941 		}
1942 		if (!intrmod_cfg.tx_enable)
1943 			intr_coal->tx_max_coalesced_frames =
1944 				oct->tx_max_coalesced_frames;
1945 		break;
1946 	}
1947 	case OCTEON_CN68XX:
1948 	case OCTEON_CN66XX: {
1949 		struct octeon_cn6xxx *cn6xxx =
1950 			(struct octeon_cn6xxx *)oct->chip;
1951 
1952 		if (!intrmod_cfg.rx_enable) {
1953 			intr_coal->rx_coalesce_usecs =
1954 				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
1955 			intr_coal->rx_max_coalesced_frames =
1956 				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
1957 		}
1958 		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
1959 		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
1960 		break;
1961 	}
1962 	default:
1963 		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1964 		return -EINVAL;
1965 	}
1966 	if (intrmod_cfg.rx_enable) {
1967 		intr_coal->use_adaptive_rx_coalesce =
1968 			intrmod_cfg.rx_enable;
1969 		intr_coal->rate_sample_interval =
1970 			intrmod_cfg.check_intrvl;
1971 		intr_coal->pkt_rate_high =
1972 			intrmod_cfg.maxpkt_ratethr;
1973 		intr_coal->pkt_rate_low =
1974 			intrmod_cfg.minpkt_ratethr;
1975 		intr_coal->rx_max_coalesced_frames_high =
1976 			intrmod_cfg.rx_maxcnt_trigger;
1977 		intr_coal->rx_coalesce_usecs_high =
1978 			intrmod_cfg.rx_maxtmr_trigger;
1979 		intr_coal->rx_coalesce_usecs_low =
1980 			intrmod_cfg.rx_mintmr_trigger;
1981 		intr_coal->rx_max_coalesced_frames_low =
1982 			intrmod_cfg.rx_mincnt_trigger;
1983 	}
1984 	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
1985 	    (intrmod_cfg.tx_enable)) {
1986 		intr_coal->use_adaptive_tx_coalesce =
1987 			intrmod_cfg.tx_enable;
1988 		intr_coal->tx_max_coalesced_frames_high =
1989 			intrmod_cfg.tx_maxcnt_trigger;
1990 		intr_coal->tx_max_coalesced_frames_low =
1991 			intrmod_cfg.tx_mincnt_trigger;
1992 	}
1993 	return 0;
1994 }
1995 
1996 /* Enable/Disable auto interrupt Moderation */
1997 static int oct_cfg_adaptive_intr(struct lio *lio,
1998 				 struct oct_intrmod_cfg *intrmod_cfg,
1999 				 struct ethtool_coalesce *intr_coal)
2000 {
2001 	int ret = 0;
2002 
2003 	if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
2004 		intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
2005 		intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
2006 		intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
2007 	}
2008 	if (intrmod_cfg->rx_enable) {
2009 		intrmod_cfg->rx_maxcnt_trigger =
2010 			intr_coal->rx_max_coalesced_frames_high;
2011 		intrmod_cfg->rx_maxtmr_trigger =
2012 			intr_coal->rx_coalesce_usecs_high;
2013 		intrmod_cfg->rx_mintmr_trigger =
2014 			intr_coal->rx_coalesce_usecs_low;
2015 		intrmod_cfg->rx_mincnt_trigger =
2016 			intr_coal->rx_max_coalesced_frames_low;
2017 	}
2018 	if (intrmod_cfg->tx_enable) {
2019 		intrmod_cfg->tx_maxcnt_trigger =
2020 			intr_coal->tx_max_coalesced_frames_high;
2021 		intrmod_cfg->tx_mincnt_trigger =
2022 			intr_coal->tx_max_coalesced_frames_low;
2023 	}
2024 
2025 	ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
2026 
2027 	return ret;
2028 }
2029 
2030 static int
2031 oct_cfg_rx_intrcnt(struct lio *lio,
2032 		   struct oct_intrmod_cfg *intrmod,
2033 		   struct ethtool_coalesce *intr_coal)
2034 {
2035 	struct octeon_device *oct = lio->oct_dev;
2036 	u32 rx_max_coalesced_frames;
2037 
2038 	/* Config Cnt based interrupt values */
2039 	switch (oct->chip_id) {
2040 	case OCTEON_CN68XX:
2041 	case OCTEON_CN66XX: {
2042 		struct octeon_cn6xxx *cn6xxx =
2043 			(struct octeon_cn6xxx *)oct->chip;
2044 
2045 		if (!intr_coal->rx_max_coalesced_frames)
2046 			rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
2047 		else
2048 			rx_max_coalesced_frames =
2049 				intr_coal->rx_max_coalesced_frames;
2050 		octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
2051 				 rx_max_coalesced_frames);
2052 		CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
2053 		break;
2054 	}
2055 	case OCTEON_CN23XX_PF_VID: {
2056 		int q_no;
2057 
2058 		if (!intr_coal->rx_max_coalesced_frames)
2059 			rx_max_coalesced_frames = intrmod->rx_frames;
2060 		else
2061 			rx_max_coalesced_frames =
2062 			    intr_coal->rx_max_coalesced_frames;
2063 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2064 			q_no += oct->sriov_info.pf_srn;
2065 			octeon_write_csr64(
2066 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2067 			    (octeon_read_csr64(
2068 				 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2069 			     (0x3fffff00000000UL)) |
2070 				(rx_max_coalesced_frames - 1));
2071 			/*consider setting resend bit*/
2072 		}
2073 		intrmod->rx_frames = rx_max_coalesced_frames;
2074 		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2075 		break;
2076 	}
2077 	case OCTEON_CN23XX_VF_VID: {
2078 		int q_no;
2079 
2080 		if (!intr_coal->rx_max_coalesced_frames)
2081 			rx_max_coalesced_frames = intrmod->rx_frames;
2082 		else
2083 			rx_max_coalesced_frames =
2084 			    intr_coal->rx_max_coalesced_frames;
2085 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2086 			octeon_write_csr64(
2087 			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2088 			    (octeon_read_csr64(
2089 				 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2090 			     (0x3fffff00000000UL)) |
2091 				(rx_max_coalesced_frames - 1));
2092 			/*consider writing to resend bit here*/
2093 		}
2094 		intrmod->rx_frames = rx_max_coalesced_frames;
2095 		oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2096 		break;
2097 	}
2098 	default:
2099 		return -EINVAL;
2100 	}
2101 	return 0;
2102 }
2103 
2104 static int oct_cfg_rx_intrtime(struct lio *lio,
2105 			       struct oct_intrmod_cfg *intrmod,
2106 			       struct ethtool_coalesce *intr_coal)
2107 {
2108 	struct octeon_device *oct = lio->oct_dev;
2109 	u32 time_threshold, rx_coalesce_usecs;
2110 
2111 	/* Config Time based interrupt values */
2112 	switch (oct->chip_id) {
2113 	case OCTEON_CN68XX:
2114 	case OCTEON_CN66XX: {
2115 		struct octeon_cn6xxx *cn6xxx =
2116 			(struct octeon_cn6xxx *)oct->chip;
2117 		if (!intr_coal->rx_coalesce_usecs)
2118 			rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
2119 		else
2120 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2121 
2122 		time_threshold = lio_cn6xxx_get_oq_ticks(oct,
2123 							 rx_coalesce_usecs);
2124 		octeon_write_csr(oct,
2125 				 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
2126 				 time_threshold);
2127 
2128 		CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
2129 		break;
2130 	}
2131 	case OCTEON_CN23XX_PF_VID: {
2132 		u64 time_threshold;
2133 		int q_no;
2134 
2135 		if (!intr_coal->rx_coalesce_usecs)
2136 			rx_coalesce_usecs = intrmod->rx_usecs;
2137 		else
2138 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2139 		time_threshold =
2140 		    cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2141 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2142 			q_no += oct->sriov_info.pf_srn;
2143 			octeon_write_csr64(oct,
2144 					   CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2145 					   (intrmod->rx_frames |
2146 					    ((u64)time_threshold << 32)));
2147 			/*consider writing to resend bit here*/
2148 		}
2149 		intrmod->rx_usecs = rx_coalesce_usecs;
2150 		oct->rx_coalesce_usecs = rx_coalesce_usecs;
2151 		break;
2152 	}
2153 	case OCTEON_CN23XX_VF_VID: {
2154 		u64 time_threshold;
2155 		int q_no;
2156 
2157 		if (!intr_coal->rx_coalesce_usecs)
2158 			rx_coalesce_usecs = intrmod->rx_usecs;
2159 		else
2160 			rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2161 
2162 		time_threshold =
2163 		    cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2164 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2165 			octeon_write_csr64(
2166 				oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2167 				(intrmod->rx_frames |
2168 				 ((u64)time_threshold << 32)));
2169 			/*consider setting resend bit*/
2170 		}
2171 		intrmod->rx_usecs = rx_coalesce_usecs;
2172 		oct->rx_coalesce_usecs = rx_coalesce_usecs;
2173 		break;
2174 	}
2175 	default:
2176 		return -EINVAL;
2177 	}
2178 
2179 	return 0;
2180 }
2181 
2182 static int
2183 oct_cfg_tx_intrcnt(struct lio *lio,
2184 		   struct oct_intrmod_cfg *intrmod,
2185 		   struct ethtool_coalesce *intr_coal)
2186 {
2187 	struct octeon_device *oct = lio->oct_dev;
2188 	u32 iq_intr_pkt;
2189 	void __iomem *inst_cnt_reg;
2190 	u64 val;
2191 
2192 	/* Config Cnt based interrupt values */
2193 	switch (oct->chip_id) {
2194 	case OCTEON_CN68XX:
2195 	case OCTEON_CN66XX:
2196 		break;
2197 	case OCTEON_CN23XX_VF_VID:
2198 	case OCTEON_CN23XX_PF_VID: {
2199 		int q_no;
2200 
2201 		if (!intr_coal->tx_max_coalesced_frames)
2202 			iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
2203 				      CN23XX_PKT_IN_DONE_WMARK_MASK;
2204 		else
2205 			iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
2206 				      CN23XX_PKT_IN_DONE_WMARK_MASK;
2207 		for (q_no = 0; q_no < oct->num_iqs; q_no++) {
2208 			inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
2209 			val = readq(inst_cnt_reg);
2210 			/*clear wmark and count.dont want to write count back*/
2211 			val = (val & 0xFFFF000000000000ULL) |
2212 			      ((u64)(iq_intr_pkt - 1)
2213 			       << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
2214 			writeq(val, inst_cnt_reg);
2215 			/*consider setting resend bit*/
2216 		}
2217 		intrmod->tx_frames = iq_intr_pkt;
2218 		oct->tx_max_coalesced_frames = iq_intr_pkt;
2219 		break;
2220 	}
2221 	default:
2222 		return -EINVAL;
2223 	}
2224 	return 0;
2225 }
2226 
2227 static int lio_set_intr_coalesce(struct net_device *netdev,
2228 				 struct ethtool_coalesce *intr_coal)
2229 {
2230 	struct lio *lio = GET_LIO(netdev);
2231 	int ret;
2232 	struct octeon_device *oct = lio->oct_dev;
2233 	struct oct_intrmod_cfg intrmod = {0};
2234 	u32 j, q_no;
2235 	int db_max, db_min;
2236 
2237 	switch (oct->chip_id) {
2238 	case OCTEON_CN68XX:
2239 	case OCTEON_CN66XX:
2240 		db_min = CN6XXX_DB_MIN;
2241 		db_max = CN6XXX_DB_MAX;
2242 		if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
2243 		    (intr_coal->tx_max_coalesced_frames <= db_max)) {
2244 			for (j = 0; j < lio->linfo.num_txpciq; j++) {
2245 				q_no = lio->linfo.txpciq[j].s.q_no;
2246 				oct->instr_queue[q_no]->fill_threshold =
2247 					intr_coal->tx_max_coalesced_frames;
2248 			}
2249 		} else {
2250 			dev_err(&oct->pci_dev->dev,
2251 				"LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
2252 				intr_coal->tx_max_coalesced_frames,
2253 				db_min, db_max);
2254 			return -EINVAL;
2255 		}
2256 		break;
2257 	case OCTEON_CN23XX_PF_VID:
2258 	case OCTEON_CN23XX_VF_VID:
2259 		break;
2260 	default:
2261 		return -EINVAL;
2262 	}
2263 
2264 	intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
2265 	intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
2266 	intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2267 	intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2268 	intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2269 
2270 	ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
2271 
2272 	if (!intr_coal->use_adaptive_rx_coalesce) {
2273 		ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
2274 		if (ret)
2275 			goto ret_intrmod;
2276 
2277 		ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
2278 		if (ret)
2279 			goto ret_intrmod;
2280 	} else {
2281 		oct->rx_coalesce_usecs =
2282 			CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2283 		oct->rx_max_coalesced_frames =
2284 			CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2285 	}
2286 
2287 	if (!intr_coal->use_adaptive_tx_coalesce) {
2288 		ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
2289 		if (ret)
2290 			goto ret_intrmod;
2291 	} else {
2292 		oct->tx_max_coalesced_frames =
2293 			CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2294 	}
2295 
2296 	return 0;
2297 ret_intrmod:
2298 	return ret;
2299 }
2300 
2301 static int lio_get_ts_info(struct net_device *netdev,
2302 			   struct ethtool_ts_info *info)
2303 {
2304 	struct lio *lio = GET_LIO(netdev);
2305 
2306 	info->so_timestamping =
2307 #ifdef PTP_HARDWARE_TIMESTAMPING
2308 		SOF_TIMESTAMPING_TX_HARDWARE |
2309 		SOF_TIMESTAMPING_RX_HARDWARE |
2310 		SOF_TIMESTAMPING_RAW_HARDWARE |
2311 		SOF_TIMESTAMPING_TX_SOFTWARE |
2312 #endif
2313 		SOF_TIMESTAMPING_RX_SOFTWARE |
2314 		SOF_TIMESTAMPING_SOFTWARE;
2315 
2316 	if (lio->ptp_clock)
2317 		info->phc_index = ptp_clock_index(lio->ptp_clock);
2318 	else
2319 		info->phc_index = -1;
2320 
2321 #ifdef PTP_HARDWARE_TIMESTAMPING
2322 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2323 
2324 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2325 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2326 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2327 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
2328 #endif
2329 
2330 	return 0;
2331 }
2332 
2333 /* Return register dump len. */
2334 static int lio_get_regs_len(struct net_device *dev)
2335 {
2336 	struct lio *lio = GET_LIO(dev);
2337 	struct octeon_device *oct = lio->oct_dev;
2338 
2339 	switch (oct->chip_id) {
2340 	case OCTEON_CN23XX_PF_VID:
2341 		return OCT_ETHTOOL_REGDUMP_LEN_23XX;
2342 	case OCTEON_CN23XX_VF_VID:
2343 		return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
2344 	default:
2345 		return OCT_ETHTOOL_REGDUMP_LEN;
2346 	}
2347 }
2348 
2349 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
2350 {
2351 	u32 reg;
2352 	u8 pf_num = oct->pf_num;
2353 	int len = 0;
2354 	int i;
2355 
2356 	/* PCI  Window Registers */
2357 
2358 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2359 
2360 	/*0x29030 or 0x29040*/
2361 	reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
2362 	len += sprintf(s + len,
2363 		       "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
2364 		       reg, oct->pcie_port, oct->pf_num,
2365 		       (u64)octeon_read_csr64(oct, reg));
2366 
2367 	/*0x27080 or 0x27090*/
2368 	reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
2369 	len +=
2370 	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
2371 		    reg, oct->pcie_port, oct->pf_num,
2372 		    (u64)octeon_read_csr64(oct, reg));
2373 
2374 	/*0x27000 or 0x27010*/
2375 	reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
2376 	len +=
2377 	    sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
2378 		    reg, oct->pcie_port, oct->pf_num,
2379 		    (u64)octeon_read_csr64(oct, reg));
2380 
2381 	/*0x29120*/
2382 	reg = 0x29120;
2383 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
2384 		       (u64)octeon_read_csr64(oct, reg));
2385 
2386 	/*0x27300*/
2387 	reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2388 	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2389 	len += sprintf(
2390 	    s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
2391 	    oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
2392 
2393 	/*0x27200*/
2394 	reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2395 	      (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2396 	len += sprintf(s + len,
2397 		       "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
2398 		       reg, oct->pcie_port, oct->pf_num,
2399 		       (u64)octeon_read_csr64(oct, reg));
2400 
2401 	/*29130*/
2402 	reg = CN23XX_SLI_PKT_CNT_INT;
2403 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
2404 		       (u64)octeon_read_csr64(oct, reg));
2405 
2406 	/*0x29140*/
2407 	reg = CN23XX_SLI_PKT_TIME_INT;
2408 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
2409 		       (u64)octeon_read_csr64(oct, reg));
2410 
2411 	/*0x29160*/
2412 	reg = 0x29160;
2413 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
2414 		       (u64)octeon_read_csr64(oct, reg));
2415 
2416 	/*0x29180*/
2417 	reg = CN23XX_SLI_OQ_WMARK;
2418 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
2419 		       reg, (u64)octeon_read_csr64(oct, reg));
2420 
2421 	/*0x291E0*/
2422 	reg = CN23XX_SLI_PKT_IOQ_RING_RST;
2423 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
2424 		       (u64)octeon_read_csr64(oct, reg));
2425 
2426 	/*0x29210*/
2427 	reg = CN23XX_SLI_GBL_CONTROL;
2428 	len += sprintf(s + len,
2429 		       "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
2430 		       (u64)octeon_read_csr64(oct, reg));
2431 
2432 	/*0x29220*/
2433 	reg = 0x29220;
2434 	len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
2435 		       reg, (u64)octeon_read_csr64(oct, reg));
2436 
2437 	/*PF only*/
2438 	if (pf_num == 0) {
2439 		/*0x29260*/
2440 		reg = CN23XX_SLI_OUT_BP_EN_W1S;
2441 		len += sprintf(s + len,
2442 			       "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
2443 			       reg, (u64)octeon_read_csr64(oct, reg));
2444 	} else if (pf_num == 1) {
2445 		/*0x29270*/
2446 		reg = CN23XX_SLI_OUT_BP_EN2_W1S;
2447 		len += sprintf(s + len,
2448 			       "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
2449 			       reg, (u64)octeon_read_csr64(oct, reg));
2450 	}
2451 
2452 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2453 		reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
2454 		len +=
2455 		    sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2456 			    reg, i, (u64)octeon_read_csr64(oct, reg));
2457 	}
2458 
2459 	/*0x10040*/
2460 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2461 		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2462 		len += sprintf(s + len,
2463 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2464 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2465 	}
2466 
2467 	/*0x10080*/
2468 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2469 		reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
2470 		len += sprintf(s + len,
2471 			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2472 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2473 	}
2474 
2475 	/*0x10090*/
2476 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2477 		reg = CN23XX_SLI_OQ_SIZE(i);
2478 		len += sprintf(
2479 		    s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2480 		    reg, i, (u64)octeon_read_csr64(oct, reg));
2481 	}
2482 
2483 	/*0x10050*/
2484 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2485 		reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
2486 		len += sprintf(
2487 			s + len,
2488 			"\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2489 			reg, i, (u64)octeon_read_csr64(oct, reg));
2490 	}
2491 
2492 	/*0x10070*/
2493 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2494 		reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
2495 		len += sprintf(s + len,
2496 			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2497 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2498 	}
2499 
2500 	/*0x100a0*/
2501 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2502 		reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
2503 		len += sprintf(s + len,
2504 			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2505 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2506 	}
2507 
2508 	/*0x100b0*/
2509 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2510 		reg = CN23XX_SLI_OQ_PKTS_SENT(i);
2511 		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2512 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2513 	}
2514 
2515 	/*0x100c0*/
2516 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2517 		reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
2518 		len += sprintf(s + len,
2519 			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2520 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2521 
2522 		/*0x10000*/
2523 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2524 			reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
2525 			len += sprintf(
2526 				s + len,
2527 				"\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2528 				reg, i, (u64)octeon_read_csr64(oct, reg));
2529 		}
2530 
2531 		/*0x10010*/
2532 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2533 			reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
2534 			len += sprintf(
2535 			    s + len,
2536 			    "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
2537 			    i, (u64)octeon_read_csr64(oct, reg));
2538 		}
2539 
2540 		/*0x10020*/
2541 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2542 			reg = CN23XX_SLI_IQ_DOORBELL(i);
2543 			len += sprintf(
2544 			    s + len,
2545 			    "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2546 			    reg, i, (u64)octeon_read_csr64(oct, reg));
2547 		}
2548 
2549 		/*0x10030*/
2550 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2551 			reg = CN23XX_SLI_IQ_SIZE(i);
2552 			len += sprintf(
2553 			    s + len,
2554 			    "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2555 			    reg, i, (u64)octeon_read_csr64(oct, reg));
2556 		}
2557 
2558 		/*0x10040*/
2559 		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
2560 			reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2561 		len += sprintf(s + len,
2562 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2563 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2564 	}
2565 
2566 	return len;
2567 }
2568 
2569 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2570 {
2571 	int len = 0;
2572 	u32 reg;
2573 	int i;
2574 
2575 	/* PCI  Window Registers */
2576 
2577 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2578 
2579 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2580 		reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2581 		len += sprintf(s + len,
2582 			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2583 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2584 	}
2585 
2586 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2587 		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2588 		len += sprintf(s + len,
2589 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2590 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2591 	}
2592 
2593 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2594 		reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2595 		len += sprintf(s + len,
2596 			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2597 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2598 	}
2599 
2600 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2601 		reg = CN23XX_VF_SLI_OQ_SIZE(i);
2602 		len += sprintf(s + len,
2603 			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2604 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2605 	}
2606 
2607 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2608 		reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2609 		len += sprintf(s + len,
2610 			       "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2611 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2612 	}
2613 
2614 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2615 		reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2616 		len += sprintf(s + len,
2617 			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2618 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2619 	}
2620 
2621 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2622 		reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2623 		len += sprintf(s + len,
2624 			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2625 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2626 	}
2627 
2628 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2629 		reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2630 		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2631 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2632 	}
2633 
2634 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2635 		reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2636 		len += sprintf(s + len,
2637 			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2638 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2639 	}
2640 
2641 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2642 		reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2643 		len += sprintf(s + len,
2644 			       "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2645 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2646 	}
2647 
2648 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2649 		reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2650 		len += sprintf(s + len,
2651 			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2652 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2653 	}
2654 
2655 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2656 		reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2657 		len += sprintf(s + len,
2658 			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2659 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2660 	}
2661 
2662 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2663 		reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2664 		len += sprintf(s + len,
2665 			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2666 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2667 	}
2668 
2669 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2670 		reg = CN23XX_VF_SLI_IQ_SIZE(i);
2671 		len += sprintf(s + len,
2672 			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2673 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2674 	}
2675 
2676 	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2677 		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2678 		len += sprintf(s + len,
2679 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2680 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2681 	}
2682 
2683 	return len;
2684 }
2685 
2686 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2687 {
2688 	u32 reg;
2689 	int i, len = 0;
2690 
2691 	/* PCI  Window Registers */
2692 
2693 	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2694 	reg = CN6XXX_WIN_WR_ADDR_LO;
2695 	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2696 		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2697 	reg = CN6XXX_WIN_WR_ADDR_HI;
2698 	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2699 		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2700 	reg = CN6XXX_WIN_RD_ADDR_LO;
2701 	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2702 		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2703 	reg = CN6XXX_WIN_RD_ADDR_HI;
2704 	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2705 		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2706 	reg = CN6XXX_WIN_WR_DATA_LO;
2707 	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2708 		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2709 	reg = CN6XXX_WIN_WR_DATA_HI;
2710 	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2711 		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2712 	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2713 		       CN6XXX_WIN_WR_MASK_REG,
2714 		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2715 
2716 	/* PCI  Interrupt Register */
2717 	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2718 		       CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2719 						CN6XXX_SLI_INT_ENB64_PORT0));
2720 	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2721 		       CN6XXX_SLI_INT_ENB64_PORT1,
2722 		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2723 	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2724 		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2725 
2726 	/* PCI  Output queue registers */
2727 	for (i = 0; i < oct->num_oqs; i++) {
2728 		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2729 		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2730 			       reg, i, octeon_read_csr(oct, reg));
2731 		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2732 		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2733 			       reg, i, octeon_read_csr(oct, reg));
2734 	}
2735 	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2736 	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2737 		       reg, octeon_read_csr(oct, reg));
2738 	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2739 	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2740 		       reg, octeon_read_csr(oct, reg));
2741 
2742 	/* PCI  Input queue registers */
2743 	for (i = 0; i <= 3; i++) {
2744 		u32 reg;
2745 
2746 		reg = CN6XXX_SLI_IQ_DOORBELL(i);
2747 		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2748 			       reg, i, octeon_read_csr(oct, reg));
2749 		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2750 		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2751 			       reg, i, octeon_read_csr(oct, reg));
2752 	}
2753 
2754 	/* PCI  DMA registers */
2755 
2756 	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2757 		       CN6XXX_DMA_CNT(0),
2758 		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2759 	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2760 	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2761 		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2762 	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2763 	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2764 		       CN6XXX_DMA_TIME_INT_LEVEL(0),
2765 		       octeon_read_csr(oct, reg));
2766 
2767 	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2768 		       CN6XXX_DMA_CNT(1),
2769 		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2770 	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2771 	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2772 		       CN6XXX_DMA_PKT_INT_LEVEL(1),
2773 		       octeon_read_csr(oct, reg));
2774 	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2775 	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2776 		       CN6XXX_DMA_TIME_INT_LEVEL(1),
2777 		       octeon_read_csr(oct, reg));
2778 
2779 	/* PCI  Index registers */
2780 
2781 	len += sprintf(s + len, "\n");
2782 
2783 	for (i = 0; i < 16; i++) {
2784 		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2785 		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2786 			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2787 	}
2788 
2789 	return len;
2790 }
2791 
2792 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2793 {
2794 	u32 val;
2795 	int i, len = 0;
2796 
2797 	/* PCI CONFIG Registers */
2798 
2799 	len += sprintf(s + len,
2800 		       "\n\t Octeon Config space Registers\n\n");
2801 
2802 	for (i = 0; i <= 13; i++) {
2803 		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2804 		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2805 			       (i * 4), i, val);
2806 	}
2807 
2808 	for (i = 30; i <= 34; i++) {
2809 		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2810 		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2811 			       (i * 4), i, val);
2812 	}
2813 
2814 	return len;
2815 }
2816 
2817 /*  Return register dump user app.  */
2818 static void lio_get_regs(struct net_device *dev,
2819 			 struct ethtool_regs *regs, void *regbuf)
2820 {
2821 	struct lio *lio = GET_LIO(dev);
2822 	int len = 0;
2823 	struct octeon_device *oct = lio->oct_dev;
2824 
2825 	regs->version = OCT_ETHTOOL_REGSVER;
2826 
2827 	switch (oct->chip_id) {
2828 	case OCTEON_CN23XX_PF_VID:
2829 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
2830 		len += cn23xx_read_csr_reg(regbuf + len, oct);
2831 		break;
2832 	case OCTEON_CN23XX_VF_VID:
2833 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
2834 		len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
2835 		break;
2836 	case OCTEON_CN68XX:
2837 	case OCTEON_CN66XX:
2838 		memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
2839 		len += cn6xxx_read_csr_reg(regbuf + len, oct);
2840 		len += cn6xxx_read_config_reg(regbuf + len, oct);
2841 		break;
2842 	default:
2843 		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
2844 			__func__, oct->chip_id);
2845 	}
2846 }
2847 
2848 static u32 lio_get_priv_flags(struct net_device *netdev)
2849 {
2850 	struct lio *lio = GET_LIO(netdev);
2851 
2852 	return lio->oct_dev->priv_flags;
2853 }
2854 
2855 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
2856 {
2857 	struct lio *lio = GET_LIO(netdev);
2858 	bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
2859 
2860 	lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
2861 			  intr_by_tx_bytes);
2862 	return 0;
2863 }
2864 
2865 static const struct ethtool_ops lio_ethtool_ops = {
2866 	.get_link_ksettings	= lio_get_link_ksettings,
2867 	.get_link		= ethtool_op_get_link,
2868 	.get_drvinfo		= lio_get_drvinfo,
2869 	.get_ringparam		= lio_ethtool_get_ringparam,
2870 	.set_ringparam		= lio_ethtool_set_ringparam,
2871 	.get_channels		= lio_ethtool_get_channels,
2872 	.set_channels		= lio_ethtool_set_channels,
2873 	.set_phys_id		= lio_set_phys_id,
2874 	.get_eeprom_len		= lio_get_eeprom_len,
2875 	.get_eeprom		= lio_get_eeprom,
2876 	.get_strings		= lio_get_strings,
2877 	.get_ethtool_stats	= lio_get_ethtool_stats,
2878 	.get_pauseparam		= lio_get_pauseparam,
2879 	.set_pauseparam		= lio_set_pauseparam,
2880 	.get_regs_len		= lio_get_regs_len,
2881 	.get_regs		= lio_get_regs,
2882 	.get_msglevel		= lio_get_msglevel,
2883 	.set_msglevel		= lio_set_msglevel,
2884 	.get_sset_count		= lio_get_sset_count,
2885 	.get_coalesce		= lio_get_intr_coalesce,
2886 	.set_coalesce		= lio_set_intr_coalesce,
2887 	.get_priv_flags		= lio_get_priv_flags,
2888 	.set_priv_flags		= lio_set_priv_flags,
2889 	.get_ts_info		= lio_get_ts_info,
2890 };
2891 
2892 static const struct ethtool_ops lio_vf_ethtool_ops = {
2893 	.get_link_ksettings	= lio_get_link_ksettings,
2894 	.get_link		= ethtool_op_get_link,
2895 	.get_drvinfo		= lio_get_vf_drvinfo,
2896 	.get_ringparam		= lio_ethtool_get_ringparam,
2897 	.set_ringparam          = lio_ethtool_set_ringparam,
2898 	.get_channels		= lio_ethtool_get_channels,
2899 	.set_channels		= lio_ethtool_set_channels,
2900 	.get_strings		= lio_vf_get_strings,
2901 	.get_ethtool_stats	= lio_vf_get_ethtool_stats,
2902 	.get_regs_len		= lio_get_regs_len,
2903 	.get_regs		= lio_get_regs,
2904 	.get_msglevel		= lio_get_msglevel,
2905 	.set_msglevel		= lio_vf_set_msglevel,
2906 	.get_sset_count		= lio_vf_get_sset_count,
2907 	.get_coalesce		= lio_get_intr_coalesce,
2908 	.set_coalesce		= lio_set_intr_coalesce,
2909 	.get_priv_flags		= lio_get_priv_flags,
2910 	.set_priv_flags		= lio_set_priv_flags,
2911 	.get_ts_info		= lio_get_ts_info,
2912 };
2913 
2914 void liquidio_set_ethtool_ops(struct net_device *netdev)
2915 {
2916 	struct lio *lio = GET_LIO(netdev);
2917 	struct octeon_device *oct = lio->oct_dev;
2918 
2919 	if (OCTEON_CN23XX_VF(oct))
2920 		netdev->ethtool_ops = &lio_vf_ethtool_ops;
2921 	else
2922 		netdev->ethtool_ops = &lio_ethtool_ops;
2923 }
2924