1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/firmware.h>
22 #include <net/vxlan.h>
23 #include <linux/kthread.h>
24 #include <net/switchdev.h>
25 #include "liquidio_common.h"
26 #include "octeon_droq.h"
27 #include "octeon_iq.h"
28 #include "response_manager.h"
29 #include "octeon_device.h"
30 #include "octeon_nic.h"
31 #include "octeon_main.h"
32 #include "octeon_network.h"
33 #include "cn66xx_regs.h"
34 #include "cn66xx_device.h"
35 #include "cn68xx_device.h"
36 #include "cn23xx_pf_device.h"
37 #include "liquidio_image.h"
38 #include "lio_vf_rep.h"
39 
40 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
41 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(LIQUIDIO_VERSION);
44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
45 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
47 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
49 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
50 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
51 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
52 
53 static int ddr_timeout = 10000;
54 module_param(ddr_timeout, int, 0644);
55 MODULE_PARM_DESC(ddr_timeout,
56 		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
57 
58 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
59 
60 static int debug = -1;
61 module_param(debug, int, 0644);
62 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
63 
64 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
65 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
66 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
67 
68 static u32 console_bitmask;
69 module_param(console_bitmask, int, 0644);
70 MODULE_PARM_DESC(console_bitmask,
71 		 "Bitmask indicating which consoles have debug output redirected to syslog.");
72 
73 /**
74  * \brief determines if a given console has debug enabled.
75  * @param console console to check
76  * @returns  1 = enabled. 0 otherwise
77  */
78 static int octeon_console_debug_enabled(u32 console)
79 {
80 	return (console_bitmask >> (console)) & 0x1;
81 }
82 
83 /* Polling interval for determining when NIC application is alive */
84 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
85 
86 /* runtime link query interval */
87 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
88 /* update localtime to octeon firmware every 60 seconds.
89  * make firmware to use same time reference, so that it will be easy to
90  * correlate firmware logged events/errors with host events, for debugging.
91  */
92 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
93 
94 /* time to wait for possible in-flight requests in milliseconds */
95 #define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)
96 
97 struct lio_trusted_vf_ctx {
98 	struct completion complete;
99 	int status;
100 };
101 
102 struct liquidio_rx_ctl_context {
103 	int octeon_id;
104 
105 	wait_queue_head_t wc;
106 
107 	int cond;
108 };
109 
110 struct oct_link_status_resp {
111 	u64 rh;
112 	struct oct_link_info link_info;
113 	u64 status;
114 };
115 
116 struct oct_timestamp_resp {
117 	u64 rh;
118 	u64 timestamp;
119 	u64 status;
120 };
121 
122 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
123 
124 union tx_info {
125 	u64 u64;
126 	struct {
127 #ifdef __BIG_ENDIAN_BITFIELD
128 		u16 gso_size;
129 		u16 gso_segs;
130 		u32 reserved;
131 #else
132 		u32 reserved;
133 		u16 gso_segs;
134 		u16 gso_size;
135 #endif
136 	} s;
137 };
138 
139 /** Octeon device properties to be used by the NIC module.
140  * Each octeon device in the system will be represented
141  * by this structure in the NIC module.
142  */
143 
144 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
145 #define OCTNIC_GSO_MAX_SIZE                                                    \
146 	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
147 
148 struct handshake {
149 	struct completion init;
150 	struct completion started;
151 	struct pci_dev *pci_dev;
152 	int init_ok;
153 	int started_ok;
154 };
155 
156 #ifdef CONFIG_PCI_IOV
157 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
158 #endif
159 
160 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
161 				    char *prefix, char *suffix);
162 
163 static int octeon_device_init(struct octeon_device *);
164 static int liquidio_stop(struct net_device *netdev);
165 static void liquidio_remove(struct pci_dev *pdev);
166 static int liquidio_probe(struct pci_dev *pdev,
167 			  const struct pci_device_id *ent);
168 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
169 				      int linkstate);
170 
171 static struct handshake handshake[MAX_OCTEON_DEVICES];
172 static struct completion first_stage;
173 
174 static void octeon_droq_bh(unsigned long pdev)
175 {
176 	int q_no;
177 	int reschedule = 0;
178 	struct octeon_device *oct = (struct octeon_device *)pdev;
179 	struct octeon_device_priv *oct_priv =
180 		(struct octeon_device_priv *)oct->priv;
181 
182 	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
183 		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
184 			continue;
185 		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
186 							  MAX_PACKET_BUDGET);
187 		lio_enable_irq(oct->droq[q_no], NULL);
188 
189 		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
190 			/* set time and cnt interrupt thresholds for this DROQ
191 			 * for NAPI
192 			 */
193 			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
194 
195 			octeon_write_csr64(
196 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
197 			    0x5700000040ULL);
198 			octeon_write_csr64(
199 			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
200 		}
201 	}
202 
203 	if (reschedule)
204 		tasklet_schedule(&oct_priv->droq_tasklet);
205 }
206 
207 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
208 {
209 	struct octeon_device_priv *oct_priv =
210 		(struct octeon_device_priv *)oct->priv;
211 	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
212 	int i;
213 
214 	do {
215 		pending_pkts = 0;
216 
217 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
218 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
219 				continue;
220 			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
221 		}
222 		if (pkt_cnt > 0) {
223 			pending_pkts += pkt_cnt;
224 			tasklet_schedule(&oct_priv->droq_tasklet);
225 		}
226 		pkt_cnt = 0;
227 		schedule_timeout_uninterruptible(1);
228 
229 	} while (retry-- && pending_pkts);
230 
231 	return pkt_cnt;
232 }
233 
234 /**
235  * \brief Forces all IO queues off on a given device
236  * @param oct Pointer to Octeon device
237  */
238 static void force_io_queues_off(struct octeon_device *oct)
239 {
240 	if ((oct->chip_id == OCTEON_CN66XX) ||
241 	    (oct->chip_id == OCTEON_CN68XX)) {
242 		/* Reset the Enable bits for Input Queues. */
243 		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
244 
245 		/* Reset the Enable bits for Output Queues. */
246 		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
247 	}
248 }
249 
250 /**
251  * \brief Cause device to go quiet so it can be safely removed/reset/etc
252  * @param oct Pointer to Octeon device
253  */
254 static inline void pcierror_quiesce_device(struct octeon_device *oct)
255 {
256 	int i;
257 
258 	/* Disable the input and output queues now. No more packets will
259 	 * arrive from Octeon, but we should wait for all packet processing
260 	 * to finish.
261 	 */
262 	force_io_queues_off(oct);
263 
264 	/* To allow for in-flight requests */
265 	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
266 
267 	if (wait_for_pending_requests(oct))
268 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
269 
270 	/* Force all requests waiting to be fetched by OCTEON to complete. */
271 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
272 		struct octeon_instr_queue *iq;
273 
274 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
275 			continue;
276 		iq = oct->instr_queue[i];
277 
278 		if (atomic_read(&iq->instr_pending)) {
279 			spin_lock_bh(&iq->lock);
280 			iq->fill_cnt = 0;
281 			iq->octeon_read_index = iq->host_write_index;
282 			iq->stats.instr_processed +=
283 				atomic_read(&iq->instr_pending);
284 			lio_process_iq_request_list(oct, iq, 0);
285 			spin_unlock_bh(&iq->lock);
286 		}
287 	}
288 
289 	/* Force all pending ordered list requests to time out. */
290 	lio_process_ordered_list(oct, 1);
291 
292 	/* We do not need to wait for output queue packets to be processed. */
293 }
294 
295 /**
296  * \brief Cleanup PCI AER uncorrectable error status
297  * @param dev Pointer to PCI device
298  */
299 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
300 {
301 	int pos = 0x100;
302 	u32 status, mask;
303 
304 	pr_info("%s :\n", __func__);
305 
306 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
307 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
308 	if (dev->error_state == pci_channel_io_normal)
309 		status &= ~mask;        /* Clear corresponding nonfatal bits */
310 	else
311 		status &= mask;         /* Clear corresponding fatal bits */
312 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
313 }
314 
315 /**
316  * \brief Stop all PCI IO to a given device
317  * @param dev Pointer to Octeon device
318  */
319 static void stop_pci_io(struct octeon_device *oct)
320 {
321 	/* No more instructions will be forwarded. */
322 	atomic_set(&oct->status, OCT_DEV_IN_RESET);
323 
324 	pci_disable_device(oct->pci_dev);
325 
326 	/* Disable interrupts  */
327 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
328 
329 	pcierror_quiesce_device(oct);
330 
331 	/* Release the interrupt line */
332 	free_irq(oct->pci_dev->irq, oct);
333 
334 	if (oct->flags & LIO_FLAG_MSI_ENABLED)
335 		pci_disable_msi(oct->pci_dev);
336 
337 	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
338 		lio_get_state_string(&oct->status));
339 
340 	/* making it a common function for all OCTEON models */
341 	cleanup_aer_uncorrect_error_status(oct->pci_dev);
342 }
343 
344 /**
345  * \brief called when PCI error is detected
346  * @param pdev Pointer to PCI device
347  * @param state The current pci connection state
348  *
349  * This function is called after a PCI bus error affecting
350  * this device has been detected.
351  */
352 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
353 						     pci_channel_state_t state)
354 {
355 	struct octeon_device *oct = pci_get_drvdata(pdev);
356 
357 	/* Non-correctable Non-fatal errors */
358 	if (state == pci_channel_io_normal) {
359 		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
360 		cleanup_aer_uncorrect_error_status(oct->pci_dev);
361 		return PCI_ERS_RESULT_CAN_RECOVER;
362 	}
363 
364 	/* Non-correctable Fatal errors */
365 	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
366 	stop_pci_io(oct);
367 
368 	/* Always return a DISCONNECT. There is no support for recovery but only
369 	 * for a clean shutdown.
370 	 */
371 	return PCI_ERS_RESULT_DISCONNECT;
372 }
373 
374 /**
375  * \brief mmio handler
376  * @param pdev Pointer to PCI device
377  */
378 static pci_ers_result_t liquidio_pcie_mmio_enabled(
379 				struct pci_dev *pdev __attribute__((unused)))
380 {
381 	/* We should never hit this since we never ask for a reset for a Fatal
382 	 * Error. We always return DISCONNECT in io_error above.
383 	 * But play safe and return RECOVERED for now.
384 	 */
385 	return PCI_ERS_RESULT_RECOVERED;
386 }
387 
388 /**
389  * \brief called after the pci bus has been reset.
390  * @param pdev Pointer to PCI device
391  *
392  * Restart the card from scratch, as if from a cold-boot. Implementation
393  * resembles the first-half of the octeon_resume routine.
394  */
395 static pci_ers_result_t liquidio_pcie_slot_reset(
396 				struct pci_dev *pdev __attribute__((unused)))
397 {
398 	/* We should never hit this since we never ask for a reset for a Fatal
399 	 * Error. We always return DISCONNECT in io_error above.
400 	 * But play safe and return RECOVERED for now.
401 	 */
402 	return PCI_ERS_RESULT_RECOVERED;
403 }
404 
405 /**
406  * \brief called when traffic can start flowing again.
407  * @param pdev Pointer to PCI device
408  *
409  * This callback is called when the error recovery driver tells us that
410  * its OK to resume normal operation. Implementation resembles the
411  * second-half of the octeon_resume routine.
412  */
413 static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
414 {
415 	/* Nothing to be done here. */
416 }
417 
418 #ifdef CONFIG_PM
419 /**
420  * \brief called when suspending
421  * @param pdev Pointer to PCI device
422  * @param state state to suspend to
423  */
424 static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
425 			    pm_message_t state __attribute__((unused)))
426 {
427 	return 0;
428 }
429 
430 /**
431  * \brief called when resuming
432  * @param pdev Pointer to PCI device
433  */
434 static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
435 {
436 	return 0;
437 }
438 #endif
439 
440 /* For PCI-E Advanced Error Recovery (AER) Interface */
441 static const struct pci_error_handlers liquidio_err_handler = {
442 	.error_detected = liquidio_pcie_error_detected,
443 	.mmio_enabled	= liquidio_pcie_mmio_enabled,
444 	.slot_reset	= liquidio_pcie_slot_reset,
445 	.resume		= liquidio_pcie_resume,
446 };
447 
448 static const struct pci_device_id liquidio_pci_tbl[] = {
449 	{       /* 68xx */
450 		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
451 	},
452 	{       /* 66xx */
453 		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
454 	},
455 	{       /* 23xx pf */
456 		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
457 	},
458 	{
459 		0, 0, 0, 0, 0, 0, 0
460 	}
461 };
462 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
463 
464 static struct pci_driver liquidio_pci_driver = {
465 	.name		= "LiquidIO",
466 	.id_table	= liquidio_pci_tbl,
467 	.probe		= liquidio_probe,
468 	.remove		= liquidio_remove,
469 	.err_handler	= &liquidio_err_handler,    /* For AER */
470 
471 #ifdef CONFIG_PM
472 	.suspend	= liquidio_suspend,
473 	.resume		= liquidio_resume,
474 #endif
475 #ifdef CONFIG_PCI_IOV
476 	.sriov_configure = liquidio_enable_sriov,
477 #endif
478 };
479 
480 /**
481  * \brief register PCI driver
482  */
483 static int liquidio_init_pci(void)
484 {
485 	return pci_register_driver(&liquidio_pci_driver);
486 }
487 
488 /**
489  * \brief unregister PCI driver
490  */
491 static void liquidio_deinit_pci(void)
492 {
493 	pci_unregister_driver(&liquidio_pci_driver);
494 }
495 
496 /**
497  * \brief Check Tx queue status, and take appropriate action
498  * @param lio per-network private data
499  * @returns 0 if full, number of queues woken up otherwise
500  */
501 static inline int check_txq_status(struct lio *lio)
502 {
503 	int numqs = lio->netdev->real_num_tx_queues;
504 	int ret_val = 0;
505 	int q, iq;
506 
507 	/* check each sub-queue state */
508 	for (q = 0; q < numqs; q++) {
509 		iq = lio->linfo.txpciq[q %
510 			lio->oct_dev->num_iqs].s.q_no;
511 		if (octnet_iq_is_full(lio->oct_dev, iq))
512 			continue;
513 		if (__netif_subqueue_stopped(lio->netdev, q)) {
514 			netif_wake_subqueue(lio->netdev, q);
515 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
516 						  tx_restart, 1);
517 			ret_val++;
518 		}
519 	}
520 
521 	return ret_val;
522 }
523 
524 /**
525  * \brief Print link information
526  * @param netdev network device
527  */
528 static void print_link_info(struct net_device *netdev)
529 {
530 	struct lio *lio = GET_LIO(netdev);
531 
532 	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
533 	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
534 		struct oct_link_info *linfo = &lio->linfo;
535 
536 		if (linfo->link.s.link_up) {
537 			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
538 				   linfo->link.s.speed,
539 				   (linfo->link.s.duplex) ? "Full" : "Half");
540 		} else {
541 			netif_info(lio, link, lio->netdev, "Link Down\n");
542 		}
543 	}
544 }
545 
546 /**
547  * \brief Routine to notify MTU change
548  * @param work work_struct data structure
549  */
550 static void octnet_link_status_change(struct work_struct *work)
551 {
552 	struct cavium_wk *wk = (struct cavium_wk *)work;
553 	struct lio *lio = (struct lio *)wk->ctxptr;
554 
555 	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
556 	 * this API is invoked only when new max-MTU of the interface is
557 	 * less than current MTU.
558 	 */
559 	rtnl_lock();
560 	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
561 	rtnl_unlock();
562 }
563 
564 /**
565  * \brief Sets up the mtu status change work
566  * @param netdev network device
567  */
568 static inline int setup_link_status_change_wq(struct net_device *netdev)
569 {
570 	struct lio *lio = GET_LIO(netdev);
571 	struct octeon_device *oct = lio->oct_dev;
572 
573 	lio->link_status_wq.wq = alloc_workqueue("link-status",
574 						 WQ_MEM_RECLAIM, 0);
575 	if (!lio->link_status_wq.wq) {
576 		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
577 		return -1;
578 	}
579 	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
580 			  octnet_link_status_change);
581 	lio->link_status_wq.wk.ctxptr = lio;
582 
583 	return 0;
584 }
585 
586 static inline void cleanup_link_status_change_wq(struct net_device *netdev)
587 {
588 	struct lio *lio = GET_LIO(netdev);
589 
590 	if (lio->link_status_wq.wq) {
591 		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
592 		destroy_workqueue(lio->link_status_wq.wq);
593 	}
594 }
595 
596 /**
597  * \brief Update link status
598  * @param netdev network device
599  * @param ls link status structure
600  *
601  * Called on receipt of a link status response from the core application to
602  * update each interface's link status.
603  */
604 static inline void update_link_status(struct net_device *netdev,
605 				      union oct_link_status *ls)
606 {
607 	struct lio *lio = GET_LIO(netdev);
608 	int changed = (lio->linfo.link.u64 != ls->u64);
609 	int current_max_mtu = lio->linfo.link.s.mtu;
610 	struct octeon_device *oct = lio->oct_dev;
611 
612 	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
613 		__func__, lio->linfo.link.u64, ls->u64);
614 	lio->linfo.link.u64 = ls->u64;
615 
616 	if ((lio->intf_open) && (changed)) {
617 		print_link_info(netdev);
618 		lio->link_changes++;
619 
620 		if (lio->linfo.link.s.link_up) {
621 			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
622 			netif_carrier_on(netdev);
623 			wake_txqs(netdev);
624 		} else {
625 			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
626 			netif_carrier_off(netdev);
627 			stop_txqs(netdev);
628 		}
629 		if (lio->linfo.link.s.mtu != current_max_mtu) {
630 			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
631 				   current_max_mtu, lio->linfo.link.s.mtu);
632 			netdev->max_mtu = lio->linfo.link.s.mtu;
633 		}
634 		if (lio->linfo.link.s.mtu < netdev->mtu) {
635 			dev_warn(&oct->pci_dev->dev,
636 				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
637 				     netdev->mtu, lio->linfo.link.s.mtu);
638 			queue_delayed_work(lio->link_status_wq.wq,
639 					   &lio->link_status_wq.wk.work, 0);
640 		}
641 	}
642 }
643 
644 /**
645  * lio_sync_octeon_time_cb - callback that is invoked when soft command
646  * sent by lio_sync_octeon_time() has completed successfully or failed
647  *
648  * @oct - octeon device structure
649  * @status - indicates success or failure
650  * @buf - pointer to the command that was sent to firmware
651  **/
652 static void lio_sync_octeon_time_cb(struct octeon_device *oct,
653 				    u32 status, void *buf)
654 {
655 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
656 
657 	if (status)
658 		dev_err(&oct->pci_dev->dev,
659 			"Failed to sync time to octeon; error=%d\n", status);
660 
661 	octeon_free_soft_command(oct, sc);
662 }
663 
664 /**
665  * lio_sync_octeon_time - send latest localtime to octeon firmware so that
666  * firmware will correct it's time, in case there is a time skew
667  *
668  * @work: work scheduled to send time update to octeon firmware
669  **/
670 static void lio_sync_octeon_time(struct work_struct *work)
671 {
672 	struct cavium_wk *wk = (struct cavium_wk *)work;
673 	struct lio *lio = (struct lio *)wk->ctxptr;
674 	struct octeon_device *oct = lio->oct_dev;
675 	struct octeon_soft_command *sc;
676 	struct timespec64 ts;
677 	struct lio_time *lt;
678 	int ret;
679 
680 	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
681 	if (!sc) {
682 		dev_err(&oct->pci_dev->dev,
683 			"Failed to sync time to octeon: soft command allocation failed\n");
684 		return;
685 	}
686 
687 	lt = (struct lio_time *)sc->virtdptr;
688 
689 	/* Get time of the day */
690 	getnstimeofday64(&ts);
691 	lt->sec = ts.tv_sec;
692 	lt->nsec = ts.tv_nsec;
693 	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
694 
695 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
696 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
697 				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
698 
699 	sc->callback = lio_sync_octeon_time_cb;
700 	sc->callback_arg = sc;
701 	sc->wait_time = 1000;
702 
703 	ret = octeon_send_soft_command(oct, sc);
704 	if (ret == IQ_SEND_FAILED) {
705 		dev_err(&oct->pci_dev->dev,
706 			"Failed to sync time to octeon: failed to send soft command\n");
707 		octeon_free_soft_command(oct, sc);
708 	}
709 
710 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
711 			   &lio->sync_octeon_time_wq.wk.work,
712 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
713 }
714 
715 /**
716  * setup_sync_octeon_time_wq - Sets up the work to periodically update
717  * local time to octeon firmware
718  *
719  * @netdev - network device which should send time update to firmware
720  **/
721 static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
722 {
723 	struct lio *lio = GET_LIO(netdev);
724 	struct octeon_device *oct = lio->oct_dev;
725 
726 	lio->sync_octeon_time_wq.wq =
727 		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
728 	if (!lio->sync_octeon_time_wq.wq) {
729 		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
730 		return -1;
731 	}
732 	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
733 			  lio_sync_octeon_time);
734 	lio->sync_octeon_time_wq.wk.ctxptr = lio;
735 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
736 			   &lio->sync_octeon_time_wq.wk.work,
737 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
738 
739 	return 0;
740 }
741 
742 /**
743  * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
744  * to periodically update local time to octeon firmware
745  *
746  * @netdev - network device which should send time update to firmware
747  **/
748 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
749 {
750 	struct lio *lio = GET_LIO(netdev);
751 	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
752 
753 	if (time_wq->wq) {
754 		cancel_delayed_work_sync(&time_wq->wk.work);
755 		destroy_workqueue(time_wq->wq);
756 	}
757 }
758 
759 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
760 {
761 	struct octeon_device *other_oct;
762 
763 	other_oct = lio_get_device(oct->octeon_id + 1);
764 
765 	if (other_oct && other_oct->pci_dev) {
766 		int oct_busnum, other_oct_busnum;
767 
768 		oct_busnum = oct->pci_dev->bus->number;
769 		other_oct_busnum = other_oct->pci_dev->bus->number;
770 
771 		if (oct_busnum == other_oct_busnum) {
772 			int oct_slot, other_oct_slot;
773 
774 			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
775 			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
776 
777 			if (oct_slot == other_oct_slot)
778 				return other_oct;
779 		}
780 	}
781 
782 	return NULL;
783 }
784 
785 static void disable_all_vf_links(struct octeon_device *oct)
786 {
787 	struct net_device *netdev;
788 	int max_vfs, vf, i;
789 
790 	if (!oct)
791 		return;
792 
793 	max_vfs = oct->sriov_info.max_vfs;
794 
795 	for (i = 0; i < oct->ifcount; i++) {
796 		netdev = oct->props[i].netdev;
797 		if (!netdev)
798 			continue;
799 
800 		for (vf = 0; vf < max_vfs; vf++)
801 			liquidio_set_vf_link_state(netdev, vf,
802 						   IFLA_VF_LINK_STATE_DISABLE);
803 	}
804 }
805 
806 static int liquidio_watchdog(void *param)
807 {
808 	bool err_msg_was_printed[LIO_MAX_CORES];
809 	u16 mask_of_crashed_or_stuck_cores = 0;
810 	bool all_vf_links_are_disabled = false;
811 	struct octeon_device *oct = param;
812 	struct octeon_device *other_oct;
813 #ifdef CONFIG_MODULE_UNLOAD
814 	long refcount, vfs_referencing_pf;
815 	u64 vfs_mask1, vfs_mask2;
816 #endif
817 	int core;
818 
819 	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
820 
821 	while (!kthread_should_stop()) {
822 		/* sleep for a couple of seconds so that we don't hog the CPU */
823 		set_current_state(TASK_INTERRUPTIBLE);
824 		schedule_timeout(msecs_to_jiffies(2000));
825 
826 		mask_of_crashed_or_stuck_cores =
827 		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
828 
829 		if (!mask_of_crashed_or_stuck_cores)
830 			continue;
831 
832 		WRITE_ONCE(oct->cores_crashed, true);
833 		other_oct = get_other_octeon_device(oct);
834 		if (other_oct)
835 			WRITE_ONCE(other_oct->cores_crashed, true);
836 
837 		for (core = 0; core < LIO_MAX_CORES; core++) {
838 			bool core_crashed_or_got_stuck;
839 
840 			core_crashed_or_got_stuck =
841 						(mask_of_crashed_or_stuck_cores
842 						 >> core) & 1;
843 
844 			if (core_crashed_or_got_stuck &&
845 			    !err_msg_was_printed[core]) {
846 				dev_err(&oct->pci_dev->dev,
847 					"ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
848 					core);
849 				err_msg_was_printed[core] = true;
850 			}
851 		}
852 
853 		if (all_vf_links_are_disabled)
854 			continue;
855 
856 		disable_all_vf_links(oct);
857 		disable_all_vf_links(other_oct);
858 		all_vf_links_are_disabled = true;
859 
860 #ifdef CONFIG_MODULE_UNLOAD
861 		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
862 		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
863 
864 		vfs_referencing_pf  = hweight64(vfs_mask1);
865 		vfs_referencing_pf += hweight64(vfs_mask2);
866 
867 		refcount = module_refcount(THIS_MODULE);
868 		if (refcount >= vfs_referencing_pf) {
869 			while (vfs_referencing_pf) {
870 				module_put(THIS_MODULE);
871 				vfs_referencing_pf--;
872 			}
873 		}
874 #endif
875 	}
876 
877 	return 0;
878 }
879 
880 /**
881  * \brief PCI probe handler
882  * @param pdev PCI device structure
883  * @param ent unused
884  */
885 static int
886 liquidio_probe(struct pci_dev *pdev,
887 	       const struct pci_device_id *ent __attribute__((unused)))
888 {
889 	struct octeon_device *oct_dev = NULL;
890 	struct handshake *hs;
891 
892 	oct_dev = octeon_allocate_device(pdev->device,
893 					 sizeof(struct octeon_device_priv));
894 	if (!oct_dev) {
895 		dev_err(&pdev->dev, "Unable to allocate device\n");
896 		return -ENOMEM;
897 	}
898 
899 	if (pdev->device == OCTEON_CN23XX_PF_VID)
900 		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
901 
902 	/* Enable PTP for 6XXX Device */
903 	if (((pdev->device == OCTEON_CN66XX) ||
904 	     (pdev->device == OCTEON_CN68XX)))
905 		oct_dev->ptp_enable = true;
906 	else
907 		oct_dev->ptp_enable = false;
908 
909 	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
910 		 (u32)pdev->vendor, (u32)pdev->device);
911 
912 	/* Assign octeon_device for this device to the private data area. */
913 	pci_set_drvdata(pdev, oct_dev);
914 
915 	/* set linux specific device pointer */
916 	oct_dev->pci_dev = (void *)pdev;
917 
918 	oct_dev->subsystem_id = pdev->subsystem_vendor |
919 		(pdev->subsystem_device << 16);
920 
921 	hs = &handshake[oct_dev->octeon_id];
922 	init_completion(&hs->init);
923 	init_completion(&hs->started);
924 	hs->pci_dev = pdev;
925 
926 	if (oct_dev->octeon_id == 0)
927 		/* first LiquidIO NIC is detected */
928 		complete(&first_stage);
929 
930 	if (octeon_device_init(oct_dev)) {
931 		complete(&hs->init);
932 		liquidio_remove(pdev);
933 		return -ENOMEM;
934 	}
935 
936 	if (OCTEON_CN23XX_PF(oct_dev)) {
937 		u8 bus, device, function;
938 
939 		if (atomic_read(oct_dev->adapter_refcount) == 1) {
940 			/* Each NIC gets one watchdog kernel thread.  The first
941 			 * PF (of each NIC) that gets pci_driver->probe()'d
942 			 * creates that thread.
943 			 */
944 			bus = pdev->bus->number;
945 			device = PCI_SLOT(pdev->devfn);
946 			function = PCI_FUNC(pdev->devfn);
947 			oct_dev->watchdog_task = kthread_create(
948 			    liquidio_watchdog, oct_dev,
949 			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
950 			if (!IS_ERR(oct_dev->watchdog_task)) {
951 				wake_up_process(oct_dev->watchdog_task);
952 			} else {
953 				oct_dev->watchdog_task = NULL;
954 				dev_err(&oct_dev->pci_dev->dev,
955 					"failed to create kernel_thread\n");
956 				liquidio_remove(pdev);
957 				return -1;
958 			}
959 		}
960 	}
961 
962 	oct_dev->rx_pause = 1;
963 	oct_dev->tx_pause = 1;
964 
965 	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
966 
967 	return 0;
968 }
969 
970 static bool fw_type_is_auto(void)
971 {
972 	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
973 		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
974 }
975 
976 /**
977  * \brief PCI FLR for each Octeon device.
978  * @param oct octeon device
979  */
980 static void octeon_pci_flr(struct octeon_device *oct)
981 {
982 	int rc;
983 
984 	pci_save_state(oct->pci_dev);
985 
986 	pci_cfg_access_lock(oct->pci_dev);
987 
988 	/* Quiesce the device completely */
989 	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
990 			      PCI_COMMAND_INTX_DISABLE);
991 
992 	rc = __pci_reset_function_locked(oct->pci_dev);
993 
994 	if (rc != 0)
995 		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
996 			rc, oct->pf_num);
997 
998 	pci_cfg_access_unlock(oct->pci_dev);
999 
1000 	pci_restore_state(oct->pci_dev);
1001 }
1002 
1003 /**
1004  *\brief Destroy resources associated with octeon device
1005  * @param pdev PCI device structure
1006  * @param ent unused
1007  */
1008 static void octeon_destroy_resources(struct octeon_device *oct)
1009 {
1010 	int i, refcount;
1011 	struct msix_entry *msix_entries;
1012 	struct octeon_device_priv *oct_priv =
1013 		(struct octeon_device_priv *)oct->priv;
1014 
1015 	struct handshake *hs;
1016 
1017 	switch (atomic_read(&oct->status)) {
1018 	case OCT_DEV_RUNNING:
1019 	case OCT_DEV_CORE_OK:
1020 
1021 		/* No more instructions will be forwarded. */
1022 		atomic_set(&oct->status, OCT_DEV_IN_RESET);
1023 
1024 		oct->app_mode = CVM_DRV_INVALID_APP;
1025 		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
1026 			lio_get_state_string(&oct->status));
1027 
1028 		schedule_timeout_uninterruptible(HZ / 10);
1029 
1030 		/* fallthrough */
1031 	case OCT_DEV_HOST_OK:
1032 
1033 		/* fallthrough */
1034 	case OCT_DEV_CONSOLE_INIT_DONE:
1035 		/* Remove any consoles */
1036 		octeon_remove_consoles(oct);
1037 
1038 		/* fallthrough */
1039 	case OCT_DEV_IO_QUEUES_DONE:
1040 		if (wait_for_pending_requests(oct))
1041 			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1042 
1043 		if (lio_wait_for_instr_fetch(oct))
1044 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1045 
1046 		/* Disable the input and output queues now. No more packets will
1047 		 * arrive from Octeon, but we should wait for all packet
1048 		 * processing to finish.
1049 		 */
1050 		oct->fn_list.disable_io_queues(oct);
1051 
1052 		if (lio_wait_for_oq_pkts(oct))
1053 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
1054 
1055 	/* fallthrough */
1056 	case OCT_DEV_INTR_SET_DONE:
1057 		/* Disable interrupts  */
1058 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1059 
1060 		if (oct->msix_on) {
1061 			msix_entries = (struct msix_entry *)oct->msix_entries;
1062 			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1063 				if (oct->ioq_vector[i].vector) {
1064 					/* clear the affinity_cpumask */
1065 					irq_set_affinity_hint(
1066 							msix_entries[i].vector,
1067 							NULL);
1068 					free_irq(msix_entries[i].vector,
1069 						 &oct->ioq_vector[i]);
1070 					oct->ioq_vector[i].vector = 0;
1071 				}
1072 			}
1073 			/* non-iov vector's argument is oct struct */
1074 			free_irq(msix_entries[i].vector, oct);
1075 
1076 			pci_disable_msix(oct->pci_dev);
1077 			kfree(oct->msix_entries);
1078 			oct->msix_entries = NULL;
1079 		} else {
1080 			/* Release the interrupt line */
1081 			free_irq(oct->pci_dev->irq, oct);
1082 
1083 			if (oct->flags & LIO_FLAG_MSI_ENABLED)
1084 				pci_disable_msi(oct->pci_dev);
1085 		}
1086 
1087 		kfree(oct->irq_name_storage);
1088 		oct->irq_name_storage = NULL;
1089 
1090 	/* fallthrough */
1091 	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1092 		if (OCTEON_CN23XX_PF(oct))
1093 			octeon_free_ioq_vector(oct);
1094 
1095 	/* fallthrough */
1096 	case OCT_DEV_MBOX_SETUP_DONE:
1097 		if (OCTEON_CN23XX_PF(oct))
1098 			oct->fn_list.free_mbox(oct);
1099 
1100 	/* fallthrough */
1101 	case OCT_DEV_IN_RESET:
1102 	case OCT_DEV_DROQ_INIT_DONE:
1103 		/* Wait for any pending operations */
1104 		mdelay(100);
1105 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1106 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
1107 				continue;
1108 			octeon_delete_droq(oct, i);
1109 		}
1110 
1111 		/* Force any pending handshakes to complete */
1112 		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1113 			hs = &handshake[i];
1114 
1115 			if (hs->pci_dev) {
1116 				handshake[oct->octeon_id].init_ok = 0;
1117 				complete(&handshake[oct->octeon_id].init);
1118 				handshake[oct->octeon_id].started_ok = 0;
1119 				complete(&handshake[oct->octeon_id].started);
1120 			}
1121 		}
1122 
1123 		/* fallthrough */
1124 	case OCT_DEV_RESP_LIST_INIT_DONE:
1125 		octeon_delete_response_list(oct);
1126 
1127 		/* fallthrough */
1128 	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1129 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1130 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
1131 				continue;
1132 			octeon_delete_instr_queue(oct, i);
1133 		}
1134 #ifdef CONFIG_PCI_IOV
1135 		if (oct->sriov_info.sriov_enabled)
1136 			pci_disable_sriov(oct->pci_dev);
1137 #endif
1138 		/* fallthrough */
1139 	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1140 		octeon_free_sc_buffer_pool(oct);
1141 
1142 		/* fallthrough */
1143 	case OCT_DEV_DISPATCH_INIT_DONE:
1144 		octeon_delete_dispatch_list(oct);
1145 		cancel_delayed_work_sync(&oct->nic_poll_work.work);
1146 
1147 		/* fallthrough */
1148 	case OCT_DEV_PCI_MAP_DONE:
1149 		refcount = octeon_deregister_device(oct);
1150 
1151 		/* Soft reset the octeon device before exiting.
1152 		 * However, if fw was loaded from card (i.e. autoboot),
1153 		 * perform an FLR instead.
1154 		 * Implementation note: only soft-reset the device
1155 		 * if it is a CN6XXX OR the LAST CN23XX device.
1156 		 */
1157 		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1158 			octeon_pci_flr(oct);
1159 		else if (OCTEON_CN6XXX(oct) || !refcount)
1160 			oct->fn_list.soft_reset(oct);
1161 
1162 		octeon_unmap_pci_barx(oct, 0);
1163 		octeon_unmap_pci_barx(oct, 1);
1164 
1165 		/* fallthrough */
1166 	case OCT_DEV_PCI_ENABLE_DONE:
1167 		pci_clear_master(oct->pci_dev);
1168 		/* Disable the device, releasing the PCI INT */
1169 		pci_disable_device(oct->pci_dev);
1170 
1171 		/* fallthrough */
1172 	case OCT_DEV_BEGIN_STATE:
1173 		/* Nothing to be done here either */
1174 		break;
1175 	}                       /* end switch (oct->status) */
1176 
1177 	tasklet_kill(&oct_priv->droq_tasklet);
1178 }
1179 
1180 /**
1181  * \brief Callback for rx ctrl
1182  * @param status status of request
1183  * @param buf pointer to resp structure
1184  */
1185 static void rx_ctl_callback(struct octeon_device *oct,
1186 			    u32 status,
1187 			    void *buf)
1188 {
1189 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1190 	struct liquidio_rx_ctl_context *ctx;
1191 
1192 	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1193 
1194 	oct = lio_get_device(ctx->octeon_id);
1195 	if (status)
1196 		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
1197 			CVM_CAST64(status));
1198 	WRITE_ONCE(ctx->cond, 1);
1199 
1200 	/* This barrier is required to be sure that the response has been
1201 	 * written fully before waking up the handler
1202 	 */
1203 	wmb();
1204 
1205 	wake_up_interruptible(&ctx->wc);
1206 }
1207 
1208 /**
1209  * \brief Send Rx control command
1210  * @param lio per-network private data
1211  * @param start_stop whether to start or stop
1212  */
1213 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1214 {
1215 	struct octeon_soft_command *sc;
1216 	struct liquidio_rx_ctl_context *ctx;
1217 	union octnet_cmd *ncmd;
1218 	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
1219 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1220 	int retval;
1221 
1222 	if (oct->props[lio->ifidx].rx_on == start_stop)
1223 		return;
1224 
1225 	sc = (struct octeon_soft_command *)
1226 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1227 					  16, ctx_size);
1228 
1229 	ncmd = (union octnet_cmd *)sc->virtdptr;
1230 	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1231 
1232 	WRITE_ONCE(ctx->cond, 0);
1233 	ctx->octeon_id = lio_get_device_id(oct);
1234 	init_waitqueue_head(&ctx->wc);
1235 
1236 	ncmd->u64 = 0;
1237 	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1238 	ncmd->s.param1 = start_stop;
1239 
1240 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1241 
1242 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1243 
1244 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1245 				    OPCODE_NIC_CMD, 0, 0, 0);
1246 
1247 	sc->callback = rx_ctl_callback;
1248 	sc->callback_arg = sc;
1249 	sc->wait_time = 5000;
1250 
1251 	retval = octeon_send_soft_command(oct, sc);
1252 	if (retval == IQ_SEND_FAILED) {
1253 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1254 	} else {
1255 		/* Sleep on a wait queue till the cond flag indicates that the
1256 		 * response arrived or timed-out.
1257 		 */
1258 		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
1259 			return;
1260 		oct->props[lio->ifidx].rx_on = start_stop;
1261 	}
1262 
1263 	octeon_free_soft_command(oct, sc);
1264 }
1265 
1266 /**
1267  * \brief Destroy NIC device interface
1268  * @param oct octeon device
1269  * @param ifidx which interface to destroy
1270  *
1271  * Cleanup associated with each interface for an Octeon device  when NIC
1272  * module is being unloaded or if initialization fails during load.
1273  */
1274 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1275 {
1276 	struct net_device *netdev = oct->props[ifidx].netdev;
1277 	struct lio *lio;
1278 	struct napi_struct *napi, *n;
1279 
1280 	if (!netdev) {
1281 		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1282 			__func__, ifidx);
1283 		return;
1284 	}
1285 
1286 	lio = GET_LIO(netdev);
1287 
1288 	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1289 
1290 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1291 		liquidio_stop(netdev);
1292 
1293 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1294 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1295 			napi_disable(napi);
1296 
1297 		oct->props[lio->ifidx].napi_enabled = 0;
1298 
1299 		if (OCTEON_CN23XX_PF(oct))
1300 			oct->droq[0]->ops.poll_mode = 0;
1301 	}
1302 
1303 	/* Delete NAPI */
1304 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1305 		netif_napi_del(napi);
1306 
1307 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1308 		unregister_netdev(netdev);
1309 
1310 	cleanup_sync_octeon_time_wq(netdev);
1311 	cleanup_link_status_change_wq(netdev);
1312 
1313 	cleanup_rx_oom_poll_fn(netdev);
1314 
1315 	lio_delete_glists(lio);
1316 
1317 	free_netdev(netdev);
1318 
1319 	oct->props[ifidx].gmxport = -1;
1320 
1321 	oct->props[ifidx].netdev = NULL;
1322 }
1323 
1324 /**
1325  * \brief Stop complete NIC functionality
1326  * @param oct octeon device
1327  */
1328 static int liquidio_stop_nic_module(struct octeon_device *oct)
1329 {
1330 	int i, j;
1331 	struct lio *lio;
1332 
1333 	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1334 	if (!oct->ifcount) {
1335 		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1336 		return 1;
1337 	}
1338 
1339 	spin_lock_bh(&oct->cmd_resp_wqlock);
1340 	oct->cmd_resp_state = OCT_DRV_OFFLINE;
1341 	spin_unlock_bh(&oct->cmd_resp_wqlock);
1342 
1343 	lio_vf_rep_destroy(oct);
1344 
1345 	for (i = 0; i < oct->ifcount; i++) {
1346 		lio = GET_LIO(oct->props[i].netdev);
1347 		for (j = 0; j < oct->num_oqs; j++)
1348 			octeon_unregister_droq_ops(oct,
1349 						   lio->linfo.rxpciq[j].s.q_no);
1350 	}
1351 
1352 	for (i = 0; i < oct->ifcount; i++)
1353 		liquidio_destroy_nic_device(oct, i);
1354 
1355 	if (oct->devlink) {
1356 		devlink_unregister(oct->devlink);
1357 		devlink_free(oct->devlink);
1358 		oct->devlink = NULL;
1359 	}
1360 
1361 	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1362 	return 0;
1363 }
1364 
1365 /**
1366  * \brief Cleans up resources at unload time
1367  * @param pdev PCI device structure
1368  */
1369 static void liquidio_remove(struct pci_dev *pdev)
1370 {
1371 	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1372 
1373 	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1374 
1375 	if (oct_dev->watchdog_task)
1376 		kthread_stop(oct_dev->watchdog_task);
1377 
1378 	if (!oct_dev->octeon_id &&
1379 	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1380 		lio_vf_rep_modexit();
1381 
1382 	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1383 		liquidio_stop_nic_module(oct_dev);
1384 
1385 	/* Reset the octeon device and cleanup all memory allocated for
1386 	 * the octeon device by driver.
1387 	 */
1388 	octeon_destroy_resources(oct_dev);
1389 
1390 	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1391 
1392 	/* This octeon device has been removed. Update the global
1393 	 * data structure to reflect this. Free the device structure.
1394 	 */
1395 	octeon_free_device_mem(oct_dev);
1396 }
1397 
1398 /**
1399  * \brief Identify the Octeon device and to map the BAR address space
1400  * @param oct octeon device
1401  */
1402 static int octeon_chip_specific_setup(struct octeon_device *oct)
1403 {
1404 	u32 dev_id, rev_id;
1405 	int ret = 1;
1406 	char *s;
1407 
1408 	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1409 	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1410 	oct->rev_id = rev_id & 0xff;
1411 
1412 	switch (dev_id) {
1413 	case OCTEON_CN68XX_PCIID:
1414 		oct->chip_id = OCTEON_CN68XX;
1415 		ret = lio_setup_cn68xx_octeon_device(oct);
1416 		s = "CN68XX";
1417 		break;
1418 
1419 	case OCTEON_CN66XX_PCIID:
1420 		oct->chip_id = OCTEON_CN66XX;
1421 		ret = lio_setup_cn66xx_octeon_device(oct);
1422 		s = "CN66XX";
1423 		break;
1424 
1425 	case OCTEON_CN23XX_PCIID_PF:
1426 		oct->chip_id = OCTEON_CN23XX_PF_VID;
1427 		ret = setup_cn23xx_octeon_pf_device(oct);
1428 		if (ret)
1429 			break;
1430 #ifdef CONFIG_PCI_IOV
1431 		if (!ret)
1432 			pci_sriov_set_totalvfs(oct->pci_dev,
1433 					       oct->sriov_info.max_vfs);
1434 #endif
1435 		s = "CN23XX";
1436 		break;
1437 
1438 	default:
1439 		s = "?";
1440 		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1441 			dev_id);
1442 	}
1443 
1444 	if (!ret)
1445 		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1446 			 OCTEON_MAJOR_REV(oct),
1447 			 OCTEON_MINOR_REV(oct),
1448 			 octeon_get_conf(oct)->card_name,
1449 			 LIQUIDIO_VERSION);
1450 
1451 	return ret;
1452 }
1453 
1454 /**
1455  * \brief PCI initialization for each Octeon device.
1456  * @param oct octeon device
1457  */
1458 static int octeon_pci_os_setup(struct octeon_device *oct)
1459 {
1460 	/* setup PCI stuff first */
1461 	if (pci_enable_device(oct->pci_dev)) {
1462 		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1463 		return 1;
1464 	}
1465 
1466 	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1467 		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1468 		pci_disable_device(oct->pci_dev);
1469 		return 1;
1470 	}
1471 
1472 	/* Enable PCI DMA Master. */
1473 	pci_set_master(oct->pci_dev);
1474 
1475 	return 0;
1476 }
1477 
1478 /**
1479  * \brief Unmap and free network buffer
1480  * @param buf buffer
1481  */
1482 static void free_netbuf(void *buf)
1483 {
1484 	struct sk_buff *skb;
1485 	struct octnet_buf_free_info *finfo;
1486 	struct lio *lio;
1487 
1488 	finfo = (struct octnet_buf_free_info *)buf;
1489 	skb = finfo->skb;
1490 	lio = finfo->lio;
1491 
1492 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1493 			 DMA_TO_DEVICE);
1494 
1495 	tx_buffer_free(skb);
1496 }
1497 
1498 /**
1499  * \brief Unmap and free gather buffer
1500  * @param buf buffer
1501  */
1502 static void free_netsgbuf(void *buf)
1503 {
1504 	struct octnet_buf_free_info *finfo;
1505 	struct sk_buff *skb;
1506 	struct lio *lio;
1507 	struct octnic_gather *g;
1508 	int i, frags, iq;
1509 
1510 	finfo = (struct octnet_buf_free_info *)buf;
1511 	skb = finfo->skb;
1512 	lio = finfo->lio;
1513 	g = finfo->g;
1514 	frags = skb_shinfo(skb)->nr_frags;
1515 
1516 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1517 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1518 			 DMA_TO_DEVICE);
1519 
1520 	i = 1;
1521 	while (frags--) {
1522 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1523 
1524 		pci_unmap_page((lio->oct_dev)->pci_dev,
1525 			       g->sg[(i >> 2)].ptr[(i & 3)],
1526 			       frag->size, DMA_TO_DEVICE);
1527 		i++;
1528 	}
1529 
1530 	iq = skb_iq(lio->oct_dev, skb);
1531 	spin_lock(&lio->glist_lock[iq]);
1532 	list_add_tail(&g->list, &lio->glist[iq]);
1533 	spin_unlock(&lio->glist_lock[iq]);
1534 
1535 	tx_buffer_free(skb);
1536 }
1537 
1538 /**
1539  * \brief Unmap and free gather buffer with response
1540  * @param buf buffer
1541  */
1542 static void free_netsgbuf_with_resp(void *buf)
1543 {
1544 	struct octeon_soft_command *sc;
1545 	struct octnet_buf_free_info *finfo;
1546 	struct sk_buff *skb;
1547 	struct lio *lio;
1548 	struct octnic_gather *g;
1549 	int i, frags, iq;
1550 
1551 	sc = (struct octeon_soft_command *)buf;
1552 	skb = (struct sk_buff *)sc->callback_arg;
1553 	finfo = (struct octnet_buf_free_info *)&skb->cb;
1554 
1555 	lio = finfo->lio;
1556 	g = finfo->g;
1557 	frags = skb_shinfo(skb)->nr_frags;
1558 
1559 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1560 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1561 			 DMA_TO_DEVICE);
1562 
1563 	i = 1;
1564 	while (frags--) {
1565 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1566 
1567 		pci_unmap_page((lio->oct_dev)->pci_dev,
1568 			       g->sg[(i >> 2)].ptr[(i & 3)],
1569 			       frag->size, DMA_TO_DEVICE);
1570 		i++;
1571 	}
1572 
1573 	iq = skb_iq(lio->oct_dev, skb);
1574 
1575 	spin_lock(&lio->glist_lock[iq]);
1576 	list_add_tail(&g->list, &lio->glist[iq]);
1577 	spin_unlock(&lio->glist_lock[iq]);
1578 
1579 	/* Don't free the skb yet */
1580 }
1581 
1582 /**
1583  * \brief Adjust ptp frequency
1584  * @param ptp PTP clock info
1585  * @param ppb how much to adjust by, in parts-per-billion
1586  */
1587 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1588 {
1589 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1590 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1591 	u64 comp, delta;
1592 	unsigned long flags;
1593 	bool neg_adj = false;
1594 
1595 	if (ppb < 0) {
1596 		neg_adj = true;
1597 		ppb = -ppb;
1598 	}
1599 
1600 	/* The hardware adds the clock compensation value to the
1601 	 * PTP clock on every coprocessor clock cycle, so we
1602 	 * compute the delta in terms of coprocessor clocks.
1603 	 */
1604 	delta = (u64)ppb << 32;
1605 	do_div(delta, oct->coproc_clock_rate);
1606 
1607 	spin_lock_irqsave(&lio->ptp_lock, flags);
1608 	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1609 	if (neg_adj)
1610 		comp -= delta;
1611 	else
1612 		comp += delta;
1613 	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1614 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1615 
1616 	return 0;
1617 }
1618 
1619 /**
1620  * \brief Adjust ptp time
1621  * @param ptp PTP clock info
1622  * @param delta how much to adjust by, in nanosecs
1623  */
1624 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1625 {
1626 	unsigned long flags;
1627 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1628 
1629 	spin_lock_irqsave(&lio->ptp_lock, flags);
1630 	lio->ptp_adjust += delta;
1631 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1632 
1633 	return 0;
1634 }
1635 
1636 /**
1637  * \brief Get hardware clock time, including any adjustment
1638  * @param ptp PTP clock info
1639  * @param ts timespec
1640  */
1641 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1642 				struct timespec64 *ts)
1643 {
1644 	u64 ns;
1645 	unsigned long flags;
1646 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1647 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1648 
1649 	spin_lock_irqsave(&lio->ptp_lock, flags);
1650 	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1651 	ns += lio->ptp_adjust;
1652 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1653 
1654 	*ts = ns_to_timespec64(ns);
1655 
1656 	return 0;
1657 }
1658 
1659 /**
1660  * \brief Set hardware clock time. Reset adjustment
1661  * @param ptp PTP clock info
1662  * @param ts timespec
1663  */
1664 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1665 				const struct timespec64 *ts)
1666 {
1667 	u64 ns;
1668 	unsigned long flags;
1669 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1670 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1671 
1672 	ns = timespec64_to_ns(ts);
1673 
1674 	spin_lock_irqsave(&lio->ptp_lock, flags);
1675 	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1676 	lio->ptp_adjust = 0;
1677 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1678 
1679 	return 0;
1680 }
1681 
1682 /**
1683  * \brief Check if PTP is enabled
1684  * @param ptp PTP clock info
1685  * @param rq request
1686  * @param on is it on
1687  */
1688 static int
1689 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1690 		    struct ptp_clock_request *rq __attribute__((unused)),
1691 		    int on __attribute__((unused)))
1692 {
1693 	return -EOPNOTSUPP;
1694 }
1695 
1696 /**
1697  * \brief Open PTP clock source
1698  * @param netdev network device
1699  */
1700 static void oct_ptp_open(struct net_device *netdev)
1701 {
1702 	struct lio *lio = GET_LIO(netdev);
1703 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1704 
1705 	spin_lock_init(&lio->ptp_lock);
1706 
1707 	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1708 	lio->ptp_info.owner = THIS_MODULE;
1709 	lio->ptp_info.max_adj = 250000000;
1710 	lio->ptp_info.n_alarm = 0;
1711 	lio->ptp_info.n_ext_ts = 0;
1712 	lio->ptp_info.n_per_out = 0;
1713 	lio->ptp_info.pps = 0;
1714 	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1715 	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1716 	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1717 	lio->ptp_info.settime64 = liquidio_ptp_settime;
1718 	lio->ptp_info.enable = liquidio_ptp_enable;
1719 
1720 	lio->ptp_adjust = 0;
1721 
1722 	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1723 					     &oct->pci_dev->dev);
1724 
1725 	if (IS_ERR(lio->ptp_clock))
1726 		lio->ptp_clock = NULL;
1727 }
1728 
1729 /**
1730  * \brief Init PTP clock
1731  * @param oct octeon device
1732  */
1733 static void liquidio_ptp_init(struct octeon_device *oct)
1734 {
1735 	u64 clock_comp, cfg;
1736 
1737 	clock_comp = (u64)NSEC_PER_SEC << 32;
1738 	do_div(clock_comp, oct->coproc_clock_rate);
1739 	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1740 
1741 	/* Enable */
1742 	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1743 	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1744 }
1745 
1746 /**
1747  * \brief Load firmware to device
1748  * @param oct octeon device
1749  *
1750  * Maps device to firmware filename, requests firmware, and downloads it
1751  */
1752 static int load_firmware(struct octeon_device *oct)
1753 {
1754 	int ret = 0;
1755 	const struct firmware *fw;
1756 	char fw_name[LIO_MAX_FW_FILENAME_LEN];
1757 	char *tmp_fw_type;
1758 
1759 	if (fw_type_is_auto()) {
1760 		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1761 		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1762 	} else {
1763 		tmp_fw_type = fw_type;
1764 	}
1765 
1766 	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1767 		octeon_get_conf(oct)->card_name, tmp_fw_type,
1768 		LIO_FW_NAME_SUFFIX);
1769 
1770 	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1771 	if (ret) {
1772 		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1773 			fw_name);
1774 		release_firmware(fw);
1775 		return ret;
1776 	}
1777 
1778 	ret = octeon_download_firmware(oct, fw->data, fw->size);
1779 
1780 	release_firmware(fw);
1781 
1782 	return ret;
1783 }
1784 
1785 /**
1786  * \brief Poll routine for checking transmit queue status
1787  * @param work work_struct data structure
1788  */
1789 static void octnet_poll_check_txq_status(struct work_struct *work)
1790 {
1791 	struct cavium_wk *wk = (struct cavium_wk *)work;
1792 	struct lio *lio = (struct lio *)wk->ctxptr;
1793 
1794 	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1795 		return;
1796 
1797 	check_txq_status(lio);
1798 	queue_delayed_work(lio->txq_status_wq.wq,
1799 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1800 }
1801 
1802 /**
1803  * \brief Sets up the txq poll check
1804  * @param netdev network device
1805  */
1806 static inline int setup_tx_poll_fn(struct net_device *netdev)
1807 {
1808 	struct lio *lio = GET_LIO(netdev);
1809 	struct octeon_device *oct = lio->oct_dev;
1810 
1811 	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1812 						WQ_MEM_RECLAIM, 0);
1813 	if (!lio->txq_status_wq.wq) {
1814 		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1815 		return -1;
1816 	}
1817 	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1818 			  octnet_poll_check_txq_status);
1819 	lio->txq_status_wq.wk.ctxptr = lio;
1820 	queue_delayed_work(lio->txq_status_wq.wq,
1821 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1822 	return 0;
1823 }
1824 
1825 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1826 {
1827 	struct lio *lio = GET_LIO(netdev);
1828 
1829 	if (lio->txq_status_wq.wq) {
1830 		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1831 		destroy_workqueue(lio->txq_status_wq.wq);
1832 	}
1833 }
1834 
1835 /**
1836  * \brief Net device open for LiquidIO
1837  * @param netdev network device
1838  */
1839 static int liquidio_open(struct net_device *netdev)
1840 {
1841 	struct lio *lio = GET_LIO(netdev);
1842 	struct octeon_device *oct = lio->oct_dev;
1843 	struct napi_struct *napi, *n;
1844 
1845 	if (oct->props[lio->ifidx].napi_enabled == 0) {
1846 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1847 			napi_enable(napi);
1848 
1849 		oct->props[lio->ifidx].napi_enabled = 1;
1850 
1851 		if (OCTEON_CN23XX_PF(oct))
1852 			oct->droq[0]->ops.poll_mode = 1;
1853 	}
1854 
1855 	if (oct->ptp_enable)
1856 		oct_ptp_open(netdev);
1857 
1858 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
1859 
1860 	if (OCTEON_CN23XX_PF(oct)) {
1861 		if (!oct->msix_on)
1862 			if (setup_tx_poll_fn(netdev))
1863 				return -1;
1864 	} else {
1865 		if (setup_tx_poll_fn(netdev))
1866 			return -1;
1867 	}
1868 
1869 	netif_tx_start_all_queues(netdev);
1870 
1871 	/* Ready for link status updates */
1872 	lio->intf_open = 1;
1873 
1874 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1875 
1876 	/* tell Octeon to start forwarding packets to host */
1877 	send_rx_ctrl_cmd(lio, 1);
1878 
1879 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1880 		 netdev->name);
1881 
1882 	return 0;
1883 }
1884 
1885 /**
1886  * \brief Net device stop for LiquidIO
1887  * @param netdev network device
1888  */
1889 static int liquidio_stop(struct net_device *netdev)
1890 {
1891 	struct lio *lio = GET_LIO(netdev);
1892 	struct octeon_device *oct = lio->oct_dev;
1893 	struct napi_struct *napi, *n;
1894 
1895 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1896 
1897 	/* Stop any link updates */
1898 	lio->intf_open = 0;
1899 
1900 	stop_txqs(netdev);
1901 
1902 	/* Inform that netif carrier is down */
1903 	netif_carrier_off(netdev);
1904 	netif_tx_disable(netdev);
1905 
1906 	lio->linfo.link.s.link_up = 0;
1907 	lio->link_changes++;
1908 
1909 	/* Tell Octeon that nic interface is down. */
1910 	send_rx_ctrl_cmd(lio, 0);
1911 
1912 	if (OCTEON_CN23XX_PF(oct)) {
1913 		if (!oct->msix_on)
1914 			cleanup_tx_poll_fn(netdev);
1915 	} else {
1916 		cleanup_tx_poll_fn(netdev);
1917 	}
1918 
1919 	if (lio->ptp_clock) {
1920 		ptp_clock_unregister(lio->ptp_clock);
1921 		lio->ptp_clock = NULL;
1922 	}
1923 
1924 	/* Wait for any pending Rx descriptors */
1925 	if (lio_wait_for_clean_oq(oct))
1926 		netif_info(lio, rx_err, lio->netdev,
1927 			   "Proceeding with stop interface after partial RX desc processing\n");
1928 
1929 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1930 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1931 			napi_disable(napi);
1932 
1933 		oct->props[lio->ifidx].napi_enabled = 0;
1934 
1935 		if (OCTEON_CN23XX_PF(oct))
1936 			oct->droq[0]->ops.poll_mode = 0;
1937 	}
1938 
1939 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1940 
1941 	return 0;
1942 }
1943 
1944 /**
1945  * \brief Converts a mask based on net device flags
1946  * @param netdev network device
1947  *
1948  * This routine generates a octnet_ifflags mask from the net device flags
1949  * received from the OS.
1950  */
1951 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1952 {
1953 	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1954 
1955 	if (netdev->flags & IFF_PROMISC)
1956 		f |= OCTNET_IFFLAG_PROMISC;
1957 
1958 	if (netdev->flags & IFF_ALLMULTI)
1959 		f |= OCTNET_IFFLAG_ALLMULTI;
1960 
1961 	if (netdev->flags & IFF_MULTICAST) {
1962 		f |= OCTNET_IFFLAG_MULTICAST;
1963 
1964 		/* Accept all multicast addresses if there are more than we
1965 		 * can handle
1966 		 */
1967 		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1968 			f |= OCTNET_IFFLAG_ALLMULTI;
1969 	}
1970 
1971 	if (netdev->flags & IFF_BROADCAST)
1972 		f |= OCTNET_IFFLAG_BROADCAST;
1973 
1974 	return f;
1975 }
1976 
1977 /**
1978  * \brief Net device set_multicast_list
1979  * @param netdev network device
1980  */
1981 static void liquidio_set_mcast_list(struct net_device *netdev)
1982 {
1983 	struct lio *lio = GET_LIO(netdev);
1984 	struct octeon_device *oct = lio->oct_dev;
1985 	struct octnic_ctrl_pkt nctrl;
1986 	struct netdev_hw_addr *ha;
1987 	u64 *mc;
1988 	int ret;
1989 	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1990 
1991 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1992 
1993 	/* Create a ctrl pkt command to be sent to core app. */
1994 	nctrl.ncmd.u64 = 0;
1995 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1996 	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1997 	nctrl.ncmd.s.param2 = mc_count;
1998 	nctrl.ncmd.s.more = mc_count;
1999 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2000 	nctrl.netpndev = (u64)netdev;
2001 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2002 
2003 	/* copy all the addresses into the udd */
2004 	mc = &nctrl.udd[0];
2005 	netdev_for_each_mc_addr(ha, netdev) {
2006 		*mc = 0;
2007 		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2008 		/* no need to swap bytes */
2009 
2010 		if (++mc > &nctrl.udd[mc_count])
2011 			break;
2012 	}
2013 
2014 	/* Apparently, any activity in this call from the kernel has to
2015 	 * be atomic. So we won't wait for response.
2016 	 */
2017 	nctrl.wait_time = 0;
2018 
2019 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2020 	if (ret < 0) {
2021 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2022 			ret);
2023 	}
2024 }
2025 
2026 /**
2027  * \brief Net device set_mac_address
2028  * @param netdev network device
2029  */
2030 static int liquidio_set_mac(struct net_device *netdev, void *p)
2031 {
2032 	int ret = 0;
2033 	struct lio *lio = GET_LIO(netdev);
2034 	struct octeon_device *oct = lio->oct_dev;
2035 	struct sockaddr *addr = (struct sockaddr *)p;
2036 	struct octnic_ctrl_pkt nctrl;
2037 
2038 	if (!is_valid_ether_addr(addr->sa_data))
2039 		return -EADDRNOTAVAIL;
2040 
2041 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2042 
2043 	nctrl.ncmd.u64 = 0;
2044 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2045 	nctrl.ncmd.s.param1 = 0;
2046 	nctrl.ncmd.s.more = 1;
2047 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2048 	nctrl.netpndev = (u64)netdev;
2049 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2050 	nctrl.wait_time = 100;
2051 
2052 	nctrl.udd[0] = 0;
2053 	/* The MAC Address is presented in network byte order. */
2054 	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2055 
2056 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2057 	if (ret < 0) {
2058 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2059 		return -ENOMEM;
2060 	}
2061 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2062 	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2063 
2064 	return 0;
2065 }
2066 
2067 static void
2068 liquidio_get_stats64(struct net_device *netdev,
2069 		     struct rtnl_link_stats64 *lstats)
2070 {
2071 	struct lio *lio = GET_LIO(netdev);
2072 	struct octeon_device *oct;
2073 	u64 pkts = 0, drop = 0, bytes = 0;
2074 	struct oct_droq_stats *oq_stats;
2075 	struct oct_iq_stats *iq_stats;
2076 	int i, iq_no, oq_no;
2077 
2078 	oct = lio->oct_dev;
2079 
2080 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2081 		return;
2082 
2083 	for (i = 0; i < oct->num_iqs; i++) {
2084 		iq_no = lio->linfo.txpciq[i].s.q_no;
2085 		iq_stats = &oct->instr_queue[iq_no]->stats;
2086 		pkts += iq_stats->tx_done;
2087 		drop += iq_stats->tx_dropped;
2088 		bytes += iq_stats->tx_tot_bytes;
2089 	}
2090 
2091 	lstats->tx_packets = pkts;
2092 	lstats->tx_bytes = bytes;
2093 	lstats->tx_dropped = drop;
2094 
2095 	pkts = 0;
2096 	drop = 0;
2097 	bytes = 0;
2098 
2099 	for (i = 0; i < oct->num_oqs; i++) {
2100 		oq_no = lio->linfo.rxpciq[i].s.q_no;
2101 		oq_stats = &oct->droq[oq_no]->stats;
2102 		pkts += oq_stats->rx_pkts_received;
2103 		drop += (oq_stats->rx_dropped +
2104 			 oq_stats->dropped_nodispatch +
2105 			 oq_stats->dropped_toomany +
2106 			 oq_stats->dropped_nomem);
2107 		bytes += oq_stats->rx_bytes_received;
2108 	}
2109 
2110 	lstats->rx_bytes = bytes;
2111 	lstats->rx_packets = pkts;
2112 	lstats->rx_dropped = drop;
2113 
2114 	octnet_get_link_stats(netdev);
2115 	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2116 	lstats->collisions = oct->link_stats.fromhost.total_collisions;
2117 
2118 	/* detailed rx_errors: */
2119 	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2120 	/* recved pkt with crc error    */
2121 	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2122 	/* recv'd frame alignment error */
2123 	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2124 	/* recv'r fifo overrun */
2125 	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2126 
2127 	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2128 		lstats->rx_frame_errors + lstats->rx_fifo_errors;
2129 
2130 	/* detailed tx_errors */
2131 	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2132 	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2133 	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2134 
2135 	lstats->tx_errors = lstats->tx_aborted_errors +
2136 		lstats->tx_carrier_errors +
2137 		lstats->tx_fifo_errors;
2138 }
2139 
2140 /**
2141  * \brief Handler for SIOCSHWTSTAMP ioctl
2142  * @param netdev network device
2143  * @param ifr interface request
2144  * @param cmd command
2145  */
2146 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2147 {
2148 	struct hwtstamp_config conf;
2149 	struct lio *lio = GET_LIO(netdev);
2150 
2151 	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2152 		return -EFAULT;
2153 
2154 	if (conf.flags)
2155 		return -EINVAL;
2156 
2157 	switch (conf.tx_type) {
2158 	case HWTSTAMP_TX_ON:
2159 	case HWTSTAMP_TX_OFF:
2160 		break;
2161 	default:
2162 		return -ERANGE;
2163 	}
2164 
2165 	switch (conf.rx_filter) {
2166 	case HWTSTAMP_FILTER_NONE:
2167 		break;
2168 	case HWTSTAMP_FILTER_ALL:
2169 	case HWTSTAMP_FILTER_SOME:
2170 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2171 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2172 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2173 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2174 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2175 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2176 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2177 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2178 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2179 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2180 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2181 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2182 	case HWTSTAMP_FILTER_NTP_ALL:
2183 		conf.rx_filter = HWTSTAMP_FILTER_ALL;
2184 		break;
2185 	default:
2186 		return -ERANGE;
2187 	}
2188 
2189 	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2190 		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2191 
2192 	else
2193 		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2194 
2195 	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2196 }
2197 
2198 /**
2199  * \brief ioctl handler
2200  * @param netdev network device
2201  * @param ifr interface request
2202  * @param cmd command
2203  */
2204 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2205 {
2206 	struct lio *lio = GET_LIO(netdev);
2207 
2208 	switch (cmd) {
2209 	case SIOCSHWTSTAMP:
2210 		if (lio->oct_dev->ptp_enable)
2211 			return hwtstamp_ioctl(netdev, ifr);
2212 	default:
2213 		return -EOPNOTSUPP;
2214 	}
2215 }
2216 
2217 /**
2218  * \brief handle a Tx timestamp response
2219  * @param status response status
2220  * @param buf pointer to skb
2221  */
2222 static void handle_timestamp(struct octeon_device *oct,
2223 			     u32 status,
2224 			     void *buf)
2225 {
2226 	struct octnet_buf_free_info *finfo;
2227 	struct octeon_soft_command *sc;
2228 	struct oct_timestamp_resp *resp;
2229 	struct lio *lio;
2230 	struct sk_buff *skb = (struct sk_buff *)buf;
2231 
2232 	finfo = (struct octnet_buf_free_info *)skb->cb;
2233 	lio = finfo->lio;
2234 	sc = finfo->sc;
2235 	oct = lio->oct_dev;
2236 	resp = (struct oct_timestamp_resp *)sc->virtrptr;
2237 
2238 	if (status != OCTEON_REQUEST_DONE) {
2239 		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2240 			CVM_CAST64(status));
2241 		resp->timestamp = 0;
2242 	}
2243 
2244 	octeon_swap_8B_data(&resp->timestamp, 1);
2245 
2246 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2247 		struct skb_shared_hwtstamps ts;
2248 		u64 ns = resp->timestamp;
2249 
2250 		netif_info(lio, tx_done, lio->netdev,
2251 			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2252 			   skb, (unsigned long long)ns);
2253 		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2254 		skb_tstamp_tx(skb, &ts);
2255 	}
2256 
2257 	octeon_free_soft_command(oct, sc);
2258 	tx_buffer_free(skb);
2259 }
2260 
2261 /* \brief Send a data packet that will be timestamped
2262  * @param oct octeon device
2263  * @param ndata pointer to network data
2264  * @param finfo pointer to private network data
2265  */
2266 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2267 					 struct octnic_data_pkt *ndata,
2268 					 struct octnet_buf_free_info *finfo,
2269 					 int xmit_more)
2270 {
2271 	int retval;
2272 	struct octeon_soft_command *sc;
2273 	struct lio *lio;
2274 	int ring_doorbell;
2275 	u32 len;
2276 
2277 	lio = finfo->lio;
2278 
2279 	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2280 					    sizeof(struct oct_timestamp_resp));
2281 	finfo->sc = sc;
2282 
2283 	if (!sc) {
2284 		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2285 		return IQ_SEND_FAILED;
2286 	}
2287 
2288 	if (ndata->reqtype == REQTYPE_NORESP_NET)
2289 		ndata->reqtype = REQTYPE_RESP_NET;
2290 	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2291 		ndata->reqtype = REQTYPE_RESP_NET_SG;
2292 
2293 	sc->callback = handle_timestamp;
2294 	sc->callback_arg = finfo->skb;
2295 	sc->iq_no = ndata->q_no;
2296 
2297 	if (OCTEON_CN23XX_PF(oct))
2298 		len = (u32)((struct octeon_instr_ih3 *)
2299 			    (&sc->cmd.cmd3.ih3))->dlengsz;
2300 	else
2301 		len = (u32)((struct octeon_instr_ih2 *)
2302 			    (&sc->cmd.cmd2.ih2))->dlengsz;
2303 
2304 	ring_doorbell = !xmit_more;
2305 
2306 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2307 				     sc, len, ndata->reqtype);
2308 
2309 	if (retval == IQ_SEND_FAILED) {
2310 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2311 			retval);
2312 		octeon_free_soft_command(oct, sc);
2313 	} else {
2314 		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2315 	}
2316 
2317 	return retval;
2318 }
2319 
2320 /** \brief Transmit networks packets to the Octeon interface
2321  * @param skbuff   skbuff struct to be passed to network layer.
2322  * @param netdev    pointer to network device
2323  * @returns whether the packet was transmitted to the device okay or not
2324  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2325  */
2326 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2327 {
2328 	struct lio *lio;
2329 	struct octnet_buf_free_info *finfo;
2330 	union octnic_cmd_setup cmdsetup;
2331 	struct octnic_data_pkt ndata;
2332 	struct octeon_device *oct;
2333 	struct oct_iq_stats *stats;
2334 	struct octeon_instr_irh *irh;
2335 	union tx_info *tx_info;
2336 	int status = 0;
2337 	int q_idx = 0, iq_no = 0;
2338 	int j, xmit_more = 0;
2339 	u64 dptr = 0;
2340 	u32 tag = 0;
2341 
2342 	lio = GET_LIO(netdev);
2343 	oct = lio->oct_dev;
2344 
2345 	q_idx = skb_iq(oct, skb);
2346 	tag = q_idx;
2347 	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2348 
2349 	stats = &oct->instr_queue[iq_no]->stats;
2350 
2351 	/* Check for all conditions in which the current packet cannot be
2352 	 * transmitted.
2353 	 */
2354 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2355 	    (!lio->linfo.link.s.link_up) ||
2356 	    (skb->len <= 0)) {
2357 		netif_info(lio, tx_err, lio->netdev,
2358 			   "Transmit failed link_status : %d\n",
2359 			   lio->linfo.link.s.link_up);
2360 		goto lio_xmit_failed;
2361 	}
2362 
2363 	/* Use space in skb->cb to store info used to unmap and
2364 	 * free the buffers.
2365 	 */
2366 	finfo = (struct octnet_buf_free_info *)skb->cb;
2367 	finfo->lio = lio;
2368 	finfo->skb = skb;
2369 	finfo->sc = NULL;
2370 
2371 	/* Prepare the attributes for the data to be passed to OSI. */
2372 	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2373 
2374 	ndata.buf = (void *)finfo;
2375 
2376 	ndata.q_no = iq_no;
2377 
2378 	if (octnet_iq_is_full(oct, ndata.q_no)) {
2379 		/* defer sending if queue is full */
2380 		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2381 			   ndata.q_no);
2382 		stats->tx_iq_busy++;
2383 		return NETDEV_TX_BUSY;
2384 	}
2385 
2386 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2387 	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2388 	 */
2389 
2390 	ndata.datasize = skb->len;
2391 
2392 	cmdsetup.u64 = 0;
2393 	cmdsetup.s.iq_no = iq_no;
2394 
2395 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2396 		if (skb->encapsulation) {
2397 			cmdsetup.s.tnl_csum = 1;
2398 			stats->tx_vxlan++;
2399 		} else {
2400 			cmdsetup.s.transport_csum = 1;
2401 		}
2402 	}
2403 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2404 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2405 		cmdsetup.s.timestamp = 1;
2406 	}
2407 
2408 	if (skb_shinfo(skb)->nr_frags == 0) {
2409 		cmdsetup.s.u.datasize = skb->len;
2410 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2411 
2412 		/* Offload checksum calculation for TCP/UDP packets */
2413 		dptr = dma_map_single(&oct->pci_dev->dev,
2414 				      skb->data,
2415 				      skb->len,
2416 				      DMA_TO_DEVICE);
2417 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2418 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2419 				__func__);
2420 			stats->tx_dmamap_fail++;
2421 			return NETDEV_TX_BUSY;
2422 		}
2423 
2424 		if (OCTEON_CN23XX_PF(oct))
2425 			ndata.cmd.cmd3.dptr = dptr;
2426 		else
2427 			ndata.cmd.cmd2.dptr = dptr;
2428 		finfo->dptr = dptr;
2429 		ndata.reqtype = REQTYPE_NORESP_NET;
2430 
2431 	} else {
2432 		int i, frags;
2433 		struct skb_frag_struct *frag;
2434 		struct octnic_gather *g;
2435 
2436 		spin_lock(&lio->glist_lock[q_idx]);
2437 		g = (struct octnic_gather *)
2438 			lio_list_delete_head(&lio->glist[q_idx]);
2439 		spin_unlock(&lio->glist_lock[q_idx]);
2440 
2441 		if (!g) {
2442 			netif_info(lio, tx_err, lio->netdev,
2443 				   "Transmit scatter gather: glist null!\n");
2444 			goto lio_xmit_failed;
2445 		}
2446 
2447 		cmdsetup.s.gather = 1;
2448 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2449 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2450 
2451 		memset(g->sg, 0, g->sg_size);
2452 
2453 		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2454 						 skb->data,
2455 						 (skb->len - skb->data_len),
2456 						 DMA_TO_DEVICE);
2457 		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2458 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2459 				__func__);
2460 			stats->tx_dmamap_fail++;
2461 			return NETDEV_TX_BUSY;
2462 		}
2463 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2464 
2465 		frags = skb_shinfo(skb)->nr_frags;
2466 		i = 1;
2467 		while (frags--) {
2468 			frag = &skb_shinfo(skb)->frags[i - 1];
2469 
2470 			g->sg[(i >> 2)].ptr[(i & 3)] =
2471 				dma_map_page(&oct->pci_dev->dev,
2472 					     frag->page.p,
2473 					     frag->page_offset,
2474 					     frag->size,
2475 					     DMA_TO_DEVICE);
2476 
2477 			if (dma_mapping_error(&oct->pci_dev->dev,
2478 					      g->sg[i >> 2].ptr[i & 3])) {
2479 				dma_unmap_single(&oct->pci_dev->dev,
2480 						 g->sg[0].ptr[0],
2481 						 skb->len - skb->data_len,
2482 						 DMA_TO_DEVICE);
2483 				for (j = 1; j < i; j++) {
2484 					frag = &skb_shinfo(skb)->frags[j - 1];
2485 					dma_unmap_page(&oct->pci_dev->dev,
2486 						       g->sg[j >> 2].ptr[j & 3],
2487 						       frag->size,
2488 						       DMA_TO_DEVICE);
2489 				}
2490 				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2491 					__func__);
2492 				return NETDEV_TX_BUSY;
2493 			}
2494 
2495 			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2496 			i++;
2497 		}
2498 
2499 		dptr = g->sg_dma_ptr;
2500 
2501 		if (OCTEON_CN23XX_PF(oct))
2502 			ndata.cmd.cmd3.dptr = dptr;
2503 		else
2504 			ndata.cmd.cmd2.dptr = dptr;
2505 		finfo->dptr = dptr;
2506 		finfo->g = g;
2507 
2508 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
2509 	}
2510 
2511 	if (OCTEON_CN23XX_PF(oct)) {
2512 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2513 		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2514 	} else {
2515 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2516 		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2517 	}
2518 
2519 	if (skb_shinfo(skb)->gso_size) {
2520 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2521 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2522 		stats->tx_gso++;
2523 	}
2524 
2525 	/* HW insert VLAN tag */
2526 	if (skb_vlan_tag_present(skb)) {
2527 		irh->priority = skb_vlan_tag_get(skb) >> 13;
2528 		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2529 	}
2530 
2531 	xmit_more = skb->xmit_more;
2532 
2533 	if (unlikely(cmdsetup.s.timestamp))
2534 		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2535 	else
2536 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2537 	if (status == IQ_SEND_FAILED)
2538 		goto lio_xmit_failed;
2539 
2540 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2541 
2542 	if (status == IQ_SEND_STOP)
2543 		netif_stop_subqueue(netdev, q_idx);
2544 
2545 	netif_trans_update(netdev);
2546 
2547 	if (tx_info->s.gso_segs)
2548 		stats->tx_done += tx_info->s.gso_segs;
2549 	else
2550 		stats->tx_done++;
2551 	stats->tx_tot_bytes += ndata.datasize;
2552 
2553 	return NETDEV_TX_OK;
2554 
2555 lio_xmit_failed:
2556 	stats->tx_dropped++;
2557 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2558 		   iq_no, stats->tx_dropped);
2559 	if (dptr)
2560 		dma_unmap_single(&oct->pci_dev->dev, dptr,
2561 				 ndata.datasize, DMA_TO_DEVICE);
2562 
2563 	octeon_ring_doorbell_locked(oct, iq_no);
2564 
2565 	tx_buffer_free(skb);
2566 	return NETDEV_TX_OK;
2567 }
2568 
2569 /** \brief Network device Tx timeout
2570  * @param netdev    pointer to network device
2571  */
2572 static void liquidio_tx_timeout(struct net_device *netdev)
2573 {
2574 	struct lio *lio;
2575 
2576 	lio = GET_LIO(netdev);
2577 
2578 	netif_info(lio, tx_err, lio->netdev,
2579 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2580 		   netdev->stats.tx_dropped);
2581 	netif_trans_update(netdev);
2582 	wake_txqs(netdev);
2583 }
2584 
2585 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2586 				    __be16 proto __attribute__((unused)),
2587 				    u16 vid)
2588 {
2589 	struct lio *lio = GET_LIO(netdev);
2590 	struct octeon_device *oct = lio->oct_dev;
2591 	struct octnic_ctrl_pkt nctrl;
2592 	int ret = 0;
2593 
2594 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2595 
2596 	nctrl.ncmd.u64 = 0;
2597 	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2598 	nctrl.ncmd.s.param1 = vid;
2599 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2600 	nctrl.wait_time = 100;
2601 	nctrl.netpndev = (u64)netdev;
2602 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2603 
2604 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2605 	if (ret < 0) {
2606 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2607 			ret);
2608 	}
2609 
2610 	return ret;
2611 }
2612 
2613 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2614 				     __be16 proto __attribute__((unused)),
2615 				     u16 vid)
2616 {
2617 	struct lio *lio = GET_LIO(netdev);
2618 	struct octeon_device *oct = lio->oct_dev;
2619 	struct octnic_ctrl_pkt nctrl;
2620 	int ret = 0;
2621 
2622 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2623 
2624 	nctrl.ncmd.u64 = 0;
2625 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2626 	nctrl.ncmd.s.param1 = vid;
2627 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2628 	nctrl.wait_time = 100;
2629 	nctrl.netpndev = (u64)netdev;
2630 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2631 
2632 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2633 	if (ret < 0) {
2634 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2635 			ret);
2636 	}
2637 	return ret;
2638 }
2639 
2640 /** Sending command to enable/disable RX checksum offload
2641  * @param netdev                pointer to network device
2642  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
2643  * @param rx_cmd_bit            OCTNET_CMD_RXCSUM_ENABLE/
2644  *                              OCTNET_CMD_RXCSUM_DISABLE
2645  * @returns                     SUCCESS or FAILURE
2646  */
2647 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2648 				       u8 rx_cmd)
2649 {
2650 	struct lio *lio = GET_LIO(netdev);
2651 	struct octeon_device *oct = lio->oct_dev;
2652 	struct octnic_ctrl_pkt nctrl;
2653 	int ret = 0;
2654 
2655 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2656 
2657 	nctrl.ncmd.u64 = 0;
2658 	nctrl.ncmd.s.cmd = command;
2659 	nctrl.ncmd.s.param1 = rx_cmd;
2660 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2661 	nctrl.wait_time = 100;
2662 	nctrl.netpndev = (u64)netdev;
2663 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2664 
2665 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2666 	if (ret < 0) {
2667 		dev_err(&oct->pci_dev->dev,
2668 			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2669 			ret);
2670 	}
2671 	return ret;
2672 }
2673 
2674 /** Sending command to add/delete VxLAN UDP port to firmware
2675  * @param netdev                pointer to network device
2676  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2677  * @param vxlan_port            VxLAN port to be added or deleted
2678  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
2679  *                              OCTNET_CMD_VXLAN_PORT_DEL
2680  * @returns                     SUCCESS or FAILURE
2681  */
2682 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2683 				       u16 vxlan_port, u8 vxlan_cmd_bit)
2684 {
2685 	struct lio *lio = GET_LIO(netdev);
2686 	struct octeon_device *oct = lio->oct_dev;
2687 	struct octnic_ctrl_pkt nctrl;
2688 	int ret = 0;
2689 
2690 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2691 
2692 	nctrl.ncmd.u64 = 0;
2693 	nctrl.ncmd.s.cmd = command;
2694 	nctrl.ncmd.s.more = vxlan_cmd_bit;
2695 	nctrl.ncmd.s.param1 = vxlan_port;
2696 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2697 	nctrl.wait_time = 100;
2698 	nctrl.netpndev = (u64)netdev;
2699 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2700 
2701 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2702 	if (ret < 0) {
2703 		dev_err(&oct->pci_dev->dev,
2704 			"VxLAN port add/delete failed in core (ret:0x%x)\n",
2705 			ret);
2706 	}
2707 	return ret;
2708 }
2709 
2710 /** \brief Net device fix features
2711  * @param netdev  pointer to network device
2712  * @param request features requested
2713  * @returns updated features list
2714  */
2715 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2716 					       netdev_features_t request)
2717 {
2718 	struct lio *lio = netdev_priv(netdev);
2719 
2720 	if ((request & NETIF_F_RXCSUM) &&
2721 	    !(lio->dev_capability & NETIF_F_RXCSUM))
2722 		request &= ~NETIF_F_RXCSUM;
2723 
2724 	if ((request & NETIF_F_HW_CSUM) &&
2725 	    !(lio->dev_capability & NETIF_F_HW_CSUM))
2726 		request &= ~NETIF_F_HW_CSUM;
2727 
2728 	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2729 		request &= ~NETIF_F_TSO;
2730 
2731 	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2732 		request &= ~NETIF_F_TSO6;
2733 
2734 	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2735 		request &= ~NETIF_F_LRO;
2736 
2737 	/*Disable LRO if RXCSUM is off */
2738 	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2739 	    (lio->dev_capability & NETIF_F_LRO))
2740 		request &= ~NETIF_F_LRO;
2741 
2742 	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2743 	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2744 		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2745 
2746 	return request;
2747 }
2748 
2749 /** \brief Net device set features
2750  * @param netdev  pointer to network device
2751  * @param features features to enable/disable
2752  */
2753 static int liquidio_set_features(struct net_device *netdev,
2754 				 netdev_features_t features)
2755 {
2756 	struct lio *lio = netdev_priv(netdev);
2757 
2758 	if ((features & NETIF_F_LRO) &&
2759 	    (lio->dev_capability & NETIF_F_LRO) &&
2760 	    !(netdev->features & NETIF_F_LRO))
2761 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2762 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2763 	else if (!(features & NETIF_F_LRO) &&
2764 		 (lio->dev_capability & NETIF_F_LRO) &&
2765 		 (netdev->features & NETIF_F_LRO))
2766 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2767 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2768 
2769 	/* Sending command to firmware to enable/disable RX checksum
2770 	 * offload settings using ethtool
2771 	 */
2772 	if (!(netdev->features & NETIF_F_RXCSUM) &&
2773 	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2774 	    (features & NETIF_F_RXCSUM))
2775 		liquidio_set_rxcsum_command(netdev,
2776 					    OCTNET_CMD_TNL_RX_CSUM_CTL,
2777 					    OCTNET_CMD_RXCSUM_ENABLE);
2778 	else if ((netdev->features & NETIF_F_RXCSUM) &&
2779 		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2780 		 !(features & NETIF_F_RXCSUM))
2781 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2782 					    OCTNET_CMD_RXCSUM_DISABLE);
2783 
2784 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2785 	    (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2786 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2787 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2788 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
2789 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2790 		 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2791 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2792 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2793 				     OCTNET_CMD_VLAN_FILTER_DISABLE);
2794 
2795 	return 0;
2796 }
2797 
2798 static void liquidio_add_vxlan_port(struct net_device *netdev,
2799 				    struct udp_tunnel_info *ti)
2800 {
2801 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2802 		return;
2803 
2804 	liquidio_vxlan_port_command(netdev,
2805 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2806 				    htons(ti->port),
2807 				    OCTNET_CMD_VXLAN_PORT_ADD);
2808 }
2809 
2810 static void liquidio_del_vxlan_port(struct net_device *netdev,
2811 				    struct udp_tunnel_info *ti)
2812 {
2813 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2814 		return;
2815 
2816 	liquidio_vxlan_port_command(netdev,
2817 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2818 				    htons(ti->port),
2819 				    OCTNET_CMD_VXLAN_PORT_DEL);
2820 }
2821 
2822 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2823 				 u8 *mac, bool is_admin_assigned)
2824 {
2825 	struct lio *lio = GET_LIO(netdev);
2826 	struct octeon_device *oct = lio->oct_dev;
2827 	struct octnic_ctrl_pkt nctrl;
2828 
2829 	if (!is_valid_ether_addr(mac))
2830 		return -EINVAL;
2831 
2832 	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2833 		return -EINVAL;
2834 
2835 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2836 
2837 	nctrl.ncmd.u64 = 0;
2838 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2839 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
2840 	nctrl.ncmd.s.param1 = vfidx + 1;
2841 	nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
2842 	nctrl.ncmd.s.more = 1;
2843 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2844 	nctrl.netpndev = (u64)netdev;
2845 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2846 	nctrl.wait_time = LIO_CMD_WAIT_TM;
2847 
2848 	nctrl.udd[0] = 0;
2849 	/* The MAC Address is presented in network byte order. */
2850 	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2851 
2852 	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2853 
2854 	octnet_send_nic_ctrl_pkt(oct, &nctrl);
2855 
2856 	return 0;
2857 }
2858 
2859 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2860 {
2861 	struct lio *lio = GET_LIO(netdev);
2862 	struct octeon_device *oct = lio->oct_dev;
2863 	int retval;
2864 
2865 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2866 		return -EINVAL;
2867 
2868 	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2869 	if (!retval)
2870 		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2871 
2872 	return retval;
2873 }
2874 
2875 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2876 				u16 vlan, u8 qos, __be16 vlan_proto)
2877 {
2878 	struct lio *lio = GET_LIO(netdev);
2879 	struct octeon_device *oct = lio->oct_dev;
2880 	struct octnic_ctrl_pkt nctrl;
2881 	u16 vlantci;
2882 
2883 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2884 		return -EINVAL;
2885 
2886 	if (vlan_proto != htons(ETH_P_8021Q))
2887 		return -EPROTONOSUPPORT;
2888 
2889 	if (vlan >= VLAN_N_VID || qos > 7)
2890 		return -EINVAL;
2891 
2892 	if (vlan)
2893 		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2894 	else
2895 		vlantci = 0;
2896 
2897 	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2898 		return 0;
2899 
2900 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2901 
2902 	if (vlan)
2903 		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2904 	else
2905 		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2906 
2907 	nctrl.ncmd.s.param1 = vlantci;
2908 	nctrl.ncmd.s.param2 =
2909 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2910 	nctrl.ncmd.s.more = 0;
2911 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2912 	nctrl.cb_fn = 0;
2913 	nctrl.wait_time = LIO_CMD_WAIT_TM;
2914 
2915 	octnet_send_nic_ctrl_pkt(oct, &nctrl);
2916 
2917 	oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2918 
2919 	return 0;
2920 }
2921 
2922 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2923 				  struct ifla_vf_info *ivi)
2924 {
2925 	struct lio *lio = GET_LIO(netdev);
2926 	struct octeon_device *oct = lio->oct_dev;
2927 	u8 *macaddr;
2928 
2929 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2930 		return -EINVAL;
2931 
2932 	ivi->vf = vfidx;
2933 	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2934 	ether_addr_copy(&ivi->mac[0], macaddr);
2935 	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2936 	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2937 	if (oct->sriov_info.trusted_vf.active &&
2938 	    oct->sriov_info.trusted_vf.id == vfidx)
2939 		ivi->trusted = true;
2940 	else
2941 		ivi->trusted = false;
2942 	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2943 	return 0;
2944 }
2945 
2946 static void trusted_vf_callback(struct octeon_device *oct_dev,
2947 				u32 status, void *ptr)
2948 {
2949 	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
2950 	struct lio_trusted_vf_ctx *ctx;
2951 
2952 	ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
2953 	ctx->status = status;
2954 
2955 	complete(&ctx->complete);
2956 }
2957 
2958 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
2959 {
2960 	struct octeon_device *oct = lio->oct_dev;
2961 	struct lio_trusted_vf_ctx *ctx;
2962 	struct octeon_soft_command *sc;
2963 	int ctx_size, retval;
2964 
2965 	ctx_size = sizeof(struct lio_trusted_vf_ctx);
2966 	sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size);
2967 
2968 	ctx  = (struct lio_trusted_vf_ctx *)sc->ctxptr;
2969 	init_completion(&ctx->complete);
2970 
2971 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2972 
2973 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
2974 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
2975 				    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
2976 				    trusted);
2977 
2978 	sc->callback = trusted_vf_callback;
2979 	sc->callback_arg = sc;
2980 	sc->wait_time = 1000;
2981 
2982 	retval = octeon_send_soft_command(oct, sc);
2983 	if (retval == IQ_SEND_FAILED) {
2984 		retval = -1;
2985 	} else {
2986 		/* Wait for response or timeout */
2987 		if (wait_for_completion_timeout(&ctx->complete,
2988 						msecs_to_jiffies(2000)))
2989 			retval = ctx->status;
2990 		else
2991 			retval = -1;
2992 	}
2993 
2994 	octeon_free_soft_command(oct, sc);
2995 
2996 	return retval;
2997 }
2998 
2999 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3000 				 bool setting)
3001 {
3002 	struct lio *lio = GET_LIO(netdev);
3003 	struct octeon_device *oct = lio->oct_dev;
3004 
3005 	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3006 		/* trusted vf is not supported by firmware older than 1.7.1 */
3007 		return -EOPNOTSUPP;
3008 	}
3009 
3010 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3011 		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3012 		return -EINVAL;
3013 	}
3014 
3015 	if (setting) {
3016 		/* Set */
3017 
3018 		if (oct->sriov_info.trusted_vf.active &&
3019 		    oct->sriov_info.trusted_vf.id == vfidx)
3020 			return 0;
3021 
3022 		if (oct->sriov_info.trusted_vf.active) {
3023 			netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3024 			return -EPERM;
3025 		}
3026 	} else {
3027 		/* Clear */
3028 
3029 		if (!oct->sriov_info.trusted_vf.active)
3030 			return 0;
3031 	}
3032 
3033 	if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3034 		if (setting) {
3035 			oct->sriov_info.trusted_vf.id = vfidx;
3036 			oct->sriov_info.trusted_vf.active = true;
3037 		} else {
3038 			oct->sriov_info.trusted_vf.active = false;
3039 		}
3040 
3041 		netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3042 			   setting ? "" : "not ");
3043 	} else {
3044 		netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3045 		return -1;
3046 	}
3047 
3048 	return 0;
3049 }
3050 
3051 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3052 				      int linkstate)
3053 {
3054 	struct lio *lio = GET_LIO(netdev);
3055 	struct octeon_device *oct = lio->oct_dev;
3056 	struct octnic_ctrl_pkt nctrl;
3057 
3058 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3059 		return -EINVAL;
3060 
3061 	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3062 		return 0;
3063 
3064 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3065 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3066 	nctrl.ncmd.s.param1 =
3067 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3068 	nctrl.ncmd.s.param2 = linkstate;
3069 	nctrl.ncmd.s.more = 0;
3070 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3071 	nctrl.cb_fn = 0;
3072 	nctrl.wait_time = LIO_CMD_WAIT_TM;
3073 
3074 	octnet_send_nic_ctrl_pkt(oct, &nctrl);
3075 
3076 	oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3077 
3078 	return 0;
3079 }
3080 
3081 static int
3082 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3083 {
3084 	struct lio_devlink_priv *priv;
3085 	struct octeon_device *oct;
3086 
3087 	priv = devlink_priv(devlink);
3088 	oct = priv->oct;
3089 
3090 	*mode = oct->eswitch_mode;
3091 
3092 	return 0;
3093 }
3094 
3095 static int
3096 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
3097 {
3098 	struct lio_devlink_priv *priv;
3099 	struct octeon_device *oct;
3100 	int ret = 0;
3101 
3102 	priv = devlink_priv(devlink);
3103 	oct = priv->oct;
3104 
3105 	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3106 		return -EINVAL;
3107 
3108 	if (oct->eswitch_mode == mode)
3109 		return 0;
3110 
3111 	switch (mode) {
3112 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3113 		oct->eswitch_mode = mode;
3114 		ret = lio_vf_rep_create(oct);
3115 		break;
3116 
3117 	case DEVLINK_ESWITCH_MODE_LEGACY:
3118 		lio_vf_rep_destroy(oct);
3119 		oct->eswitch_mode = mode;
3120 		break;
3121 
3122 	default:
3123 		ret = -EINVAL;
3124 	}
3125 
3126 	return ret;
3127 }
3128 
3129 static const struct devlink_ops liquidio_devlink_ops = {
3130 	.eswitch_mode_get = liquidio_eswitch_mode_get,
3131 	.eswitch_mode_set = liquidio_eswitch_mode_set,
3132 };
3133 
3134 static int
3135 lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
3136 {
3137 	struct lio *lio = GET_LIO(dev);
3138 	struct octeon_device *oct = lio->oct_dev;
3139 
3140 	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3141 		return -EOPNOTSUPP;
3142 
3143 	switch (attr->id) {
3144 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
3145 		attr->u.ppid.id_len = ETH_ALEN;
3146 		ether_addr_copy(attr->u.ppid.id,
3147 				(void *)&lio->linfo.hw_addr + 2);
3148 		break;
3149 
3150 	default:
3151 		return -EOPNOTSUPP;
3152 	}
3153 
3154 	return 0;
3155 }
3156 
3157 static const struct switchdev_ops lio_pf_switchdev_ops = {
3158 	.switchdev_port_attr_get = lio_pf_switchdev_attr_get,
3159 };
3160 
3161 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3162 				 struct ifla_vf_stats *vf_stats)
3163 {
3164 	struct lio *lio = GET_LIO(netdev);
3165 	struct octeon_device *oct = lio->oct_dev;
3166 	struct oct_vf_stats stats;
3167 	int ret;
3168 
3169 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3170 		return -EINVAL;
3171 
3172 	memset(&stats, 0, sizeof(struct oct_vf_stats));
3173 	ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3174 	if (!ret) {
3175 		vf_stats->rx_packets = stats.rx_packets;
3176 		vf_stats->tx_packets = stats.tx_packets;
3177 		vf_stats->rx_bytes = stats.rx_bytes;
3178 		vf_stats->tx_bytes = stats.tx_bytes;
3179 		vf_stats->broadcast = stats.broadcast;
3180 		vf_stats->multicast = stats.multicast;
3181 	}
3182 
3183 	return ret;
3184 }
3185 
3186 static const struct net_device_ops lionetdevops = {
3187 	.ndo_open		= liquidio_open,
3188 	.ndo_stop		= liquidio_stop,
3189 	.ndo_start_xmit		= liquidio_xmit,
3190 	.ndo_get_stats64	= liquidio_get_stats64,
3191 	.ndo_set_mac_address	= liquidio_set_mac,
3192 	.ndo_set_rx_mode	= liquidio_set_mcast_list,
3193 	.ndo_tx_timeout		= liquidio_tx_timeout,
3194 
3195 	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3196 	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3197 	.ndo_change_mtu		= liquidio_change_mtu,
3198 	.ndo_do_ioctl		= liquidio_ioctl,
3199 	.ndo_fix_features	= liquidio_fix_features,
3200 	.ndo_set_features	= liquidio_set_features,
3201 	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
3202 	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
3203 	.ndo_set_vf_mac		= liquidio_set_vf_mac,
3204 	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
3205 	.ndo_get_vf_config	= liquidio_get_vf_config,
3206 	.ndo_set_vf_trust	= liquidio_set_vf_trust,
3207 	.ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3208 	.ndo_get_vf_stats	= liquidio_get_vf_stats,
3209 };
3210 
3211 /** \brief Entry point for the liquidio module
3212  */
3213 static int __init liquidio_init(void)
3214 {
3215 	int i;
3216 	struct handshake *hs;
3217 
3218 	init_completion(&first_stage);
3219 
3220 	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3221 
3222 	if (liquidio_init_pci())
3223 		return -EINVAL;
3224 
3225 	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3226 
3227 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3228 		hs = &handshake[i];
3229 		if (hs->pci_dev) {
3230 			wait_for_completion(&hs->init);
3231 			if (!hs->init_ok) {
3232 				/* init handshake failed */
3233 				dev_err(&hs->pci_dev->dev,
3234 					"Failed to init device\n");
3235 				liquidio_deinit_pci();
3236 				return -EIO;
3237 			}
3238 		}
3239 	}
3240 
3241 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3242 		hs = &handshake[i];
3243 		if (hs->pci_dev) {
3244 			wait_for_completion_timeout(&hs->started,
3245 						    msecs_to_jiffies(30000));
3246 			if (!hs->started_ok) {
3247 				/* starter handshake failed */
3248 				dev_err(&hs->pci_dev->dev,
3249 					"Firmware failed to start\n");
3250 				liquidio_deinit_pci();
3251 				return -EIO;
3252 			}
3253 		}
3254 	}
3255 
3256 	return 0;
3257 }
3258 
3259 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3260 {
3261 	struct octeon_device *oct = (struct octeon_device *)buf;
3262 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3263 	int gmxport = 0;
3264 	union oct_link_status *ls;
3265 	int i;
3266 
3267 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3268 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3269 			recv_pkt->buffer_size[0],
3270 			recv_pkt->rh.r_nic_info.gmxport);
3271 		goto nic_info_err;
3272 	}
3273 
3274 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
3275 	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3276 		OCT_DROQ_INFO_SIZE);
3277 
3278 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3279 	for (i = 0; i < oct->ifcount; i++) {
3280 		if (oct->props[i].gmxport == gmxport) {
3281 			update_link_status(oct->props[i].netdev, ls);
3282 			break;
3283 		}
3284 	}
3285 
3286 nic_info_err:
3287 	for (i = 0; i < recv_pkt->buffer_count; i++)
3288 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3289 	octeon_free_recv_info(recv_info);
3290 	return 0;
3291 }
3292 
3293 /**
3294  * \brief Setup network interfaces
3295  * @param octeon_dev  octeon device
3296  *
3297  * Called during init time for each device. It assumes the NIC
3298  * is already up and running.  The link information for each
3299  * interface is passed in link_info.
3300  */
3301 static int setup_nic_devices(struct octeon_device *octeon_dev)
3302 {
3303 	struct lio *lio = NULL;
3304 	struct net_device *netdev;
3305 	u8 mac[6], i, j, *fw_ver;
3306 	struct octeon_soft_command *sc;
3307 	struct liquidio_if_cfg_context *ctx;
3308 	struct liquidio_if_cfg_resp *resp;
3309 	struct octdev_props *props;
3310 	int retval, num_iqueues, num_oqueues;
3311 	int max_num_queues = 0;
3312 	union oct_nic_if_cfg if_cfg;
3313 	unsigned int base_queue;
3314 	unsigned int gmx_port_id;
3315 	u32 resp_size, ctx_size, data_size;
3316 	u32 ifidx_or_pfnum;
3317 	struct lio_version *vdata;
3318 	struct devlink *devlink;
3319 	struct lio_devlink_priv *lio_devlink;
3320 
3321 	/* This is to handle link status changes */
3322 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3323 				    OPCODE_NIC_INFO,
3324 				    lio_nic_info, octeon_dev);
3325 
3326 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3327 	 * They are handled directly.
3328 	 */
3329 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3330 					free_netbuf);
3331 
3332 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3333 					free_netsgbuf);
3334 
3335 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3336 					free_netsgbuf_with_resp);
3337 
3338 	for (i = 0; i < octeon_dev->ifcount; i++) {
3339 		resp_size = sizeof(struct liquidio_if_cfg_resp);
3340 		ctx_size = sizeof(struct liquidio_if_cfg_context);
3341 		data_size = sizeof(struct lio_version);
3342 		sc = (struct octeon_soft_command *)
3343 			octeon_alloc_soft_command(octeon_dev, data_size,
3344 						  resp_size, ctx_size);
3345 		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3346 		ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
3347 		vdata = (struct lio_version *)sc->virtdptr;
3348 
3349 		*((u64 *)vdata) = 0;
3350 		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3351 		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3352 		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3353 
3354 		if (OCTEON_CN23XX_PF(octeon_dev)) {
3355 			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3356 			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3357 			base_queue = octeon_dev->sriov_info.pf_srn;
3358 
3359 			gmx_port_id = octeon_dev->pf_num;
3360 			ifidx_or_pfnum = octeon_dev->pf_num;
3361 		} else {
3362 			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3363 						octeon_get_conf(octeon_dev), i);
3364 			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3365 						octeon_get_conf(octeon_dev), i);
3366 			base_queue = CFG_GET_BASE_QUE_NIC_IF(
3367 						octeon_get_conf(octeon_dev), i);
3368 			gmx_port_id = CFG_GET_GMXID_NIC_IF(
3369 						octeon_get_conf(octeon_dev), i);
3370 			ifidx_or_pfnum = i;
3371 		}
3372 
3373 		dev_dbg(&octeon_dev->pci_dev->dev,
3374 			"requesting config for interface %d, iqs %d, oqs %d\n",
3375 			ifidx_or_pfnum, num_iqueues, num_oqueues);
3376 		WRITE_ONCE(ctx->cond, 0);
3377 		ctx->octeon_id = lio_get_device_id(octeon_dev);
3378 		init_waitqueue_head(&ctx->wc);
3379 
3380 		if_cfg.u64 = 0;
3381 		if_cfg.s.num_iqueues = num_iqueues;
3382 		if_cfg.s.num_oqueues = num_oqueues;
3383 		if_cfg.s.base_queue = base_queue;
3384 		if_cfg.s.gmx_port_id = gmx_port_id;
3385 
3386 		sc->iq_no = 0;
3387 
3388 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3389 					    OPCODE_NIC_IF_CFG, 0,
3390 					    if_cfg.u64, 0);
3391 
3392 		sc->callback = lio_if_cfg_callback;
3393 		sc->callback_arg = sc;
3394 		sc->wait_time = LIO_IFCFG_WAIT_TIME;
3395 
3396 		retval = octeon_send_soft_command(octeon_dev, sc);
3397 		if (retval == IQ_SEND_FAILED) {
3398 			dev_err(&octeon_dev->pci_dev->dev,
3399 				"iq/oq config failed status: %x\n",
3400 				retval);
3401 			/* Soft instr is freed by driver in case of failure. */
3402 			goto setup_nic_dev_fail;
3403 		}
3404 
3405 		/* Sleep on a wait queue till the cond flag indicates that the
3406 		 * response arrived or timed-out.
3407 		 */
3408 		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
3409 			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
3410 			goto setup_nic_wait_intr;
3411 		}
3412 
3413 		retval = resp->status;
3414 		if (retval) {
3415 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3416 			goto setup_nic_dev_fail;
3417 		}
3418 
3419 		/* Verify f/w version (in case of 'auto' loading from flash) */
3420 		fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3421 		if (memcmp(LIQUIDIO_BASE_VERSION,
3422 			   fw_ver,
3423 			   strlen(LIQUIDIO_BASE_VERSION))) {
3424 			dev_err(&octeon_dev->pci_dev->dev,
3425 				"Unmatched firmware version. Expected %s.x, got %s.\n",
3426 				LIQUIDIO_BASE_VERSION, fw_ver);
3427 			goto setup_nic_dev_fail;
3428 		} else if (atomic_read(octeon_dev->adapter_fw_state) ==
3429 			   FW_IS_PRELOADED) {
3430 			dev_info(&octeon_dev->pci_dev->dev,
3431 				 "Using auto-loaded firmware version %s.\n",
3432 				 fw_ver);
3433 		}
3434 
3435 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3436 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3437 
3438 		num_iqueues = hweight64(resp->cfg_info.iqmask);
3439 		num_oqueues = hweight64(resp->cfg_info.oqmask);
3440 
3441 		if (!(num_iqueues) || !(num_oqueues)) {
3442 			dev_err(&octeon_dev->pci_dev->dev,
3443 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3444 				resp->cfg_info.iqmask,
3445 				resp->cfg_info.oqmask);
3446 			goto setup_nic_dev_fail;
3447 		}
3448 
3449 		if (OCTEON_CN6XXX(octeon_dev)) {
3450 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3451 								    cn6xxx));
3452 		} else if (OCTEON_CN23XX_PF(octeon_dev)) {
3453 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3454 								    cn23xx_pf));
3455 		}
3456 
3457 		dev_dbg(&octeon_dev->pci_dev->dev,
3458 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3459 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3460 			num_iqueues, num_oqueues, max_num_queues);
3461 		netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3462 
3463 		if (!netdev) {
3464 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3465 			goto setup_nic_dev_fail;
3466 		}
3467 
3468 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3469 
3470 		/* Associate the routines that will handle different
3471 		 * netdev tasks.
3472 		 */
3473 		netdev->netdev_ops = &lionetdevops;
3474 		SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
3475 
3476 		retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3477 		if (retval) {
3478 			dev_err(&octeon_dev->pci_dev->dev,
3479 				"setting real number rx failed\n");
3480 			goto setup_nic_dev_fail;
3481 		}
3482 
3483 		retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3484 		if (retval) {
3485 			dev_err(&octeon_dev->pci_dev->dev,
3486 				"setting real number tx failed\n");
3487 			goto setup_nic_dev_fail;
3488 		}
3489 
3490 		lio = GET_LIO(netdev);
3491 
3492 		memset(lio, 0, sizeof(struct lio));
3493 
3494 		lio->ifidx = ifidx_or_pfnum;
3495 
3496 		props = &octeon_dev->props[i];
3497 		props->gmxport = resp->cfg_info.linfo.gmxport;
3498 		props->netdev = netdev;
3499 
3500 		lio->linfo.num_rxpciq = num_oqueues;
3501 		lio->linfo.num_txpciq = num_iqueues;
3502 		for (j = 0; j < num_oqueues; j++) {
3503 			lio->linfo.rxpciq[j].u64 =
3504 				resp->cfg_info.linfo.rxpciq[j].u64;
3505 		}
3506 		for (j = 0; j < num_iqueues; j++) {
3507 			lio->linfo.txpciq[j].u64 =
3508 				resp->cfg_info.linfo.txpciq[j].u64;
3509 		}
3510 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3511 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3512 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3513 
3514 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3515 
3516 		if (OCTEON_CN23XX_PF(octeon_dev) ||
3517 		    OCTEON_CN6XXX(octeon_dev)) {
3518 			lio->dev_capability = NETIF_F_HIGHDMA
3519 					      | NETIF_F_IP_CSUM
3520 					      | NETIF_F_IPV6_CSUM
3521 					      | NETIF_F_SG | NETIF_F_RXCSUM
3522 					      | NETIF_F_GRO
3523 					      | NETIF_F_TSO | NETIF_F_TSO6
3524 					      | NETIF_F_LRO;
3525 		}
3526 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3527 
3528 		/*  Copy of transmit encapsulation capabilities:
3529 		 *  TSO, TSO6, Checksums for this device
3530 		 */
3531 		lio->enc_dev_capability = NETIF_F_IP_CSUM
3532 					  | NETIF_F_IPV6_CSUM
3533 					  | NETIF_F_GSO_UDP_TUNNEL
3534 					  | NETIF_F_HW_CSUM | NETIF_F_SG
3535 					  | NETIF_F_RXCSUM
3536 					  | NETIF_F_TSO | NETIF_F_TSO6
3537 					  | NETIF_F_LRO;
3538 
3539 		netdev->hw_enc_features = (lio->enc_dev_capability &
3540 					   ~NETIF_F_LRO);
3541 
3542 		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3543 
3544 		netdev->vlan_features = lio->dev_capability;
3545 		/* Add any unchangeable hw features */
3546 		lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3547 					NETIF_F_HW_VLAN_CTAG_RX |
3548 					NETIF_F_HW_VLAN_CTAG_TX;
3549 
3550 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3551 
3552 		netdev->hw_features = lio->dev_capability;
3553 		/*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
3554 		netdev->hw_features = netdev->hw_features &
3555 			~NETIF_F_HW_VLAN_CTAG_RX;
3556 
3557 		/* MTU range: 68 - 16000 */
3558 		netdev->min_mtu = LIO_MIN_MTU_SIZE;
3559 		netdev->max_mtu = LIO_MAX_MTU_SIZE;
3560 
3561 		/* Point to the  properties for octeon device to which this
3562 		 * interface belongs.
3563 		 */
3564 		lio->oct_dev = octeon_dev;
3565 		lio->octprops = props;
3566 		lio->netdev = netdev;
3567 
3568 		dev_dbg(&octeon_dev->pci_dev->dev,
3569 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
3570 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3571 
3572 		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3573 			u8 vfmac[ETH_ALEN];
3574 
3575 			random_ether_addr(&vfmac[0]);
3576 			if (__liquidio_set_vf_mac(netdev, j,
3577 						  &vfmac[0], false)) {
3578 				dev_err(&octeon_dev->pci_dev->dev,
3579 					"Error setting VF%d MAC address\n",
3580 					j);
3581 				goto setup_nic_dev_fail;
3582 			}
3583 		}
3584 
3585 		/* 64-bit swap required on LE machines */
3586 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3587 		for (j = 0; j < 6; j++)
3588 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3589 
3590 		/* Copy MAC Address to OS network device structure */
3591 
3592 		ether_addr_copy(netdev->dev_addr, mac);
3593 
3594 		/* By default all interfaces on a single Octeon uses the same
3595 		 * tx and rx queues
3596 		 */
3597 		lio->txq = lio->linfo.txpciq[0].s.q_no;
3598 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3599 		if (liquidio_setup_io_queues(octeon_dev, i,
3600 					     lio->linfo.num_txpciq,
3601 					     lio->linfo.num_rxpciq)) {
3602 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3603 			goto setup_nic_dev_fail;
3604 		}
3605 
3606 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3607 
3608 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3609 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3610 
3611 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3612 			dev_err(&octeon_dev->pci_dev->dev,
3613 				"Gather list allocation failed\n");
3614 			goto setup_nic_dev_fail;
3615 		}
3616 
3617 		/* Register ethtool support */
3618 		liquidio_set_ethtool_ops(netdev);
3619 		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3620 			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3621 		else
3622 			octeon_dev->priv_flags = 0x0;
3623 
3624 		if (netdev->features & NETIF_F_LRO)
3625 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3626 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3627 
3628 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3629 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
3630 
3631 		if ((debug != -1) && (debug & NETIF_MSG_HW))
3632 			liquidio_set_feature(netdev,
3633 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
3634 
3635 		if (setup_link_status_change_wq(netdev))
3636 			goto setup_nic_dev_fail;
3637 
3638 		if ((octeon_dev->fw_info.app_cap_flags &
3639 		     LIQUIDIO_TIME_SYNC_CAP) &&
3640 		    setup_sync_octeon_time_wq(netdev))
3641 			goto setup_nic_dev_fail;
3642 
3643 		if (setup_rx_oom_poll_fn(netdev))
3644 			goto setup_nic_dev_fail;
3645 
3646 		/* Register the network device with the OS */
3647 		if (register_netdev(netdev)) {
3648 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3649 			goto setup_nic_dev_fail;
3650 		}
3651 
3652 		dev_dbg(&octeon_dev->pci_dev->dev,
3653 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3654 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3655 		netif_carrier_off(netdev);
3656 		lio->link_changes++;
3657 
3658 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3659 
3660 		/* Sending command to firmware to enable Rx checksum offload
3661 		 * by default at the time of setup of Liquidio driver for
3662 		 * this device
3663 		 */
3664 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3665 					    OCTNET_CMD_RXCSUM_ENABLE);
3666 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3667 				     OCTNET_CMD_TXCSUM_ENABLE);
3668 
3669 		dev_dbg(&octeon_dev->pci_dev->dev,
3670 			"NIC ifidx:%d Setup successful\n", i);
3671 
3672 		octeon_free_soft_command(octeon_dev, sc);
3673 
3674 		if (octeon_dev->subsystem_id ==
3675 			OCTEON_CN2350_25GB_SUBSYS_ID ||
3676 		    octeon_dev->subsystem_id ==
3677 			OCTEON_CN2360_25GB_SUBSYS_ID) {
3678 			liquidio_get_speed(lio);
3679 
3680 			if (octeon_dev->speed_setting == 0) {
3681 				octeon_dev->speed_setting = 25;
3682 				octeon_dev->no_speed_setting = 1;
3683 			}
3684 		} else {
3685 			octeon_dev->no_speed_setting = 1;
3686 			octeon_dev->speed_setting = 10;
3687 		}
3688 		octeon_dev->speed_boot = octeon_dev->speed_setting;
3689 
3690 	}
3691 
3692 	devlink = devlink_alloc(&liquidio_devlink_ops,
3693 				sizeof(struct lio_devlink_priv));
3694 	if (!devlink) {
3695 		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3696 		goto setup_nic_wait_intr;
3697 	}
3698 
3699 	lio_devlink = devlink_priv(devlink);
3700 	lio_devlink->oct = octeon_dev;
3701 
3702 	if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3703 		devlink_free(devlink);
3704 		dev_err(&octeon_dev->pci_dev->dev,
3705 			"devlink registration failed\n");
3706 		goto setup_nic_wait_intr;
3707 	}
3708 
3709 	octeon_dev->devlink = devlink;
3710 	octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3711 
3712 	return 0;
3713 
3714 setup_nic_dev_fail:
3715 
3716 	octeon_free_soft_command(octeon_dev, sc);
3717 
3718 setup_nic_wait_intr:
3719 
3720 	while (i--) {
3721 		dev_err(&octeon_dev->pci_dev->dev,
3722 			"NIC ifidx:%d Setup failed\n", i);
3723 		liquidio_destroy_nic_device(octeon_dev, i);
3724 	}
3725 	return -ENODEV;
3726 }
3727 
3728 #ifdef CONFIG_PCI_IOV
3729 static int octeon_enable_sriov(struct octeon_device *oct)
3730 {
3731 	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3732 	struct pci_dev *vfdev;
3733 	int err;
3734 	u32 u;
3735 
3736 	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3737 		err = pci_enable_sriov(oct->pci_dev,
3738 				       oct->sriov_info.num_vfs_alloced);
3739 		if (err) {
3740 			dev_err(&oct->pci_dev->dev,
3741 				"OCTEON: Failed to enable PCI sriov: %d\n",
3742 				err);
3743 			oct->sriov_info.num_vfs_alloced = 0;
3744 			return err;
3745 		}
3746 		oct->sriov_info.sriov_enabled = 1;
3747 
3748 		/* init lookup table that maps DPI ring number to VF pci_dev
3749 		 * struct pointer
3750 		 */
3751 		u = 0;
3752 		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3753 				       OCTEON_CN23XX_VF_VID, NULL);
3754 		while (vfdev) {
3755 			if (vfdev->is_virtfn &&
3756 			    (vfdev->physfn == oct->pci_dev)) {
3757 				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3758 					vfdev;
3759 				u += oct->sriov_info.rings_per_vf;
3760 			}
3761 			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3762 					       OCTEON_CN23XX_VF_VID, vfdev);
3763 		}
3764 	}
3765 
3766 	return num_vfs_alloced;
3767 }
3768 
3769 static int lio_pci_sriov_disable(struct octeon_device *oct)
3770 {
3771 	int u;
3772 
3773 	if (pci_vfs_assigned(oct->pci_dev)) {
3774 		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3775 		return -EPERM;
3776 	}
3777 
3778 	pci_disable_sriov(oct->pci_dev);
3779 
3780 	u = 0;
3781 	while (u < MAX_POSSIBLE_VFS) {
3782 		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3783 		u += oct->sriov_info.rings_per_vf;
3784 	}
3785 
3786 	oct->sriov_info.num_vfs_alloced = 0;
3787 	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3788 		 oct->pf_num);
3789 
3790 	return 0;
3791 }
3792 
3793 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3794 {
3795 	struct octeon_device *oct = pci_get_drvdata(dev);
3796 	int ret = 0;
3797 
3798 	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3799 	    (oct->sriov_info.sriov_enabled)) {
3800 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3801 			 oct->pf_num, num_vfs);
3802 		return 0;
3803 	}
3804 
3805 	if (!num_vfs) {
3806 		lio_vf_rep_destroy(oct);
3807 		ret = lio_pci_sriov_disable(oct);
3808 	} else if (num_vfs > oct->sriov_info.max_vfs) {
3809 		dev_err(&oct->pci_dev->dev,
3810 			"OCTEON: Max allowed VFs:%d user requested:%d",
3811 			oct->sriov_info.max_vfs, num_vfs);
3812 		ret = -EPERM;
3813 	} else {
3814 		oct->sriov_info.num_vfs_alloced = num_vfs;
3815 		ret = octeon_enable_sriov(oct);
3816 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3817 			 oct->pf_num, num_vfs);
3818 		ret = lio_vf_rep_create(oct);
3819 		if (ret)
3820 			dev_info(&oct->pci_dev->dev,
3821 				 "vf representor create failed");
3822 	}
3823 
3824 	return ret;
3825 }
3826 #endif
3827 
3828 /**
3829  * \brief initialize the NIC
3830  * @param oct octeon device
3831  *
3832  * This initialization routine is called once the Octeon device application is
3833  * up and running
3834  */
3835 static int liquidio_init_nic_module(struct octeon_device *oct)
3836 {
3837 	int i, retval = 0;
3838 	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3839 
3840 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3841 
3842 	/* only default iq and oq were initialized
3843 	 * initialize the rest as well
3844 	 */
3845 	/* run port_config command for each port */
3846 	oct->ifcount = num_nic_ports;
3847 
3848 	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3849 
3850 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
3851 		oct->props[i].gmxport = -1;
3852 
3853 	retval = setup_nic_devices(oct);
3854 	if (retval) {
3855 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3856 		goto octnet_init_failure;
3857 	}
3858 
3859 	/* Call vf_rep_modinit if the firmware is switchdev capable
3860 	 * and do it from the first liquidio function probed.
3861 	 */
3862 	if (!oct->octeon_id &&
3863 	    oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3864 		retval = lio_vf_rep_modinit();
3865 		if (retval) {
3866 			liquidio_stop_nic_module(oct);
3867 			goto octnet_init_failure;
3868 		}
3869 	}
3870 
3871 	liquidio_ptp_init(oct);
3872 
3873 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3874 
3875 	return retval;
3876 
3877 octnet_init_failure:
3878 
3879 	oct->ifcount = 0;
3880 
3881 	return retval;
3882 }
3883 
3884 /**
3885  * \brief starter callback that invokes the remaining initialization work after
3886  * the NIC is up and running.
3887  * @param octptr  work struct work_struct
3888  */
3889 static void nic_starter(struct work_struct *work)
3890 {
3891 	struct octeon_device *oct;
3892 	struct cavium_wk *wk = (struct cavium_wk *)work;
3893 
3894 	oct = (struct octeon_device *)wk->ctxptr;
3895 
3896 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3897 		return;
3898 
3899 	/* If the status of the device is CORE_OK, the core
3900 	 * application has reported its application type. Call
3901 	 * any registered handlers now and move to the RUNNING
3902 	 * state.
3903 	 */
3904 	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3905 		schedule_delayed_work(&oct->nic_poll_work.work,
3906 				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3907 		return;
3908 	}
3909 
3910 	atomic_set(&oct->status, OCT_DEV_RUNNING);
3911 
3912 	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3913 		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3914 
3915 		if (liquidio_init_nic_module(oct))
3916 			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3917 		else
3918 			handshake[oct->octeon_id].started_ok = 1;
3919 	} else {
3920 		dev_err(&oct->pci_dev->dev,
3921 			"Unexpected application running on NIC (%d). Check firmware.\n",
3922 			oct->app_mode);
3923 	}
3924 
3925 	complete(&handshake[oct->octeon_id].started);
3926 }
3927 
3928 static int
3929 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3930 {
3931 	struct octeon_device *oct = (struct octeon_device *)buf;
3932 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3933 	int i, notice, vf_idx;
3934 	bool cores_crashed;
3935 	u64 *data, vf_num;
3936 
3937 	notice = recv_pkt->rh.r.ossp;
3938 	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3939 
3940 	/* the first 64-bit word of data is the vf_num */
3941 	vf_num = data[0];
3942 	octeon_swap_8B_data(&vf_num, 1);
3943 	vf_idx = (int)vf_num - 1;
3944 
3945 	cores_crashed = READ_ONCE(oct->cores_crashed);
3946 
3947 	if (notice == VF_DRV_LOADED) {
3948 		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
3949 			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
3950 			dev_info(&oct->pci_dev->dev,
3951 				 "driver for VF%d was loaded\n", vf_idx);
3952 			if (!cores_crashed)
3953 				try_module_get(THIS_MODULE);
3954 		}
3955 	} else if (notice == VF_DRV_REMOVED) {
3956 		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
3957 			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
3958 			dev_info(&oct->pci_dev->dev,
3959 				 "driver for VF%d was removed\n", vf_idx);
3960 			if (!cores_crashed)
3961 				module_put(THIS_MODULE);
3962 		}
3963 	} else if (notice == VF_DRV_MACADDR_CHANGED) {
3964 		u8 *b = (u8 *)&data[1];
3965 
3966 		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
3967 		dev_info(&oct->pci_dev->dev,
3968 			 "VF driver changed VF%d's MAC address to %pM\n",
3969 			 vf_idx, b + 2);
3970 	}
3971 
3972 	for (i = 0; i < recv_pkt->buffer_count; i++)
3973 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3974 	octeon_free_recv_info(recv_info);
3975 
3976 	return 0;
3977 }
3978 
3979 /**
3980  * \brief Device initialization for each Octeon device that is probed
3981  * @param octeon_dev  octeon device
3982  */
3983 static int octeon_device_init(struct octeon_device *octeon_dev)
3984 {
3985 	int j, ret;
3986 	char bootcmd[] = "\n";
3987 	char *dbg_enb = NULL;
3988 	enum lio_fw_state fw_state;
3989 	struct octeon_device_priv *oct_priv =
3990 		(struct octeon_device_priv *)octeon_dev->priv;
3991 	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
3992 
3993 	/* Enable access to the octeon device and make its DMA capability
3994 	 * known to the OS.
3995 	 */
3996 	if (octeon_pci_os_setup(octeon_dev))
3997 		return 1;
3998 
3999 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4000 
4001 	/* Identify the Octeon type and map the BAR address space. */
4002 	if (octeon_chip_specific_setup(octeon_dev)) {
4003 		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4004 		return 1;
4005 	}
4006 
4007 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4008 
4009 	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4010 	 * since that is what is required for the reference to be removed
4011 	 * during de-initialization (see 'octeon_destroy_resources').
4012 	 */
4013 	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4014 			       PCI_SLOT(octeon_dev->pci_dev->devfn),
4015 			       PCI_FUNC(octeon_dev->pci_dev->devfn),
4016 			       true);
4017 
4018 	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4019 
4020 	/* CN23XX supports preloaded firmware if the following is true:
4021 	 *
4022 	 * The adapter indicates that firmware is currently running AND
4023 	 * 'fw_type' is 'auto'.
4024 	 *
4025 	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4026 	 */
4027 	if (OCTEON_CN23XX_PF(octeon_dev) &&
4028 	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4029 		atomic_cmpxchg(octeon_dev->adapter_fw_state,
4030 			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4031 	}
4032 
4033 	/* If loading firmware, only first device of adapter needs to do so. */
4034 	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4035 				  FW_NEEDS_TO_BE_LOADED,
4036 				  FW_IS_BEING_LOADED);
4037 
4038 	/* Here, [local variable] 'fw_state' is set to one of:
4039 	 *
4040 	 *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4041 	 *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4042 	 *                          firmware to the adapter.
4043 	 *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4044 	 *                          firmware to the adapter.
4045 	 */
4046 
4047 	/* Prior to f/w load, perform a soft reset of the Octeon device;
4048 	 * if error resetting, return w/error.
4049 	 */
4050 	if (fw_state == FW_NEEDS_TO_BE_LOADED)
4051 		if (octeon_dev->fn_list.soft_reset(octeon_dev))
4052 			return 1;
4053 
4054 	/* Initialize the dispatch mechanism used to push packets arriving on
4055 	 * Octeon Output queues.
4056 	 */
4057 	if (octeon_init_dispatch_list(octeon_dev))
4058 		return 1;
4059 
4060 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4061 				    OPCODE_NIC_CORE_DRV_ACTIVE,
4062 				    octeon_core_drv_init,
4063 				    octeon_dev);
4064 
4065 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4066 				    OPCODE_NIC_VF_DRV_NOTICE,
4067 				    octeon_recv_vf_drv_notice, octeon_dev);
4068 	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4069 	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4070 	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4071 			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4072 
4073 	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4074 
4075 	if (octeon_set_io_queues_off(octeon_dev)) {
4076 		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4077 		return 1;
4078 	}
4079 
4080 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4081 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4082 		if (ret) {
4083 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4084 			return ret;
4085 		}
4086 	}
4087 
4088 	/* Initialize soft command buffer pool
4089 	 */
4090 	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4091 		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4092 		return 1;
4093 	}
4094 	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4095 
4096 	/*  Setup the data structures that manage this Octeon's Input queues. */
4097 	if (octeon_setup_instr_queues(octeon_dev)) {
4098 		dev_err(&octeon_dev->pci_dev->dev,
4099 			"instruction queue initialization failed\n");
4100 		return 1;
4101 	}
4102 	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4103 
4104 	/* Initialize lists to manage the requests of different types that
4105 	 * arrive from user & kernel applications for this octeon device.
4106 	 */
4107 	if (octeon_setup_response_list(octeon_dev)) {
4108 		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4109 		return 1;
4110 	}
4111 	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4112 
4113 	if (octeon_setup_output_queues(octeon_dev)) {
4114 		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4115 		return 1;
4116 	}
4117 
4118 	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4119 
4120 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4121 		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4122 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4123 			return 1;
4124 		}
4125 		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4126 
4127 		if (octeon_allocate_ioq_vector
4128 				(octeon_dev,
4129 				 octeon_dev->sriov_info.num_pf_rings)) {
4130 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4131 			return 1;
4132 		}
4133 		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4134 
4135 	} else {
4136 		/* The input and output queue registers were setup earlier (the
4137 		 * queues were not enabled). Any additional registers
4138 		 * that need to be programmed should be done now.
4139 		 */
4140 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4141 		if (ret) {
4142 			dev_err(&octeon_dev->pci_dev->dev,
4143 				"Failed to configure device registers\n");
4144 			return ret;
4145 		}
4146 	}
4147 
4148 	/* Initialize the tasklet that handles output queue packet processing.*/
4149 	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4150 	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4151 		     (unsigned long)octeon_dev);
4152 
4153 	/* Setup the interrupt handler and record the INT SUM register address
4154 	 */
4155 	if (octeon_setup_interrupt(octeon_dev,
4156 				   octeon_dev->sriov_info.num_pf_rings))
4157 		return 1;
4158 
4159 	/* Enable Octeon device interrupts */
4160 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4161 
4162 	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4163 
4164 	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4165 	 * the output queue is enabled.
4166 	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4167 	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4168 	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4169 	 * before any credits have been issued, causing the ring to be reset
4170 	 * (and the f/w appear to never have started).
4171 	 */
4172 	for (j = 0; j < octeon_dev->num_oqs; j++)
4173 		writel(octeon_dev->droq[j]->max_count,
4174 		       octeon_dev->droq[j]->pkts_credit_reg);
4175 
4176 	/* Enable the input and output queues for this Octeon device */
4177 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4178 	if (ret) {
4179 		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4180 		return ret;
4181 	}
4182 
4183 	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4184 
4185 	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4186 		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4187 		if (!ddr_timeout) {
4188 			dev_info(&octeon_dev->pci_dev->dev,
4189 				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4190 		}
4191 
4192 		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4193 
4194 		/* Wait for the octeon to initialize DDR after the soft-reset.*/
4195 		while (!ddr_timeout) {
4196 			set_current_state(TASK_INTERRUPTIBLE);
4197 			if (schedule_timeout(HZ / 10)) {
4198 				/* user probably pressed Control-C */
4199 				return 1;
4200 			}
4201 		}
4202 		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4203 		if (ret) {
4204 			dev_err(&octeon_dev->pci_dev->dev,
4205 				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4206 				ret);
4207 			return 1;
4208 		}
4209 
4210 		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4211 			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4212 			return 1;
4213 		}
4214 
4215 		/* Divert uboot to take commands from host instead. */
4216 		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4217 
4218 		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4219 		ret = octeon_init_consoles(octeon_dev);
4220 		if (ret) {
4221 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4222 			return 1;
4223 		}
4224 		/* If console debug enabled, specify empty string to use default
4225 		 * enablement ELSE specify NULL string for 'disabled'.
4226 		 */
4227 		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4228 		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4229 		if (ret) {
4230 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4231 			return 1;
4232 		} else if (octeon_console_debug_enabled(0)) {
4233 			/* If console was added AND we're logging console output
4234 			 * then set our console print function.
4235 			 */
4236 			octeon_dev->console[0].print = octeon_dbg_console_print;
4237 		}
4238 
4239 		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4240 
4241 		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4242 		ret = load_firmware(octeon_dev);
4243 		if (ret) {
4244 			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4245 			return 1;
4246 		}
4247 
4248 		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4249 	}
4250 
4251 	handshake[octeon_dev->octeon_id].init_ok = 1;
4252 	complete(&handshake[octeon_dev->octeon_id].init);
4253 
4254 	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4255 
4256 	return 0;
4257 }
4258 
4259 /**
4260  * \brief Debug console print function
4261  * @param octeon_dev  octeon device
4262  * @param console_num console number
4263  * @param prefix      first portion of line to display
4264  * @param suffix      second portion of line to display
4265  *
4266  * The OCTEON debug console outputs entire lines (excluding '\n').
4267  * Normally, the line will be passed in the 'prefix' parameter.
4268  * However, due to buffering, it is possible for a line to be split into two
4269  * parts, in which case they will be passed as the 'prefix' parameter and
4270  * 'suffix' parameter.
4271  */
4272 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4273 				    char *prefix, char *suffix)
4274 {
4275 	if (prefix && suffix)
4276 		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4277 			 suffix);
4278 	else if (prefix)
4279 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4280 	else if (suffix)
4281 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4282 
4283 	return 0;
4284 }
4285 
4286 /**
4287  * \brief Exits the module
4288  */
4289 static void __exit liquidio_exit(void)
4290 {
4291 	liquidio_deinit_pci();
4292 
4293 	pr_info("LiquidIO network module is now unloaded\n");
4294 }
4295 
4296 module_init(liquidio_init);
4297 module_exit(liquidio_exit);
4298