1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/firmware.h>
22 #include <net/vxlan.h>
23 #include <linux/kthread.h>
24 #include <net/switchdev.h>
25 #include "liquidio_common.h"
26 #include "octeon_droq.h"
27 #include "octeon_iq.h"
28 #include "response_manager.h"
29 #include "octeon_device.h"
30 #include "octeon_nic.h"
31 #include "octeon_main.h"
32 #include "octeon_network.h"
33 #include "cn66xx_regs.h"
34 #include "cn66xx_device.h"
35 #include "cn68xx_device.h"
36 #include "cn23xx_pf_device.h"
37 #include "liquidio_image.h"
38 #include "lio_vf_rep.h"
39 
40 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
41 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(LIQUIDIO_VERSION);
44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
45 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
47 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
49 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
50 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
51 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
52 
53 static int ddr_timeout = 10000;
54 module_param(ddr_timeout, int, 0644);
55 MODULE_PARM_DESC(ddr_timeout,
56 		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
57 
58 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
59 
60 static int debug = -1;
61 module_param(debug, int, 0644);
62 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
63 
64 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
65 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
66 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
67 
68 static u32 console_bitmask;
69 module_param(console_bitmask, int, 0644);
70 MODULE_PARM_DESC(console_bitmask,
71 		 "Bitmask indicating which consoles have debug output redirected to syslog.");
72 
73 /**
74  * \brief determines if a given console has debug enabled.
75  * @param console console to check
76  * @returns  1 = enabled. 0 otherwise
77  */
78 static int octeon_console_debug_enabled(u32 console)
79 {
80 	return (console_bitmask >> (console)) & 0x1;
81 }
82 
83 /* Polling interval for determining when NIC application is alive */
84 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
85 
86 /* runtime link query interval */
87 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
88 /* update localtime to octeon firmware every 60 seconds.
89  * make firmware to use same time reference, so that it will be easy to
90  * correlate firmware logged events/errors with host events, for debugging.
91  */
92 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
93 
94 /* time to wait for possible in-flight requests in milliseconds */
95 #define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)
96 
97 struct lio_trusted_vf_ctx {
98 	struct completion complete;
99 	int status;
100 };
101 
102 struct liquidio_rx_ctl_context {
103 	int octeon_id;
104 
105 	wait_queue_head_t wc;
106 
107 	int cond;
108 };
109 
110 struct oct_link_status_resp {
111 	u64 rh;
112 	struct oct_link_info link_info;
113 	u64 status;
114 };
115 
116 struct oct_timestamp_resp {
117 	u64 rh;
118 	u64 timestamp;
119 	u64 status;
120 };
121 
122 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
123 
124 union tx_info {
125 	u64 u64;
126 	struct {
127 #ifdef __BIG_ENDIAN_BITFIELD
128 		u16 gso_size;
129 		u16 gso_segs;
130 		u32 reserved;
131 #else
132 		u32 reserved;
133 		u16 gso_segs;
134 		u16 gso_size;
135 #endif
136 	} s;
137 };
138 
139 /** Octeon device properties to be used by the NIC module.
140  * Each octeon device in the system will be represented
141  * by this structure in the NIC module.
142  */
143 
144 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
145 #define OCTNIC_GSO_MAX_SIZE                                                    \
146 	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
147 
148 struct handshake {
149 	struct completion init;
150 	struct completion started;
151 	struct pci_dev *pci_dev;
152 	int init_ok;
153 	int started_ok;
154 };
155 
156 #ifdef CONFIG_PCI_IOV
157 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
158 #endif
159 
160 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
161 				    char *prefix, char *suffix);
162 
163 static int octeon_device_init(struct octeon_device *);
164 static int liquidio_stop(struct net_device *netdev);
165 static void liquidio_remove(struct pci_dev *pdev);
166 static int liquidio_probe(struct pci_dev *pdev,
167 			  const struct pci_device_id *ent);
168 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
169 				      int linkstate);
170 
171 static struct handshake handshake[MAX_OCTEON_DEVICES];
172 static struct completion first_stage;
173 
174 static void octeon_droq_bh(unsigned long pdev)
175 {
176 	int q_no;
177 	int reschedule = 0;
178 	struct octeon_device *oct = (struct octeon_device *)pdev;
179 	struct octeon_device_priv *oct_priv =
180 		(struct octeon_device_priv *)oct->priv;
181 
182 	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
183 		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
184 			continue;
185 		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
186 							  MAX_PACKET_BUDGET);
187 		lio_enable_irq(oct->droq[q_no], NULL);
188 
189 		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
190 			/* set time and cnt interrupt thresholds for this DROQ
191 			 * for NAPI
192 			 */
193 			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
194 
195 			octeon_write_csr64(
196 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
197 			    0x5700000040ULL);
198 			octeon_write_csr64(
199 			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
200 		}
201 	}
202 
203 	if (reschedule)
204 		tasklet_schedule(&oct_priv->droq_tasklet);
205 }
206 
207 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
208 {
209 	struct octeon_device_priv *oct_priv =
210 		(struct octeon_device_priv *)oct->priv;
211 	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
212 	int i;
213 
214 	do {
215 		pending_pkts = 0;
216 
217 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
218 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
219 				continue;
220 			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
221 		}
222 		if (pkt_cnt > 0) {
223 			pending_pkts += pkt_cnt;
224 			tasklet_schedule(&oct_priv->droq_tasklet);
225 		}
226 		pkt_cnt = 0;
227 		schedule_timeout_uninterruptible(1);
228 
229 	} while (retry-- && pending_pkts);
230 
231 	return pkt_cnt;
232 }
233 
234 /**
235  * \brief Forces all IO queues off on a given device
236  * @param oct Pointer to Octeon device
237  */
238 static void force_io_queues_off(struct octeon_device *oct)
239 {
240 	if ((oct->chip_id == OCTEON_CN66XX) ||
241 	    (oct->chip_id == OCTEON_CN68XX)) {
242 		/* Reset the Enable bits for Input Queues. */
243 		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
244 
245 		/* Reset the Enable bits for Output Queues. */
246 		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
247 	}
248 }
249 
250 /**
251  * \brief Cause device to go quiet so it can be safely removed/reset/etc
252  * @param oct Pointer to Octeon device
253  */
254 static inline void pcierror_quiesce_device(struct octeon_device *oct)
255 {
256 	int i;
257 
258 	/* Disable the input and output queues now. No more packets will
259 	 * arrive from Octeon, but we should wait for all packet processing
260 	 * to finish.
261 	 */
262 	force_io_queues_off(oct);
263 
264 	/* To allow for in-flight requests */
265 	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
266 
267 	if (wait_for_pending_requests(oct))
268 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
269 
270 	/* Force all requests waiting to be fetched by OCTEON to complete. */
271 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
272 		struct octeon_instr_queue *iq;
273 
274 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
275 			continue;
276 		iq = oct->instr_queue[i];
277 
278 		if (atomic_read(&iq->instr_pending)) {
279 			spin_lock_bh(&iq->lock);
280 			iq->fill_cnt = 0;
281 			iq->octeon_read_index = iq->host_write_index;
282 			iq->stats.instr_processed +=
283 				atomic_read(&iq->instr_pending);
284 			lio_process_iq_request_list(oct, iq, 0);
285 			spin_unlock_bh(&iq->lock);
286 		}
287 	}
288 
289 	/* Force all pending ordered list requests to time out. */
290 	lio_process_ordered_list(oct, 1);
291 
292 	/* We do not need to wait for output queue packets to be processed. */
293 }
294 
295 /**
296  * \brief Cleanup PCI AER uncorrectable error status
297  * @param dev Pointer to PCI device
298  */
299 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
300 {
301 	int pos = 0x100;
302 	u32 status, mask;
303 
304 	pr_info("%s :\n", __func__);
305 
306 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
307 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
308 	if (dev->error_state == pci_channel_io_normal)
309 		status &= ~mask;        /* Clear corresponding nonfatal bits */
310 	else
311 		status &= mask;         /* Clear corresponding fatal bits */
312 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
313 }
314 
315 /**
316  * \brief Stop all PCI IO to a given device
317  * @param dev Pointer to Octeon device
318  */
319 static void stop_pci_io(struct octeon_device *oct)
320 {
321 	/* No more instructions will be forwarded. */
322 	atomic_set(&oct->status, OCT_DEV_IN_RESET);
323 
324 	pci_disable_device(oct->pci_dev);
325 
326 	/* Disable interrupts  */
327 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
328 
329 	pcierror_quiesce_device(oct);
330 
331 	/* Release the interrupt line */
332 	free_irq(oct->pci_dev->irq, oct);
333 
334 	if (oct->flags & LIO_FLAG_MSI_ENABLED)
335 		pci_disable_msi(oct->pci_dev);
336 
337 	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
338 		lio_get_state_string(&oct->status));
339 
340 	/* making it a common function for all OCTEON models */
341 	cleanup_aer_uncorrect_error_status(oct->pci_dev);
342 }
343 
344 /**
345  * \brief called when PCI error is detected
346  * @param pdev Pointer to PCI device
347  * @param state The current pci connection state
348  *
349  * This function is called after a PCI bus error affecting
350  * this device has been detected.
351  */
352 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
353 						     pci_channel_state_t state)
354 {
355 	struct octeon_device *oct = pci_get_drvdata(pdev);
356 
357 	/* Non-correctable Non-fatal errors */
358 	if (state == pci_channel_io_normal) {
359 		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
360 		cleanup_aer_uncorrect_error_status(oct->pci_dev);
361 		return PCI_ERS_RESULT_CAN_RECOVER;
362 	}
363 
364 	/* Non-correctable Fatal errors */
365 	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
366 	stop_pci_io(oct);
367 
368 	/* Always return a DISCONNECT. There is no support for recovery but only
369 	 * for a clean shutdown.
370 	 */
371 	return PCI_ERS_RESULT_DISCONNECT;
372 }
373 
374 /**
375  * \brief mmio handler
376  * @param pdev Pointer to PCI device
377  */
378 static pci_ers_result_t liquidio_pcie_mmio_enabled(
379 				struct pci_dev *pdev __attribute__((unused)))
380 {
381 	/* We should never hit this since we never ask for a reset for a Fatal
382 	 * Error. We always return DISCONNECT in io_error above.
383 	 * But play safe and return RECOVERED for now.
384 	 */
385 	return PCI_ERS_RESULT_RECOVERED;
386 }
387 
388 /**
389  * \brief called after the pci bus has been reset.
390  * @param pdev Pointer to PCI device
391  *
392  * Restart the card from scratch, as if from a cold-boot. Implementation
393  * resembles the first-half of the octeon_resume routine.
394  */
395 static pci_ers_result_t liquidio_pcie_slot_reset(
396 				struct pci_dev *pdev __attribute__((unused)))
397 {
398 	/* We should never hit this since we never ask for a reset for a Fatal
399 	 * Error. We always return DISCONNECT in io_error above.
400 	 * But play safe and return RECOVERED for now.
401 	 */
402 	return PCI_ERS_RESULT_RECOVERED;
403 }
404 
405 /**
406  * \brief called when traffic can start flowing again.
407  * @param pdev Pointer to PCI device
408  *
409  * This callback is called when the error recovery driver tells us that
410  * its OK to resume normal operation. Implementation resembles the
411  * second-half of the octeon_resume routine.
412  */
413 static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
414 {
415 	/* Nothing to be done here. */
416 }
417 
418 #ifdef CONFIG_PM
419 /**
420  * \brief called when suspending
421  * @param pdev Pointer to PCI device
422  * @param state state to suspend to
423  */
424 static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
425 			    pm_message_t state __attribute__((unused)))
426 {
427 	return 0;
428 }
429 
430 /**
431  * \brief called when resuming
432  * @param pdev Pointer to PCI device
433  */
434 static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
435 {
436 	return 0;
437 }
438 #endif
439 
440 /* For PCI-E Advanced Error Recovery (AER) Interface */
441 static const struct pci_error_handlers liquidio_err_handler = {
442 	.error_detected = liquidio_pcie_error_detected,
443 	.mmio_enabled	= liquidio_pcie_mmio_enabled,
444 	.slot_reset	= liquidio_pcie_slot_reset,
445 	.resume		= liquidio_pcie_resume,
446 };
447 
448 static const struct pci_device_id liquidio_pci_tbl[] = {
449 	{       /* 68xx */
450 		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
451 	},
452 	{       /* 66xx */
453 		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
454 	},
455 	{       /* 23xx pf */
456 		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
457 	},
458 	{
459 		0, 0, 0, 0, 0, 0, 0
460 	}
461 };
462 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
463 
464 static struct pci_driver liquidio_pci_driver = {
465 	.name		= "LiquidIO",
466 	.id_table	= liquidio_pci_tbl,
467 	.probe		= liquidio_probe,
468 	.remove		= liquidio_remove,
469 	.err_handler	= &liquidio_err_handler,    /* For AER */
470 
471 #ifdef CONFIG_PM
472 	.suspend	= liquidio_suspend,
473 	.resume		= liquidio_resume,
474 #endif
475 #ifdef CONFIG_PCI_IOV
476 	.sriov_configure = liquidio_enable_sriov,
477 #endif
478 };
479 
480 /**
481  * \brief register PCI driver
482  */
483 static int liquidio_init_pci(void)
484 {
485 	return pci_register_driver(&liquidio_pci_driver);
486 }
487 
488 /**
489  * \brief unregister PCI driver
490  */
491 static void liquidio_deinit_pci(void)
492 {
493 	pci_unregister_driver(&liquidio_pci_driver);
494 }
495 
496 /**
497  * \brief Check Tx queue status, and take appropriate action
498  * @param lio per-network private data
499  * @returns 0 if full, number of queues woken up otherwise
500  */
501 static inline int check_txq_status(struct lio *lio)
502 {
503 	int numqs = lio->netdev->real_num_tx_queues;
504 	int ret_val = 0;
505 	int q, iq;
506 
507 	/* check each sub-queue state */
508 	for (q = 0; q < numqs; q++) {
509 		iq = lio->linfo.txpciq[q %
510 			lio->oct_dev->num_iqs].s.q_no;
511 		if (octnet_iq_is_full(lio->oct_dev, iq))
512 			continue;
513 		if (__netif_subqueue_stopped(lio->netdev, q)) {
514 			netif_wake_subqueue(lio->netdev, q);
515 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
516 						  tx_restart, 1);
517 			ret_val++;
518 		}
519 	}
520 
521 	return ret_val;
522 }
523 
524 /**
525  * \brief Print link information
526  * @param netdev network device
527  */
528 static void print_link_info(struct net_device *netdev)
529 {
530 	struct lio *lio = GET_LIO(netdev);
531 
532 	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
533 	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
534 		struct oct_link_info *linfo = &lio->linfo;
535 
536 		if (linfo->link.s.link_up) {
537 			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
538 				   linfo->link.s.speed,
539 				   (linfo->link.s.duplex) ? "Full" : "Half");
540 		} else {
541 			netif_info(lio, link, lio->netdev, "Link Down\n");
542 		}
543 	}
544 }
545 
546 /**
547  * \brief Routine to notify MTU change
548  * @param work work_struct data structure
549  */
550 static void octnet_link_status_change(struct work_struct *work)
551 {
552 	struct cavium_wk *wk = (struct cavium_wk *)work;
553 	struct lio *lio = (struct lio *)wk->ctxptr;
554 
555 	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
556 	 * this API is invoked only when new max-MTU of the interface is
557 	 * less than current MTU.
558 	 */
559 	rtnl_lock();
560 	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
561 	rtnl_unlock();
562 }
563 
564 /**
565  * \brief Sets up the mtu status change work
566  * @param netdev network device
567  */
568 static inline int setup_link_status_change_wq(struct net_device *netdev)
569 {
570 	struct lio *lio = GET_LIO(netdev);
571 	struct octeon_device *oct = lio->oct_dev;
572 
573 	lio->link_status_wq.wq = alloc_workqueue("link-status",
574 						 WQ_MEM_RECLAIM, 0);
575 	if (!lio->link_status_wq.wq) {
576 		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
577 		return -1;
578 	}
579 	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
580 			  octnet_link_status_change);
581 	lio->link_status_wq.wk.ctxptr = lio;
582 
583 	return 0;
584 }
585 
586 static inline void cleanup_link_status_change_wq(struct net_device *netdev)
587 {
588 	struct lio *lio = GET_LIO(netdev);
589 
590 	if (lio->link_status_wq.wq) {
591 		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
592 		destroy_workqueue(lio->link_status_wq.wq);
593 	}
594 }
595 
596 /**
597  * \brief Update link status
598  * @param netdev network device
599  * @param ls link status structure
600  *
601  * Called on receipt of a link status response from the core application to
602  * update each interface's link status.
603  */
604 static inline void update_link_status(struct net_device *netdev,
605 				      union oct_link_status *ls)
606 {
607 	struct lio *lio = GET_LIO(netdev);
608 	int changed = (lio->linfo.link.u64 != ls->u64);
609 	int current_max_mtu = lio->linfo.link.s.mtu;
610 	struct octeon_device *oct = lio->oct_dev;
611 
612 	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
613 		__func__, lio->linfo.link.u64, ls->u64);
614 	lio->linfo.link.u64 = ls->u64;
615 
616 	if ((lio->intf_open) && (changed)) {
617 		print_link_info(netdev);
618 		lio->link_changes++;
619 
620 		if (lio->linfo.link.s.link_up) {
621 			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
622 			netif_carrier_on(netdev);
623 			wake_txqs(netdev);
624 		} else {
625 			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
626 			netif_carrier_off(netdev);
627 			stop_txqs(netdev);
628 		}
629 		if (lio->linfo.link.s.mtu != current_max_mtu) {
630 			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
631 				   current_max_mtu, lio->linfo.link.s.mtu);
632 			netdev->max_mtu = lio->linfo.link.s.mtu;
633 		}
634 		if (lio->linfo.link.s.mtu < netdev->mtu) {
635 			dev_warn(&oct->pci_dev->dev,
636 				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
637 				     netdev->mtu, lio->linfo.link.s.mtu);
638 			queue_delayed_work(lio->link_status_wq.wq,
639 					   &lio->link_status_wq.wk.work, 0);
640 		}
641 	}
642 }
643 
644 /**
645  * lio_sync_octeon_time_cb - callback that is invoked when soft command
646  * sent by lio_sync_octeon_time() has completed successfully or failed
647  *
648  * @oct - octeon device structure
649  * @status - indicates success or failure
650  * @buf - pointer to the command that was sent to firmware
651  **/
652 static void lio_sync_octeon_time_cb(struct octeon_device *oct,
653 				    u32 status, void *buf)
654 {
655 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
656 
657 	if (status)
658 		dev_err(&oct->pci_dev->dev,
659 			"Failed to sync time to octeon; error=%d\n", status);
660 
661 	octeon_free_soft_command(oct, sc);
662 }
663 
664 /**
665  * lio_sync_octeon_time - send latest localtime to octeon firmware so that
666  * firmware will correct it's time, in case there is a time skew
667  *
668  * @work: work scheduled to send time update to octeon firmware
669  **/
670 static void lio_sync_octeon_time(struct work_struct *work)
671 {
672 	struct cavium_wk *wk = (struct cavium_wk *)work;
673 	struct lio *lio = (struct lio *)wk->ctxptr;
674 	struct octeon_device *oct = lio->oct_dev;
675 	struct octeon_soft_command *sc;
676 	struct timespec64 ts;
677 	struct lio_time *lt;
678 	int ret;
679 
680 	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
681 	if (!sc) {
682 		dev_err(&oct->pci_dev->dev,
683 			"Failed to sync time to octeon: soft command allocation failed\n");
684 		return;
685 	}
686 
687 	lt = (struct lio_time *)sc->virtdptr;
688 
689 	/* Get time of the day */
690 	ktime_get_real_ts64(&ts);
691 	lt->sec = ts.tv_sec;
692 	lt->nsec = ts.tv_nsec;
693 	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
694 
695 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
696 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
697 				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
698 
699 	sc->callback = lio_sync_octeon_time_cb;
700 	sc->callback_arg = sc;
701 	sc->wait_time = 1000;
702 
703 	ret = octeon_send_soft_command(oct, sc);
704 	if (ret == IQ_SEND_FAILED) {
705 		dev_err(&oct->pci_dev->dev,
706 			"Failed to sync time to octeon: failed to send soft command\n");
707 		octeon_free_soft_command(oct, sc);
708 	}
709 
710 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
711 			   &lio->sync_octeon_time_wq.wk.work,
712 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
713 }
714 
715 /**
716  * setup_sync_octeon_time_wq - Sets up the work to periodically update
717  * local time to octeon firmware
718  *
719  * @netdev - network device which should send time update to firmware
720  **/
721 static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
722 {
723 	struct lio *lio = GET_LIO(netdev);
724 	struct octeon_device *oct = lio->oct_dev;
725 
726 	lio->sync_octeon_time_wq.wq =
727 		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
728 	if (!lio->sync_octeon_time_wq.wq) {
729 		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
730 		return -1;
731 	}
732 	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
733 			  lio_sync_octeon_time);
734 	lio->sync_octeon_time_wq.wk.ctxptr = lio;
735 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
736 			   &lio->sync_octeon_time_wq.wk.work,
737 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
738 
739 	return 0;
740 }
741 
742 /**
743  * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
744  * to periodically update local time to octeon firmware
745  *
746  * @netdev - network device which should send time update to firmware
747  **/
748 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
749 {
750 	struct lio *lio = GET_LIO(netdev);
751 	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
752 
753 	if (time_wq->wq) {
754 		cancel_delayed_work_sync(&time_wq->wk.work);
755 		destroy_workqueue(time_wq->wq);
756 	}
757 }
758 
759 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
760 {
761 	struct octeon_device *other_oct;
762 
763 	other_oct = lio_get_device(oct->octeon_id + 1);
764 
765 	if (other_oct && other_oct->pci_dev) {
766 		int oct_busnum, other_oct_busnum;
767 
768 		oct_busnum = oct->pci_dev->bus->number;
769 		other_oct_busnum = other_oct->pci_dev->bus->number;
770 
771 		if (oct_busnum == other_oct_busnum) {
772 			int oct_slot, other_oct_slot;
773 
774 			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
775 			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
776 
777 			if (oct_slot == other_oct_slot)
778 				return other_oct;
779 		}
780 	}
781 
782 	return NULL;
783 }
784 
785 static void disable_all_vf_links(struct octeon_device *oct)
786 {
787 	struct net_device *netdev;
788 	int max_vfs, vf, i;
789 
790 	if (!oct)
791 		return;
792 
793 	max_vfs = oct->sriov_info.max_vfs;
794 
795 	for (i = 0; i < oct->ifcount; i++) {
796 		netdev = oct->props[i].netdev;
797 		if (!netdev)
798 			continue;
799 
800 		for (vf = 0; vf < max_vfs; vf++)
801 			liquidio_set_vf_link_state(netdev, vf,
802 						   IFLA_VF_LINK_STATE_DISABLE);
803 	}
804 }
805 
806 static int liquidio_watchdog(void *param)
807 {
808 	bool err_msg_was_printed[LIO_MAX_CORES];
809 	u16 mask_of_crashed_or_stuck_cores = 0;
810 	bool all_vf_links_are_disabled = false;
811 	struct octeon_device *oct = param;
812 	struct octeon_device *other_oct;
813 #ifdef CONFIG_MODULE_UNLOAD
814 	long refcount, vfs_referencing_pf;
815 	u64 vfs_mask1, vfs_mask2;
816 #endif
817 	int core;
818 
819 	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
820 
821 	while (!kthread_should_stop()) {
822 		/* sleep for a couple of seconds so that we don't hog the CPU */
823 		set_current_state(TASK_INTERRUPTIBLE);
824 		schedule_timeout(msecs_to_jiffies(2000));
825 
826 		mask_of_crashed_or_stuck_cores =
827 		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
828 
829 		if (!mask_of_crashed_or_stuck_cores)
830 			continue;
831 
832 		WRITE_ONCE(oct->cores_crashed, true);
833 		other_oct = get_other_octeon_device(oct);
834 		if (other_oct)
835 			WRITE_ONCE(other_oct->cores_crashed, true);
836 
837 		for (core = 0; core < LIO_MAX_CORES; core++) {
838 			bool core_crashed_or_got_stuck;
839 
840 			core_crashed_or_got_stuck =
841 						(mask_of_crashed_or_stuck_cores
842 						 >> core) & 1;
843 
844 			if (core_crashed_or_got_stuck &&
845 			    !err_msg_was_printed[core]) {
846 				dev_err(&oct->pci_dev->dev,
847 					"ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
848 					core);
849 				err_msg_was_printed[core] = true;
850 			}
851 		}
852 
853 		if (all_vf_links_are_disabled)
854 			continue;
855 
856 		disable_all_vf_links(oct);
857 		disable_all_vf_links(other_oct);
858 		all_vf_links_are_disabled = true;
859 
860 #ifdef CONFIG_MODULE_UNLOAD
861 		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
862 		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
863 
864 		vfs_referencing_pf  = hweight64(vfs_mask1);
865 		vfs_referencing_pf += hweight64(vfs_mask2);
866 
867 		refcount = module_refcount(THIS_MODULE);
868 		if (refcount >= vfs_referencing_pf) {
869 			while (vfs_referencing_pf) {
870 				module_put(THIS_MODULE);
871 				vfs_referencing_pf--;
872 			}
873 		}
874 #endif
875 	}
876 
877 	return 0;
878 }
879 
880 /**
881  * \brief PCI probe handler
882  * @param pdev PCI device structure
883  * @param ent unused
884  */
885 static int
886 liquidio_probe(struct pci_dev *pdev,
887 	       const struct pci_device_id *ent __attribute__((unused)))
888 {
889 	struct octeon_device *oct_dev = NULL;
890 	struct handshake *hs;
891 
892 	oct_dev = octeon_allocate_device(pdev->device,
893 					 sizeof(struct octeon_device_priv));
894 	if (!oct_dev) {
895 		dev_err(&pdev->dev, "Unable to allocate device\n");
896 		return -ENOMEM;
897 	}
898 
899 	if (pdev->device == OCTEON_CN23XX_PF_VID)
900 		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
901 
902 	/* Enable PTP for 6XXX Device */
903 	if (((pdev->device == OCTEON_CN66XX) ||
904 	     (pdev->device == OCTEON_CN68XX)))
905 		oct_dev->ptp_enable = true;
906 	else
907 		oct_dev->ptp_enable = false;
908 
909 	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
910 		 (u32)pdev->vendor, (u32)pdev->device);
911 
912 	/* Assign octeon_device for this device to the private data area. */
913 	pci_set_drvdata(pdev, oct_dev);
914 
915 	/* set linux specific device pointer */
916 	oct_dev->pci_dev = (void *)pdev;
917 
918 	oct_dev->subsystem_id = pdev->subsystem_vendor |
919 		(pdev->subsystem_device << 16);
920 
921 	hs = &handshake[oct_dev->octeon_id];
922 	init_completion(&hs->init);
923 	init_completion(&hs->started);
924 	hs->pci_dev = pdev;
925 
926 	if (oct_dev->octeon_id == 0)
927 		/* first LiquidIO NIC is detected */
928 		complete(&first_stage);
929 
930 	if (octeon_device_init(oct_dev)) {
931 		complete(&hs->init);
932 		liquidio_remove(pdev);
933 		return -ENOMEM;
934 	}
935 
936 	if (OCTEON_CN23XX_PF(oct_dev)) {
937 		u8 bus, device, function;
938 
939 		if (atomic_read(oct_dev->adapter_refcount) == 1) {
940 			/* Each NIC gets one watchdog kernel thread.  The first
941 			 * PF (of each NIC) that gets pci_driver->probe()'d
942 			 * creates that thread.
943 			 */
944 			bus = pdev->bus->number;
945 			device = PCI_SLOT(pdev->devfn);
946 			function = PCI_FUNC(pdev->devfn);
947 			oct_dev->watchdog_task = kthread_create(
948 			    liquidio_watchdog, oct_dev,
949 			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
950 			if (!IS_ERR(oct_dev->watchdog_task)) {
951 				wake_up_process(oct_dev->watchdog_task);
952 			} else {
953 				oct_dev->watchdog_task = NULL;
954 				dev_err(&oct_dev->pci_dev->dev,
955 					"failed to create kernel_thread\n");
956 				liquidio_remove(pdev);
957 				return -1;
958 			}
959 		}
960 	}
961 
962 	oct_dev->rx_pause = 1;
963 	oct_dev->tx_pause = 1;
964 
965 	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
966 
967 	return 0;
968 }
969 
970 static bool fw_type_is_auto(void)
971 {
972 	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
973 		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
974 }
975 
976 /**
977  * \brief PCI FLR for each Octeon device.
978  * @param oct octeon device
979  */
980 static void octeon_pci_flr(struct octeon_device *oct)
981 {
982 	int rc;
983 
984 	pci_save_state(oct->pci_dev);
985 
986 	pci_cfg_access_lock(oct->pci_dev);
987 
988 	/* Quiesce the device completely */
989 	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
990 			      PCI_COMMAND_INTX_DISABLE);
991 
992 	rc = __pci_reset_function_locked(oct->pci_dev);
993 
994 	if (rc != 0)
995 		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
996 			rc, oct->pf_num);
997 
998 	pci_cfg_access_unlock(oct->pci_dev);
999 
1000 	pci_restore_state(oct->pci_dev);
1001 }
1002 
1003 /**
1004  *\brief Destroy resources associated with octeon device
1005  * @param pdev PCI device structure
1006  * @param ent unused
1007  */
1008 static void octeon_destroy_resources(struct octeon_device *oct)
1009 {
1010 	int i, refcount;
1011 	struct msix_entry *msix_entries;
1012 	struct octeon_device_priv *oct_priv =
1013 		(struct octeon_device_priv *)oct->priv;
1014 
1015 	struct handshake *hs;
1016 
1017 	switch (atomic_read(&oct->status)) {
1018 	case OCT_DEV_RUNNING:
1019 	case OCT_DEV_CORE_OK:
1020 
1021 		/* No more instructions will be forwarded. */
1022 		atomic_set(&oct->status, OCT_DEV_IN_RESET);
1023 
1024 		oct->app_mode = CVM_DRV_INVALID_APP;
1025 		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
1026 			lio_get_state_string(&oct->status));
1027 
1028 		schedule_timeout_uninterruptible(HZ / 10);
1029 
1030 		/* fallthrough */
1031 	case OCT_DEV_HOST_OK:
1032 
1033 		/* fallthrough */
1034 	case OCT_DEV_CONSOLE_INIT_DONE:
1035 		/* Remove any consoles */
1036 		octeon_remove_consoles(oct);
1037 
1038 		/* fallthrough */
1039 	case OCT_DEV_IO_QUEUES_DONE:
1040 		if (wait_for_pending_requests(oct))
1041 			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1042 
1043 		if (lio_wait_for_instr_fetch(oct))
1044 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1045 
1046 		/* Disable the input and output queues now. No more packets will
1047 		 * arrive from Octeon, but we should wait for all packet
1048 		 * processing to finish.
1049 		 */
1050 		oct->fn_list.disable_io_queues(oct);
1051 
1052 		if (lio_wait_for_oq_pkts(oct))
1053 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
1054 
1055 	/* fallthrough */
1056 	case OCT_DEV_INTR_SET_DONE:
1057 		/* Disable interrupts  */
1058 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1059 
1060 		if (oct->msix_on) {
1061 			msix_entries = (struct msix_entry *)oct->msix_entries;
1062 			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1063 				if (oct->ioq_vector[i].vector) {
1064 					/* clear the affinity_cpumask */
1065 					irq_set_affinity_hint(
1066 							msix_entries[i].vector,
1067 							NULL);
1068 					free_irq(msix_entries[i].vector,
1069 						 &oct->ioq_vector[i]);
1070 					oct->ioq_vector[i].vector = 0;
1071 				}
1072 			}
1073 			/* non-iov vector's argument is oct struct */
1074 			free_irq(msix_entries[i].vector, oct);
1075 
1076 			pci_disable_msix(oct->pci_dev);
1077 			kfree(oct->msix_entries);
1078 			oct->msix_entries = NULL;
1079 		} else {
1080 			/* Release the interrupt line */
1081 			free_irq(oct->pci_dev->irq, oct);
1082 
1083 			if (oct->flags & LIO_FLAG_MSI_ENABLED)
1084 				pci_disable_msi(oct->pci_dev);
1085 		}
1086 
1087 		kfree(oct->irq_name_storage);
1088 		oct->irq_name_storage = NULL;
1089 
1090 	/* fallthrough */
1091 	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1092 		if (OCTEON_CN23XX_PF(oct))
1093 			octeon_free_ioq_vector(oct);
1094 
1095 	/* fallthrough */
1096 	case OCT_DEV_MBOX_SETUP_DONE:
1097 		if (OCTEON_CN23XX_PF(oct))
1098 			oct->fn_list.free_mbox(oct);
1099 
1100 	/* fallthrough */
1101 	case OCT_DEV_IN_RESET:
1102 	case OCT_DEV_DROQ_INIT_DONE:
1103 		/* Wait for any pending operations */
1104 		mdelay(100);
1105 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1106 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
1107 				continue;
1108 			octeon_delete_droq(oct, i);
1109 		}
1110 
1111 		/* Force any pending handshakes to complete */
1112 		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1113 			hs = &handshake[i];
1114 
1115 			if (hs->pci_dev) {
1116 				handshake[oct->octeon_id].init_ok = 0;
1117 				complete(&handshake[oct->octeon_id].init);
1118 				handshake[oct->octeon_id].started_ok = 0;
1119 				complete(&handshake[oct->octeon_id].started);
1120 			}
1121 		}
1122 
1123 		/* fallthrough */
1124 	case OCT_DEV_RESP_LIST_INIT_DONE:
1125 		octeon_delete_response_list(oct);
1126 
1127 		/* fallthrough */
1128 	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1129 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1130 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
1131 				continue;
1132 			octeon_delete_instr_queue(oct, i);
1133 		}
1134 #ifdef CONFIG_PCI_IOV
1135 		if (oct->sriov_info.sriov_enabled)
1136 			pci_disable_sriov(oct->pci_dev);
1137 #endif
1138 		/* fallthrough */
1139 	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1140 		octeon_free_sc_buffer_pool(oct);
1141 
1142 		/* fallthrough */
1143 	case OCT_DEV_DISPATCH_INIT_DONE:
1144 		octeon_delete_dispatch_list(oct);
1145 		cancel_delayed_work_sync(&oct->nic_poll_work.work);
1146 
1147 		/* fallthrough */
1148 	case OCT_DEV_PCI_MAP_DONE:
1149 		refcount = octeon_deregister_device(oct);
1150 
1151 		/* Soft reset the octeon device before exiting.
1152 		 * However, if fw was loaded from card (i.e. autoboot),
1153 		 * perform an FLR instead.
1154 		 * Implementation note: only soft-reset the device
1155 		 * if it is a CN6XXX OR the LAST CN23XX device.
1156 		 */
1157 		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1158 			octeon_pci_flr(oct);
1159 		else if (OCTEON_CN6XXX(oct) || !refcount)
1160 			oct->fn_list.soft_reset(oct);
1161 
1162 		octeon_unmap_pci_barx(oct, 0);
1163 		octeon_unmap_pci_barx(oct, 1);
1164 
1165 		/* fallthrough */
1166 	case OCT_DEV_PCI_ENABLE_DONE:
1167 		pci_clear_master(oct->pci_dev);
1168 		/* Disable the device, releasing the PCI INT */
1169 		pci_disable_device(oct->pci_dev);
1170 
1171 		/* fallthrough */
1172 	case OCT_DEV_BEGIN_STATE:
1173 		/* Nothing to be done here either */
1174 		break;
1175 	}                       /* end switch (oct->status) */
1176 
1177 	tasklet_kill(&oct_priv->droq_tasklet);
1178 }
1179 
1180 /**
1181  * \brief Callback for rx ctrl
1182  * @param status status of request
1183  * @param buf pointer to resp structure
1184  */
1185 static void rx_ctl_callback(struct octeon_device *oct,
1186 			    u32 status,
1187 			    void *buf)
1188 {
1189 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1190 	struct liquidio_rx_ctl_context *ctx;
1191 
1192 	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1193 
1194 	oct = lio_get_device(ctx->octeon_id);
1195 	if (status)
1196 		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
1197 			CVM_CAST64(status));
1198 	WRITE_ONCE(ctx->cond, 1);
1199 
1200 	/* This barrier is required to be sure that the response has been
1201 	 * written fully before waking up the handler
1202 	 */
1203 	wmb();
1204 
1205 	wake_up_interruptible(&ctx->wc);
1206 }
1207 
1208 /**
1209  * \brief Send Rx control command
1210  * @param lio per-network private data
1211  * @param start_stop whether to start or stop
1212  */
1213 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1214 {
1215 	struct octeon_soft_command *sc;
1216 	struct liquidio_rx_ctl_context *ctx;
1217 	union octnet_cmd *ncmd;
1218 	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
1219 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1220 	int retval;
1221 
1222 	if (oct->props[lio->ifidx].rx_on == start_stop)
1223 		return;
1224 
1225 	sc = (struct octeon_soft_command *)
1226 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1227 					  16, ctx_size);
1228 
1229 	ncmd = (union octnet_cmd *)sc->virtdptr;
1230 	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1231 
1232 	WRITE_ONCE(ctx->cond, 0);
1233 	ctx->octeon_id = lio_get_device_id(oct);
1234 	init_waitqueue_head(&ctx->wc);
1235 
1236 	ncmd->u64 = 0;
1237 	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1238 	ncmd->s.param1 = start_stop;
1239 
1240 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1241 
1242 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1243 
1244 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1245 				    OPCODE_NIC_CMD, 0, 0, 0);
1246 
1247 	sc->callback = rx_ctl_callback;
1248 	sc->callback_arg = sc;
1249 	sc->wait_time = 5000;
1250 
1251 	retval = octeon_send_soft_command(oct, sc);
1252 	if (retval == IQ_SEND_FAILED) {
1253 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1254 	} else {
1255 		/* Sleep on a wait queue till the cond flag indicates that the
1256 		 * response arrived or timed-out.
1257 		 */
1258 		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
1259 			return;
1260 		oct->props[lio->ifidx].rx_on = start_stop;
1261 	}
1262 
1263 	octeon_free_soft_command(oct, sc);
1264 }
1265 
1266 /**
1267  * \brief Destroy NIC device interface
1268  * @param oct octeon device
1269  * @param ifidx which interface to destroy
1270  *
1271  * Cleanup associated with each interface for an Octeon device  when NIC
1272  * module is being unloaded or if initialization fails during load.
1273  */
1274 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1275 {
1276 	struct net_device *netdev = oct->props[ifidx].netdev;
1277 	struct lio *lio;
1278 	struct napi_struct *napi, *n;
1279 
1280 	if (!netdev) {
1281 		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1282 			__func__, ifidx);
1283 		return;
1284 	}
1285 
1286 	lio = GET_LIO(netdev);
1287 
1288 	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1289 
1290 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1291 		liquidio_stop(netdev);
1292 
1293 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1294 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1295 			napi_disable(napi);
1296 
1297 		oct->props[lio->ifidx].napi_enabled = 0;
1298 
1299 		if (OCTEON_CN23XX_PF(oct))
1300 			oct->droq[0]->ops.poll_mode = 0;
1301 	}
1302 
1303 	/* Delete NAPI */
1304 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1305 		netif_napi_del(napi);
1306 
1307 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1308 		unregister_netdev(netdev);
1309 
1310 	cleanup_sync_octeon_time_wq(netdev);
1311 	cleanup_link_status_change_wq(netdev);
1312 
1313 	cleanup_rx_oom_poll_fn(netdev);
1314 
1315 	lio_delete_glists(lio);
1316 
1317 	free_netdev(netdev);
1318 
1319 	oct->props[ifidx].gmxport = -1;
1320 
1321 	oct->props[ifidx].netdev = NULL;
1322 }
1323 
1324 /**
1325  * \brief Stop complete NIC functionality
1326  * @param oct octeon device
1327  */
1328 static int liquidio_stop_nic_module(struct octeon_device *oct)
1329 {
1330 	int i, j;
1331 	struct lio *lio;
1332 
1333 	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1334 	if (!oct->ifcount) {
1335 		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1336 		return 1;
1337 	}
1338 
1339 	spin_lock_bh(&oct->cmd_resp_wqlock);
1340 	oct->cmd_resp_state = OCT_DRV_OFFLINE;
1341 	spin_unlock_bh(&oct->cmd_resp_wqlock);
1342 
1343 	lio_vf_rep_destroy(oct);
1344 
1345 	for (i = 0; i < oct->ifcount; i++) {
1346 		lio = GET_LIO(oct->props[i].netdev);
1347 		for (j = 0; j < oct->num_oqs; j++)
1348 			octeon_unregister_droq_ops(oct,
1349 						   lio->linfo.rxpciq[j].s.q_no);
1350 	}
1351 
1352 	for (i = 0; i < oct->ifcount; i++)
1353 		liquidio_destroy_nic_device(oct, i);
1354 
1355 	if (oct->devlink) {
1356 		devlink_unregister(oct->devlink);
1357 		devlink_free(oct->devlink);
1358 		oct->devlink = NULL;
1359 	}
1360 
1361 	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1362 	return 0;
1363 }
1364 
1365 /**
1366  * \brief Cleans up resources at unload time
1367  * @param pdev PCI device structure
1368  */
1369 static void liquidio_remove(struct pci_dev *pdev)
1370 {
1371 	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1372 
1373 	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1374 
1375 	if (oct_dev->watchdog_task)
1376 		kthread_stop(oct_dev->watchdog_task);
1377 
1378 	if (!oct_dev->octeon_id &&
1379 	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1380 		lio_vf_rep_modexit();
1381 
1382 	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1383 		liquidio_stop_nic_module(oct_dev);
1384 
1385 	/* Reset the octeon device and cleanup all memory allocated for
1386 	 * the octeon device by driver.
1387 	 */
1388 	octeon_destroy_resources(oct_dev);
1389 
1390 	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1391 
1392 	/* This octeon device has been removed. Update the global
1393 	 * data structure to reflect this. Free the device structure.
1394 	 */
1395 	octeon_free_device_mem(oct_dev);
1396 }
1397 
1398 /**
1399  * \brief Identify the Octeon device and to map the BAR address space
1400  * @param oct octeon device
1401  */
1402 static int octeon_chip_specific_setup(struct octeon_device *oct)
1403 {
1404 	u32 dev_id, rev_id;
1405 	int ret = 1;
1406 	char *s;
1407 
1408 	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1409 	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1410 	oct->rev_id = rev_id & 0xff;
1411 
1412 	switch (dev_id) {
1413 	case OCTEON_CN68XX_PCIID:
1414 		oct->chip_id = OCTEON_CN68XX;
1415 		ret = lio_setup_cn68xx_octeon_device(oct);
1416 		s = "CN68XX";
1417 		break;
1418 
1419 	case OCTEON_CN66XX_PCIID:
1420 		oct->chip_id = OCTEON_CN66XX;
1421 		ret = lio_setup_cn66xx_octeon_device(oct);
1422 		s = "CN66XX";
1423 		break;
1424 
1425 	case OCTEON_CN23XX_PCIID_PF:
1426 		oct->chip_id = OCTEON_CN23XX_PF_VID;
1427 		ret = setup_cn23xx_octeon_pf_device(oct);
1428 		if (ret)
1429 			break;
1430 #ifdef CONFIG_PCI_IOV
1431 		if (!ret)
1432 			pci_sriov_set_totalvfs(oct->pci_dev,
1433 					       oct->sriov_info.max_vfs);
1434 #endif
1435 		s = "CN23XX";
1436 		break;
1437 
1438 	default:
1439 		s = "?";
1440 		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1441 			dev_id);
1442 	}
1443 
1444 	if (!ret)
1445 		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1446 			 OCTEON_MAJOR_REV(oct),
1447 			 OCTEON_MINOR_REV(oct),
1448 			 octeon_get_conf(oct)->card_name,
1449 			 LIQUIDIO_VERSION);
1450 
1451 	return ret;
1452 }
1453 
1454 /**
1455  * \brief PCI initialization for each Octeon device.
1456  * @param oct octeon device
1457  */
1458 static int octeon_pci_os_setup(struct octeon_device *oct)
1459 {
1460 	/* setup PCI stuff first */
1461 	if (pci_enable_device(oct->pci_dev)) {
1462 		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1463 		return 1;
1464 	}
1465 
1466 	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1467 		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1468 		pci_disable_device(oct->pci_dev);
1469 		return 1;
1470 	}
1471 
1472 	/* Enable PCI DMA Master. */
1473 	pci_set_master(oct->pci_dev);
1474 
1475 	return 0;
1476 }
1477 
1478 /**
1479  * \brief Unmap and free network buffer
1480  * @param buf buffer
1481  */
1482 static void free_netbuf(void *buf)
1483 {
1484 	struct sk_buff *skb;
1485 	struct octnet_buf_free_info *finfo;
1486 	struct lio *lio;
1487 
1488 	finfo = (struct octnet_buf_free_info *)buf;
1489 	skb = finfo->skb;
1490 	lio = finfo->lio;
1491 
1492 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1493 			 DMA_TO_DEVICE);
1494 
1495 	tx_buffer_free(skb);
1496 }
1497 
1498 /**
1499  * \brief Unmap and free gather buffer
1500  * @param buf buffer
1501  */
1502 static void free_netsgbuf(void *buf)
1503 {
1504 	struct octnet_buf_free_info *finfo;
1505 	struct sk_buff *skb;
1506 	struct lio *lio;
1507 	struct octnic_gather *g;
1508 	int i, frags, iq;
1509 
1510 	finfo = (struct octnet_buf_free_info *)buf;
1511 	skb = finfo->skb;
1512 	lio = finfo->lio;
1513 	g = finfo->g;
1514 	frags = skb_shinfo(skb)->nr_frags;
1515 
1516 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1517 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1518 			 DMA_TO_DEVICE);
1519 
1520 	i = 1;
1521 	while (frags--) {
1522 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1523 
1524 		pci_unmap_page((lio->oct_dev)->pci_dev,
1525 			       g->sg[(i >> 2)].ptr[(i & 3)],
1526 			       frag->size, DMA_TO_DEVICE);
1527 		i++;
1528 	}
1529 
1530 	iq = skb_iq(lio->oct_dev, skb);
1531 	spin_lock(&lio->glist_lock[iq]);
1532 	list_add_tail(&g->list, &lio->glist[iq]);
1533 	spin_unlock(&lio->glist_lock[iq]);
1534 
1535 	tx_buffer_free(skb);
1536 }
1537 
1538 /**
1539  * \brief Unmap and free gather buffer with response
1540  * @param buf buffer
1541  */
1542 static void free_netsgbuf_with_resp(void *buf)
1543 {
1544 	struct octeon_soft_command *sc;
1545 	struct octnet_buf_free_info *finfo;
1546 	struct sk_buff *skb;
1547 	struct lio *lio;
1548 	struct octnic_gather *g;
1549 	int i, frags, iq;
1550 
1551 	sc = (struct octeon_soft_command *)buf;
1552 	skb = (struct sk_buff *)sc->callback_arg;
1553 	finfo = (struct octnet_buf_free_info *)&skb->cb;
1554 
1555 	lio = finfo->lio;
1556 	g = finfo->g;
1557 	frags = skb_shinfo(skb)->nr_frags;
1558 
1559 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1560 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1561 			 DMA_TO_DEVICE);
1562 
1563 	i = 1;
1564 	while (frags--) {
1565 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1566 
1567 		pci_unmap_page((lio->oct_dev)->pci_dev,
1568 			       g->sg[(i >> 2)].ptr[(i & 3)],
1569 			       frag->size, DMA_TO_DEVICE);
1570 		i++;
1571 	}
1572 
1573 	iq = skb_iq(lio->oct_dev, skb);
1574 
1575 	spin_lock(&lio->glist_lock[iq]);
1576 	list_add_tail(&g->list, &lio->glist[iq]);
1577 	spin_unlock(&lio->glist_lock[iq]);
1578 
1579 	/* Don't free the skb yet */
1580 }
1581 
1582 /**
1583  * \brief Adjust ptp frequency
1584  * @param ptp PTP clock info
1585  * @param ppb how much to adjust by, in parts-per-billion
1586  */
1587 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1588 {
1589 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1590 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1591 	u64 comp, delta;
1592 	unsigned long flags;
1593 	bool neg_adj = false;
1594 
1595 	if (ppb < 0) {
1596 		neg_adj = true;
1597 		ppb = -ppb;
1598 	}
1599 
1600 	/* The hardware adds the clock compensation value to the
1601 	 * PTP clock on every coprocessor clock cycle, so we
1602 	 * compute the delta in terms of coprocessor clocks.
1603 	 */
1604 	delta = (u64)ppb << 32;
1605 	do_div(delta, oct->coproc_clock_rate);
1606 
1607 	spin_lock_irqsave(&lio->ptp_lock, flags);
1608 	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1609 	if (neg_adj)
1610 		comp -= delta;
1611 	else
1612 		comp += delta;
1613 	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1614 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1615 
1616 	return 0;
1617 }
1618 
1619 /**
1620  * \brief Adjust ptp time
1621  * @param ptp PTP clock info
1622  * @param delta how much to adjust by, in nanosecs
1623  */
1624 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1625 {
1626 	unsigned long flags;
1627 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1628 
1629 	spin_lock_irqsave(&lio->ptp_lock, flags);
1630 	lio->ptp_adjust += delta;
1631 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1632 
1633 	return 0;
1634 }
1635 
1636 /**
1637  * \brief Get hardware clock time, including any adjustment
1638  * @param ptp PTP clock info
1639  * @param ts timespec
1640  */
1641 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1642 				struct timespec64 *ts)
1643 {
1644 	u64 ns;
1645 	unsigned long flags;
1646 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1647 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1648 
1649 	spin_lock_irqsave(&lio->ptp_lock, flags);
1650 	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1651 	ns += lio->ptp_adjust;
1652 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1653 
1654 	*ts = ns_to_timespec64(ns);
1655 
1656 	return 0;
1657 }
1658 
1659 /**
1660  * \brief Set hardware clock time. Reset adjustment
1661  * @param ptp PTP clock info
1662  * @param ts timespec
1663  */
1664 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1665 				const struct timespec64 *ts)
1666 {
1667 	u64 ns;
1668 	unsigned long flags;
1669 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1670 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1671 
1672 	ns = timespec64_to_ns(ts);
1673 
1674 	spin_lock_irqsave(&lio->ptp_lock, flags);
1675 	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1676 	lio->ptp_adjust = 0;
1677 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1678 
1679 	return 0;
1680 }
1681 
1682 /**
1683  * \brief Check if PTP is enabled
1684  * @param ptp PTP clock info
1685  * @param rq request
1686  * @param on is it on
1687  */
1688 static int
1689 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1690 		    struct ptp_clock_request *rq __attribute__((unused)),
1691 		    int on __attribute__((unused)))
1692 {
1693 	return -EOPNOTSUPP;
1694 }
1695 
1696 /**
1697  * \brief Open PTP clock source
1698  * @param netdev network device
1699  */
1700 static void oct_ptp_open(struct net_device *netdev)
1701 {
1702 	struct lio *lio = GET_LIO(netdev);
1703 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1704 
1705 	spin_lock_init(&lio->ptp_lock);
1706 
1707 	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1708 	lio->ptp_info.owner = THIS_MODULE;
1709 	lio->ptp_info.max_adj = 250000000;
1710 	lio->ptp_info.n_alarm = 0;
1711 	lio->ptp_info.n_ext_ts = 0;
1712 	lio->ptp_info.n_per_out = 0;
1713 	lio->ptp_info.pps = 0;
1714 	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1715 	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1716 	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1717 	lio->ptp_info.settime64 = liquidio_ptp_settime;
1718 	lio->ptp_info.enable = liquidio_ptp_enable;
1719 
1720 	lio->ptp_adjust = 0;
1721 
1722 	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1723 					     &oct->pci_dev->dev);
1724 
1725 	if (IS_ERR(lio->ptp_clock))
1726 		lio->ptp_clock = NULL;
1727 }
1728 
1729 /**
1730  * \brief Init PTP clock
1731  * @param oct octeon device
1732  */
1733 static void liquidio_ptp_init(struct octeon_device *oct)
1734 {
1735 	u64 clock_comp, cfg;
1736 
1737 	clock_comp = (u64)NSEC_PER_SEC << 32;
1738 	do_div(clock_comp, oct->coproc_clock_rate);
1739 	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1740 
1741 	/* Enable */
1742 	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1743 	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1744 }
1745 
1746 /**
1747  * \brief Load firmware to device
1748  * @param oct octeon device
1749  *
1750  * Maps device to firmware filename, requests firmware, and downloads it
1751  */
1752 static int load_firmware(struct octeon_device *oct)
1753 {
1754 	int ret = 0;
1755 	const struct firmware *fw;
1756 	char fw_name[LIO_MAX_FW_FILENAME_LEN];
1757 	char *tmp_fw_type;
1758 
1759 	if (fw_type_is_auto()) {
1760 		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1761 		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1762 	} else {
1763 		tmp_fw_type = fw_type;
1764 	}
1765 
1766 	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1767 		octeon_get_conf(oct)->card_name, tmp_fw_type,
1768 		LIO_FW_NAME_SUFFIX);
1769 
1770 	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1771 	if (ret) {
1772 		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1773 			fw_name);
1774 		release_firmware(fw);
1775 		return ret;
1776 	}
1777 
1778 	ret = octeon_download_firmware(oct, fw->data, fw->size);
1779 
1780 	release_firmware(fw);
1781 
1782 	return ret;
1783 }
1784 
1785 /**
1786  * \brief Poll routine for checking transmit queue status
1787  * @param work work_struct data structure
1788  */
1789 static void octnet_poll_check_txq_status(struct work_struct *work)
1790 {
1791 	struct cavium_wk *wk = (struct cavium_wk *)work;
1792 	struct lio *lio = (struct lio *)wk->ctxptr;
1793 
1794 	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1795 		return;
1796 
1797 	check_txq_status(lio);
1798 	queue_delayed_work(lio->txq_status_wq.wq,
1799 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1800 }
1801 
1802 /**
1803  * \brief Sets up the txq poll check
1804  * @param netdev network device
1805  */
1806 static inline int setup_tx_poll_fn(struct net_device *netdev)
1807 {
1808 	struct lio *lio = GET_LIO(netdev);
1809 	struct octeon_device *oct = lio->oct_dev;
1810 
1811 	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1812 						WQ_MEM_RECLAIM, 0);
1813 	if (!lio->txq_status_wq.wq) {
1814 		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1815 		return -1;
1816 	}
1817 	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1818 			  octnet_poll_check_txq_status);
1819 	lio->txq_status_wq.wk.ctxptr = lio;
1820 	queue_delayed_work(lio->txq_status_wq.wq,
1821 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1822 	return 0;
1823 }
1824 
1825 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1826 {
1827 	struct lio *lio = GET_LIO(netdev);
1828 
1829 	if (lio->txq_status_wq.wq) {
1830 		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1831 		destroy_workqueue(lio->txq_status_wq.wq);
1832 	}
1833 }
1834 
1835 /**
1836  * \brief Net device open for LiquidIO
1837  * @param netdev network device
1838  */
1839 static int liquidio_open(struct net_device *netdev)
1840 {
1841 	struct lio *lio = GET_LIO(netdev);
1842 	struct octeon_device *oct = lio->oct_dev;
1843 	struct napi_struct *napi, *n;
1844 
1845 	if (oct->props[lio->ifidx].napi_enabled == 0) {
1846 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1847 			napi_enable(napi);
1848 
1849 		oct->props[lio->ifidx].napi_enabled = 1;
1850 
1851 		if (OCTEON_CN23XX_PF(oct))
1852 			oct->droq[0]->ops.poll_mode = 1;
1853 	}
1854 
1855 	if (oct->ptp_enable)
1856 		oct_ptp_open(netdev);
1857 
1858 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
1859 
1860 	if (OCTEON_CN23XX_PF(oct)) {
1861 		if (!oct->msix_on)
1862 			if (setup_tx_poll_fn(netdev))
1863 				return -1;
1864 	} else {
1865 		if (setup_tx_poll_fn(netdev))
1866 			return -1;
1867 	}
1868 
1869 	netif_tx_start_all_queues(netdev);
1870 
1871 	/* Ready for link status updates */
1872 	lio->intf_open = 1;
1873 
1874 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1875 
1876 	/* tell Octeon to start forwarding packets to host */
1877 	send_rx_ctrl_cmd(lio, 1);
1878 
1879 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1880 		 netdev->name);
1881 
1882 	return 0;
1883 }
1884 
1885 /**
1886  * \brief Net device stop for LiquidIO
1887  * @param netdev network device
1888  */
1889 static int liquidio_stop(struct net_device *netdev)
1890 {
1891 	struct lio *lio = GET_LIO(netdev);
1892 	struct octeon_device *oct = lio->oct_dev;
1893 	struct napi_struct *napi, *n;
1894 
1895 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1896 
1897 	/* Stop any link updates */
1898 	lio->intf_open = 0;
1899 
1900 	stop_txqs(netdev);
1901 
1902 	/* Inform that netif carrier is down */
1903 	netif_carrier_off(netdev);
1904 	netif_tx_disable(netdev);
1905 
1906 	lio->linfo.link.s.link_up = 0;
1907 	lio->link_changes++;
1908 
1909 	/* Tell Octeon that nic interface is down. */
1910 	send_rx_ctrl_cmd(lio, 0);
1911 
1912 	if (OCTEON_CN23XX_PF(oct)) {
1913 		if (!oct->msix_on)
1914 			cleanup_tx_poll_fn(netdev);
1915 	} else {
1916 		cleanup_tx_poll_fn(netdev);
1917 	}
1918 
1919 	if (lio->ptp_clock) {
1920 		ptp_clock_unregister(lio->ptp_clock);
1921 		lio->ptp_clock = NULL;
1922 	}
1923 
1924 	/* Wait for any pending Rx descriptors */
1925 	if (lio_wait_for_clean_oq(oct))
1926 		netif_info(lio, rx_err, lio->netdev,
1927 			   "Proceeding with stop interface after partial RX desc processing\n");
1928 
1929 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1930 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1931 			napi_disable(napi);
1932 
1933 		oct->props[lio->ifidx].napi_enabled = 0;
1934 
1935 		if (OCTEON_CN23XX_PF(oct))
1936 			oct->droq[0]->ops.poll_mode = 0;
1937 	}
1938 
1939 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1940 
1941 	return 0;
1942 }
1943 
1944 /**
1945  * \brief Converts a mask based on net device flags
1946  * @param netdev network device
1947  *
1948  * This routine generates a octnet_ifflags mask from the net device flags
1949  * received from the OS.
1950  */
1951 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1952 {
1953 	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1954 
1955 	if (netdev->flags & IFF_PROMISC)
1956 		f |= OCTNET_IFFLAG_PROMISC;
1957 
1958 	if (netdev->flags & IFF_ALLMULTI)
1959 		f |= OCTNET_IFFLAG_ALLMULTI;
1960 
1961 	if (netdev->flags & IFF_MULTICAST) {
1962 		f |= OCTNET_IFFLAG_MULTICAST;
1963 
1964 		/* Accept all multicast addresses if there are more than we
1965 		 * can handle
1966 		 */
1967 		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1968 			f |= OCTNET_IFFLAG_ALLMULTI;
1969 	}
1970 
1971 	if (netdev->flags & IFF_BROADCAST)
1972 		f |= OCTNET_IFFLAG_BROADCAST;
1973 
1974 	return f;
1975 }
1976 
1977 /**
1978  * \brief Net device set_multicast_list
1979  * @param netdev network device
1980  */
1981 static void liquidio_set_mcast_list(struct net_device *netdev)
1982 {
1983 	struct lio *lio = GET_LIO(netdev);
1984 	struct octeon_device *oct = lio->oct_dev;
1985 	struct octnic_ctrl_pkt nctrl;
1986 	struct netdev_hw_addr *ha;
1987 	u64 *mc;
1988 	int ret;
1989 	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1990 
1991 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1992 
1993 	/* Create a ctrl pkt command to be sent to core app. */
1994 	nctrl.ncmd.u64 = 0;
1995 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1996 	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1997 	nctrl.ncmd.s.param2 = mc_count;
1998 	nctrl.ncmd.s.more = mc_count;
1999 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2000 	nctrl.netpndev = (u64)netdev;
2001 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2002 
2003 	/* copy all the addresses into the udd */
2004 	mc = &nctrl.udd[0];
2005 	netdev_for_each_mc_addr(ha, netdev) {
2006 		*mc = 0;
2007 		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2008 		/* no need to swap bytes */
2009 
2010 		if (++mc > &nctrl.udd[mc_count])
2011 			break;
2012 	}
2013 
2014 	/* Apparently, any activity in this call from the kernel has to
2015 	 * be atomic. So we won't wait for response.
2016 	 */
2017 	nctrl.wait_time = 0;
2018 
2019 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2020 	if (ret < 0) {
2021 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2022 			ret);
2023 	}
2024 }
2025 
2026 /**
2027  * \brief Net device set_mac_address
2028  * @param netdev network device
2029  */
2030 static int liquidio_set_mac(struct net_device *netdev, void *p)
2031 {
2032 	int ret = 0;
2033 	struct lio *lio = GET_LIO(netdev);
2034 	struct octeon_device *oct = lio->oct_dev;
2035 	struct sockaddr *addr = (struct sockaddr *)p;
2036 	struct octnic_ctrl_pkt nctrl;
2037 
2038 	if (!is_valid_ether_addr(addr->sa_data))
2039 		return -EADDRNOTAVAIL;
2040 
2041 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2042 
2043 	nctrl.ncmd.u64 = 0;
2044 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2045 	nctrl.ncmd.s.param1 = 0;
2046 	nctrl.ncmd.s.more = 1;
2047 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2048 	nctrl.netpndev = (u64)netdev;
2049 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2050 	nctrl.wait_time = 100;
2051 
2052 	nctrl.udd[0] = 0;
2053 	/* The MAC Address is presented in network byte order. */
2054 	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2055 
2056 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2057 	if (ret < 0) {
2058 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2059 		return -ENOMEM;
2060 	}
2061 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2062 	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2063 
2064 	return 0;
2065 }
2066 
2067 static void
2068 liquidio_get_stats64(struct net_device *netdev,
2069 		     struct rtnl_link_stats64 *lstats)
2070 {
2071 	struct lio *lio = GET_LIO(netdev);
2072 	struct octeon_device *oct;
2073 	u64 pkts = 0, drop = 0, bytes = 0;
2074 	struct oct_droq_stats *oq_stats;
2075 	struct oct_iq_stats *iq_stats;
2076 	int i, iq_no, oq_no;
2077 
2078 	oct = lio->oct_dev;
2079 
2080 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2081 		return;
2082 
2083 	for (i = 0; i < oct->num_iqs; i++) {
2084 		iq_no = lio->linfo.txpciq[i].s.q_no;
2085 		iq_stats = &oct->instr_queue[iq_no]->stats;
2086 		pkts += iq_stats->tx_done;
2087 		drop += iq_stats->tx_dropped;
2088 		bytes += iq_stats->tx_tot_bytes;
2089 	}
2090 
2091 	lstats->tx_packets = pkts;
2092 	lstats->tx_bytes = bytes;
2093 	lstats->tx_dropped = drop;
2094 
2095 	pkts = 0;
2096 	drop = 0;
2097 	bytes = 0;
2098 
2099 	for (i = 0; i < oct->num_oqs; i++) {
2100 		oq_no = lio->linfo.rxpciq[i].s.q_no;
2101 		oq_stats = &oct->droq[oq_no]->stats;
2102 		pkts += oq_stats->rx_pkts_received;
2103 		drop += (oq_stats->rx_dropped +
2104 			 oq_stats->dropped_nodispatch +
2105 			 oq_stats->dropped_toomany +
2106 			 oq_stats->dropped_nomem);
2107 		bytes += oq_stats->rx_bytes_received;
2108 	}
2109 
2110 	lstats->rx_bytes = bytes;
2111 	lstats->rx_packets = pkts;
2112 	lstats->rx_dropped = drop;
2113 
2114 	octnet_get_link_stats(netdev);
2115 	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2116 	lstats->collisions = oct->link_stats.fromhost.total_collisions;
2117 
2118 	/* detailed rx_errors: */
2119 	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2120 	/* recved pkt with crc error    */
2121 	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2122 	/* recv'd frame alignment error */
2123 	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2124 	/* recv'r fifo overrun */
2125 	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2126 
2127 	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2128 		lstats->rx_frame_errors + lstats->rx_fifo_errors;
2129 
2130 	/* detailed tx_errors */
2131 	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2132 	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2133 	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2134 
2135 	lstats->tx_errors = lstats->tx_aborted_errors +
2136 		lstats->tx_carrier_errors +
2137 		lstats->tx_fifo_errors;
2138 }
2139 
2140 /**
2141  * \brief Handler for SIOCSHWTSTAMP ioctl
2142  * @param netdev network device
2143  * @param ifr interface request
2144  * @param cmd command
2145  */
2146 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2147 {
2148 	struct hwtstamp_config conf;
2149 	struct lio *lio = GET_LIO(netdev);
2150 
2151 	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2152 		return -EFAULT;
2153 
2154 	if (conf.flags)
2155 		return -EINVAL;
2156 
2157 	switch (conf.tx_type) {
2158 	case HWTSTAMP_TX_ON:
2159 	case HWTSTAMP_TX_OFF:
2160 		break;
2161 	default:
2162 		return -ERANGE;
2163 	}
2164 
2165 	switch (conf.rx_filter) {
2166 	case HWTSTAMP_FILTER_NONE:
2167 		break;
2168 	case HWTSTAMP_FILTER_ALL:
2169 	case HWTSTAMP_FILTER_SOME:
2170 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2171 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2172 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2173 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2174 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2175 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2176 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2177 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2178 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2179 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2180 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2181 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2182 	case HWTSTAMP_FILTER_NTP_ALL:
2183 		conf.rx_filter = HWTSTAMP_FILTER_ALL;
2184 		break;
2185 	default:
2186 		return -ERANGE;
2187 	}
2188 
2189 	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2190 		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2191 
2192 	else
2193 		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2194 
2195 	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2196 }
2197 
2198 /**
2199  * \brief ioctl handler
2200  * @param netdev network device
2201  * @param ifr interface request
2202  * @param cmd command
2203  */
2204 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2205 {
2206 	struct lio *lio = GET_LIO(netdev);
2207 
2208 	switch (cmd) {
2209 	case SIOCSHWTSTAMP:
2210 		if (lio->oct_dev->ptp_enable)
2211 			return hwtstamp_ioctl(netdev, ifr);
2212 		/* fall through */
2213 	default:
2214 		return -EOPNOTSUPP;
2215 	}
2216 }
2217 
2218 /**
2219  * \brief handle a Tx timestamp response
2220  * @param status response status
2221  * @param buf pointer to skb
2222  */
2223 static void handle_timestamp(struct octeon_device *oct,
2224 			     u32 status,
2225 			     void *buf)
2226 {
2227 	struct octnet_buf_free_info *finfo;
2228 	struct octeon_soft_command *sc;
2229 	struct oct_timestamp_resp *resp;
2230 	struct lio *lio;
2231 	struct sk_buff *skb = (struct sk_buff *)buf;
2232 
2233 	finfo = (struct octnet_buf_free_info *)skb->cb;
2234 	lio = finfo->lio;
2235 	sc = finfo->sc;
2236 	oct = lio->oct_dev;
2237 	resp = (struct oct_timestamp_resp *)sc->virtrptr;
2238 
2239 	if (status != OCTEON_REQUEST_DONE) {
2240 		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2241 			CVM_CAST64(status));
2242 		resp->timestamp = 0;
2243 	}
2244 
2245 	octeon_swap_8B_data(&resp->timestamp, 1);
2246 
2247 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2248 		struct skb_shared_hwtstamps ts;
2249 		u64 ns = resp->timestamp;
2250 
2251 		netif_info(lio, tx_done, lio->netdev,
2252 			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2253 			   skb, (unsigned long long)ns);
2254 		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2255 		skb_tstamp_tx(skb, &ts);
2256 	}
2257 
2258 	octeon_free_soft_command(oct, sc);
2259 	tx_buffer_free(skb);
2260 }
2261 
2262 /* \brief Send a data packet that will be timestamped
2263  * @param oct octeon device
2264  * @param ndata pointer to network data
2265  * @param finfo pointer to private network data
2266  */
2267 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2268 					 struct octnic_data_pkt *ndata,
2269 					 struct octnet_buf_free_info *finfo,
2270 					 int xmit_more)
2271 {
2272 	int retval;
2273 	struct octeon_soft_command *sc;
2274 	struct lio *lio;
2275 	int ring_doorbell;
2276 	u32 len;
2277 
2278 	lio = finfo->lio;
2279 
2280 	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2281 					    sizeof(struct oct_timestamp_resp));
2282 	finfo->sc = sc;
2283 
2284 	if (!sc) {
2285 		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2286 		return IQ_SEND_FAILED;
2287 	}
2288 
2289 	if (ndata->reqtype == REQTYPE_NORESP_NET)
2290 		ndata->reqtype = REQTYPE_RESP_NET;
2291 	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2292 		ndata->reqtype = REQTYPE_RESP_NET_SG;
2293 
2294 	sc->callback = handle_timestamp;
2295 	sc->callback_arg = finfo->skb;
2296 	sc->iq_no = ndata->q_no;
2297 
2298 	if (OCTEON_CN23XX_PF(oct))
2299 		len = (u32)((struct octeon_instr_ih3 *)
2300 			    (&sc->cmd.cmd3.ih3))->dlengsz;
2301 	else
2302 		len = (u32)((struct octeon_instr_ih2 *)
2303 			    (&sc->cmd.cmd2.ih2))->dlengsz;
2304 
2305 	ring_doorbell = !xmit_more;
2306 
2307 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2308 				     sc, len, ndata->reqtype);
2309 
2310 	if (retval == IQ_SEND_FAILED) {
2311 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2312 			retval);
2313 		octeon_free_soft_command(oct, sc);
2314 	} else {
2315 		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2316 	}
2317 
2318 	return retval;
2319 }
2320 
2321 /** \brief Transmit networks packets to the Octeon interface
2322  * @param skbuff   skbuff struct to be passed to network layer.
2323  * @param netdev    pointer to network device
2324  * @returns whether the packet was transmitted to the device okay or not
2325  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2326  */
2327 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2328 {
2329 	struct lio *lio;
2330 	struct octnet_buf_free_info *finfo;
2331 	union octnic_cmd_setup cmdsetup;
2332 	struct octnic_data_pkt ndata;
2333 	struct octeon_device *oct;
2334 	struct oct_iq_stats *stats;
2335 	struct octeon_instr_irh *irh;
2336 	union tx_info *tx_info;
2337 	int status = 0;
2338 	int q_idx = 0, iq_no = 0;
2339 	int j, xmit_more = 0;
2340 	u64 dptr = 0;
2341 	u32 tag = 0;
2342 
2343 	lio = GET_LIO(netdev);
2344 	oct = lio->oct_dev;
2345 
2346 	q_idx = skb_iq(oct, skb);
2347 	tag = q_idx;
2348 	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2349 
2350 	stats = &oct->instr_queue[iq_no]->stats;
2351 
2352 	/* Check for all conditions in which the current packet cannot be
2353 	 * transmitted.
2354 	 */
2355 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2356 	    (!lio->linfo.link.s.link_up) ||
2357 	    (skb->len <= 0)) {
2358 		netif_info(lio, tx_err, lio->netdev,
2359 			   "Transmit failed link_status : %d\n",
2360 			   lio->linfo.link.s.link_up);
2361 		goto lio_xmit_failed;
2362 	}
2363 
2364 	/* Use space in skb->cb to store info used to unmap and
2365 	 * free the buffers.
2366 	 */
2367 	finfo = (struct octnet_buf_free_info *)skb->cb;
2368 	finfo->lio = lio;
2369 	finfo->skb = skb;
2370 	finfo->sc = NULL;
2371 
2372 	/* Prepare the attributes for the data to be passed to OSI. */
2373 	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2374 
2375 	ndata.buf = (void *)finfo;
2376 
2377 	ndata.q_no = iq_no;
2378 
2379 	if (octnet_iq_is_full(oct, ndata.q_no)) {
2380 		/* defer sending if queue is full */
2381 		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2382 			   ndata.q_no);
2383 		stats->tx_iq_busy++;
2384 		return NETDEV_TX_BUSY;
2385 	}
2386 
2387 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2388 	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2389 	 */
2390 
2391 	ndata.datasize = skb->len;
2392 
2393 	cmdsetup.u64 = 0;
2394 	cmdsetup.s.iq_no = iq_no;
2395 
2396 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2397 		if (skb->encapsulation) {
2398 			cmdsetup.s.tnl_csum = 1;
2399 			stats->tx_vxlan++;
2400 		} else {
2401 			cmdsetup.s.transport_csum = 1;
2402 		}
2403 	}
2404 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2405 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2406 		cmdsetup.s.timestamp = 1;
2407 	}
2408 
2409 	if (skb_shinfo(skb)->nr_frags == 0) {
2410 		cmdsetup.s.u.datasize = skb->len;
2411 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2412 
2413 		/* Offload checksum calculation for TCP/UDP packets */
2414 		dptr = dma_map_single(&oct->pci_dev->dev,
2415 				      skb->data,
2416 				      skb->len,
2417 				      DMA_TO_DEVICE);
2418 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2419 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2420 				__func__);
2421 			stats->tx_dmamap_fail++;
2422 			return NETDEV_TX_BUSY;
2423 		}
2424 
2425 		if (OCTEON_CN23XX_PF(oct))
2426 			ndata.cmd.cmd3.dptr = dptr;
2427 		else
2428 			ndata.cmd.cmd2.dptr = dptr;
2429 		finfo->dptr = dptr;
2430 		ndata.reqtype = REQTYPE_NORESP_NET;
2431 
2432 	} else {
2433 		int i, frags;
2434 		struct skb_frag_struct *frag;
2435 		struct octnic_gather *g;
2436 
2437 		spin_lock(&lio->glist_lock[q_idx]);
2438 		g = (struct octnic_gather *)
2439 			lio_list_delete_head(&lio->glist[q_idx]);
2440 		spin_unlock(&lio->glist_lock[q_idx]);
2441 
2442 		if (!g) {
2443 			netif_info(lio, tx_err, lio->netdev,
2444 				   "Transmit scatter gather: glist null!\n");
2445 			goto lio_xmit_failed;
2446 		}
2447 
2448 		cmdsetup.s.gather = 1;
2449 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2450 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2451 
2452 		memset(g->sg, 0, g->sg_size);
2453 
2454 		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2455 						 skb->data,
2456 						 (skb->len - skb->data_len),
2457 						 DMA_TO_DEVICE);
2458 		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2459 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2460 				__func__);
2461 			stats->tx_dmamap_fail++;
2462 			return NETDEV_TX_BUSY;
2463 		}
2464 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2465 
2466 		frags = skb_shinfo(skb)->nr_frags;
2467 		i = 1;
2468 		while (frags--) {
2469 			frag = &skb_shinfo(skb)->frags[i - 1];
2470 
2471 			g->sg[(i >> 2)].ptr[(i & 3)] =
2472 				dma_map_page(&oct->pci_dev->dev,
2473 					     frag->page.p,
2474 					     frag->page_offset,
2475 					     frag->size,
2476 					     DMA_TO_DEVICE);
2477 
2478 			if (dma_mapping_error(&oct->pci_dev->dev,
2479 					      g->sg[i >> 2].ptr[i & 3])) {
2480 				dma_unmap_single(&oct->pci_dev->dev,
2481 						 g->sg[0].ptr[0],
2482 						 skb->len - skb->data_len,
2483 						 DMA_TO_DEVICE);
2484 				for (j = 1; j < i; j++) {
2485 					frag = &skb_shinfo(skb)->frags[j - 1];
2486 					dma_unmap_page(&oct->pci_dev->dev,
2487 						       g->sg[j >> 2].ptr[j & 3],
2488 						       frag->size,
2489 						       DMA_TO_DEVICE);
2490 				}
2491 				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2492 					__func__);
2493 				return NETDEV_TX_BUSY;
2494 			}
2495 
2496 			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2497 			i++;
2498 		}
2499 
2500 		dptr = g->sg_dma_ptr;
2501 
2502 		if (OCTEON_CN23XX_PF(oct))
2503 			ndata.cmd.cmd3.dptr = dptr;
2504 		else
2505 			ndata.cmd.cmd2.dptr = dptr;
2506 		finfo->dptr = dptr;
2507 		finfo->g = g;
2508 
2509 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
2510 	}
2511 
2512 	if (OCTEON_CN23XX_PF(oct)) {
2513 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2514 		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2515 	} else {
2516 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2517 		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2518 	}
2519 
2520 	if (skb_shinfo(skb)->gso_size) {
2521 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2522 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2523 		stats->tx_gso++;
2524 	}
2525 
2526 	/* HW insert VLAN tag */
2527 	if (skb_vlan_tag_present(skb)) {
2528 		irh->priority = skb_vlan_tag_get(skb) >> 13;
2529 		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2530 	}
2531 
2532 	xmit_more = skb->xmit_more;
2533 
2534 	if (unlikely(cmdsetup.s.timestamp))
2535 		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2536 	else
2537 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2538 	if (status == IQ_SEND_FAILED)
2539 		goto lio_xmit_failed;
2540 
2541 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2542 
2543 	if (status == IQ_SEND_STOP)
2544 		netif_stop_subqueue(netdev, q_idx);
2545 
2546 	netif_trans_update(netdev);
2547 
2548 	if (tx_info->s.gso_segs)
2549 		stats->tx_done += tx_info->s.gso_segs;
2550 	else
2551 		stats->tx_done++;
2552 	stats->tx_tot_bytes += ndata.datasize;
2553 
2554 	return NETDEV_TX_OK;
2555 
2556 lio_xmit_failed:
2557 	stats->tx_dropped++;
2558 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2559 		   iq_no, stats->tx_dropped);
2560 	if (dptr)
2561 		dma_unmap_single(&oct->pci_dev->dev, dptr,
2562 				 ndata.datasize, DMA_TO_DEVICE);
2563 
2564 	octeon_ring_doorbell_locked(oct, iq_no);
2565 
2566 	tx_buffer_free(skb);
2567 	return NETDEV_TX_OK;
2568 }
2569 
2570 /** \brief Network device Tx timeout
2571  * @param netdev    pointer to network device
2572  */
2573 static void liquidio_tx_timeout(struct net_device *netdev)
2574 {
2575 	struct lio *lio;
2576 
2577 	lio = GET_LIO(netdev);
2578 
2579 	netif_info(lio, tx_err, lio->netdev,
2580 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2581 		   netdev->stats.tx_dropped);
2582 	netif_trans_update(netdev);
2583 	wake_txqs(netdev);
2584 }
2585 
2586 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2587 				    __be16 proto __attribute__((unused)),
2588 				    u16 vid)
2589 {
2590 	struct lio *lio = GET_LIO(netdev);
2591 	struct octeon_device *oct = lio->oct_dev;
2592 	struct octnic_ctrl_pkt nctrl;
2593 	int ret = 0;
2594 
2595 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2596 
2597 	nctrl.ncmd.u64 = 0;
2598 	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2599 	nctrl.ncmd.s.param1 = vid;
2600 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2601 	nctrl.wait_time = 100;
2602 	nctrl.netpndev = (u64)netdev;
2603 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2604 
2605 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2606 	if (ret < 0) {
2607 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2608 			ret);
2609 	}
2610 
2611 	return ret;
2612 }
2613 
2614 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2615 				     __be16 proto __attribute__((unused)),
2616 				     u16 vid)
2617 {
2618 	struct lio *lio = GET_LIO(netdev);
2619 	struct octeon_device *oct = lio->oct_dev;
2620 	struct octnic_ctrl_pkt nctrl;
2621 	int ret = 0;
2622 
2623 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2624 
2625 	nctrl.ncmd.u64 = 0;
2626 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2627 	nctrl.ncmd.s.param1 = vid;
2628 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2629 	nctrl.wait_time = 100;
2630 	nctrl.netpndev = (u64)netdev;
2631 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2632 
2633 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2634 	if (ret < 0) {
2635 		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2636 			ret);
2637 	}
2638 	return ret;
2639 }
2640 
2641 /** Sending command to enable/disable RX checksum offload
2642  * @param netdev                pointer to network device
2643  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
2644  * @param rx_cmd_bit            OCTNET_CMD_RXCSUM_ENABLE/
2645  *                              OCTNET_CMD_RXCSUM_DISABLE
2646  * @returns                     SUCCESS or FAILURE
2647  */
2648 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2649 				       u8 rx_cmd)
2650 {
2651 	struct lio *lio = GET_LIO(netdev);
2652 	struct octeon_device *oct = lio->oct_dev;
2653 	struct octnic_ctrl_pkt nctrl;
2654 	int ret = 0;
2655 
2656 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2657 
2658 	nctrl.ncmd.u64 = 0;
2659 	nctrl.ncmd.s.cmd = command;
2660 	nctrl.ncmd.s.param1 = rx_cmd;
2661 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2662 	nctrl.wait_time = 100;
2663 	nctrl.netpndev = (u64)netdev;
2664 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2665 
2666 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2667 	if (ret < 0) {
2668 		dev_err(&oct->pci_dev->dev,
2669 			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2670 			ret);
2671 	}
2672 	return ret;
2673 }
2674 
2675 /** Sending command to add/delete VxLAN UDP port to firmware
2676  * @param netdev                pointer to network device
2677  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2678  * @param vxlan_port            VxLAN port to be added or deleted
2679  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
2680  *                              OCTNET_CMD_VXLAN_PORT_DEL
2681  * @returns                     SUCCESS or FAILURE
2682  */
2683 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2684 				       u16 vxlan_port, u8 vxlan_cmd_bit)
2685 {
2686 	struct lio *lio = GET_LIO(netdev);
2687 	struct octeon_device *oct = lio->oct_dev;
2688 	struct octnic_ctrl_pkt nctrl;
2689 	int ret = 0;
2690 
2691 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2692 
2693 	nctrl.ncmd.u64 = 0;
2694 	nctrl.ncmd.s.cmd = command;
2695 	nctrl.ncmd.s.more = vxlan_cmd_bit;
2696 	nctrl.ncmd.s.param1 = vxlan_port;
2697 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2698 	nctrl.wait_time = 100;
2699 	nctrl.netpndev = (u64)netdev;
2700 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2701 
2702 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2703 	if (ret < 0) {
2704 		dev_err(&oct->pci_dev->dev,
2705 			"VxLAN port add/delete failed in core (ret:0x%x)\n",
2706 			ret);
2707 	}
2708 	return ret;
2709 }
2710 
2711 /** \brief Net device fix features
2712  * @param netdev  pointer to network device
2713  * @param request features requested
2714  * @returns updated features list
2715  */
2716 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2717 					       netdev_features_t request)
2718 {
2719 	struct lio *lio = netdev_priv(netdev);
2720 
2721 	if ((request & NETIF_F_RXCSUM) &&
2722 	    !(lio->dev_capability & NETIF_F_RXCSUM))
2723 		request &= ~NETIF_F_RXCSUM;
2724 
2725 	if ((request & NETIF_F_HW_CSUM) &&
2726 	    !(lio->dev_capability & NETIF_F_HW_CSUM))
2727 		request &= ~NETIF_F_HW_CSUM;
2728 
2729 	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2730 		request &= ~NETIF_F_TSO;
2731 
2732 	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2733 		request &= ~NETIF_F_TSO6;
2734 
2735 	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2736 		request &= ~NETIF_F_LRO;
2737 
2738 	/*Disable LRO if RXCSUM is off */
2739 	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2740 	    (lio->dev_capability & NETIF_F_LRO))
2741 		request &= ~NETIF_F_LRO;
2742 
2743 	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2744 	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2745 		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2746 
2747 	return request;
2748 }
2749 
2750 /** \brief Net device set features
2751  * @param netdev  pointer to network device
2752  * @param features features to enable/disable
2753  */
2754 static int liquidio_set_features(struct net_device *netdev,
2755 				 netdev_features_t features)
2756 {
2757 	struct lio *lio = netdev_priv(netdev);
2758 
2759 	if ((features & NETIF_F_LRO) &&
2760 	    (lio->dev_capability & NETIF_F_LRO) &&
2761 	    !(netdev->features & NETIF_F_LRO))
2762 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2763 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2764 	else if (!(features & NETIF_F_LRO) &&
2765 		 (lio->dev_capability & NETIF_F_LRO) &&
2766 		 (netdev->features & NETIF_F_LRO))
2767 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2768 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2769 
2770 	/* Sending command to firmware to enable/disable RX checksum
2771 	 * offload settings using ethtool
2772 	 */
2773 	if (!(netdev->features & NETIF_F_RXCSUM) &&
2774 	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2775 	    (features & NETIF_F_RXCSUM))
2776 		liquidio_set_rxcsum_command(netdev,
2777 					    OCTNET_CMD_TNL_RX_CSUM_CTL,
2778 					    OCTNET_CMD_RXCSUM_ENABLE);
2779 	else if ((netdev->features & NETIF_F_RXCSUM) &&
2780 		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2781 		 !(features & NETIF_F_RXCSUM))
2782 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2783 					    OCTNET_CMD_RXCSUM_DISABLE);
2784 
2785 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2786 	    (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2787 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2788 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2789 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
2790 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2791 		 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2792 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2793 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2794 				     OCTNET_CMD_VLAN_FILTER_DISABLE);
2795 
2796 	return 0;
2797 }
2798 
2799 static void liquidio_add_vxlan_port(struct net_device *netdev,
2800 				    struct udp_tunnel_info *ti)
2801 {
2802 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2803 		return;
2804 
2805 	liquidio_vxlan_port_command(netdev,
2806 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2807 				    htons(ti->port),
2808 				    OCTNET_CMD_VXLAN_PORT_ADD);
2809 }
2810 
2811 static void liquidio_del_vxlan_port(struct net_device *netdev,
2812 				    struct udp_tunnel_info *ti)
2813 {
2814 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2815 		return;
2816 
2817 	liquidio_vxlan_port_command(netdev,
2818 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2819 				    htons(ti->port),
2820 				    OCTNET_CMD_VXLAN_PORT_DEL);
2821 }
2822 
2823 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2824 				 u8 *mac, bool is_admin_assigned)
2825 {
2826 	struct lio *lio = GET_LIO(netdev);
2827 	struct octeon_device *oct = lio->oct_dev;
2828 	struct octnic_ctrl_pkt nctrl;
2829 
2830 	if (!is_valid_ether_addr(mac))
2831 		return -EINVAL;
2832 
2833 	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2834 		return -EINVAL;
2835 
2836 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2837 
2838 	nctrl.ncmd.u64 = 0;
2839 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2840 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
2841 	nctrl.ncmd.s.param1 = vfidx + 1;
2842 	nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
2843 	nctrl.ncmd.s.more = 1;
2844 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2845 	nctrl.netpndev = (u64)netdev;
2846 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2847 	nctrl.wait_time = LIO_CMD_WAIT_TM;
2848 
2849 	nctrl.udd[0] = 0;
2850 	/* The MAC Address is presented in network byte order. */
2851 	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2852 
2853 	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2854 
2855 	octnet_send_nic_ctrl_pkt(oct, &nctrl);
2856 
2857 	return 0;
2858 }
2859 
2860 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2861 {
2862 	struct lio *lio = GET_LIO(netdev);
2863 	struct octeon_device *oct = lio->oct_dev;
2864 	int retval;
2865 
2866 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2867 		return -EINVAL;
2868 
2869 	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2870 	if (!retval)
2871 		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2872 
2873 	return retval;
2874 }
2875 
2876 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2877 				u16 vlan, u8 qos, __be16 vlan_proto)
2878 {
2879 	struct lio *lio = GET_LIO(netdev);
2880 	struct octeon_device *oct = lio->oct_dev;
2881 	struct octnic_ctrl_pkt nctrl;
2882 	u16 vlantci;
2883 
2884 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2885 		return -EINVAL;
2886 
2887 	if (vlan_proto != htons(ETH_P_8021Q))
2888 		return -EPROTONOSUPPORT;
2889 
2890 	if (vlan >= VLAN_N_VID || qos > 7)
2891 		return -EINVAL;
2892 
2893 	if (vlan)
2894 		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2895 	else
2896 		vlantci = 0;
2897 
2898 	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2899 		return 0;
2900 
2901 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2902 
2903 	if (vlan)
2904 		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2905 	else
2906 		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2907 
2908 	nctrl.ncmd.s.param1 = vlantci;
2909 	nctrl.ncmd.s.param2 =
2910 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2911 	nctrl.ncmd.s.more = 0;
2912 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2913 	nctrl.cb_fn = NULL;
2914 	nctrl.wait_time = LIO_CMD_WAIT_TM;
2915 
2916 	octnet_send_nic_ctrl_pkt(oct, &nctrl);
2917 
2918 	oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2919 
2920 	return 0;
2921 }
2922 
2923 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2924 				  struct ifla_vf_info *ivi)
2925 {
2926 	struct lio *lio = GET_LIO(netdev);
2927 	struct octeon_device *oct = lio->oct_dev;
2928 	u8 *macaddr;
2929 
2930 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2931 		return -EINVAL;
2932 
2933 	ivi->vf = vfidx;
2934 	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2935 	ether_addr_copy(&ivi->mac[0], macaddr);
2936 	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2937 	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2938 	if (oct->sriov_info.trusted_vf.active &&
2939 	    oct->sriov_info.trusted_vf.id == vfidx)
2940 		ivi->trusted = true;
2941 	else
2942 		ivi->trusted = false;
2943 	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2944 	return 0;
2945 }
2946 
2947 static void trusted_vf_callback(struct octeon_device *oct_dev,
2948 				u32 status, void *ptr)
2949 {
2950 	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
2951 	struct lio_trusted_vf_ctx *ctx;
2952 
2953 	ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
2954 	ctx->status = status;
2955 
2956 	complete(&ctx->complete);
2957 }
2958 
2959 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
2960 {
2961 	struct octeon_device *oct = lio->oct_dev;
2962 	struct lio_trusted_vf_ctx *ctx;
2963 	struct octeon_soft_command *sc;
2964 	int ctx_size, retval;
2965 
2966 	ctx_size = sizeof(struct lio_trusted_vf_ctx);
2967 	sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size);
2968 
2969 	ctx  = (struct lio_trusted_vf_ctx *)sc->ctxptr;
2970 	init_completion(&ctx->complete);
2971 
2972 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2973 
2974 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
2975 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
2976 				    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
2977 				    trusted);
2978 
2979 	sc->callback = trusted_vf_callback;
2980 	sc->callback_arg = sc;
2981 	sc->wait_time = 1000;
2982 
2983 	retval = octeon_send_soft_command(oct, sc);
2984 	if (retval == IQ_SEND_FAILED) {
2985 		retval = -1;
2986 	} else {
2987 		/* Wait for response or timeout */
2988 		if (wait_for_completion_timeout(&ctx->complete,
2989 						msecs_to_jiffies(2000)))
2990 			retval = ctx->status;
2991 		else
2992 			retval = -1;
2993 	}
2994 
2995 	octeon_free_soft_command(oct, sc);
2996 
2997 	return retval;
2998 }
2999 
3000 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3001 				 bool setting)
3002 {
3003 	struct lio *lio = GET_LIO(netdev);
3004 	struct octeon_device *oct = lio->oct_dev;
3005 
3006 	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3007 		/* trusted vf is not supported by firmware older than 1.7.1 */
3008 		return -EOPNOTSUPP;
3009 	}
3010 
3011 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3012 		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3013 		return -EINVAL;
3014 	}
3015 
3016 	if (setting) {
3017 		/* Set */
3018 
3019 		if (oct->sriov_info.trusted_vf.active &&
3020 		    oct->sriov_info.trusted_vf.id == vfidx)
3021 			return 0;
3022 
3023 		if (oct->sriov_info.trusted_vf.active) {
3024 			netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3025 			return -EPERM;
3026 		}
3027 	} else {
3028 		/* Clear */
3029 
3030 		if (!oct->sriov_info.trusted_vf.active)
3031 			return 0;
3032 	}
3033 
3034 	if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3035 		if (setting) {
3036 			oct->sriov_info.trusted_vf.id = vfidx;
3037 			oct->sriov_info.trusted_vf.active = true;
3038 		} else {
3039 			oct->sriov_info.trusted_vf.active = false;
3040 		}
3041 
3042 		netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3043 			   setting ? "" : "not ");
3044 	} else {
3045 		netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3046 		return -1;
3047 	}
3048 
3049 	return 0;
3050 }
3051 
3052 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3053 				      int linkstate)
3054 {
3055 	struct lio *lio = GET_LIO(netdev);
3056 	struct octeon_device *oct = lio->oct_dev;
3057 	struct octnic_ctrl_pkt nctrl;
3058 
3059 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3060 		return -EINVAL;
3061 
3062 	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3063 		return 0;
3064 
3065 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3066 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3067 	nctrl.ncmd.s.param1 =
3068 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3069 	nctrl.ncmd.s.param2 = linkstate;
3070 	nctrl.ncmd.s.more = 0;
3071 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3072 	nctrl.cb_fn = NULL;
3073 	nctrl.wait_time = LIO_CMD_WAIT_TM;
3074 
3075 	octnet_send_nic_ctrl_pkt(oct, &nctrl);
3076 
3077 	oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3078 
3079 	return 0;
3080 }
3081 
3082 static int
3083 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3084 {
3085 	struct lio_devlink_priv *priv;
3086 	struct octeon_device *oct;
3087 
3088 	priv = devlink_priv(devlink);
3089 	oct = priv->oct;
3090 
3091 	*mode = oct->eswitch_mode;
3092 
3093 	return 0;
3094 }
3095 
3096 static int
3097 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
3098 {
3099 	struct lio_devlink_priv *priv;
3100 	struct octeon_device *oct;
3101 	int ret = 0;
3102 
3103 	priv = devlink_priv(devlink);
3104 	oct = priv->oct;
3105 
3106 	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3107 		return -EINVAL;
3108 
3109 	if (oct->eswitch_mode == mode)
3110 		return 0;
3111 
3112 	switch (mode) {
3113 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3114 		oct->eswitch_mode = mode;
3115 		ret = lio_vf_rep_create(oct);
3116 		break;
3117 
3118 	case DEVLINK_ESWITCH_MODE_LEGACY:
3119 		lio_vf_rep_destroy(oct);
3120 		oct->eswitch_mode = mode;
3121 		break;
3122 
3123 	default:
3124 		ret = -EINVAL;
3125 	}
3126 
3127 	return ret;
3128 }
3129 
3130 static const struct devlink_ops liquidio_devlink_ops = {
3131 	.eswitch_mode_get = liquidio_eswitch_mode_get,
3132 	.eswitch_mode_set = liquidio_eswitch_mode_set,
3133 };
3134 
3135 static int
3136 lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
3137 {
3138 	struct lio *lio = GET_LIO(dev);
3139 	struct octeon_device *oct = lio->oct_dev;
3140 
3141 	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3142 		return -EOPNOTSUPP;
3143 
3144 	switch (attr->id) {
3145 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
3146 		attr->u.ppid.id_len = ETH_ALEN;
3147 		ether_addr_copy(attr->u.ppid.id,
3148 				(void *)&lio->linfo.hw_addr + 2);
3149 		break;
3150 
3151 	default:
3152 		return -EOPNOTSUPP;
3153 	}
3154 
3155 	return 0;
3156 }
3157 
3158 static const struct switchdev_ops lio_pf_switchdev_ops = {
3159 	.switchdev_port_attr_get = lio_pf_switchdev_attr_get,
3160 };
3161 
3162 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3163 				 struct ifla_vf_stats *vf_stats)
3164 {
3165 	struct lio *lio = GET_LIO(netdev);
3166 	struct octeon_device *oct = lio->oct_dev;
3167 	struct oct_vf_stats stats;
3168 	int ret;
3169 
3170 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3171 		return -EINVAL;
3172 
3173 	memset(&stats, 0, sizeof(struct oct_vf_stats));
3174 	ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3175 	if (!ret) {
3176 		vf_stats->rx_packets = stats.rx_packets;
3177 		vf_stats->tx_packets = stats.tx_packets;
3178 		vf_stats->rx_bytes = stats.rx_bytes;
3179 		vf_stats->tx_bytes = stats.tx_bytes;
3180 		vf_stats->broadcast = stats.broadcast;
3181 		vf_stats->multicast = stats.multicast;
3182 	}
3183 
3184 	return ret;
3185 }
3186 
3187 static const struct net_device_ops lionetdevops = {
3188 	.ndo_open		= liquidio_open,
3189 	.ndo_stop		= liquidio_stop,
3190 	.ndo_start_xmit		= liquidio_xmit,
3191 	.ndo_get_stats64	= liquidio_get_stats64,
3192 	.ndo_set_mac_address	= liquidio_set_mac,
3193 	.ndo_set_rx_mode	= liquidio_set_mcast_list,
3194 	.ndo_tx_timeout		= liquidio_tx_timeout,
3195 
3196 	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3197 	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3198 	.ndo_change_mtu		= liquidio_change_mtu,
3199 	.ndo_do_ioctl		= liquidio_ioctl,
3200 	.ndo_fix_features	= liquidio_fix_features,
3201 	.ndo_set_features	= liquidio_set_features,
3202 	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
3203 	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
3204 	.ndo_set_vf_mac		= liquidio_set_vf_mac,
3205 	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
3206 	.ndo_get_vf_config	= liquidio_get_vf_config,
3207 	.ndo_set_vf_trust	= liquidio_set_vf_trust,
3208 	.ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3209 	.ndo_get_vf_stats	= liquidio_get_vf_stats,
3210 };
3211 
3212 /** \brief Entry point for the liquidio module
3213  */
3214 static int __init liquidio_init(void)
3215 {
3216 	int i;
3217 	struct handshake *hs;
3218 
3219 	init_completion(&first_stage);
3220 
3221 	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3222 
3223 	if (liquidio_init_pci())
3224 		return -EINVAL;
3225 
3226 	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3227 
3228 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3229 		hs = &handshake[i];
3230 		if (hs->pci_dev) {
3231 			wait_for_completion(&hs->init);
3232 			if (!hs->init_ok) {
3233 				/* init handshake failed */
3234 				dev_err(&hs->pci_dev->dev,
3235 					"Failed to init device\n");
3236 				liquidio_deinit_pci();
3237 				return -EIO;
3238 			}
3239 		}
3240 	}
3241 
3242 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3243 		hs = &handshake[i];
3244 		if (hs->pci_dev) {
3245 			wait_for_completion_timeout(&hs->started,
3246 						    msecs_to_jiffies(30000));
3247 			if (!hs->started_ok) {
3248 				/* starter handshake failed */
3249 				dev_err(&hs->pci_dev->dev,
3250 					"Firmware failed to start\n");
3251 				liquidio_deinit_pci();
3252 				return -EIO;
3253 			}
3254 		}
3255 	}
3256 
3257 	return 0;
3258 }
3259 
3260 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3261 {
3262 	struct octeon_device *oct = (struct octeon_device *)buf;
3263 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3264 	int gmxport = 0;
3265 	union oct_link_status *ls;
3266 	int i;
3267 
3268 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3269 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3270 			recv_pkt->buffer_size[0],
3271 			recv_pkt->rh.r_nic_info.gmxport);
3272 		goto nic_info_err;
3273 	}
3274 
3275 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
3276 	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3277 		OCT_DROQ_INFO_SIZE);
3278 
3279 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3280 	for (i = 0; i < oct->ifcount; i++) {
3281 		if (oct->props[i].gmxport == gmxport) {
3282 			update_link_status(oct->props[i].netdev, ls);
3283 			break;
3284 		}
3285 	}
3286 
3287 nic_info_err:
3288 	for (i = 0; i < recv_pkt->buffer_count; i++)
3289 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3290 	octeon_free_recv_info(recv_info);
3291 	return 0;
3292 }
3293 
3294 /**
3295  * \brief Setup network interfaces
3296  * @param octeon_dev  octeon device
3297  *
3298  * Called during init time for each device. It assumes the NIC
3299  * is already up and running.  The link information for each
3300  * interface is passed in link_info.
3301  */
3302 static int setup_nic_devices(struct octeon_device *octeon_dev)
3303 {
3304 	struct lio *lio = NULL;
3305 	struct net_device *netdev;
3306 	u8 mac[6], i, j, *fw_ver, *micro_ver;
3307 	unsigned long micro;
3308 	u32 cur_ver;
3309 	struct octeon_soft_command *sc;
3310 	struct liquidio_if_cfg_context *ctx;
3311 	struct liquidio_if_cfg_resp *resp;
3312 	struct octdev_props *props;
3313 	int retval, num_iqueues, num_oqueues;
3314 	int max_num_queues = 0;
3315 	union oct_nic_if_cfg if_cfg;
3316 	unsigned int base_queue;
3317 	unsigned int gmx_port_id;
3318 	u32 resp_size, ctx_size, data_size;
3319 	u32 ifidx_or_pfnum;
3320 	struct lio_version *vdata;
3321 	struct devlink *devlink;
3322 	struct lio_devlink_priv *lio_devlink;
3323 
3324 	/* This is to handle link status changes */
3325 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3326 				    OPCODE_NIC_INFO,
3327 				    lio_nic_info, octeon_dev);
3328 
3329 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3330 	 * They are handled directly.
3331 	 */
3332 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3333 					free_netbuf);
3334 
3335 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3336 					free_netsgbuf);
3337 
3338 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3339 					free_netsgbuf_with_resp);
3340 
3341 	for (i = 0; i < octeon_dev->ifcount; i++) {
3342 		resp_size = sizeof(struct liquidio_if_cfg_resp);
3343 		ctx_size = sizeof(struct liquidio_if_cfg_context);
3344 		data_size = sizeof(struct lio_version);
3345 		sc = (struct octeon_soft_command *)
3346 			octeon_alloc_soft_command(octeon_dev, data_size,
3347 						  resp_size, ctx_size);
3348 		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3349 		ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
3350 		vdata = (struct lio_version *)sc->virtdptr;
3351 
3352 		*((u64 *)vdata) = 0;
3353 		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3354 		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3355 		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3356 
3357 		if (OCTEON_CN23XX_PF(octeon_dev)) {
3358 			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3359 			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3360 			base_queue = octeon_dev->sriov_info.pf_srn;
3361 
3362 			gmx_port_id = octeon_dev->pf_num;
3363 			ifidx_or_pfnum = octeon_dev->pf_num;
3364 		} else {
3365 			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3366 						octeon_get_conf(octeon_dev), i);
3367 			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3368 						octeon_get_conf(octeon_dev), i);
3369 			base_queue = CFG_GET_BASE_QUE_NIC_IF(
3370 						octeon_get_conf(octeon_dev), i);
3371 			gmx_port_id = CFG_GET_GMXID_NIC_IF(
3372 						octeon_get_conf(octeon_dev), i);
3373 			ifidx_or_pfnum = i;
3374 		}
3375 
3376 		dev_dbg(&octeon_dev->pci_dev->dev,
3377 			"requesting config for interface %d, iqs %d, oqs %d\n",
3378 			ifidx_or_pfnum, num_iqueues, num_oqueues);
3379 		WRITE_ONCE(ctx->cond, 0);
3380 		ctx->octeon_id = lio_get_device_id(octeon_dev);
3381 		init_waitqueue_head(&ctx->wc);
3382 
3383 		if_cfg.u64 = 0;
3384 		if_cfg.s.num_iqueues = num_iqueues;
3385 		if_cfg.s.num_oqueues = num_oqueues;
3386 		if_cfg.s.base_queue = base_queue;
3387 		if_cfg.s.gmx_port_id = gmx_port_id;
3388 
3389 		sc->iq_no = 0;
3390 
3391 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3392 					    OPCODE_NIC_IF_CFG, 0,
3393 					    if_cfg.u64, 0);
3394 
3395 		sc->callback = lio_if_cfg_callback;
3396 		sc->callback_arg = sc;
3397 		sc->wait_time = LIO_IFCFG_WAIT_TIME;
3398 
3399 		retval = octeon_send_soft_command(octeon_dev, sc);
3400 		if (retval == IQ_SEND_FAILED) {
3401 			dev_err(&octeon_dev->pci_dev->dev,
3402 				"iq/oq config failed status: %x\n",
3403 				retval);
3404 			/* Soft instr is freed by driver in case of failure. */
3405 			goto setup_nic_dev_fail;
3406 		}
3407 
3408 		/* Sleep on a wait queue till the cond flag indicates that the
3409 		 * response arrived or timed-out.
3410 		 */
3411 		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
3412 			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
3413 			goto setup_nic_wait_intr;
3414 		}
3415 
3416 		retval = resp->status;
3417 		if (retval) {
3418 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3419 			goto setup_nic_dev_fail;
3420 		}
3421 
3422 		/* Verify f/w version (in case of 'auto' loading from flash) */
3423 		fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3424 		if (memcmp(LIQUIDIO_BASE_VERSION,
3425 			   fw_ver,
3426 			   strlen(LIQUIDIO_BASE_VERSION))) {
3427 			dev_err(&octeon_dev->pci_dev->dev,
3428 				"Unmatched firmware version. Expected %s.x, got %s.\n",
3429 				LIQUIDIO_BASE_VERSION, fw_ver);
3430 			goto setup_nic_dev_fail;
3431 		} else if (atomic_read(octeon_dev->adapter_fw_state) ==
3432 			   FW_IS_PRELOADED) {
3433 			dev_info(&octeon_dev->pci_dev->dev,
3434 				 "Using auto-loaded firmware version %s.\n",
3435 				 fw_ver);
3436 		}
3437 
3438 		/* extract micro version field; point past '<maj>.<min>.' */
3439 		micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3440 		if (kstrtoul(micro_ver, 10, &micro) != 0)
3441 			micro = 0;
3442 		octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3443 		octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3444 		octeon_dev->fw_info.ver.rev = micro;
3445 
3446 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3447 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3448 
3449 		num_iqueues = hweight64(resp->cfg_info.iqmask);
3450 		num_oqueues = hweight64(resp->cfg_info.oqmask);
3451 
3452 		if (!(num_iqueues) || !(num_oqueues)) {
3453 			dev_err(&octeon_dev->pci_dev->dev,
3454 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3455 				resp->cfg_info.iqmask,
3456 				resp->cfg_info.oqmask);
3457 			goto setup_nic_dev_fail;
3458 		}
3459 
3460 		if (OCTEON_CN6XXX(octeon_dev)) {
3461 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3462 								    cn6xxx));
3463 		} else if (OCTEON_CN23XX_PF(octeon_dev)) {
3464 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3465 								    cn23xx_pf));
3466 		}
3467 
3468 		dev_dbg(&octeon_dev->pci_dev->dev,
3469 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3470 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3471 			num_iqueues, num_oqueues, max_num_queues);
3472 		netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3473 
3474 		if (!netdev) {
3475 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3476 			goto setup_nic_dev_fail;
3477 		}
3478 
3479 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3480 
3481 		/* Associate the routines that will handle different
3482 		 * netdev tasks.
3483 		 */
3484 		netdev->netdev_ops = &lionetdevops;
3485 		SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
3486 
3487 		retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3488 		if (retval) {
3489 			dev_err(&octeon_dev->pci_dev->dev,
3490 				"setting real number rx failed\n");
3491 			goto setup_nic_dev_fail;
3492 		}
3493 
3494 		retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3495 		if (retval) {
3496 			dev_err(&octeon_dev->pci_dev->dev,
3497 				"setting real number tx failed\n");
3498 			goto setup_nic_dev_fail;
3499 		}
3500 
3501 		lio = GET_LIO(netdev);
3502 
3503 		memset(lio, 0, sizeof(struct lio));
3504 
3505 		lio->ifidx = ifidx_or_pfnum;
3506 
3507 		props = &octeon_dev->props[i];
3508 		props->gmxport = resp->cfg_info.linfo.gmxport;
3509 		props->netdev = netdev;
3510 
3511 		lio->linfo.num_rxpciq = num_oqueues;
3512 		lio->linfo.num_txpciq = num_iqueues;
3513 		for (j = 0; j < num_oqueues; j++) {
3514 			lio->linfo.rxpciq[j].u64 =
3515 				resp->cfg_info.linfo.rxpciq[j].u64;
3516 		}
3517 		for (j = 0; j < num_iqueues; j++) {
3518 			lio->linfo.txpciq[j].u64 =
3519 				resp->cfg_info.linfo.txpciq[j].u64;
3520 		}
3521 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3522 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3523 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3524 
3525 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3526 
3527 		if (OCTEON_CN23XX_PF(octeon_dev) ||
3528 		    OCTEON_CN6XXX(octeon_dev)) {
3529 			lio->dev_capability = NETIF_F_HIGHDMA
3530 					      | NETIF_F_IP_CSUM
3531 					      | NETIF_F_IPV6_CSUM
3532 					      | NETIF_F_SG | NETIF_F_RXCSUM
3533 					      | NETIF_F_GRO
3534 					      | NETIF_F_TSO | NETIF_F_TSO6
3535 					      | NETIF_F_LRO;
3536 		}
3537 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3538 
3539 		/*  Copy of transmit encapsulation capabilities:
3540 		 *  TSO, TSO6, Checksums for this device
3541 		 */
3542 		lio->enc_dev_capability = NETIF_F_IP_CSUM
3543 					  | NETIF_F_IPV6_CSUM
3544 					  | NETIF_F_GSO_UDP_TUNNEL
3545 					  | NETIF_F_HW_CSUM | NETIF_F_SG
3546 					  | NETIF_F_RXCSUM
3547 					  | NETIF_F_TSO | NETIF_F_TSO6
3548 					  | NETIF_F_LRO;
3549 
3550 		netdev->hw_enc_features = (lio->enc_dev_capability &
3551 					   ~NETIF_F_LRO);
3552 
3553 		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3554 
3555 		netdev->vlan_features = lio->dev_capability;
3556 		/* Add any unchangeable hw features */
3557 		lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3558 					NETIF_F_HW_VLAN_CTAG_RX |
3559 					NETIF_F_HW_VLAN_CTAG_TX;
3560 
3561 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3562 
3563 		netdev->hw_features = lio->dev_capability;
3564 		/*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
3565 		netdev->hw_features = netdev->hw_features &
3566 			~NETIF_F_HW_VLAN_CTAG_RX;
3567 
3568 		/* MTU range: 68 - 16000 */
3569 		netdev->min_mtu = LIO_MIN_MTU_SIZE;
3570 		netdev->max_mtu = LIO_MAX_MTU_SIZE;
3571 
3572 		/* Point to the  properties for octeon device to which this
3573 		 * interface belongs.
3574 		 */
3575 		lio->oct_dev = octeon_dev;
3576 		lio->octprops = props;
3577 		lio->netdev = netdev;
3578 
3579 		dev_dbg(&octeon_dev->pci_dev->dev,
3580 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
3581 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3582 
3583 		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3584 			u8 vfmac[ETH_ALEN];
3585 
3586 			eth_random_addr(vfmac);
3587 			if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3588 				dev_err(&octeon_dev->pci_dev->dev,
3589 					"Error setting VF%d MAC address\n",
3590 					j);
3591 				goto setup_nic_dev_fail;
3592 			}
3593 		}
3594 
3595 		/* 64-bit swap required on LE machines */
3596 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3597 		for (j = 0; j < 6; j++)
3598 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3599 
3600 		/* Copy MAC Address to OS network device structure */
3601 
3602 		ether_addr_copy(netdev->dev_addr, mac);
3603 
3604 		/* By default all interfaces on a single Octeon uses the same
3605 		 * tx and rx queues
3606 		 */
3607 		lio->txq = lio->linfo.txpciq[0].s.q_no;
3608 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3609 		if (liquidio_setup_io_queues(octeon_dev, i,
3610 					     lio->linfo.num_txpciq,
3611 					     lio->linfo.num_rxpciq)) {
3612 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3613 			goto setup_nic_dev_fail;
3614 		}
3615 
3616 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3617 
3618 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3619 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3620 
3621 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3622 			dev_err(&octeon_dev->pci_dev->dev,
3623 				"Gather list allocation failed\n");
3624 			goto setup_nic_dev_fail;
3625 		}
3626 
3627 		/* Register ethtool support */
3628 		liquidio_set_ethtool_ops(netdev);
3629 		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3630 			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3631 		else
3632 			octeon_dev->priv_flags = 0x0;
3633 
3634 		if (netdev->features & NETIF_F_LRO)
3635 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3636 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3637 
3638 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3639 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
3640 
3641 		if ((debug != -1) && (debug & NETIF_MSG_HW))
3642 			liquidio_set_feature(netdev,
3643 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
3644 
3645 		if (setup_link_status_change_wq(netdev))
3646 			goto setup_nic_dev_fail;
3647 
3648 		if ((octeon_dev->fw_info.app_cap_flags &
3649 		     LIQUIDIO_TIME_SYNC_CAP) &&
3650 		    setup_sync_octeon_time_wq(netdev))
3651 			goto setup_nic_dev_fail;
3652 
3653 		if (setup_rx_oom_poll_fn(netdev))
3654 			goto setup_nic_dev_fail;
3655 
3656 		/* Register the network device with the OS */
3657 		if (register_netdev(netdev)) {
3658 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3659 			goto setup_nic_dev_fail;
3660 		}
3661 
3662 		dev_dbg(&octeon_dev->pci_dev->dev,
3663 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3664 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3665 		netif_carrier_off(netdev);
3666 		lio->link_changes++;
3667 
3668 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3669 
3670 		/* Sending command to firmware to enable Rx checksum offload
3671 		 * by default at the time of setup of Liquidio driver for
3672 		 * this device
3673 		 */
3674 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3675 					    OCTNET_CMD_RXCSUM_ENABLE);
3676 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3677 				     OCTNET_CMD_TXCSUM_ENABLE);
3678 
3679 		dev_dbg(&octeon_dev->pci_dev->dev,
3680 			"NIC ifidx:%d Setup successful\n", i);
3681 
3682 		octeon_free_soft_command(octeon_dev, sc);
3683 
3684 		if (octeon_dev->subsystem_id ==
3685 			OCTEON_CN2350_25GB_SUBSYS_ID ||
3686 		    octeon_dev->subsystem_id ==
3687 			OCTEON_CN2360_25GB_SUBSYS_ID) {
3688 			cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3689 					     octeon_dev->fw_info.ver.min,
3690 					     octeon_dev->fw_info.ver.rev);
3691 
3692 			/* speed control unsupported in f/w older than 1.7.2 */
3693 			if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3694 				dev_info(&octeon_dev->pci_dev->dev,
3695 					 "speed setting not supported by f/w.");
3696 				octeon_dev->speed_setting = 25;
3697 				octeon_dev->no_speed_setting = 1;
3698 			} else {
3699 				liquidio_get_speed(lio);
3700 			}
3701 
3702 			if (octeon_dev->speed_setting == 0) {
3703 				octeon_dev->speed_setting = 25;
3704 				octeon_dev->no_speed_setting = 1;
3705 			}
3706 		} else {
3707 			octeon_dev->no_speed_setting = 1;
3708 			octeon_dev->speed_setting = 10;
3709 		}
3710 		octeon_dev->speed_boot = octeon_dev->speed_setting;
3711 
3712 	}
3713 
3714 	devlink = devlink_alloc(&liquidio_devlink_ops,
3715 				sizeof(struct lio_devlink_priv));
3716 	if (!devlink) {
3717 		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3718 		goto setup_nic_wait_intr;
3719 	}
3720 
3721 	lio_devlink = devlink_priv(devlink);
3722 	lio_devlink->oct = octeon_dev;
3723 
3724 	if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3725 		devlink_free(devlink);
3726 		dev_err(&octeon_dev->pci_dev->dev,
3727 			"devlink registration failed\n");
3728 		goto setup_nic_wait_intr;
3729 	}
3730 
3731 	octeon_dev->devlink = devlink;
3732 	octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3733 
3734 	return 0;
3735 
3736 setup_nic_dev_fail:
3737 
3738 	octeon_free_soft_command(octeon_dev, sc);
3739 
3740 setup_nic_wait_intr:
3741 
3742 	while (i--) {
3743 		dev_err(&octeon_dev->pci_dev->dev,
3744 			"NIC ifidx:%d Setup failed\n", i);
3745 		liquidio_destroy_nic_device(octeon_dev, i);
3746 	}
3747 	return -ENODEV;
3748 }
3749 
3750 #ifdef CONFIG_PCI_IOV
3751 static int octeon_enable_sriov(struct octeon_device *oct)
3752 {
3753 	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3754 	struct pci_dev *vfdev;
3755 	int err;
3756 	u32 u;
3757 
3758 	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3759 		err = pci_enable_sriov(oct->pci_dev,
3760 				       oct->sriov_info.num_vfs_alloced);
3761 		if (err) {
3762 			dev_err(&oct->pci_dev->dev,
3763 				"OCTEON: Failed to enable PCI sriov: %d\n",
3764 				err);
3765 			oct->sriov_info.num_vfs_alloced = 0;
3766 			return err;
3767 		}
3768 		oct->sriov_info.sriov_enabled = 1;
3769 
3770 		/* init lookup table that maps DPI ring number to VF pci_dev
3771 		 * struct pointer
3772 		 */
3773 		u = 0;
3774 		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3775 				       OCTEON_CN23XX_VF_VID, NULL);
3776 		while (vfdev) {
3777 			if (vfdev->is_virtfn &&
3778 			    (vfdev->physfn == oct->pci_dev)) {
3779 				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3780 					vfdev;
3781 				u += oct->sriov_info.rings_per_vf;
3782 			}
3783 			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3784 					       OCTEON_CN23XX_VF_VID, vfdev);
3785 		}
3786 	}
3787 
3788 	return num_vfs_alloced;
3789 }
3790 
3791 static int lio_pci_sriov_disable(struct octeon_device *oct)
3792 {
3793 	int u;
3794 
3795 	if (pci_vfs_assigned(oct->pci_dev)) {
3796 		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3797 		return -EPERM;
3798 	}
3799 
3800 	pci_disable_sriov(oct->pci_dev);
3801 
3802 	u = 0;
3803 	while (u < MAX_POSSIBLE_VFS) {
3804 		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3805 		u += oct->sriov_info.rings_per_vf;
3806 	}
3807 
3808 	oct->sriov_info.num_vfs_alloced = 0;
3809 	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3810 		 oct->pf_num);
3811 
3812 	return 0;
3813 }
3814 
3815 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3816 {
3817 	struct octeon_device *oct = pci_get_drvdata(dev);
3818 	int ret = 0;
3819 
3820 	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3821 	    (oct->sriov_info.sriov_enabled)) {
3822 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3823 			 oct->pf_num, num_vfs);
3824 		return 0;
3825 	}
3826 
3827 	if (!num_vfs) {
3828 		lio_vf_rep_destroy(oct);
3829 		ret = lio_pci_sriov_disable(oct);
3830 	} else if (num_vfs > oct->sriov_info.max_vfs) {
3831 		dev_err(&oct->pci_dev->dev,
3832 			"OCTEON: Max allowed VFs:%d user requested:%d",
3833 			oct->sriov_info.max_vfs, num_vfs);
3834 		ret = -EPERM;
3835 	} else {
3836 		oct->sriov_info.num_vfs_alloced = num_vfs;
3837 		ret = octeon_enable_sriov(oct);
3838 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3839 			 oct->pf_num, num_vfs);
3840 		ret = lio_vf_rep_create(oct);
3841 		if (ret)
3842 			dev_info(&oct->pci_dev->dev,
3843 				 "vf representor create failed");
3844 	}
3845 
3846 	return ret;
3847 }
3848 #endif
3849 
3850 /**
3851  * \brief initialize the NIC
3852  * @param oct octeon device
3853  *
3854  * This initialization routine is called once the Octeon device application is
3855  * up and running
3856  */
3857 static int liquidio_init_nic_module(struct octeon_device *oct)
3858 {
3859 	int i, retval = 0;
3860 	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3861 
3862 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3863 
3864 	/* only default iq and oq were initialized
3865 	 * initialize the rest as well
3866 	 */
3867 	/* run port_config command for each port */
3868 	oct->ifcount = num_nic_ports;
3869 
3870 	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3871 
3872 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
3873 		oct->props[i].gmxport = -1;
3874 
3875 	retval = setup_nic_devices(oct);
3876 	if (retval) {
3877 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3878 		goto octnet_init_failure;
3879 	}
3880 
3881 	/* Call vf_rep_modinit if the firmware is switchdev capable
3882 	 * and do it from the first liquidio function probed.
3883 	 */
3884 	if (!oct->octeon_id &&
3885 	    oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3886 		retval = lio_vf_rep_modinit();
3887 		if (retval) {
3888 			liquidio_stop_nic_module(oct);
3889 			goto octnet_init_failure;
3890 		}
3891 	}
3892 
3893 	liquidio_ptp_init(oct);
3894 
3895 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3896 
3897 	return retval;
3898 
3899 octnet_init_failure:
3900 
3901 	oct->ifcount = 0;
3902 
3903 	return retval;
3904 }
3905 
3906 /**
3907  * \brief starter callback that invokes the remaining initialization work after
3908  * the NIC is up and running.
3909  * @param octptr  work struct work_struct
3910  */
3911 static void nic_starter(struct work_struct *work)
3912 {
3913 	struct octeon_device *oct;
3914 	struct cavium_wk *wk = (struct cavium_wk *)work;
3915 
3916 	oct = (struct octeon_device *)wk->ctxptr;
3917 
3918 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3919 		return;
3920 
3921 	/* If the status of the device is CORE_OK, the core
3922 	 * application has reported its application type. Call
3923 	 * any registered handlers now and move to the RUNNING
3924 	 * state.
3925 	 */
3926 	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3927 		schedule_delayed_work(&oct->nic_poll_work.work,
3928 				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3929 		return;
3930 	}
3931 
3932 	atomic_set(&oct->status, OCT_DEV_RUNNING);
3933 
3934 	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3935 		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3936 
3937 		if (liquidio_init_nic_module(oct))
3938 			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3939 		else
3940 			handshake[oct->octeon_id].started_ok = 1;
3941 	} else {
3942 		dev_err(&oct->pci_dev->dev,
3943 			"Unexpected application running on NIC (%d). Check firmware.\n",
3944 			oct->app_mode);
3945 	}
3946 
3947 	complete(&handshake[oct->octeon_id].started);
3948 }
3949 
3950 static int
3951 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3952 {
3953 	struct octeon_device *oct = (struct octeon_device *)buf;
3954 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3955 	int i, notice, vf_idx;
3956 	bool cores_crashed;
3957 	u64 *data, vf_num;
3958 
3959 	notice = recv_pkt->rh.r.ossp;
3960 	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3961 
3962 	/* the first 64-bit word of data is the vf_num */
3963 	vf_num = data[0];
3964 	octeon_swap_8B_data(&vf_num, 1);
3965 	vf_idx = (int)vf_num - 1;
3966 
3967 	cores_crashed = READ_ONCE(oct->cores_crashed);
3968 
3969 	if (notice == VF_DRV_LOADED) {
3970 		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
3971 			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
3972 			dev_info(&oct->pci_dev->dev,
3973 				 "driver for VF%d was loaded\n", vf_idx);
3974 			if (!cores_crashed)
3975 				try_module_get(THIS_MODULE);
3976 		}
3977 	} else if (notice == VF_DRV_REMOVED) {
3978 		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
3979 			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
3980 			dev_info(&oct->pci_dev->dev,
3981 				 "driver for VF%d was removed\n", vf_idx);
3982 			if (!cores_crashed)
3983 				module_put(THIS_MODULE);
3984 		}
3985 	} else if (notice == VF_DRV_MACADDR_CHANGED) {
3986 		u8 *b = (u8 *)&data[1];
3987 
3988 		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
3989 		dev_info(&oct->pci_dev->dev,
3990 			 "VF driver changed VF%d's MAC address to %pM\n",
3991 			 vf_idx, b + 2);
3992 	}
3993 
3994 	for (i = 0; i < recv_pkt->buffer_count; i++)
3995 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3996 	octeon_free_recv_info(recv_info);
3997 
3998 	return 0;
3999 }
4000 
4001 /**
4002  * \brief Device initialization for each Octeon device that is probed
4003  * @param octeon_dev  octeon device
4004  */
4005 static int octeon_device_init(struct octeon_device *octeon_dev)
4006 {
4007 	int j, ret;
4008 	char bootcmd[] = "\n";
4009 	char *dbg_enb = NULL;
4010 	enum lio_fw_state fw_state;
4011 	struct octeon_device_priv *oct_priv =
4012 		(struct octeon_device_priv *)octeon_dev->priv;
4013 	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4014 
4015 	/* Enable access to the octeon device and make its DMA capability
4016 	 * known to the OS.
4017 	 */
4018 	if (octeon_pci_os_setup(octeon_dev))
4019 		return 1;
4020 
4021 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4022 
4023 	/* Identify the Octeon type and map the BAR address space. */
4024 	if (octeon_chip_specific_setup(octeon_dev)) {
4025 		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4026 		return 1;
4027 	}
4028 
4029 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4030 
4031 	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4032 	 * since that is what is required for the reference to be removed
4033 	 * during de-initialization (see 'octeon_destroy_resources').
4034 	 */
4035 	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4036 			       PCI_SLOT(octeon_dev->pci_dev->devfn),
4037 			       PCI_FUNC(octeon_dev->pci_dev->devfn),
4038 			       true);
4039 
4040 	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4041 
4042 	/* CN23XX supports preloaded firmware if the following is true:
4043 	 *
4044 	 * The adapter indicates that firmware is currently running AND
4045 	 * 'fw_type' is 'auto'.
4046 	 *
4047 	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4048 	 */
4049 	if (OCTEON_CN23XX_PF(octeon_dev) &&
4050 	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4051 		atomic_cmpxchg(octeon_dev->adapter_fw_state,
4052 			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4053 	}
4054 
4055 	/* If loading firmware, only first device of adapter needs to do so. */
4056 	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4057 				  FW_NEEDS_TO_BE_LOADED,
4058 				  FW_IS_BEING_LOADED);
4059 
4060 	/* Here, [local variable] 'fw_state' is set to one of:
4061 	 *
4062 	 *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4063 	 *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4064 	 *                          firmware to the adapter.
4065 	 *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4066 	 *                          firmware to the adapter.
4067 	 */
4068 
4069 	/* Prior to f/w load, perform a soft reset of the Octeon device;
4070 	 * if error resetting, return w/error.
4071 	 */
4072 	if (fw_state == FW_NEEDS_TO_BE_LOADED)
4073 		if (octeon_dev->fn_list.soft_reset(octeon_dev))
4074 			return 1;
4075 
4076 	/* Initialize the dispatch mechanism used to push packets arriving on
4077 	 * Octeon Output queues.
4078 	 */
4079 	if (octeon_init_dispatch_list(octeon_dev))
4080 		return 1;
4081 
4082 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4083 				    OPCODE_NIC_CORE_DRV_ACTIVE,
4084 				    octeon_core_drv_init,
4085 				    octeon_dev);
4086 
4087 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4088 				    OPCODE_NIC_VF_DRV_NOTICE,
4089 				    octeon_recv_vf_drv_notice, octeon_dev);
4090 	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4091 	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4092 	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4093 			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4094 
4095 	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4096 
4097 	if (octeon_set_io_queues_off(octeon_dev)) {
4098 		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4099 		return 1;
4100 	}
4101 
4102 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4103 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4104 		if (ret) {
4105 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4106 			return ret;
4107 		}
4108 	}
4109 
4110 	/* Initialize soft command buffer pool
4111 	 */
4112 	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4113 		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4114 		return 1;
4115 	}
4116 	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4117 
4118 	/*  Setup the data structures that manage this Octeon's Input queues. */
4119 	if (octeon_setup_instr_queues(octeon_dev)) {
4120 		dev_err(&octeon_dev->pci_dev->dev,
4121 			"instruction queue initialization failed\n");
4122 		return 1;
4123 	}
4124 	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4125 
4126 	/* Initialize lists to manage the requests of different types that
4127 	 * arrive from user & kernel applications for this octeon device.
4128 	 */
4129 	if (octeon_setup_response_list(octeon_dev)) {
4130 		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4131 		return 1;
4132 	}
4133 	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4134 
4135 	if (octeon_setup_output_queues(octeon_dev)) {
4136 		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4137 		return 1;
4138 	}
4139 
4140 	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4141 
4142 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4143 		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4144 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4145 			return 1;
4146 		}
4147 		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4148 
4149 		if (octeon_allocate_ioq_vector
4150 				(octeon_dev,
4151 				 octeon_dev->sriov_info.num_pf_rings)) {
4152 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4153 			return 1;
4154 		}
4155 		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4156 
4157 	} else {
4158 		/* The input and output queue registers were setup earlier (the
4159 		 * queues were not enabled). Any additional registers
4160 		 * that need to be programmed should be done now.
4161 		 */
4162 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4163 		if (ret) {
4164 			dev_err(&octeon_dev->pci_dev->dev,
4165 				"Failed to configure device registers\n");
4166 			return ret;
4167 		}
4168 	}
4169 
4170 	/* Initialize the tasklet that handles output queue packet processing.*/
4171 	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4172 	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4173 		     (unsigned long)octeon_dev);
4174 
4175 	/* Setup the interrupt handler and record the INT SUM register address
4176 	 */
4177 	if (octeon_setup_interrupt(octeon_dev,
4178 				   octeon_dev->sriov_info.num_pf_rings))
4179 		return 1;
4180 
4181 	/* Enable Octeon device interrupts */
4182 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4183 
4184 	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4185 
4186 	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4187 	 * the output queue is enabled.
4188 	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4189 	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4190 	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4191 	 * before any credits have been issued, causing the ring to be reset
4192 	 * (and the f/w appear to never have started).
4193 	 */
4194 	for (j = 0; j < octeon_dev->num_oqs; j++)
4195 		writel(octeon_dev->droq[j]->max_count,
4196 		       octeon_dev->droq[j]->pkts_credit_reg);
4197 
4198 	/* Enable the input and output queues for this Octeon device */
4199 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4200 	if (ret) {
4201 		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4202 		return ret;
4203 	}
4204 
4205 	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4206 
4207 	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4208 		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4209 		if (!ddr_timeout) {
4210 			dev_info(&octeon_dev->pci_dev->dev,
4211 				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4212 		}
4213 
4214 		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4215 
4216 		/* Wait for the octeon to initialize DDR after the soft-reset.*/
4217 		while (!ddr_timeout) {
4218 			set_current_state(TASK_INTERRUPTIBLE);
4219 			if (schedule_timeout(HZ / 10)) {
4220 				/* user probably pressed Control-C */
4221 				return 1;
4222 			}
4223 		}
4224 		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4225 		if (ret) {
4226 			dev_err(&octeon_dev->pci_dev->dev,
4227 				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4228 				ret);
4229 			return 1;
4230 		}
4231 
4232 		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4233 			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4234 			return 1;
4235 		}
4236 
4237 		/* Divert uboot to take commands from host instead. */
4238 		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4239 
4240 		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4241 		ret = octeon_init_consoles(octeon_dev);
4242 		if (ret) {
4243 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4244 			return 1;
4245 		}
4246 		/* If console debug enabled, specify empty string to use default
4247 		 * enablement ELSE specify NULL string for 'disabled'.
4248 		 */
4249 		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4250 		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4251 		if (ret) {
4252 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4253 			return 1;
4254 		} else if (octeon_console_debug_enabled(0)) {
4255 			/* If console was added AND we're logging console output
4256 			 * then set our console print function.
4257 			 */
4258 			octeon_dev->console[0].print = octeon_dbg_console_print;
4259 		}
4260 
4261 		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4262 
4263 		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4264 		ret = load_firmware(octeon_dev);
4265 		if (ret) {
4266 			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4267 			return 1;
4268 		}
4269 
4270 		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4271 	}
4272 
4273 	handshake[octeon_dev->octeon_id].init_ok = 1;
4274 	complete(&handshake[octeon_dev->octeon_id].init);
4275 
4276 	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4277 
4278 	return 0;
4279 }
4280 
4281 /**
4282  * \brief Debug console print function
4283  * @param octeon_dev  octeon device
4284  * @param console_num console number
4285  * @param prefix      first portion of line to display
4286  * @param suffix      second portion of line to display
4287  *
4288  * The OCTEON debug console outputs entire lines (excluding '\n').
4289  * Normally, the line will be passed in the 'prefix' parameter.
4290  * However, due to buffering, it is possible for a line to be split into two
4291  * parts, in which case they will be passed as the 'prefix' parameter and
4292  * 'suffix' parameter.
4293  */
4294 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4295 				    char *prefix, char *suffix)
4296 {
4297 	if (prefix && suffix)
4298 		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4299 			 suffix);
4300 	else if (prefix)
4301 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4302 	else if (suffix)
4303 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4304 
4305 	return 0;
4306 }
4307 
4308 /**
4309  * \brief Exits the module
4310  */
4311 static void __exit liquidio_exit(void)
4312 {
4313 	liquidio_deinit_pci();
4314 
4315 	pr_info("LiquidIO network module is now unloaded\n");
4316 }
4317 
4318 module_init(liquidio_init);
4319 module_exit(liquidio_exit);
4320