1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/firmware.h>
22 #include <net/vxlan.h>
23 #include <linux/kthread.h>
24 #include "liquidio_common.h"
25 #include "octeon_droq.h"
26 #include "octeon_iq.h"
27 #include "response_manager.h"
28 #include "octeon_device.h"
29 #include "octeon_nic.h"
30 #include "octeon_main.h"
31 #include "octeon_network.h"
32 #include "cn66xx_regs.h"
33 #include "cn66xx_device.h"
34 #include "cn68xx_device.h"
35 #include "cn23xx_pf_device.h"
36 #include "liquidio_image.h"
37 #include "lio_vf_rep.h"
38 
39 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
40 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
41 MODULE_LICENSE("GPL");
42 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
43 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
45 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
47 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
49 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
50 
51 static int ddr_timeout = 10000;
52 module_param(ddr_timeout, int, 0644);
53 MODULE_PARM_DESC(ddr_timeout,
54 		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
55 
56 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
57 
58 static int debug = -1;
59 module_param(debug, int, 0644);
60 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
61 
62 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
63 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
64 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
65 
66 static u32 console_bitmask;
67 module_param(console_bitmask, int, 0644);
68 MODULE_PARM_DESC(console_bitmask,
69 		 "Bitmask indicating which consoles have debug output redirected to syslog.");
70 
71 /**
72  * octeon_console_debug_enabled - determines if a given console has debug enabled.
73  * @console: console to check
74  * Return:  1 = enabled. 0 otherwise
75  */
76 static int octeon_console_debug_enabled(u32 console)
77 {
78 	return (console_bitmask >> (console)) & 0x1;
79 }
80 
81 /* Polling interval for determining when NIC application is alive */
82 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
83 
84 /* runtime link query interval */
85 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
86 /* update localtime to octeon firmware every 60 seconds.
87  * make firmware to use same time reference, so that it will be easy to
88  * correlate firmware logged events/errors with host events, for debugging.
89  */
90 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
91 
92 /* time to wait for possible in-flight requests in milliseconds */
93 #define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)
94 
95 struct lio_trusted_vf_ctx {
96 	struct completion complete;
97 	int status;
98 };
99 
100 struct oct_link_status_resp {
101 	u64 rh;
102 	struct oct_link_info link_info;
103 	u64 status;
104 };
105 
106 struct oct_timestamp_resp {
107 	u64 rh;
108 	u64 timestamp;
109 	u64 status;
110 };
111 
112 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
113 
114 union tx_info {
115 	u64 u64;
116 	struct {
117 #ifdef __BIG_ENDIAN_BITFIELD
118 		u16 gso_size;
119 		u16 gso_segs;
120 		u32 reserved;
121 #else
122 		u32 reserved;
123 		u16 gso_segs;
124 		u16 gso_size;
125 #endif
126 	} s;
127 };
128 
129 /* Octeon device properties to be used by the NIC module.
130  * Each octeon device in the system will be represented
131  * by this structure in the NIC module.
132  */
133 
134 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
135 #define OCTNIC_GSO_MAX_SIZE                                                    \
136 	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
137 
138 struct handshake {
139 	struct completion init;
140 	struct completion started;
141 	struct pci_dev *pci_dev;
142 	int init_ok;
143 	int started_ok;
144 };
145 
146 #ifdef CONFIG_PCI_IOV
147 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
148 #endif
149 
150 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
151 				    char *prefix, char *suffix);
152 
153 static int octeon_device_init(struct octeon_device *);
154 static int liquidio_stop(struct net_device *netdev);
155 static void liquidio_remove(struct pci_dev *pdev);
156 static int liquidio_probe(struct pci_dev *pdev,
157 			  const struct pci_device_id *ent);
158 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
159 				      int linkstate);
160 
161 static struct handshake handshake[MAX_OCTEON_DEVICES];
162 static struct completion first_stage;
163 
164 static void octeon_droq_bh(struct tasklet_struct *t)
165 {
166 	int q_no;
167 	int reschedule = 0;
168 	struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
169 							  droq_tasklet);
170 	struct octeon_device *oct = oct_priv->dev;
171 
172 	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
173 		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
174 			continue;
175 		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
176 							  MAX_PACKET_BUDGET);
177 		lio_enable_irq(oct->droq[q_no], NULL);
178 
179 		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
180 			/* set time and cnt interrupt thresholds for this DROQ
181 			 * for NAPI
182 			 */
183 			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
184 
185 			octeon_write_csr64(
186 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
187 			    0x5700000040ULL);
188 			octeon_write_csr64(
189 			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
190 		}
191 	}
192 
193 	if (reschedule)
194 		tasklet_schedule(&oct_priv->droq_tasklet);
195 }
196 
197 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
198 {
199 	struct octeon_device_priv *oct_priv =
200 		(struct octeon_device_priv *)oct->priv;
201 	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
202 	int i;
203 
204 	do {
205 		pending_pkts = 0;
206 
207 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
208 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
209 				continue;
210 			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
211 		}
212 		if (pkt_cnt > 0) {
213 			pending_pkts += pkt_cnt;
214 			tasklet_schedule(&oct_priv->droq_tasklet);
215 		}
216 		pkt_cnt = 0;
217 		schedule_timeout_uninterruptible(1);
218 
219 	} while (retry-- && pending_pkts);
220 
221 	return pkt_cnt;
222 }
223 
224 /**
225  * force_io_queues_off - Forces all IO queues off on a given device
226  * @oct: Pointer to Octeon device
227  */
228 static void force_io_queues_off(struct octeon_device *oct)
229 {
230 	if ((oct->chip_id == OCTEON_CN66XX) ||
231 	    (oct->chip_id == OCTEON_CN68XX)) {
232 		/* Reset the Enable bits for Input Queues. */
233 		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
234 
235 		/* Reset the Enable bits for Output Queues. */
236 		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
237 	}
238 }
239 
240 /**
241  * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
242  * @oct: Pointer to Octeon device
243  */
244 static inline void pcierror_quiesce_device(struct octeon_device *oct)
245 {
246 	int i;
247 
248 	/* Disable the input and output queues now. No more packets will
249 	 * arrive from Octeon, but we should wait for all packet processing
250 	 * to finish.
251 	 */
252 	force_io_queues_off(oct);
253 
254 	/* To allow for in-flight requests */
255 	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
256 
257 	if (wait_for_pending_requests(oct))
258 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
259 
260 	/* Force all requests waiting to be fetched by OCTEON to complete. */
261 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
262 		struct octeon_instr_queue *iq;
263 
264 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
265 			continue;
266 		iq = oct->instr_queue[i];
267 
268 		if (atomic_read(&iq->instr_pending)) {
269 			spin_lock_bh(&iq->lock);
270 			iq->fill_cnt = 0;
271 			iq->octeon_read_index = iq->host_write_index;
272 			iq->stats.instr_processed +=
273 				atomic_read(&iq->instr_pending);
274 			lio_process_iq_request_list(oct, iq, 0);
275 			spin_unlock_bh(&iq->lock);
276 		}
277 	}
278 
279 	/* Force all pending ordered list requests to time out. */
280 	lio_process_ordered_list(oct, 1);
281 
282 	/* We do not need to wait for output queue packets to be processed. */
283 }
284 
285 /**
286  * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
287  * @dev: Pointer to PCI device
288  */
289 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
290 {
291 	int pos = 0x100;
292 	u32 status, mask;
293 
294 	pr_info("%s :\n", __func__);
295 
296 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
297 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
298 	if (dev->error_state == pci_channel_io_normal)
299 		status &= ~mask;        /* Clear corresponding nonfatal bits */
300 	else
301 		status &= mask;         /* Clear corresponding fatal bits */
302 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
303 }
304 
305 /**
306  * stop_pci_io - Stop all PCI IO to a given device
307  * @oct: Pointer to Octeon device
308  */
309 static void stop_pci_io(struct octeon_device *oct)
310 {
311 	/* No more instructions will be forwarded. */
312 	atomic_set(&oct->status, OCT_DEV_IN_RESET);
313 
314 	pci_disable_device(oct->pci_dev);
315 
316 	/* Disable interrupts  */
317 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
318 
319 	pcierror_quiesce_device(oct);
320 
321 	/* Release the interrupt line */
322 	free_irq(oct->pci_dev->irq, oct);
323 
324 	if (oct->flags & LIO_FLAG_MSI_ENABLED)
325 		pci_disable_msi(oct->pci_dev);
326 
327 	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
328 		lio_get_state_string(&oct->status));
329 
330 	/* making it a common function for all OCTEON models */
331 	cleanup_aer_uncorrect_error_status(oct->pci_dev);
332 }
333 
334 /**
335  * liquidio_pcie_error_detected - called when PCI error is detected
336  * @pdev: Pointer to PCI device
337  * @state: The current pci connection state
338  *
339  * This function is called after a PCI bus error affecting
340  * this device has been detected.
341  */
342 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
343 						     pci_channel_state_t state)
344 {
345 	struct octeon_device *oct = pci_get_drvdata(pdev);
346 
347 	/* Non-correctable Non-fatal errors */
348 	if (state == pci_channel_io_normal) {
349 		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
350 		cleanup_aer_uncorrect_error_status(oct->pci_dev);
351 		return PCI_ERS_RESULT_CAN_RECOVER;
352 	}
353 
354 	/* Non-correctable Fatal errors */
355 	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
356 	stop_pci_io(oct);
357 
358 	/* Always return a DISCONNECT. There is no support for recovery but only
359 	 * for a clean shutdown.
360 	 */
361 	return PCI_ERS_RESULT_DISCONNECT;
362 }
363 
364 /**
365  * liquidio_pcie_mmio_enabled - mmio handler
366  * @pdev: Pointer to PCI device
367  */
368 static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
369 {
370 	/* We should never hit this since we never ask for a reset for a Fatal
371 	 * Error. We always return DISCONNECT in io_error above.
372 	 * But play safe and return RECOVERED for now.
373 	 */
374 	return PCI_ERS_RESULT_RECOVERED;
375 }
376 
377 /**
378  * liquidio_pcie_slot_reset - called after the pci bus has been reset.
379  * @pdev: Pointer to PCI device
380  *
381  * Restart the card from scratch, as if from a cold-boot. Implementation
382  * resembles the first-half of the octeon_resume routine.
383  */
384 static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
385 {
386 	/* We should never hit this since we never ask for a reset for a Fatal
387 	 * Error. We always return DISCONNECT in io_error above.
388 	 * But play safe and return RECOVERED for now.
389 	 */
390 	return PCI_ERS_RESULT_RECOVERED;
391 }
392 
393 /**
394  * liquidio_pcie_resume - called when traffic can start flowing again.
395  * @pdev: Pointer to PCI device
396  *
397  * This callback is called when the error recovery driver tells us that
398  * its OK to resume normal operation. Implementation resembles the
399  * second-half of the octeon_resume routine.
400  */
401 static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
402 {
403 	/* Nothing to be done here. */
404 }
405 
406 #define liquidio_suspend NULL
407 #define liquidio_resume NULL
408 
409 /* For PCI-E Advanced Error Recovery (AER) Interface */
410 static const struct pci_error_handlers liquidio_err_handler = {
411 	.error_detected = liquidio_pcie_error_detected,
412 	.mmio_enabled	= liquidio_pcie_mmio_enabled,
413 	.slot_reset	= liquidio_pcie_slot_reset,
414 	.resume		= liquidio_pcie_resume,
415 };
416 
417 static const struct pci_device_id liquidio_pci_tbl[] = {
418 	{       /* 68xx */
419 		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
420 	},
421 	{       /* 66xx */
422 		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
423 	},
424 	{       /* 23xx pf */
425 		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
426 	},
427 	{
428 		0, 0, 0, 0, 0, 0, 0
429 	}
430 };
431 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
432 
433 static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);
434 
435 static struct pci_driver liquidio_pci_driver = {
436 	.name		= "LiquidIO",
437 	.id_table	= liquidio_pci_tbl,
438 	.probe		= liquidio_probe,
439 	.remove		= liquidio_remove,
440 	.err_handler	= &liquidio_err_handler,    /* For AER */
441 	.driver.pm	= &liquidio_pm_ops,
442 #ifdef CONFIG_PCI_IOV
443 	.sriov_configure = liquidio_enable_sriov,
444 #endif
445 };
446 
447 /**
448  * liquidio_init_pci - register PCI driver
449  */
450 static int liquidio_init_pci(void)
451 {
452 	return pci_register_driver(&liquidio_pci_driver);
453 }
454 
455 /**
456  * liquidio_deinit_pci - unregister PCI driver
457  */
458 static void liquidio_deinit_pci(void)
459 {
460 	pci_unregister_driver(&liquidio_pci_driver);
461 }
462 
463 /**
464  * check_txq_status - Check Tx queue status, and take appropriate action
465  * @lio: per-network private data
466  * Return: 0 if full, number of queues woken up otherwise
467  */
468 static inline int check_txq_status(struct lio *lio)
469 {
470 	int numqs = lio->netdev->real_num_tx_queues;
471 	int ret_val = 0;
472 	int q, iq;
473 
474 	/* check each sub-queue state */
475 	for (q = 0; q < numqs; q++) {
476 		iq = lio->linfo.txpciq[q %
477 			lio->oct_dev->num_iqs].s.q_no;
478 		if (octnet_iq_is_full(lio->oct_dev, iq))
479 			continue;
480 		if (__netif_subqueue_stopped(lio->netdev, q)) {
481 			netif_wake_subqueue(lio->netdev, q);
482 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
483 						  tx_restart, 1);
484 			ret_val++;
485 		}
486 	}
487 
488 	return ret_val;
489 }
490 
491 /**
492  * print_link_info -  Print link information
493  * @netdev: network device
494  */
495 static void print_link_info(struct net_device *netdev)
496 {
497 	struct lio *lio = GET_LIO(netdev);
498 
499 	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
500 	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
501 		struct oct_link_info *linfo = &lio->linfo;
502 
503 		if (linfo->link.s.link_up) {
504 			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
505 				   linfo->link.s.speed,
506 				   (linfo->link.s.duplex) ? "Full" : "Half");
507 		} else {
508 			netif_info(lio, link, lio->netdev, "Link Down\n");
509 		}
510 	}
511 }
512 
513 /**
514  * octnet_link_status_change - Routine to notify MTU change
515  * @work: work_struct data structure
516  */
517 static void octnet_link_status_change(struct work_struct *work)
518 {
519 	struct cavium_wk *wk = (struct cavium_wk *)work;
520 	struct lio *lio = (struct lio *)wk->ctxptr;
521 
522 	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
523 	 * this API is invoked only when new max-MTU of the interface is
524 	 * less than current MTU.
525 	 */
526 	rtnl_lock();
527 	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
528 	rtnl_unlock();
529 }
530 
531 /**
532  * setup_link_status_change_wq - Sets up the mtu status change work
533  * @netdev: network device
534  */
535 static inline int setup_link_status_change_wq(struct net_device *netdev)
536 {
537 	struct lio *lio = GET_LIO(netdev);
538 	struct octeon_device *oct = lio->oct_dev;
539 
540 	lio->link_status_wq.wq = alloc_workqueue("link-status",
541 						 WQ_MEM_RECLAIM, 0);
542 	if (!lio->link_status_wq.wq) {
543 		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
544 		return -1;
545 	}
546 	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
547 			  octnet_link_status_change);
548 	lio->link_status_wq.wk.ctxptr = lio;
549 
550 	return 0;
551 }
552 
553 static inline void cleanup_link_status_change_wq(struct net_device *netdev)
554 {
555 	struct lio *lio = GET_LIO(netdev);
556 
557 	if (lio->link_status_wq.wq) {
558 		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
559 		destroy_workqueue(lio->link_status_wq.wq);
560 	}
561 }
562 
563 /**
564  * update_link_status - Update link status
565  * @netdev: network device
566  * @ls: link status structure
567  *
568  * Called on receipt of a link status response from the core application to
569  * update each interface's link status.
570  */
571 static inline void update_link_status(struct net_device *netdev,
572 				      union oct_link_status *ls)
573 {
574 	struct lio *lio = GET_LIO(netdev);
575 	int changed = (lio->linfo.link.u64 != ls->u64);
576 	int current_max_mtu = lio->linfo.link.s.mtu;
577 	struct octeon_device *oct = lio->oct_dev;
578 
579 	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
580 		__func__, lio->linfo.link.u64, ls->u64);
581 	lio->linfo.link.u64 = ls->u64;
582 
583 	if ((lio->intf_open) && (changed)) {
584 		print_link_info(netdev);
585 		lio->link_changes++;
586 
587 		if (lio->linfo.link.s.link_up) {
588 			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
589 			netif_carrier_on(netdev);
590 			wake_txqs(netdev);
591 		} else {
592 			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
593 			netif_carrier_off(netdev);
594 			stop_txqs(netdev);
595 		}
596 		if (lio->linfo.link.s.mtu != current_max_mtu) {
597 			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
598 				   current_max_mtu, lio->linfo.link.s.mtu);
599 			netdev->max_mtu = lio->linfo.link.s.mtu;
600 		}
601 		if (lio->linfo.link.s.mtu < netdev->mtu) {
602 			dev_warn(&oct->pci_dev->dev,
603 				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
604 				     netdev->mtu, lio->linfo.link.s.mtu);
605 			queue_delayed_work(lio->link_status_wq.wq,
606 					   &lio->link_status_wq.wk.work, 0);
607 		}
608 	}
609 }
610 
611 /**
612  * lio_sync_octeon_time - send latest localtime to octeon firmware so that
613  * firmware will correct it's time, in case there is a time skew
614  *
615  * @work: work scheduled to send time update to octeon firmware
616  **/
617 static void lio_sync_octeon_time(struct work_struct *work)
618 {
619 	struct cavium_wk *wk = (struct cavium_wk *)work;
620 	struct lio *lio = (struct lio *)wk->ctxptr;
621 	struct octeon_device *oct = lio->oct_dev;
622 	struct octeon_soft_command *sc;
623 	struct timespec64 ts;
624 	struct lio_time *lt;
625 	int ret;
626 
627 	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
628 	if (!sc) {
629 		dev_err(&oct->pci_dev->dev,
630 			"Failed to sync time to octeon: soft command allocation failed\n");
631 		return;
632 	}
633 
634 	lt = (struct lio_time *)sc->virtdptr;
635 
636 	/* Get time of the day */
637 	ktime_get_real_ts64(&ts);
638 	lt->sec = ts.tv_sec;
639 	lt->nsec = ts.tv_nsec;
640 	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
641 
642 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
643 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
644 				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
645 
646 	init_completion(&sc->complete);
647 	sc->sc_status = OCTEON_REQUEST_PENDING;
648 
649 	ret = octeon_send_soft_command(oct, sc);
650 	if (ret == IQ_SEND_FAILED) {
651 		dev_err(&oct->pci_dev->dev,
652 			"Failed to sync time to octeon: failed to send soft command\n");
653 		octeon_free_soft_command(oct, sc);
654 	} else {
655 		WRITE_ONCE(sc->caller_is_done, true);
656 	}
657 
658 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
659 			   &lio->sync_octeon_time_wq.wk.work,
660 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
661 }
662 
663 /**
664  * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
665  *
666  * @netdev: network device which should send time update to firmware
667  **/
668 static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
669 {
670 	struct lio *lio = GET_LIO(netdev);
671 	struct octeon_device *oct = lio->oct_dev;
672 
673 	lio->sync_octeon_time_wq.wq =
674 		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
675 	if (!lio->sync_octeon_time_wq.wq) {
676 		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
677 		return -1;
678 	}
679 	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
680 			  lio_sync_octeon_time);
681 	lio->sync_octeon_time_wq.wk.ctxptr = lio;
682 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
683 			   &lio->sync_octeon_time_wq.wk.work,
684 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
685 
686 	return 0;
687 }
688 
689 /**
690  * cleanup_sync_octeon_time_wq - destroy wq
691  *
692  * @netdev: network device which should send time update to firmware
693  *
694  * Stop scheduling and destroy the work created to periodically update local
695  * time to octeon firmware.
696  **/
697 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
698 {
699 	struct lio *lio = GET_LIO(netdev);
700 	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
701 
702 	if (time_wq->wq) {
703 		cancel_delayed_work_sync(&time_wq->wk.work);
704 		destroy_workqueue(time_wq->wq);
705 	}
706 }
707 
708 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
709 {
710 	struct octeon_device *other_oct;
711 
712 	other_oct = lio_get_device(oct->octeon_id + 1);
713 
714 	if (other_oct && other_oct->pci_dev) {
715 		int oct_busnum, other_oct_busnum;
716 
717 		oct_busnum = oct->pci_dev->bus->number;
718 		other_oct_busnum = other_oct->pci_dev->bus->number;
719 
720 		if (oct_busnum == other_oct_busnum) {
721 			int oct_slot, other_oct_slot;
722 
723 			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
724 			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
725 
726 			if (oct_slot == other_oct_slot)
727 				return other_oct;
728 		}
729 	}
730 
731 	return NULL;
732 }
733 
734 static void disable_all_vf_links(struct octeon_device *oct)
735 {
736 	struct net_device *netdev;
737 	int max_vfs, vf, i;
738 
739 	if (!oct)
740 		return;
741 
742 	max_vfs = oct->sriov_info.max_vfs;
743 
744 	for (i = 0; i < oct->ifcount; i++) {
745 		netdev = oct->props[i].netdev;
746 		if (!netdev)
747 			continue;
748 
749 		for (vf = 0; vf < max_vfs; vf++)
750 			liquidio_set_vf_link_state(netdev, vf,
751 						   IFLA_VF_LINK_STATE_DISABLE);
752 	}
753 }
754 
755 static int liquidio_watchdog(void *param)
756 {
757 	bool err_msg_was_printed[LIO_MAX_CORES];
758 	u16 mask_of_crashed_or_stuck_cores = 0;
759 	bool all_vf_links_are_disabled = false;
760 	struct octeon_device *oct = param;
761 	struct octeon_device *other_oct;
762 #ifdef CONFIG_MODULE_UNLOAD
763 	long refcount, vfs_referencing_pf;
764 	u64 vfs_mask1, vfs_mask2;
765 #endif
766 	int core;
767 
768 	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
769 
770 	while (!kthread_should_stop()) {
771 		/* sleep for a couple of seconds so that we don't hog the CPU */
772 		set_current_state(TASK_INTERRUPTIBLE);
773 		schedule_timeout(msecs_to_jiffies(2000));
774 
775 		mask_of_crashed_or_stuck_cores =
776 		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
777 
778 		if (!mask_of_crashed_or_stuck_cores)
779 			continue;
780 
781 		WRITE_ONCE(oct->cores_crashed, true);
782 		other_oct = get_other_octeon_device(oct);
783 		if (other_oct)
784 			WRITE_ONCE(other_oct->cores_crashed, true);
785 
786 		for (core = 0; core < LIO_MAX_CORES; core++) {
787 			bool core_crashed_or_got_stuck;
788 
789 			core_crashed_or_got_stuck =
790 						(mask_of_crashed_or_stuck_cores
791 						 >> core) & 1;
792 
793 			if (core_crashed_or_got_stuck &&
794 			    !err_msg_was_printed[core]) {
795 				dev_err(&oct->pci_dev->dev,
796 					"ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
797 					core);
798 				err_msg_was_printed[core] = true;
799 			}
800 		}
801 
802 		if (all_vf_links_are_disabled)
803 			continue;
804 
805 		disable_all_vf_links(oct);
806 		disable_all_vf_links(other_oct);
807 		all_vf_links_are_disabled = true;
808 
809 #ifdef CONFIG_MODULE_UNLOAD
810 		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
811 		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
812 
813 		vfs_referencing_pf  = hweight64(vfs_mask1);
814 		vfs_referencing_pf += hweight64(vfs_mask2);
815 
816 		refcount = module_refcount(THIS_MODULE);
817 		if (refcount >= vfs_referencing_pf) {
818 			while (vfs_referencing_pf) {
819 				module_put(THIS_MODULE);
820 				vfs_referencing_pf--;
821 			}
822 		}
823 #endif
824 	}
825 
826 	return 0;
827 }
828 
829 /**
830  * liquidio_probe - PCI probe handler
831  * @pdev: PCI device structure
832  * @ent: unused
833  */
834 static int
835 liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
836 {
837 	struct octeon_device *oct_dev = NULL;
838 	struct handshake *hs;
839 
840 	oct_dev = octeon_allocate_device(pdev->device,
841 					 sizeof(struct octeon_device_priv));
842 	if (!oct_dev) {
843 		dev_err(&pdev->dev, "Unable to allocate device\n");
844 		return -ENOMEM;
845 	}
846 
847 	if (pdev->device == OCTEON_CN23XX_PF_VID)
848 		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
849 
850 	/* Enable PTP for 6XXX Device */
851 	if (((pdev->device == OCTEON_CN66XX) ||
852 	     (pdev->device == OCTEON_CN68XX)))
853 		oct_dev->ptp_enable = true;
854 	else
855 		oct_dev->ptp_enable = false;
856 
857 	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
858 		 (u32)pdev->vendor, (u32)pdev->device);
859 
860 	/* Assign octeon_device for this device to the private data area. */
861 	pci_set_drvdata(pdev, oct_dev);
862 
863 	/* set linux specific device pointer */
864 	oct_dev->pci_dev = (void *)pdev;
865 
866 	oct_dev->subsystem_id = pdev->subsystem_vendor |
867 		(pdev->subsystem_device << 16);
868 
869 	hs = &handshake[oct_dev->octeon_id];
870 	init_completion(&hs->init);
871 	init_completion(&hs->started);
872 	hs->pci_dev = pdev;
873 
874 	if (oct_dev->octeon_id == 0)
875 		/* first LiquidIO NIC is detected */
876 		complete(&first_stage);
877 
878 	if (octeon_device_init(oct_dev)) {
879 		complete(&hs->init);
880 		liquidio_remove(pdev);
881 		return -ENOMEM;
882 	}
883 
884 	if (OCTEON_CN23XX_PF(oct_dev)) {
885 		u8 bus, device, function;
886 
887 		if (atomic_read(oct_dev->adapter_refcount) == 1) {
888 			/* Each NIC gets one watchdog kernel thread.  The first
889 			 * PF (of each NIC) that gets pci_driver->probe()'d
890 			 * creates that thread.
891 			 */
892 			bus = pdev->bus->number;
893 			device = PCI_SLOT(pdev->devfn);
894 			function = PCI_FUNC(pdev->devfn);
895 			oct_dev->watchdog_task = kthread_run(liquidio_watchdog,
896 							     oct_dev,
897 							     "liowd/%02hhx:%02hhx.%hhx",
898 							     bus, device, function);
899 			if (IS_ERR(oct_dev->watchdog_task)) {
900 				oct_dev->watchdog_task = NULL;
901 				dev_err(&oct_dev->pci_dev->dev,
902 					"failed to create kernel_thread\n");
903 				liquidio_remove(pdev);
904 				return -1;
905 			}
906 		}
907 	}
908 
909 	oct_dev->rx_pause = 1;
910 	oct_dev->tx_pause = 1;
911 
912 	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
913 
914 	return 0;
915 }
916 
917 static bool fw_type_is_auto(void)
918 {
919 	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
920 		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
921 }
922 
923 /**
924  * octeon_pci_flr - PCI FLR for each Octeon device.
925  * @oct: octeon device
926  */
927 static void octeon_pci_flr(struct octeon_device *oct)
928 {
929 	int rc;
930 
931 	pci_save_state(oct->pci_dev);
932 
933 	pci_cfg_access_lock(oct->pci_dev);
934 
935 	/* Quiesce the device completely */
936 	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
937 			      PCI_COMMAND_INTX_DISABLE);
938 
939 	rc = __pci_reset_function_locked(oct->pci_dev);
940 
941 	if (rc != 0)
942 		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
943 			rc, oct->pf_num);
944 
945 	pci_cfg_access_unlock(oct->pci_dev);
946 
947 	pci_restore_state(oct->pci_dev);
948 }
949 
950 /**
951  * octeon_destroy_resources - Destroy resources associated with octeon device
952  * @oct: octeon device
953  */
954 static void octeon_destroy_resources(struct octeon_device *oct)
955 {
956 	int i, refcount;
957 	struct msix_entry *msix_entries;
958 	struct octeon_device_priv *oct_priv =
959 		(struct octeon_device_priv *)oct->priv;
960 
961 	struct handshake *hs;
962 
963 	switch (atomic_read(&oct->status)) {
964 	case OCT_DEV_RUNNING:
965 	case OCT_DEV_CORE_OK:
966 
967 		/* No more instructions will be forwarded. */
968 		atomic_set(&oct->status, OCT_DEV_IN_RESET);
969 
970 		oct->app_mode = CVM_DRV_INVALID_APP;
971 		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
972 			lio_get_state_string(&oct->status));
973 
974 		schedule_timeout_uninterruptible(HZ / 10);
975 
976 		fallthrough;
977 	case OCT_DEV_HOST_OK:
978 
979 	case OCT_DEV_CONSOLE_INIT_DONE:
980 		/* Remove any consoles */
981 		octeon_remove_consoles(oct);
982 
983 		fallthrough;
984 	case OCT_DEV_IO_QUEUES_DONE:
985 		if (lio_wait_for_instr_fetch(oct))
986 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
987 
988 		if (wait_for_pending_requests(oct))
989 			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
990 
991 		/* Disable the input and output queues now. No more packets will
992 		 * arrive from Octeon, but we should wait for all packet
993 		 * processing to finish.
994 		 */
995 		oct->fn_list.disable_io_queues(oct);
996 
997 		if (lio_wait_for_oq_pkts(oct))
998 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
999 
1000 		/* Force all requests waiting to be fetched by OCTEON to
1001 		 * complete.
1002 		 */
1003 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1004 			struct octeon_instr_queue *iq;
1005 
1006 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
1007 				continue;
1008 			iq = oct->instr_queue[i];
1009 
1010 			if (atomic_read(&iq->instr_pending)) {
1011 				spin_lock_bh(&iq->lock);
1012 				iq->fill_cnt = 0;
1013 				iq->octeon_read_index = iq->host_write_index;
1014 				iq->stats.instr_processed +=
1015 					atomic_read(&iq->instr_pending);
1016 				lio_process_iq_request_list(oct, iq, 0);
1017 				spin_unlock_bh(&iq->lock);
1018 			}
1019 		}
1020 
1021 		lio_process_ordered_list(oct, 1);
1022 		octeon_free_sc_done_list(oct);
1023 		octeon_free_sc_zombie_list(oct);
1024 
1025 		fallthrough;
1026 	case OCT_DEV_INTR_SET_DONE:
1027 		/* Disable interrupts  */
1028 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1029 
1030 		if (oct->msix_on) {
1031 			msix_entries = (struct msix_entry *)oct->msix_entries;
1032 			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1033 				if (oct->ioq_vector[i].vector) {
1034 					/* clear the affinity_cpumask */
1035 					irq_set_affinity_hint(
1036 							msix_entries[i].vector,
1037 							NULL);
1038 					free_irq(msix_entries[i].vector,
1039 						 &oct->ioq_vector[i]);
1040 					oct->ioq_vector[i].vector = 0;
1041 				}
1042 			}
1043 			/* non-iov vector's argument is oct struct */
1044 			free_irq(msix_entries[i].vector, oct);
1045 
1046 			pci_disable_msix(oct->pci_dev);
1047 			kfree(oct->msix_entries);
1048 			oct->msix_entries = NULL;
1049 		} else {
1050 			/* Release the interrupt line */
1051 			free_irq(oct->pci_dev->irq, oct);
1052 
1053 			if (oct->flags & LIO_FLAG_MSI_ENABLED)
1054 				pci_disable_msi(oct->pci_dev);
1055 		}
1056 
1057 		kfree(oct->irq_name_storage);
1058 		oct->irq_name_storage = NULL;
1059 
1060 		fallthrough;
1061 	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1062 		if (OCTEON_CN23XX_PF(oct))
1063 			octeon_free_ioq_vector(oct);
1064 
1065 		fallthrough;
1066 	case OCT_DEV_MBOX_SETUP_DONE:
1067 		if (OCTEON_CN23XX_PF(oct))
1068 			oct->fn_list.free_mbox(oct);
1069 
1070 		fallthrough;
1071 	case OCT_DEV_IN_RESET:
1072 	case OCT_DEV_DROQ_INIT_DONE:
1073 		/* Wait for any pending operations */
1074 		mdelay(100);
1075 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1076 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
1077 				continue;
1078 			octeon_delete_droq(oct, i);
1079 		}
1080 
1081 		/* Force any pending handshakes to complete */
1082 		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1083 			hs = &handshake[i];
1084 
1085 			if (hs->pci_dev) {
1086 				handshake[oct->octeon_id].init_ok = 0;
1087 				complete(&handshake[oct->octeon_id].init);
1088 				handshake[oct->octeon_id].started_ok = 0;
1089 				complete(&handshake[oct->octeon_id].started);
1090 			}
1091 		}
1092 
1093 		fallthrough;
1094 	case OCT_DEV_RESP_LIST_INIT_DONE:
1095 		octeon_delete_response_list(oct);
1096 
1097 		fallthrough;
1098 	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1099 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1100 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
1101 				continue;
1102 			octeon_delete_instr_queue(oct, i);
1103 		}
1104 #ifdef CONFIG_PCI_IOV
1105 		if (oct->sriov_info.sriov_enabled)
1106 			pci_disable_sriov(oct->pci_dev);
1107 #endif
1108 		fallthrough;
1109 	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1110 		octeon_free_sc_buffer_pool(oct);
1111 
1112 		fallthrough;
1113 	case OCT_DEV_DISPATCH_INIT_DONE:
1114 		octeon_delete_dispatch_list(oct);
1115 		cancel_delayed_work_sync(&oct->nic_poll_work.work);
1116 
1117 		fallthrough;
1118 	case OCT_DEV_PCI_MAP_DONE:
1119 		refcount = octeon_deregister_device(oct);
1120 
1121 		/* Soft reset the octeon device before exiting.
1122 		 * However, if fw was loaded from card (i.e. autoboot),
1123 		 * perform an FLR instead.
1124 		 * Implementation note: only soft-reset the device
1125 		 * if it is a CN6XXX OR the LAST CN23XX device.
1126 		 */
1127 		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1128 			octeon_pci_flr(oct);
1129 		else if (OCTEON_CN6XXX(oct) || !refcount)
1130 			oct->fn_list.soft_reset(oct);
1131 
1132 		octeon_unmap_pci_barx(oct, 0);
1133 		octeon_unmap_pci_barx(oct, 1);
1134 
1135 		fallthrough;
1136 	case OCT_DEV_PCI_ENABLE_DONE:
1137 		pci_clear_master(oct->pci_dev);
1138 		/* Disable the device, releasing the PCI INT */
1139 		pci_disable_device(oct->pci_dev);
1140 
1141 		fallthrough;
1142 	case OCT_DEV_BEGIN_STATE:
1143 		/* Nothing to be done here either */
1144 		break;
1145 	}                       /* end switch (oct->status) */
1146 
1147 	tasklet_kill(&oct_priv->droq_tasklet);
1148 }
1149 
1150 /**
1151  * send_rx_ctrl_cmd - Send Rx control command
1152  * @lio: per-network private data
1153  * @start_stop: whether to start or stop
1154  */
1155 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1156 {
1157 	struct octeon_soft_command *sc;
1158 	union octnet_cmd *ncmd;
1159 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1160 	int retval;
1161 
1162 	if (oct->props[lio->ifidx].rx_on == start_stop)
1163 		return 0;
1164 
1165 	sc = (struct octeon_soft_command *)
1166 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1167 					  16, 0);
1168 	if (!sc) {
1169 		netif_info(lio, rx_err, lio->netdev,
1170 			   "Failed to allocate octeon_soft_command struct\n");
1171 		return -ENOMEM;
1172 	}
1173 
1174 	ncmd = (union octnet_cmd *)sc->virtdptr;
1175 
1176 	ncmd->u64 = 0;
1177 	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1178 	ncmd->s.param1 = start_stop;
1179 
1180 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1181 
1182 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1183 
1184 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1185 				    OPCODE_NIC_CMD, 0, 0, 0);
1186 
1187 	init_completion(&sc->complete);
1188 	sc->sc_status = OCTEON_REQUEST_PENDING;
1189 
1190 	retval = octeon_send_soft_command(oct, sc);
1191 	if (retval == IQ_SEND_FAILED) {
1192 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1193 		octeon_free_soft_command(oct, sc);
1194 	} else {
1195 		/* Sleep on a wait queue till the cond flag indicates that the
1196 		 * response arrived or timed-out.
1197 		 */
1198 		retval = wait_for_sc_completion_timeout(oct, sc, 0);
1199 		if (retval)
1200 			return retval;
1201 
1202 		oct->props[lio->ifidx].rx_on = start_stop;
1203 		WRITE_ONCE(sc->caller_is_done, true);
1204 	}
1205 
1206 	return retval;
1207 }
1208 
1209 /**
1210  * liquidio_destroy_nic_device - Destroy NIC device interface
1211  * @oct: octeon device
1212  * @ifidx: which interface to destroy
1213  *
1214  * Cleanup associated with each interface for an Octeon device  when NIC
1215  * module is being unloaded or if initialization fails during load.
1216  */
1217 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1218 {
1219 	struct net_device *netdev = oct->props[ifidx].netdev;
1220 	struct octeon_device_priv *oct_priv =
1221 		(struct octeon_device_priv *)oct->priv;
1222 	struct napi_struct *napi, *n;
1223 	struct lio *lio;
1224 
1225 	if (!netdev) {
1226 		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1227 			__func__, ifidx);
1228 		return;
1229 	}
1230 
1231 	lio = GET_LIO(netdev);
1232 
1233 	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1234 
1235 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1236 		liquidio_stop(netdev);
1237 
1238 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1239 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1240 			napi_disable(napi);
1241 
1242 		oct->props[lio->ifidx].napi_enabled = 0;
1243 
1244 		if (OCTEON_CN23XX_PF(oct))
1245 			oct->droq[0]->ops.poll_mode = 0;
1246 	}
1247 
1248 	/* Delete NAPI */
1249 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1250 		netif_napi_del(napi);
1251 
1252 	tasklet_enable(&oct_priv->droq_tasklet);
1253 
1254 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1255 		unregister_netdev(netdev);
1256 
1257 	cleanup_sync_octeon_time_wq(netdev);
1258 	cleanup_link_status_change_wq(netdev);
1259 
1260 	cleanup_rx_oom_poll_fn(netdev);
1261 
1262 	lio_delete_glists(lio);
1263 
1264 	free_netdev(netdev);
1265 
1266 	oct->props[ifidx].gmxport = -1;
1267 
1268 	oct->props[ifidx].netdev = NULL;
1269 }
1270 
1271 /**
1272  * liquidio_stop_nic_module - Stop complete NIC functionality
1273  * @oct: octeon device
1274  */
1275 static int liquidio_stop_nic_module(struct octeon_device *oct)
1276 {
1277 	int i, j;
1278 	struct lio *lio;
1279 
1280 	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1281 	device_lock(&oct->pci_dev->dev);
1282 	if (oct->devlink) {
1283 		devlink_unregister(oct->devlink);
1284 		devlink_free(oct->devlink);
1285 		oct->devlink = NULL;
1286 	}
1287 	device_unlock(&oct->pci_dev->dev);
1288 
1289 	if (!oct->ifcount) {
1290 		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1291 		return 1;
1292 	}
1293 
1294 	spin_lock_bh(&oct->cmd_resp_wqlock);
1295 	oct->cmd_resp_state = OCT_DRV_OFFLINE;
1296 	spin_unlock_bh(&oct->cmd_resp_wqlock);
1297 
1298 	lio_vf_rep_destroy(oct);
1299 
1300 	for (i = 0; i < oct->ifcount; i++) {
1301 		lio = GET_LIO(oct->props[i].netdev);
1302 		for (j = 0; j < oct->num_oqs; j++)
1303 			octeon_unregister_droq_ops(oct,
1304 						   lio->linfo.rxpciq[j].s.q_no);
1305 	}
1306 
1307 	for (i = 0; i < oct->ifcount; i++)
1308 		liquidio_destroy_nic_device(oct, i);
1309 
1310 	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1311 	return 0;
1312 }
1313 
1314 /**
1315  * liquidio_remove - Cleans up resources at unload time
1316  * @pdev: PCI device structure
1317  */
1318 static void liquidio_remove(struct pci_dev *pdev)
1319 {
1320 	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1321 
1322 	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1323 
1324 	if (oct_dev->watchdog_task)
1325 		kthread_stop(oct_dev->watchdog_task);
1326 
1327 	if (!oct_dev->octeon_id &&
1328 	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1329 		lio_vf_rep_modexit();
1330 
1331 	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1332 		liquidio_stop_nic_module(oct_dev);
1333 
1334 	/* Reset the octeon device and cleanup all memory allocated for
1335 	 * the octeon device by driver.
1336 	 */
1337 	octeon_destroy_resources(oct_dev);
1338 
1339 	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1340 
1341 	/* This octeon device has been removed. Update the global
1342 	 * data structure to reflect this. Free the device structure.
1343 	 */
1344 	octeon_free_device_mem(oct_dev);
1345 }
1346 
1347 /**
1348  * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space
1349  * @oct: octeon device
1350  */
1351 static int octeon_chip_specific_setup(struct octeon_device *oct)
1352 {
1353 	u32 dev_id, rev_id;
1354 	int ret = 1;
1355 
1356 	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1357 	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1358 	oct->rev_id = rev_id & 0xff;
1359 
1360 	switch (dev_id) {
1361 	case OCTEON_CN68XX_PCIID:
1362 		oct->chip_id = OCTEON_CN68XX;
1363 		ret = lio_setup_cn68xx_octeon_device(oct);
1364 		break;
1365 
1366 	case OCTEON_CN66XX_PCIID:
1367 		oct->chip_id = OCTEON_CN66XX;
1368 		ret = lio_setup_cn66xx_octeon_device(oct);
1369 		break;
1370 
1371 	case OCTEON_CN23XX_PCIID_PF:
1372 		oct->chip_id = OCTEON_CN23XX_PF_VID;
1373 		ret = setup_cn23xx_octeon_pf_device(oct);
1374 		if (ret)
1375 			break;
1376 #ifdef CONFIG_PCI_IOV
1377 		if (!ret)
1378 			pci_sriov_set_totalvfs(oct->pci_dev,
1379 					       oct->sriov_info.max_vfs);
1380 #endif
1381 		break;
1382 
1383 	default:
1384 		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1385 			dev_id);
1386 	}
1387 
1388 	return ret;
1389 }
1390 
1391 /**
1392  * octeon_pci_os_setup - PCI initialization for each Octeon device.
1393  * @oct: octeon device
1394  */
1395 static int octeon_pci_os_setup(struct octeon_device *oct)
1396 {
1397 	/* setup PCI stuff first */
1398 	if (pci_enable_device(oct->pci_dev)) {
1399 		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1400 		return 1;
1401 	}
1402 
1403 	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1404 		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1405 		pci_disable_device(oct->pci_dev);
1406 		return 1;
1407 	}
1408 
1409 	/* Enable PCI DMA Master. */
1410 	pci_set_master(oct->pci_dev);
1411 
1412 	return 0;
1413 }
1414 
1415 /**
1416  * free_netbuf - Unmap and free network buffer
1417  * @buf: buffer
1418  */
1419 static void free_netbuf(void *buf)
1420 {
1421 	struct sk_buff *skb;
1422 	struct octnet_buf_free_info *finfo;
1423 	struct lio *lio;
1424 
1425 	finfo = (struct octnet_buf_free_info *)buf;
1426 	skb = finfo->skb;
1427 	lio = finfo->lio;
1428 
1429 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1430 			 DMA_TO_DEVICE);
1431 
1432 	tx_buffer_free(skb);
1433 }
1434 
1435 /**
1436  * free_netsgbuf - Unmap and free gather buffer
1437  * @buf: buffer
1438  */
1439 static void free_netsgbuf(void *buf)
1440 {
1441 	struct octnet_buf_free_info *finfo;
1442 	struct sk_buff *skb;
1443 	struct lio *lio;
1444 	struct octnic_gather *g;
1445 	int i, frags, iq;
1446 
1447 	finfo = (struct octnet_buf_free_info *)buf;
1448 	skb = finfo->skb;
1449 	lio = finfo->lio;
1450 	g = finfo->g;
1451 	frags = skb_shinfo(skb)->nr_frags;
1452 
1453 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1454 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1455 			 DMA_TO_DEVICE);
1456 
1457 	i = 1;
1458 	while (frags--) {
1459 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
1460 
1461 		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1462 			       g->sg[(i >> 2)].ptr[(i & 3)],
1463 			       skb_frag_size(frag), DMA_TO_DEVICE);
1464 		i++;
1465 	}
1466 
1467 	iq = skb_iq(lio->oct_dev, skb);
1468 	spin_lock(&lio->glist_lock[iq]);
1469 	list_add_tail(&g->list, &lio->glist[iq]);
1470 	spin_unlock(&lio->glist_lock[iq]);
1471 
1472 	tx_buffer_free(skb);
1473 }
1474 
1475 /**
1476  * free_netsgbuf_with_resp - Unmap and free gather buffer with response
1477  * @buf: buffer
1478  */
1479 static void free_netsgbuf_with_resp(void *buf)
1480 {
1481 	struct octeon_soft_command *sc;
1482 	struct octnet_buf_free_info *finfo;
1483 	struct sk_buff *skb;
1484 	struct lio *lio;
1485 	struct octnic_gather *g;
1486 	int i, frags, iq;
1487 
1488 	sc = (struct octeon_soft_command *)buf;
1489 	skb = (struct sk_buff *)sc->callback_arg;
1490 	finfo = (struct octnet_buf_free_info *)&skb->cb;
1491 
1492 	lio = finfo->lio;
1493 	g = finfo->g;
1494 	frags = skb_shinfo(skb)->nr_frags;
1495 
1496 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1497 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1498 			 DMA_TO_DEVICE);
1499 
1500 	i = 1;
1501 	while (frags--) {
1502 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
1503 
1504 		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1505 			       g->sg[(i >> 2)].ptr[(i & 3)],
1506 			       skb_frag_size(frag), DMA_TO_DEVICE);
1507 		i++;
1508 	}
1509 
1510 	iq = skb_iq(lio->oct_dev, skb);
1511 
1512 	spin_lock(&lio->glist_lock[iq]);
1513 	list_add_tail(&g->list, &lio->glist[iq]);
1514 	spin_unlock(&lio->glist_lock[iq]);
1515 
1516 	/* Don't free the skb yet */
1517 }
1518 
1519 /**
1520  * liquidio_ptp_adjfreq - Adjust ptp frequency
1521  * @ptp: PTP clock info
1522  * @ppb: how much to adjust by, in parts-per-billion
1523  */
1524 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1525 {
1526 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1527 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1528 	u64 comp, delta;
1529 	unsigned long flags;
1530 	bool neg_adj = false;
1531 
1532 	if (ppb < 0) {
1533 		neg_adj = true;
1534 		ppb = -ppb;
1535 	}
1536 
1537 	/* The hardware adds the clock compensation value to the
1538 	 * PTP clock on every coprocessor clock cycle, so we
1539 	 * compute the delta in terms of coprocessor clocks.
1540 	 */
1541 	delta = (u64)ppb << 32;
1542 	do_div(delta, oct->coproc_clock_rate);
1543 
1544 	spin_lock_irqsave(&lio->ptp_lock, flags);
1545 	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1546 	if (neg_adj)
1547 		comp -= delta;
1548 	else
1549 		comp += delta;
1550 	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1551 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1552 
1553 	return 0;
1554 }
1555 
1556 /**
1557  * liquidio_ptp_adjtime - Adjust ptp time
1558  * @ptp: PTP clock info
1559  * @delta: how much to adjust by, in nanosecs
1560  */
1561 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1562 {
1563 	unsigned long flags;
1564 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1565 
1566 	spin_lock_irqsave(&lio->ptp_lock, flags);
1567 	lio->ptp_adjust += delta;
1568 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1569 
1570 	return 0;
1571 }
1572 
1573 /**
1574  * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
1575  * @ptp: PTP clock info
1576  * @ts: timespec
1577  */
1578 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1579 				struct timespec64 *ts)
1580 {
1581 	u64 ns;
1582 	unsigned long flags;
1583 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1584 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1585 
1586 	spin_lock_irqsave(&lio->ptp_lock, flags);
1587 	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1588 	ns += lio->ptp_adjust;
1589 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1590 
1591 	*ts = ns_to_timespec64(ns);
1592 
1593 	return 0;
1594 }
1595 
1596 /**
1597  * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
1598  * @ptp: PTP clock info
1599  * @ts: timespec
1600  */
1601 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1602 				const struct timespec64 *ts)
1603 {
1604 	u64 ns;
1605 	unsigned long flags;
1606 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1607 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1608 
1609 	ns = timespec64_to_ns(ts);
1610 
1611 	spin_lock_irqsave(&lio->ptp_lock, flags);
1612 	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1613 	lio->ptp_adjust = 0;
1614 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1615 
1616 	return 0;
1617 }
1618 
1619 /**
1620  * liquidio_ptp_enable - Check if PTP is enabled
1621  * @ptp: PTP clock info
1622  * @rq: request
1623  * @on: is it on
1624  */
1625 static int
1626 liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
1627 		    struct ptp_clock_request __maybe_unused *rq,
1628 		    int __maybe_unused on)
1629 {
1630 	return -EOPNOTSUPP;
1631 }
1632 
1633 /**
1634  * oct_ptp_open - Open PTP clock source
1635  * @netdev: network device
1636  */
1637 static void oct_ptp_open(struct net_device *netdev)
1638 {
1639 	struct lio *lio = GET_LIO(netdev);
1640 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1641 
1642 	spin_lock_init(&lio->ptp_lock);
1643 
1644 	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1645 	lio->ptp_info.owner = THIS_MODULE;
1646 	lio->ptp_info.max_adj = 250000000;
1647 	lio->ptp_info.n_alarm = 0;
1648 	lio->ptp_info.n_ext_ts = 0;
1649 	lio->ptp_info.n_per_out = 0;
1650 	lio->ptp_info.pps = 0;
1651 	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1652 	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1653 	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1654 	lio->ptp_info.settime64 = liquidio_ptp_settime;
1655 	lio->ptp_info.enable = liquidio_ptp_enable;
1656 
1657 	lio->ptp_adjust = 0;
1658 
1659 	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1660 					     &oct->pci_dev->dev);
1661 
1662 	if (IS_ERR(lio->ptp_clock))
1663 		lio->ptp_clock = NULL;
1664 }
1665 
1666 /**
1667  * liquidio_ptp_init - Init PTP clock
1668  * @oct: octeon device
1669  */
1670 static void liquidio_ptp_init(struct octeon_device *oct)
1671 {
1672 	u64 clock_comp, cfg;
1673 
1674 	clock_comp = (u64)NSEC_PER_SEC << 32;
1675 	do_div(clock_comp, oct->coproc_clock_rate);
1676 	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1677 
1678 	/* Enable */
1679 	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1680 	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1681 }
1682 
1683 /**
1684  * load_firmware - Load firmware to device
1685  * @oct: octeon device
1686  *
1687  * Maps device to firmware filename, requests firmware, and downloads it
1688  */
1689 static int load_firmware(struct octeon_device *oct)
1690 {
1691 	int ret = 0;
1692 	const struct firmware *fw;
1693 	char fw_name[LIO_MAX_FW_FILENAME_LEN];
1694 	char *tmp_fw_type;
1695 
1696 	if (fw_type_is_auto()) {
1697 		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1698 		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1699 	} else {
1700 		tmp_fw_type = fw_type;
1701 	}
1702 
1703 	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1704 		octeon_get_conf(oct)->card_name, tmp_fw_type,
1705 		LIO_FW_NAME_SUFFIX);
1706 
1707 	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1708 	if (ret) {
1709 		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1710 			fw_name);
1711 		release_firmware(fw);
1712 		return ret;
1713 	}
1714 
1715 	ret = octeon_download_firmware(oct, fw->data, fw->size);
1716 
1717 	release_firmware(fw);
1718 
1719 	return ret;
1720 }
1721 
1722 /**
1723  * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
1724  * @work: work_struct data structure
1725  */
1726 static void octnet_poll_check_txq_status(struct work_struct *work)
1727 {
1728 	struct cavium_wk *wk = (struct cavium_wk *)work;
1729 	struct lio *lio = (struct lio *)wk->ctxptr;
1730 
1731 	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1732 		return;
1733 
1734 	check_txq_status(lio);
1735 	queue_delayed_work(lio->txq_status_wq.wq,
1736 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1737 }
1738 
1739 /**
1740  * setup_tx_poll_fn - Sets up the txq poll check
1741  * @netdev: network device
1742  */
1743 static inline int setup_tx_poll_fn(struct net_device *netdev)
1744 {
1745 	struct lio *lio = GET_LIO(netdev);
1746 	struct octeon_device *oct = lio->oct_dev;
1747 
1748 	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1749 						WQ_MEM_RECLAIM, 0);
1750 	if (!lio->txq_status_wq.wq) {
1751 		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1752 		return -1;
1753 	}
1754 	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1755 			  octnet_poll_check_txq_status);
1756 	lio->txq_status_wq.wk.ctxptr = lio;
1757 	queue_delayed_work(lio->txq_status_wq.wq,
1758 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1759 	return 0;
1760 }
1761 
1762 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1763 {
1764 	struct lio *lio = GET_LIO(netdev);
1765 
1766 	if (lio->txq_status_wq.wq) {
1767 		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1768 		destroy_workqueue(lio->txq_status_wq.wq);
1769 	}
1770 }
1771 
1772 /**
1773  * liquidio_open - Net device open for LiquidIO
1774  * @netdev: network device
1775  */
1776 static int liquidio_open(struct net_device *netdev)
1777 {
1778 	struct lio *lio = GET_LIO(netdev);
1779 	struct octeon_device *oct = lio->oct_dev;
1780 	struct octeon_device_priv *oct_priv =
1781 		(struct octeon_device_priv *)oct->priv;
1782 	struct napi_struct *napi, *n;
1783 	int ret = 0;
1784 
1785 	if (oct->props[lio->ifidx].napi_enabled == 0) {
1786 		tasklet_disable(&oct_priv->droq_tasklet);
1787 
1788 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1789 			napi_enable(napi);
1790 
1791 		oct->props[lio->ifidx].napi_enabled = 1;
1792 
1793 		if (OCTEON_CN23XX_PF(oct))
1794 			oct->droq[0]->ops.poll_mode = 1;
1795 	}
1796 
1797 	if (oct->ptp_enable)
1798 		oct_ptp_open(netdev);
1799 
1800 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
1801 
1802 	if (OCTEON_CN23XX_PF(oct)) {
1803 		if (!oct->msix_on)
1804 			if (setup_tx_poll_fn(netdev))
1805 				return -1;
1806 	} else {
1807 		if (setup_tx_poll_fn(netdev))
1808 			return -1;
1809 	}
1810 
1811 	netif_tx_start_all_queues(netdev);
1812 
1813 	/* Ready for link status updates */
1814 	lio->intf_open = 1;
1815 
1816 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1817 
1818 	/* tell Octeon to start forwarding packets to host */
1819 	ret = send_rx_ctrl_cmd(lio, 1);
1820 	if (ret)
1821 		return ret;
1822 
1823 	/* start periodical statistics fetch */
1824 	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
1825 	lio->stats_wk.ctxptr = lio;
1826 	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
1827 					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1828 
1829 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1830 		 netdev->name);
1831 
1832 	return ret;
1833 }
1834 
1835 /**
1836  * liquidio_stop - Net device stop for LiquidIO
1837  * @netdev: network device
1838  */
1839 static int liquidio_stop(struct net_device *netdev)
1840 {
1841 	struct lio *lio = GET_LIO(netdev);
1842 	struct octeon_device *oct = lio->oct_dev;
1843 	struct octeon_device_priv *oct_priv =
1844 		(struct octeon_device_priv *)oct->priv;
1845 	struct napi_struct *napi, *n;
1846 	int ret = 0;
1847 
1848 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1849 
1850 	/* Stop any link updates */
1851 	lio->intf_open = 0;
1852 
1853 	stop_txqs(netdev);
1854 
1855 	/* Inform that netif carrier is down */
1856 	netif_carrier_off(netdev);
1857 	netif_tx_disable(netdev);
1858 
1859 	lio->linfo.link.s.link_up = 0;
1860 	lio->link_changes++;
1861 
1862 	/* Tell Octeon that nic interface is down. */
1863 	ret = send_rx_ctrl_cmd(lio, 0);
1864 	if (ret)
1865 		return ret;
1866 
1867 	if (OCTEON_CN23XX_PF(oct)) {
1868 		if (!oct->msix_on)
1869 			cleanup_tx_poll_fn(netdev);
1870 	} else {
1871 		cleanup_tx_poll_fn(netdev);
1872 	}
1873 
1874 	cancel_delayed_work_sync(&lio->stats_wk.work);
1875 
1876 	if (lio->ptp_clock) {
1877 		ptp_clock_unregister(lio->ptp_clock);
1878 		lio->ptp_clock = NULL;
1879 	}
1880 
1881 	/* Wait for any pending Rx descriptors */
1882 	if (lio_wait_for_clean_oq(oct))
1883 		netif_info(lio, rx_err, lio->netdev,
1884 			   "Proceeding with stop interface after partial RX desc processing\n");
1885 
1886 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1887 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1888 			napi_disable(napi);
1889 
1890 		oct->props[lio->ifidx].napi_enabled = 0;
1891 
1892 		if (OCTEON_CN23XX_PF(oct))
1893 			oct->droq[0]->ops.poll_mode = 0;
1894 
1895 		tasklet_enable(&oct_priv->droq_tasklet);
1896 	}
1897 
1898 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1899 
1900 	return ret;
1901 }
1902 
1903 /**
1904  * get_new_flags - Converts a mask based on net device flags
1905  * @netdev: network device
1906  *
1907  * This routine generates a octnet_ifflags mask from the net device flags
1908  * received from the OS.
1909  */
1910 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1911 {
1912 	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1913 
1914 	if (netdev->flags & IFF_PROMISC)
1915 		f |= OCTNET_IFFLAG_PROMISC;
1916 
1917 	if (netdev->flags & IFF_ALLMULTI)
1918 		f |= OCTNET_IFFLAG_ALLMULTI;
1919 
1920 	if (netdev->flags & IFF_MULTICAST) {
1921 		f |= OCTNET_IFFLAG_MULTICAST;
1922 
1923 		/* Accept all multicast addresses if there are more than we
1924 		 * can handle
1925 		 */
1926 		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1927 			f |= OCTNET_IFFLAG_ALLMULTI;
1928 	}
1929 
1930 	if (netdev->flags & IFF_BROADCAST)
1931 		f |= OCTNET_IFFLAG_BROADCAST;
1932 
1933 	return f;
1934 }
1935 
1936 /**
1937  * liquidio_set_mcast_list - Net device set_multicast_list
1938  * @netdev: network device
1939  */
1940 static void liquidio_set_mcast_list(struct net_device *netdev)
1941 {
1942 	struct lio *lio = GET_LIO(netdev);
1943 	struct octeon_device *oct = lio->oct_dev;
1944 	struct octnic_ctrl_pkt nctrl;
1945 	struct netdev_hw_addr *ha;
1946 	u64 *mc;
1947 	int ret;
1948 	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1949 
1950 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1951 
1952 	/* Create a ctrl pkt command to be sent to core app. */
1953 	nctrl.ncmd.u64 = 0;
1954 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1955 	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1956 	nctrl.ncmd.s.param2 = mc_count;
1957 	nctrl.ncmd.s.more = mc_count;
1958 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1959 	nctrl.netpndev = (u64)netdev;
1960 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1961 
1962 	/* copy all the addresses into the udd */
1963 	mc = &nctrl.udd[0];
1964 	netdev_for_each_mc_addr(ha, netdev) {
1965 		*mc = 0;
1966 		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1967 		/* no need to swap bytes */
1968 
1969 		if (++mc > &nctrl.udd[mc_count])
1970 			break;
1971 	}
1972 
1973 	/* Apparently, any activity in this call from the kernel has to
1974 	 * be atomic. So we won't wait for response.
1975 	 */
1976 
1977 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1978 	if (ret) {
1979 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1980 			ret);
1981 	}
1982 }
1983 
1984 /**
1985  * liquidio_set_mac - Net device set_mac_address
1986  * @netdev: network device
1987  * @p: pointer to sockaddr
1988  */
1989 static int liquidio_set_mac(struct net_device *netdev, void *p)
1990 {
1991 	int ret = 0;
1992 	struct lio *lio = GET_LIO(netdev);
1993 	struct octeon_device *oct = lio->oct_dev;
1994 	struct sockaddr *addr = (struct sockaddr *)p;
1995 	struct octnic_ctrl_pkt nctrl;
1996 
1997 	if (!is_valid_ether_addr(addr->sa_data))
1998 		return -EADDRNOTAVAIL;
1999 
2000 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2001 
2002 	nctrl.ncmd.u64 = 0;
2003 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2004 	nctrl.ncmd.s.param1 = 0;
2005 	nctrl.ncmd.s.more = 1;
2006 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2007 	nctrl.netpndev = (u64)netdev;
2008 
2009 	nctrl.udd[0] = 0;
2010 	/* The MAC Address is presented in network byte order. */
2011 	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2012 
2013 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2014 	if (ret < 0) {
2015 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2016 		return -ENOMEM;
2017 	}
2018 
2019 	if (nctrl.sc_status) {
2020 		dev_err(&oct->pci_dev->dev,
2021 			"%s: MAC Address change failed. sc return=%x\n",
2022 			 __func__, nctrl.sc_status);
2023 		return -EIO;
2024 	}
2025 
2026 	eth_hw_addr_set(netdev, addr->sa_data);
2027 	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2028 
2029 	return 0;
2030 }
2031 
2032 static void
2033 liquidio_get_stats64(struct net_device *netdev,
2034 		     struct rtnl_link_stats64 *lstats)
2035 {
2036 	struct lio *lio = GET_LIO(netdev);
2037 	struct octeon_device *oct;
2038 	u64 pkts = 0, drop = 0, bytes = 0;
2039 	struct oct_droq_stats *oq_stats;
2040 	struct oct_iq_stats *iq_stats;
2041 	int i, iq_no, oq_no;
2042 
2043 	oct = lio->oct_dev;
2044 
2045 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2046 		return;
2047 
2048 	for (i = 0; i < oct->num_iqs; i++) {
2049 		iq_no = lio->linfo.txpciq[i].s.q_no;
2050 		iq_stats = &oct->instr_queue[iq_no]->stats;
2051 		pkts += iq_stats->tx_done;
2052 		drop += iq_stats->tx_dropped;
2053 		bytes += iq_stats->tx_tot_bytes;
2054 	}
2055 
2056 	lstats->tx_packets = pkts;
2057 	lstats->tx_bytes = bytes;
2058 	lstats->tx_dropped = drop;
2059 
2060 	pkts = 0;
2061 	drop = 0;
2062 	bytes = 0;
2063 
2064 	for (i = 0; i < oct->num_oqs; i++) {
2065 		oq_no = lio->linfo.rxpciq[i].s.q_no;
2066 		oq_stats = &oct->droq[oq_no]->stats;
2067 		pkts += oq_stats->rx_pkts_received;
2068 		drop += (oq_stats->rx_dropped +
2069 			 oq_stats->dropped_nodispatch +
2070 			 oq_stats->dropped_toomany +
2071 			 oq_stats->dropped_nomem);
2072 		bytes += oq_stats->rx_bytes_received;
2073 	}
2074 
2075 	lstats->rx_bytes = bytes;
2076 	lstats->rx_packets = pkts;
2077 	lstats->rx_dropped = drop;
2078 
2079 	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2080 	lstats->collisions = oct->link_stats.fromhost.total_collisions;
2081 
2082 	/* detailed rx_errors: */
2083 	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2084 	/* recved pkt with crc error    */
2085 	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2086 	/* recv'd frame alignment error */
2087 	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2088 	/* recv'r fifo overrun */
2089 	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2090 
2091 	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2092 		lstats->rx_frame_errors + lstats->rx_fifo_errors;
2093 
2094 	/* detailed tx_errors */
2095 	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2096 	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2097 	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2098 
2099 	lstats->tx_errors = lstats->tx_aborted_errors +
2100 		lstats->tx_carrier_errors +
2101 		lstats->tx_fifo_errors;
2102 }
2103 
2104 /**
2105  * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
2106  * @netdev: network device
2107  * @ifr: interface request
2108  */
2109 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2110 {
2111 	struct hwtstamp_config conf;
2112 	struct lio *lio = GET_LIO(netdev);
2113 
2114 	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2115 		return -EFAULT;
2116 
2117 	if (conf.flags)
2118 		return -EINVAL;
2119 
2120 	switch (conf.tx_type) {
2121 	case HWTSTAMP_TX_ON:
2122 	case HWTSTAMP_TX_OFF:
2123 		break;
2124 	default:
2125 		return -ERANGE;
2126 	}
2127 
2128 	switch (conf.rx_filter) {
2129 	case HWTSTAMP_FILTER_NONE:
2130 		break;
2131 	case HWTSTAMP_FILTER_ALL:
2132 	case HWTSTAMP_FILTER_SOME:
2133 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2134 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2135 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2136 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2137 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2138 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2139 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2140 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2141 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2142 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2143 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2144 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2145 	case HWTSTAMP_FILTER_NTP_ALL:
2146 		conf.rx_filter = HWTSTAMP_FILTER_ALL;
2147 		break;
2148 	default:
2149 		return -ERANGE;
2150 	}
2151 
2152 	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2153 		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2154 
2155 	else
2156 		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2157 
2158 	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2159 }
2160 
2161 /**
2162  * liquidio_ioctl - ioctl handler
2163  * @netdev: network device
2164  * @ifr: interface request
2165  * @cmd: command
2166  */
2167 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2168 {
2169 	struct lio *lio = GET_LIO(netdev);
2170 
2171 	switch (cmd) {
2172 	case SIOCSHWTSTAMP:
2173 		if (lio->oct_dev->ptp_enable)
2174 			return hwtstamp_ioctl(netdev, ifr);
2175 		fallthrough;
2176 	default:
2177 		return -EOPNOTSUPP;
2178 	}
2179 }
2180 
2181 /**
2182  * handle_timestamp - handle a Tx timestamp response
2183  * @oct: octeon device
2184  * @status: response status
2185  * @buf: pointer to skb
2186  */
2187 static void handle_timestamp(struct octeon_device *oct,
2188 			     u32 status,
2189 			     void *buf)
2190 {
2191 	struct octnet_buf_free_info *finfo;
2192 	struct octeon_soft_command *sc;
2193 	struct oct_timestamp_resp *resp;
2194 	struct lio *lio;
2195 	struct sk_buff *skb = (struct sk_buff *)buf;
2196 
2197 	finfo = (struct octnet_buf_free_info *)skb->cb;
2198 	lio = finfo->lio;
2199 	sc = finfo->sc;
2200 	oct = lio->oct_dev;
2201 	resp = (struct oct_timestamp_resp *)sc->virtrptr;
2202 
2203 	if (status != OCTEON_REQUEST_DONE) {
2204 		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2205 			CVM_CAST64(status));
2206 		resp->timestamp = 0;
2207 	}
2208 
2209 	octeon_swap_8B_data(&resp->timestamp, 1);
2210 
2211 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2212 		struct skb_shared_hwtstamps ts;
2213 		u64 ns = resp->timestamp;
2214 
2215 		netif_info(lio, tx_done, lio->netdev,
2216 			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2217 			   skb, (unsigned long long)ns);
2218 		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2219 		skb_tstamp_tx(skb, &ts);
2220 	}
2221 
2222 	octeon_free_soft_command(oct, sc);
2223 	tx_buffer_free(skb);
2224 }
2225 
2226 /**
2227  * send_nic_timestamp_pkt - Send a data packet that will be timestamped
2228  * @oct: octeon device
2229  * @ndata: pointer to network data
2230  * @finfo: pointer to private network data
2231  * @xmit_more: more is coming
2232  */
2233 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2234 					 struct octnic_data_pkt *ndata,
2235 					 struct octnet_buf_free_info *finfo,
2236 					 int xmit_more)
2237 {
2238 	int retval;
2239 	struct octeon_soft_command *sc;
2240 	struct lio *lio;
2241 	int ring_doorbell;
2242 	u32 len;
2243 
2244 	lio = finfo->lio;
2245 
2246 	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2247 					    sizeof(struct oct_timestamp_resp));
2248 	finfo->sc = sc;
2249 
2250 	if (!sc) {
2251 		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2252 		return IQ_SEND_FAILED;
2253 	}
2254 
2255 	if (ndata->reqtype == REQTYPE_NORESP_NET)
2256 		ndata->reqtype = REQTYPE_RESP_NET;
2257 	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2258 		ndata->reqtype = REQTYPE_RESP_NET_SG;
2259 
2260 	sc->callback = handle_timestamp;
2261 	sc->callback_arg = finfo->skb;
2262 	sc->iq_no = ndata->q_no;
2263 
2264 	if (OCTEON_CN23XX_PF(oct))
2265 		len = (u32)((struct octeon_instr_ih3 *)
2266 			    (&sc->cmd.cmd3.ih3))->dlengsz;
2267 	else
2268 		len = (u32)((struct octeon_instr_ih2 *)
2269 			    (&sc->cmd.cmd2.ih2))->dlengsz;
2270 
2271 	ring_doorbell = !xmit_more;
2272 
2273 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2274 				     sc, len, ndata->reqtype);
2275 
2276 	if (retval == IQ_SEND_FAILED) {
2277 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2278 			retval);
2279 		octeon_free_soft_command(oct, sc);
2280 	} else {
2281 		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2282 	}
2283 
2284 	return retval;
2285 }
2286 
2287 /**
2288  * liquidio_xmit - Transmit networks packets to the Octeon interface
2289  * @skb: skbuff struct to be passed to network layer.
2290  * @netdev: pointer to network device
2291  *
2292  * Return: whether the packet was transmitted to the device okay or not
2293  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2294  */
2295 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2296 {
2297 	struct lio *lio;
2298 	struct octnet_buf_free_info *finfo;
2299 	union octnic_cmd_setup cmdsetup;
2300 	struct octnic_data_pkt ndata;
2301 	struct octeon_device *oct;
2302 	struct oct_iq_stats *stats;
2303 	struct octeon_instr_irh *irh;
2304 	union tx_info *tx_info;
2305 	int status = 0;
2306 	int q_idx = 0, iq_no = 0;
2307 	int j, xmit_more = 0;
2308 	u64 dptr = 0;
2309 	u32 tag = 0;
2310 
2311 	lio = GET_LIO(netdev);
2312 	oct = lio->oct_dev;
2313 
2314 	q_idx = skb_iq(oct, skb);
2315 	tag = q_idx;
2316 	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2317 
2318 	stats = &oct->instr_queue[iq_no]->stats;
2319 
2320 	/* Check for all conditions in which the current packet cannot be
2321 	 * transmitted.
2322 	 */
2323 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2324 	    (!lio->linfo.link.s.link_up) ||
2325 	    (skb->len <= 0)) {
2326 		netif_info(lio, tx_err, lio->netdev,
2327 			   "Transmit failed link_status : %d\n",
2328 			   lio->linfo.link.s.link_up);
2329 		goto lio_xmit_failed;
2330 	}
2331 
2332 	/* Use space in skb->cb to store info used to unmap and
2333 	 * free the buffers.
2334 	 */
2335 	finfo = (struct octnet_buf_free_info *)skb->cb;
2336 	finfo->lio = lio;
2337 	finfo->skb = skb;
2338 	finfo->sc = NULL;
2339 
2340 	/* Prepare the attributes for the data to be passed to OSI. */
2341 	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2342 
2343 	ndata.buf = (void *)finfo;
2344 
2345 	ndata.q_no = iq_no;
2346 
2347 	if (octnet_iq_is_full(oct, ndata.q_no)) {
2348 		/* defer sending if queue is full */
2349 		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2350 			   ndata.q_no);
2351 		stats->tx_iq_busy++;
2352 		return NETDEV_TX_BUSY;
2353 	}
2354 
2355 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2356 	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2357 	 */
2358 
2359 	ndata.datasize = skb->len;
2360 
2361 	cmdsetup.u64 = 0;
2362 	cmdsetup.s.iq_no = iq_no;
2363 
2364 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2365 		if (skb->encapsulation) {
2366 			cmdsetup.s.tnl_csum = 1;
2367 			stats->tx_vxlan++;
2368 		} else {
2369 			cmdsetup.s.transport_csum = 1;
2370 		}
2371 	}
2372 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2373 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2374 		cmdsetup.s.timestamp = 1;
2375 	}
2376 
2377 	if (skb_shinfo(skb)->nr_frags == 0) {
2378 		cmdsetup.s.u.datasize = skb->len;
2379 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2380 
2381 		/* Offload checksum calculation for TCP/UDP packets */
2382 		dptr = dma_map_single(&oct->pci_dev->dev,
2383 				      skb->data,
2384 				      skb->len,
2385 				      DMA_TO_DEVICE);
2386 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2387 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2388 				__func__);
2389 			stats->tx_dmamap_fail++;
2390 			return NETDEV_TX_BUSY;
2391 		}
2392 
2393 		if (OCTEON_CN23XX_PF(oct))
2394 			ndata.cmd.cmd3.dptr = dptr;
2395 		else
2396 			ndata.cmd.cmd2.dptr = dptr;
2397 		finfo->dptr = dptr;
2398 		ndata.reqtype = REQTYPE_NORESP_NET;
2399 
2400 	} else {
2401 		int i, frags;
2402 		skb_frag_t *frag;
2403 		struct octnic_gather *g;
2404 
2405 		spin_lock(&lio->glist_lock[q_idx]);
2406 		g = (struct octnic_gather *)
2407 			lio_list_delete_head(&lio->glist[q_idx]);
2408 		spin_unlock(&lio->glist_lock[q_idx]);
2409 
2410 		if (!g) {
2411 			netif_info(lio, tx_err, lio->netdev,
2412 				   "Transmit scatter gather: glist null!\n");
2413 			goto lio_xmit_failed;
2414 		}
2415 
2416 		cmdsetup.s.gather = 1;
2417 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2418 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2419 
2420 		memset(g->sg, 0, g->sg_size);
2421 
2422 		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2423 						 skb->data,
2424 						 (skb->len - skb->data_len),
2425 						 DMA_TO_DEVICE);
2426 		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2427 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2428 				__func__);
2429 			stats->tx_dmamap_fail++;
2430 			return NETDEV_TX_BUSY;
2431 		}
2432 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2433 
2434 		frags = skb_shinfo(skb)->nr_frags;
2435 		i = 1;
2436 		while (frags--) {
2437 			frag = &skb_shinfo(skb)->frags[i - 1];
2438 
2439 			g->sg[(i >> 2)].ptr[(i & 3)] =
2440 				skb_frag_dma_map(&oct->pci_dev->dev,
2441 					         frag, 0, skb_frag_size(frag),
2442 						 DMA_TO_DEVICE);
2443 
2444 			if (dma_mapping_error(&oct->pci_dev->dev,
2445 					      g->sg[i >> 2].ptr[i & 3])) {
2446 				dma_unmap_single(&oct->pci_dev->dev,
2447 						 g->sg[0].ptr[0],
2448 						 skb->len - skb->data_len,
2449 						 DMA_TO_DEVICE);
2450 				for (j = 1; j < i; j++) {
2451 					frag = &skb_shinfo(skb)->frags[j - 1];
2452 					dma_unmap_page(&oct->pci_dev->dev,
2453 						       g->sg[j >> 2].ptr[j & 3],
2454 						       skb_frag_size(frag),
2455 						       DMA_TO_DEVICE);
2456 				}
2457 				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2458 					__func__);
2459 				return NETDEV_TX_BUSY;
2460 			}
2461 
2462 			add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
2463 				    (i & 3));
2464 			i++;
2465 		}
2466 
2467 		dptr = g->sg_dma_ptr;
2468 
2469 		if (OCTEON_CN23XX_PF(oct))
2470 			ndata.cmd.cmd3.dptr = dptr;
2471 		else
2472 			ndata.cmd.cmd2.dptr = dptr;
2473 		finfo->dptr = dptr;
2474 		finfo->g = g;
2475 
2476 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
2477 	}
2478 
2479 	if (OCTEON_CN23XX_PF(oct)) {
2480 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2481 		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2482 	} else {
2483 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2484 		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2485 	}
2486 
2487 	if (skb_shinfo(skb)->gso_size) {
2488 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2489 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2490 		stats->tx_gso++;
2491 	}
2492 
2493 	/* HW insert VLAN tag */
2494 	if (skb_vlan_tag_present(skb)) {
2495 		irh->priority = skb_vlan_tag_get(skb) >> 13;
2496 		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2497 	}
2498 
2499 	xmit_more = netdev_xmit_more();
2500 
2501 	if (unlikely(cmdsetup.s.timestamp))
2502 		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2503 	else
2504 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2505 	if (status == IQ_SEND_FAILED)
2506 		goto lio_xmit_failed;
2507 
2508 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2509 
2510 	if (status == IQ_SEND_STOP)
2511 		netif_stop_subqueue(netdev, q_idx);
2512 
2513 	netif_trans_update(netdev);
2514 
2515 	if (tx_info->s.gso_segs)
2516 		stats->tx_done += tx_info->s.gso_segs;
2517 	else
2518 		stats->tx_done++;
2519 	stats->tx_tot_bytes += ndata.datasize;
2520 
2521 	return NETDEV_TX_OK;
2522 
2523 lio_xmit_failed:
2524 	stats->tx_dropped++;
2525 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2526 		   iq_no, stats->tx_dropped);
2527 	if (dptr)
2528 		dma_unmap_single(&oct->pci_dev->dev, dptr,
2529 				 ndata.datasize, DMA_TO_DEVICE);
2530 
2531 	octeon_ring_doorbell_locked(oct, iq_no);
2532 
2533 	tx_buffer_free(skb);
2534 	return NETDEV_TX_OK;
2535 }
2536 
2537 /**
2538  * liquidio_tx_timeout - Network device Tx timeout
2539  * @netdev:    pointer to network device
2540  * @txqueue: index of the hung transmit queue
2541  */
2542 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2543 {
2544 	struct lio *lio;
2545 
2546 	lio = GET_LIO(netdev);
2547 
2548 	netif_info(lio, tx_err, lio->netdev,
2549 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2550 		   netdev->stats.tx_dropped);
2551 	netif_trans_update(netdev);
2552 	wake_txqs(netdev);
2553 }
2554 
2555 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2556 				    __be16 proto __attribute__((unused)),
2557 				    u16 vid)
2558 {
2559 	struct lio *lio = GET_LIO(netdev);
2560 	struct octeon_device *oct = lio->oct_dev;
2561 	struct octnic_ctrl_pkt nctrl;
2562 	int ret = 0;
2563 
2564 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2565 
2566 	nctrl.ncmd.u64 = 0;
2567 	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2568 	nctrl.ncmd.s.param1 = vid;
2569 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2570 	nctrl.netpndev = (u64)netdev;
2571 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2572 
2573 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2574 	if (ret) {
2575 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2576 			ret);
2577 		if (ret > 0)
2578 			ret = -EIO;
2579 	}
2580 
2581 	return ret;
2582 }
2583 
2584 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2585 				     __be16 proto __attribute__((unused)),
2586 				     u16 vid)
2587 {
2588 	struct lio *lio = GET_LIO(netdev);
2589 	struct octeon_device *oct = lio->oct_dev;
2590 	struct octnic_ctrl_pkt nctrl;
2591 	int ret = 0;
2592 
2593 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2594 
2595 	nctrl.ncmd.u64 = 0;
2596 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2597 	nctrl.ncmd.s.param1 = vid;
2598 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2599 	nctrl.netpndev = (u64)netdev;
2600 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2601 
2602 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2603 	if (ret) {
2604 		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2605 			ret);
2606 		if (ret > 0)
2607 			ret = -EIO;
2608 	}
2609 	return ret;
2610 }
2611 
2612 /**
2613  * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload
2614  * @netdev:                pointer to network device
2615  * @command:               OCTNET_CMD_TNL_RX_CSUM_CTL
2616  * @rx_cmd:                OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
2617  * Returns:                SUCCESS or FAILURE
2618  */
2619 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2620 				       u8 rx_cmd)
2621 {
2622 	struct lio *lio = GET_LIO(netdev);
2623 	struct octeon_device *oct = lio->oct_dev;
2624 	struct octnic_ctrl_pkt nctrl;
2625 	int ret = 0;
2626 
2627 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2628 
2629 	nctrl.ncmd.u64 = 0;
2630 	nctrl.ncmd.s.cmd = command;
2631 	nctrl.ncmd.s.param1 = rx_cmd;
2632 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2633 	nctrl.netpndev = (u64)netdev;
2634 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2635 
2636 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2637 	if (ret) {
2638 		dev_err(&oct->pci_dev->dev,
2639 			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2640 			ret);
2641 		if (ret > 0)
2642 			ret = -EIO;
2643 	}
2644 	return ret;
2645 }
2646 
2647 /**
2648  * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware
2649  * @netdev:                pointer to network device
2650  * @command:               OCTNET_CMD_VXLAN_PORT_CONFIG
2651  * @vxlan_port:            VxLAN port to be added or deleted
2652  * @vxlan_cmd_bit:         OCTNET_CMD_VXLAN_PORT_ADD,
2653  *                              OCTNET_CMD_VXLAN_PORT_DEL
2654  * Return:                     SUCCESS or FAILURE
2655  */
2656 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2657 				       u16 vxlan_port, u8 vxlan_cmd_bit)
2658 {
2659 	struct lio *lio = GET_LIO(netdev);
2660 	struct octeon_device *oct = lio->oct_dev;
2661 	struct octnic_ctrl_pkt nctrl;
2662 	int ret = 0;
2663 
2664 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2665 
2666 	nctrl.ncmd.u64 = 0;
2667 	nctrl.ncmd.s.cmd = command;
2668 	nctrl.ncmd.s.more = vxlan_cmd_bit;
2669 	nctrl.ncmd.s.param1 = vxlan_port;
2670 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2671 	nctrl.netpndev = (u64)netdev;
2672 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2673 
2674 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2675 	if (ret) {
2676 		dev_err(&oct->pci_dev->dev,
2677 			"VxLAN port add/delete failed in core (ret:0x%x)\n",
2678 			ret);
2679 		if (ret > 0)
2680 			ret = -EIO;
2681 	}
2682 	return ret;
2683 }
2684 
2685 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
2686 					unsigned int table, unsigned int entry,
2687 					struct udp_tunnel_info *ti)
2688 {
2689 	return liquidio_vxlan_port_command(netdev,
2690 					   OCTNET_CMD_VXLAN_PORT_CONFIG,
2691 					   htons(ti->port),
2692 					   OCTNET_CMD_VXLAN_PORT_ADD);
2693 }
2694 
2695 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
2696 					  unsigned int table,
2697 					  unsigned int entry,
2698 					  struct udp_tunnel_info *ti)
2699 {
2700 	return liquidio_vxlan_port_command(netdev,
2701 					   OCTNET_CMD_VXLAN_PORT_CONFIG,
2702 					   htons(ti->port),
2703 					   OCTNET_CMD_VXLAN_PORT_DEL);
2704 }
2705 
2706 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
2707 	.set_port	= liquidio_udp_tunnel_set_port,
2708 	.unset_port	= liquidio_udp_tunnel_unset_port,
2709 	.tables		= {
2710 		{ .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
2711 	},
2712 };
2713 
2714 /**
2715  * liquidio_fix_features - Net device fix features
2716  * @netdev:  pointer to network device
2717  * @request: features requested
2718  * Return: updated features list
2719  */
2720 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2721 					       netdev_features_t request)
2722 {
2723 	struct lio *lio = netdev_priv(netdev);
2724 
2725 	if ((request & NETIF_F_RXCSUM) &&
2726 	    !(lio->dev_capability & NETIF_F_RXCSUM))
2727 		request &= ~NETIF_F_RXCSUM;
2728 
2729 	if ((request & NETIF_F_HW_CSUM) &&
2730 	    !(lio->dev_capability & NETIF_F_HW_CSUM))
2731 		request &= ~NETIF_F_HW_CSUM;
2732 
2733 	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2734 		request &= ~NETIF_F_TSO;
2735 
2736 	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2737 		request &= ~NETIF_F_TSO6;
2738 
2739 	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2740 		request &= ~NETIF_F_LRO;
2741 
2742 	/*Disable LRO if RXCSUM is off */
2743 	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2744 	    (lio->dev_capability & NETIF_F_LRO))
2745 		request &= ~NETIF_F_LRO;
2746 
2747 	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2748 	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2749 		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2750 
2751 	return request;
2752 }
2753 
2754 /**
2755  * liquidio_set_features - Net device set features
2756  * @netdev:  pointer to network device
2757  * @features: features to enable/disable
2758  */
2759 static int liquidio_set_features(struct net_device *netdev,
2760 				 netdev_features_t features)
2761 {
2762 	struct lio *lio = netdev_priv(netdev);
2763 
2764 	if ((features & NETIF_F_LRO) &&
2765 	    (lio->dev_capability & NETIF_F_LRO) &&
2766 	    !(netdev->features & NETIF_F_LRO))
2767 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2768 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2769 	else if (!(features & NETIF_F_LRO) &&
2770 		 (lio->dev_capability & NETIF_F_LRO) &&
2771 		 (netdev->features & NETIF_F_LRO))
2772 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2773 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2774 
2775 	/* Sending command to firmware to enable/disable RX checksum
2776 	 * offload settings using ethtool
2777 	 */
2778 	if (!(netdev->features & NETIF_F_RXCSUM) &&
2779 	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2780 	    (features & NETIF_F_RXCSUM))
2781 		liquidio_set_rxcsum_command(netdev,
2782 					    OCTNET_CMD_TNL_RX_CSUM_CTL,
2783 					    OCTNET_CMD_RXCSUM_ENABLE);
2784 	else if ((netdev->features & NETIF_F_RXCSUM) &&
2785 		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2786 		 !(features & NETIF_F_RXCSUM))
2787 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2788 					    OCTNET_CMD_RXCSUM_DISABLE);
2789 
2790 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2791 	    (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2792 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2793 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2794 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
2795 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2796 		 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2797 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2798 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2799 				     OCTNET_CMD_VLAN_FILTER_DISABLE);
2800 
2801 	return 0;
2802 }
2803 
2804 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2805 				 u8 *mac, bool is_admin_assigned)
2806 {
2807 	struct lio *lio = GET_LIO(netdev);
2808 	struct octeon_device *oct = lio->oct_dev;
2809 	struct octnic_ctrl_pkt nctrl;
2810 	int ret = 0;
2811 
2812 	if (!is_valid_ether_addr(mac))
2813 		return -EINVAL;
2814 
2815 	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2816 		return -EINVAL;
2817 
2818 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2819 
2820 	nctrl.ncmd.u64 = 0;
2821 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2822 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
2823 	nctrl.ncmd.s.param1 = vfidx + 1;
2824 	nctrl.ncmd.s.more = 1;
2825 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2826 	nctrl.netpndev = (u64)netdev;
2827 	if (is_admin_assigned) {
2828 		nctrl.ncmd.s.param2 = true;
2829 		nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2830 	}
2831 
2832 	nctrl.udd[0] = 0;
2833 	/* The MAC Address is presented in network byte order. */
2834 	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2835 
2836 	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2837 
2838 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2839 	if (ret > 0)
2840 		ret = -EIO;
2841 
2842 	return ret;
2843 }
2844 
2845 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2846 {
2847 	struct lio *lio = GET_LIO(netdev);
2848 	struct octeon_device *oct = lio->oct_dev;
2849 	int retval;
2850 
2851 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2852 		return -EINVAL;
2853 
2854 	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2855 	if (!retval)
2856 		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2857 
2858 	return retval;
2859 }
2860 
2861 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2862 				    bool enable)
2863 {
2864 	struct lio *lio = GET_LIO(netdev);
2865 	struct octeon_device *oct = lio->oct_dev;
2866 	struct octnic_ctrl_pkt nctrl;
2867 	int retval;
2868 
2869 	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2870 		netif_info(lio, drv, lio->netdev,
2871 			   "firmware does not support spoofchk\n");
2872 		return -EOPNOTSUPP;
2873 	}
2874 
2875 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2876 		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2877 		return -EINVAL;
2878 	}
2879 
2880 	if (enable) {
2881 		if (oct->sriov_info.vf_spoofchk[vfidx])
2882 			return 0;
2883 	} else {
2884 		/* Clear */
2885 		if (!oct->sriov_info.vf_spoofchk[vfidx])
2886 			return 0;
2887 	}
2888 
2889 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2890 	nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2891 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2892 	nctrl.ncmd.s.param1 =
2893 		vfidx + 1; /* vfidx is 0 based,
2894 			    * but vf_num (param1) is 1 based
2895 			    */
2896 	nctrl.ncmd.s.param2 = enable;
2897 	nctrl.ncmd.s.more = 0;
2898 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2899 	nctrl.cb_fn = NULL;
2900 
2901 	retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2902 
2903 	if (retval) {
2904 		netif_info(lio, drv, lio->netdev,
2905 			   "Failed to set VF %d spoofchk %s\n", vfidx,
2906 			enable ? "on" : "off");
2907 		return -1;
2908 	}
2909 
2910 	oct->sriov_info.vf_spoofchk[vfidx] = enable;
2911 	netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2912 		   enable ? "on" : "off");
2913 
2914 	return 0;
2915 }
2916 
2917 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2918 				u16 vlan, u8 qos, __be16 vlan_proto)
2919 {
2920 	struct lio *lio = GET_LIO(netdev);
2921 	struct octeon_device *oct = lio->oct_dev;
2922 	struct octnic_ctrl_pkt nctrl;
2923 	u16 vlantci;
2924 	int ret = 0;
2925 
2926 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2927 		return -EINVAL;
2928 
2929 	if (vlan_proto != htons(ETH_P_8021Q))
2930 		return -EPROTONOSUPPORT;
2931 
2932 	if (vlan >= VLAN_N_VID || qos > 7)
2933 		return -EINVAL;
2934 
2935 	if (vlan)
2936 		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2937 	else
2938 		vlantci = 0;
2939 
2940 	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2941 		return 0;
2942 
2943 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2944 
2945 	if (vlan)
2946 		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2947 	else
2948 		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2949 
2950 	nctrl.ncmd.s.param1 = vlantci;
2951 	nctrl.ncmd.s.param2 =
2952 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2953 	nctrl.ncmd.s.more = 0;
2954 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2955 	nctrl.cb_fn = NULL;
2956 
2957 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2958 	if (ret) {
2959 		if (ret > 0)
2960 			ret = -EIO;
2961 		return ret;
2962 	}
2963 
2964 	oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2965 
2966 	return ret;
2967 }
2968 
2969 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2970 				  struct ifla_vf_info *ivi)
2971 {
2972 	struct lio *lio = GET_LIO(netdev);
2973 	struct octeon_device *oct = lio->oct_dev;
2974 	u8 *macaddr;
2975 
2976 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2977 		return -EINVAL;
2978 
2979 	memset(ivi, 0, sizeof(struct ifla_vf_info));
2980 
2981 	ivi->vf = vfidx;
2982 	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2983 	ether_addr_copy(&ivi->mac[0], macaddr);
2984 	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2985 	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2986 	if (oct->sriov_info.trusted_vf.active &&
2987 	    oct->sriov_info.trusted_vf.id == vfidx)
2988 		ivi->trusted = true;
2989 	else
2990 		ivi->trusted = false;
2991 	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2992 	ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
2993 	ivi->max_tx_rate = lio->linfo.link.s.speed;
2994 	ivi->min_tx_rate = 0;
2995 
2996 	return 0;
2997 }
2998 
2999 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3000 {
3001 	struct octeon_device *oct = lio->oct_dev;
3002 	struct octeon_soft_command *sc;
3003 	int retval;
3004 
3005 	sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3006 	if (!sc)
3007 		return -ENOMEM;
3008 
3009 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3010 
3011 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
3012 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3013 				    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3014 				    trusted);
3015 
3016 	init_completion(&sc->complete);
3017 	sc->sc_status = OCTEON_REQUEST_PENDING;
3018 
3019 	retval = octeon_send_soft_command(oct, sc);
3020 	if (retval == IQ_SEND_FAILED) {
3021 		octeon_free_soft_command(oct, sc);
3022 		retval = -1;
3023 	} else {
3024 		/* Wait for response or timeout */
3025 		retval = wait_for_sc_completion_timeout(oct, sc, 0);
3026 		if (retval)
3027 			return (retval);
3028 
3029 		WRITE_ONCE(sc->caller_is_done, true);
3030 	}
3031 
3032 	return retval;
3033 }
3034 
3035 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3036 				 bool setting)
3037 {
3038 	struct lio *lio = GET_LIO(netdev);
3039 	struct octeon_device *oct = lio->oct_dev;
3040 
3041 	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3042 		/* trusted vf is not supported by firmware older than 1.7.1 */
3043 		return -EOPNOTSUPP;
3044 	}
3045 
3046 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3047 		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3048 		return -EINVAL;
3049 	}
3050 
3051 	if (setting) {
3052 		/* Set */
3053 
3054 		if (oct->sriov_info.trusted_vf.active &&
3055 		    oct->sriov_info.trusted_vf.id == vfidx)
3056 			return 0;
3057 
3058 		if (oct->sriov_info.trusted_vf.active) {
3059 			netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3060 			return -EPERM;
3061 		}
3062 	} else {
3063 		/* Clear */
3064 
3065 		if (!oct->sriov_info.trusted_vf.active)
3066 			return 0;
3067 	}
3068 
3069 	if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3070 		if (setting) {
3071 			oct->sriov_info.trusted_vf.id = vfidx;
3072 			oct->sriov_info.trusted_vf.active = true;
3073 		} else {
3074 			oct->sriov_info.trusted_vf.active = false;
3075 		}
3076 
3077 		netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3078 			   setting ? "" : "not ");
3079 	} else {
3080 		netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3081 		return -1;
3082 	}
3083 
3084 	return 0;
3085 }
3086 
3087 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3088 				      int linkstate)
3089 {
3090 	struct lio *lio = GET_LIO(netdev);
3091 	struct octeon_device *oct = lio->oct_dev;
3092 	struct octnic_ctrl_pkt nctrl;
3093 	int ret = 0;
3094 
3095 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3096 		return -EINVAL;
3097 
3098 	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3099 		return 0;
3100 
3101 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3102 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3103 	nctrl.ncmd.s.param1 =
3104 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3105 	nctrl.ncmd.s.param2 = linkstate;
3106 	nctrl.ncmd.s.more = 0;
3107 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3108 	nctrl.cb_fn = NULL;
3109 
3110 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3111 
3112 	if (!ret)
3113 		oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3114 	else if (ret > 0)
3115 		ret = -EIO;
3116 
3117 	return ret;
3118 }
3119 
3120 static int
3121 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3122 {
3123 	struct lio_devlink_priv *priv;
3124 	struct octeon_device *oct;
3125 
3126 	priv = devlink_priv(devlink);
3127 	oct = priv->oct;
3128 
3129 	*mode = oct->eswitch_mode;
3130 
3131 	return 0;
3132 }
3133 
3134 static int
3135 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3136 			  struct netlink_ext_ack *extack)
3137 {
3138 	struct lio_devlink_priv *priv;
3139 	struct octeon_device *oct;
3140 	int ret = 0;
3141 
3142 	priv = devlink_priv(devlink);
3143 	oct = priv->oct;
3144 
3145 	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3146 		return -EINVAL;
3147 
3148 	if (oct->eswitch_mode == mode)
3149 		return 0;
3150 
3151 	switch (mode) {
3152 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3153 		oct->eswitch_mode = mode;
3154 		ret = lio_vf_rep_create(oct);
3155 		break;
3156 
3157 	case DEVLINK_ESWITCH_MODE_LEGACY:
3158 		lio_vf_rep_destroy(oct);
3159 		oct->eswitch_mode = mode;
3160 		break;
3161 
3162 	default:
3163 		ret = -EINVAL;
3164 	}
3165 
3166 	return ret;
3167 }
3168 
3169 static const struct devlink_ops liquidio_devlink_ops = {
3170 	.eswitch_mode_get = liquidio_eswitch_mode_get,
3171 	.eswitch_mode_set = liquidio_eswitch_mode_set,
3172 };
3173 
3174 static int
3175 liquidio_get_port_parent_id(struct net_device *dev,
3176 			    struct netdev_phys_item_id *ppid)
3177 {
3178 	struct lio *lio = GET_LIO(dev);
3179 	struct octeon_device *oct = lio->oct_dev;
3180 
3181 	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3182 		return -EOPNOTSUPP;
3183 
3184 	ppid->id_len = ETH_ALEN;
3185 	ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3186 
3187 	return 0;
3188 }
3189 
3190 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3191 				 struct ifla_vf_stats *vf_stats)
3192 {
3193 	struct lio *lio = GET_LIO(netdev);
3194 	struct octeon_device *oct = lio->oct_dev;
3195 	struct oct_vf_stats stats;
3196 	int ret;
3197 
3198 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3199 		return -EINVAL;
3200 
3201 	memset(&stats, 0, sizeof(struct oct_vf_stats));
3202 	ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3203 	if (!ret) {
3204 		vf_stats->rx_packets = stats.rx_packets;
3205 		vf_stats->tx_packets = stats.tx_packets;
3206 		vf_stats->rx_bytes = stats.rx_bytes;
3207 		vf_stats->tx_bytes = stats.tx_bytes;
3208 		vf_stats->broadcast = stats.broadcast;
3209 		vf_stats->multicast = stats.multicast;
3210 	}
3211 
3212 	return ret;
3213 }
3214 
3215 static const struct net_device_ops lionetdevops = {
3216 	.ndo_open		= liquidio_open,
3217 	.ndo_stop		= liquidio_stop,
3218 	.ndo_start_xmit		= liquidio_xmit,
3219 	.ndo_get_stats64	= liquidio_get_stats64,
3220 	.ndo_set_mac_address	= liquidio_set_mac,
3221 	.ndo_set_rx_mode	= liquidio_set_mcast_list,
3222 	.ndo_tx_timeout		= liquidio_tx_timeout,
3223 
3224 	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3225 	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3226 	.ndo_change_mtu		= liquidio_change_mtu,
3227 	.ndo_eth_ioctl		= liquidio_ioctl,
3228 	.ndo_fix_features	= liquidio_fix_features,
3229 	.ndo_set_features	= liquidio_set_features,
3230 	.ndo_set_vf_mac		= liquidio_set_vf_mac,
3231 	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
3232 	.ndo_get_vf_config	= liquidio_get_vf_config,
3233 	.ndo_set_vf_spoofchk	= liquidio_set_vf_spoofchk,
3234 	.ndo_set_vf_trust	= liquidio_set_vf_trust,
3235 	.ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3236 	.ndo_get_vf_stats	= liquidio_get_vf_stats,
3237 	.ndo_get_port_parent_id	= liquidio_get_port_parent_id,
3238 };
3239 
3240 /**
3241  * liquidio_init - Entry point for the liquidio module
3242  */
3243 static int __init liquidio_init(void)
3244 {
3245 	int i;
3246 	struct handshake *hs;
3247 
3248 	init_completion(&first_stage);
3249 
3250 	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3251 
3252 	if (liquidio_init_pci())
3253 		return -EINVAL;
3254 
3255 	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3256 
3257 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3258 		hs = &handshake[i];
3259 		if (hs->pci_dev) {
3260 			wait_for_completion(&hs->init);
3261 			if (!hs->init_ok) {
3262 				/* init handshake failed */
3263 				dev_err(&hs->pci_dev->dev,
3264 					"Failed to init device\n");
3265 				liquidio_deinit_pci();
3266 				return -EIO;
3267 			}
3268 		}
3269 	}
3270 
3271 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3272 		hs = &handshake[i];
3273 		if (hs->pci_dev) {
3274 			wait_for_completion_timeout(&hs->started,
3275 						    msecs_to_jiffies(30000));
3276 			if (!hs->started_ok) {
3277 				/* starter handshake failed */
3278 				dev_err(&hs->pci_dev->dev,
3279 					"Firmware failed to start\n");
3280 				liquidio_deinit_pci();
3281 				return -EIO;
3282 			}
3283 		}
3284 	}
3285 
3286 	return 0;
3287 }
3288 
3289 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3290 {
3291 	struct octeon_device *oct = (struct octeon_device *)buf;
3292 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3293 	int gmxport = 0;
3294 	union oct_link_status *ls;
3295 	int i;
3296 
3297 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3298 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3299 			recv_pkt->buffer_size[0],
3300 			recv_pkt->rh.r_nic_info.gmxport);
3301 		goto nic_info_err;
3302 	}
3303 
3304 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
3305 	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3306 		OCT_DROQ_INFO_SIZE);
3307 
3308 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3309 	for (i = 0; i < oct->ifcount; i++) {
3310 		if (oct->props[i].gmxport == gmxport) {
3311 			update_link_status(oct->props[i].netdev, ls);
3312 			break;
3313 		}
3314 	}
3315 
3316 nic_info_err:
3317 	for (i = 0; i < recv_pkt->buffer_count; i++)
3318 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3319 	octeon_free_recv_info(recv_info);
3320 	return 0;
3321 }
3322 
3323 /**
3324  * setup_nic_devices - Setup network interfaces
3325  * @octeon_dev:  octeon device
3326  *
3327  * Called during init time for each device. It assumes the NIC
3328  * is already up and running.  The link information for each
3329  * interface is passed in link_info.
3330  */
3331 static int setup_nic_devices(struct octeon_device *octeon_dev)
3332 {
3333 	struct lio *lio = NULL;
3334 	struct net_device *netdev;
3335 	u8 mac[6], i, j, *fw_ver, *micro_ver;
3336 	unsigned long micro;
3337 	u32 cur_ver;
3338 	struct octeon_soft_command *sc;
3339 	struct liquidio_if_cfg_resp *resp;
3340 	struct octdev_props *props;
3341 	int retval, num_iqueues, num_oqueues;
3342 	int max_num_queues = 0;
3343 	union oct_nic_if_cfg if_cfg;
3344 	unsigned int base_queue;
3345 	unsigned int gmx_port_id;
3346 	u32 resp_size, data_size;
3347 	u32 ifidx_or_pfnum;
3348 	struct lio_version *vdata;
3349 	struct devlink *devlink;
3350 	struct lio_devlink_priv *lio_devlink;
3351 
3352 	/* This is to handle link status changes */
3353 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3354 				    OPCODE_NIC_INFO,
3355 				    lio_nic_info, octeon_dev);
3356 
3357 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3358 	 * They are handled directly.
3359 	 */
3360 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3361 					free_netbuf);
3362 
3363 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3364 					free_netsgbuf);
3365 
3366 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3367 					free_netsgbuf_with_resp);
3368 
3369 	for (i = 0; i < octeon_dev->ifcount; i++) {
3370 		resp_size = sizeof(struct liquidio_if_cfg_resp);
3371 		data_size = sizeof(struct lio_version);
3372 		sc = (struct octeon_soft_command *)
3373 			octeon_alloc_soft_command(octeon_dev, data_size,
3374 						  resp_size, 0);
3375 		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3376 		vdata = (struct lio_version *)sc->virtdptr;
3377 
3378 		*((u64 *)vdata) = 0;
3379 		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3380 		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3381 		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3382 
3383 		if (OCTEON_CN23XX_PF(octeon_dev)) {
3384 			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3385 			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3386 			base_queue = octeon_dev->sriov_info.pf_srn;
3387 
3388 			gmx_port_id = octeon_dev->pf_num;
3389 			ifidx_or_pfnum = octeon_dev->pf_num;
3390 		} else {
3391 			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3392 						octeon_get_conf(octeon_dev), i);
3393 			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3394 						octeon_get_conf(octeon_dev), i);
3395 			base_queue = CFG_GET_BASE_QUE_NIC_IF(
3396 						octeon_get_conf(octeon_dev), i);
3397 			gmx_port_id = CFG_GET_GMXID_NIC_IF(
3398 						octeon_get_conf(octeon_dev), i);
3399 			ifidx_or_pfnum = i;
3400 		}
3401 
3402 		dev_dbg(&octeon_dev->pci_dev->dev,
3403 			"requesting config for interface %d, iqs %d, oqs %d\n",
3404 			ifidx_or_pfnum, num_iqueues, num_oqueues);
3405 
3406 		if_cfg.u64 = 0;
3407 		if_cfg.s.num_iqueues = num_iqueues;
3408 		if_cfg.s.num_oqueues = num_oqueues;
3409 		if_cfg.s.base_queue = base_queue;
3410 		if_cfg.s.gmx_port_id = gmx_port_id;
3411 
3412 		sc->iq_no = 0;
3413 
3414 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3415 					    OPCODE_NIC_IF_CFG, 0,
3416 					    if_cfg.u64, 0);
3417 
3418 		init_completion(&sc->complete);
3419 		sc->sc_status = OCTEON_REQUEST_PENDING;
3420 
3421 		retval = octeon_send_soft_command(octeon_dev, sc);
3422 		if (retval == IQ_SEND_FAILED) {
3423 			dev_err(&octeon_dev->pci_dev->dev,
3424 				"iq/oq config failed status: %x\n",
3425 				retval);
3426 			/* Soft instr is freed by driver in case of failure. */
3427 			octeon_free_soft_command(octeon_dev, sc);
3428 			return(-EIO);
3429 		}
3430 
3431 		/* Sleep on a wait queue till the cond flag indicates that the
3432 		 * response arrived or timed-out.
3433 		 */
3434 		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3435 		if (retval)
3436 			return retval;
3437 
3438 		retval = resp->status;
3439 		if (retval) {
3440 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3441 			WRITE_ONCE(sc->caller_is_done, true);
3442 			goto setup_nic_dev_done;
3443 		}
3444 		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3445 			 32, "%s",
3446 			 resp->cfg_info.liquidio_firmware_version);
3447 
3448 		/* Verify f/w version (in case of 'auto' loading from flash) */
3449 		fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3450 		if (memcmp(LIQUIDIO_BASE_VERSION,
3451 			   fw_ver,
3452 			   strlen(LIQUIDIO_BASE_VERSION))) {
3453 			dev_err(&octeon_dev->pci_dev->dev,
3454 				"Unmatched firmware version. Expected %s.x, got %s.\n",
3455 				LIQUIDIO_BASE_VERSION, fw_ver);
3456 			WRITE_ONCE(sc->caller_is_done, true);
3457 			goto setup_nic_dev_done;
3458 		} else if (atomic_read(octeon_dev->adapter_fw_state) ==
3459 			   FW_IS_PRELOADED) {
3460 			dev_info(&octeon_dev->pci_dev->dev,
3461 				 "Using auto-loaded firmware version %s.\n",
3462 				 fw_ver);
3463 		}
3464 
3465 		/* extract micro version field; point past '<maj>.<min>.' */
3466 		micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3467 		if (kstrtoul(micro_ver, 10, &micro) != 0)
3468 			micro = 0;
3469 		octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3470 		octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3471 		octeon_dev->fw_info.ver.rev = micro;
3472 
3473 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3474 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3475 
3476 		num_iqueues = hweight64(resp->cfg_info.iqmask);
3477 		num_oqueues = hweight64(resp->cfg_info.oqmask);
3478 
3479 		if (!(num_iqueues) || !(num_oqueues)) {
3480 			dev_err(&octeon_dev->pci_dev->dev,
3481 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3482 				resp->cfg_info.iqmask,
3483 				resp->cfg_info.oqmask);
3484 			WRITE_ONCE(sc->caller_is_done, true);
3485 			goto setup_nic_dev_done;
3486 		}
3487 
3488 		if (OCTEON_CN6XXX(octeon_dev)) {
3489 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3490 								    cn6xxx));
3491 		} else if (OCTEON_CN23XX_PF(octeon_dev)) {
3492 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3493 								    cn23xx_pf));
3494 		}
3495 
3496 		dev_dbg(&octeon_dev->pci_dev->dev,
3497 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3498 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3499 			num_iqueues, num_oqueues, max_num_queues);
3500 		netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3501 
3502 		if (!netdev) {
3503 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3504 			WRITE_ONCE(sc->caller_is_done, true);
3505 			goto setup_nic_dev_done;
3506 		}
3507 
3508 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3509 
3510 		/* Associate the routines that will handle different
3511 		 * netdev tasks.
3512 		 */
3513 		netdev->netdev_ops = &lionetdevops;
3514 
3515 		retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3516 		if (retval) {
3517 			dev_err(&octeon_dev->pci_dev->dev,
3518 				"setting real number rx failed\n");
3519 			WRITE_ONCE(sc->caller_is_done, true);
3520 			goto setup_nic_dev_free;
3521 		}
3522 
3523 		retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3524 		if (retval) {
3525 			dev_err(&octeon_dev->pci_dev->dev,
3526 				"setting real number tx failed\n");
3527 			WRITE_ONCE(sc->caller_is_done, true);
3528 			goto setup_nic_dev_free;
3529 		}
3530 
3531 		lio = GET_LIO(netdev);
3532 
3533 		memset(lio, 0, sizeof(struct lio));
3534 
3535 		lio->ifidx = ifidx_or_pfnum;
3536 
3537 		props = &octeon_dev->props[i];
3538 		props->gmxport = resp->cfg_info.linfo.gmxport;
3539 		props->netdev = netdev;
3540 
3541 		lio->linfo.num_rxpciq = num_oqueues;
3542 		lio->linfo.num_txpciq = num_iqueues;
3543 		for (j = 0; j < num_oqueues; j++) {
3544 			lio->linfo.rxpciq[j].u64 =
3545 				resp->cfg_info.linfo.rxpciq[j].u64;
3546 		}
3547 		for (j = 0; j < num_iqueues; j++) {
3548 			lio->linfo.txpciq[j].u64 =
3549 				resp->cfg_info.linfo.txpciq[j].u64;
3550 		}
3551 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3552 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3553 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3554 
3555 		WRITE_ONCE(sc->caller_is_done, true);
3556 
3557 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3558 
3559 		if (OCTEON_CN23XX_PF(octeon_dev) ||
3560 		    OCTEON_CN6XXX(octeon_dev)) {
3561 			lio->dev_capability = NETIF_F_HIGHDMA
3562 					      | NETIF_F_IP_CSUM
3563 					      | NETIF_F_IPV6_CSUM
3564 					      | NETIF_F_SG | NETIF_F_RXCSUM
3565 					      | NETIF_F_GRO
3566 					      | NETIF_F_TSO | NETIF_F_TSO6
3567 					      | NETIF_F_LRO;
3568 		}
3569 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3570 
3571 		/*  Copy of transmit encapsulation capabilities:
3572 		 *  TSO, TSO6, Checksums for this device
3573 		 */
3574 		lio->enc_dev_capability = NETIF_F_IP_CSUM
3575 					  | NETIF_F_IPV6_CSUM
3576 					  | NETIF_F_GSO_UDP_TUNNEL
3577 					  | NETIF_F_HW_CSUM | NETIF_F_SG
3578 					  | NETIF_F_RXCSUM
3579 					  | NETIF_F_TSO | NETIF_F_TSO6
3580 					  | NETIF_F_LRO;
3581 
3582 		netdev->hw_enc_features = (lio->enc_dev_capability &
3583 					   ~NETIF_F_LRO);
3584 
3585 		netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
3586 
3587 		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3588 
3589 		netdev->vlan_features = lio->dev_capability;
3590 		/* Add any unchangeable hw features */
3591 		lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3592 					NETIF_F_HW_VLAN_CTAG_RX |
3593 					NETIF_F_HW_VLAN_CTAG_TX;
3594 
3595 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3596 
3597 		netdev->hw_features = lio->dev_capability;
3598 		/*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
3599 		netdev->hw_features = netdev->hw_features &
3600 			~NETIF_F_HW_VLAN_CTAG_RX;
3601 
3602 		/* MTU range: 68 - 16000 */
3603 		netdev->min_mtu = LIO_MIN_MTU_SIZE;
3604 		netdev->max_mtu = LIO_MAX_MTU_SIZE;
3605 
3606 		/* Point to the  properties for octeon device to which this
3607 		 * interface belongs.
3608 		 */
3609 		lio->oct_dev = octeon_dev;
3610 		lio->octprops = props;
3611 		lio->netdev = netdev;
3612 
3613 		dev_dbg(&octeon_dev->pci_dev->dev,
3614 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
3615 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3616 
3617 		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3618 			u8 vfmac[ETH_ALEN];
3619 
3620 			eth_random_addr(vfmac);
3621 			if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3622 				dev_err(&octeon_dev->pci_dev->dev,
3623 					"Error setting VF%d MAC address\n",
3624 					j);
3625 				goto setup_nic_dev_free;
3626 			}
3627 		}
3628 
3629 		/* 64-bit swap required on LE machines */
3630 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3631 		for (j = 0; j < 6; j++)
3632 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3633 
3634 		/* Copy MAC Address to OS network device structure */
3635 
3636 		eth_hw_addr_set(netdev, mac);
3637 
3638 		/* By default all interfaces on a single Octeon uses the same
3639 		 * tx and rx queues
3640 		 */
3641 		lio->txq = lio->linfo.txpciq[0].s.q_no;
3642 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3643 		if (liquidio_setup_io_queues(octeon_dev, i,
3644 					     lio->linfo.num_txpciq,
3645 					     lio->linfo.num_rxpciq)) {
3646 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3647 			goto setup_nic_dev_free;
3648 		}
3649 
3650 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3651 
3652 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3653 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3654 
3655 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3656 			dev_err(&octeon_dev->pci_dev->dev,
3657 				"Gather list allocation failed\n");
3658 			goto setup_nic_dev_free;
3659 		}
3660 
3661 		/* Register ethtool support */
3662 		liquidio_set_ethtool_ops(netdev);
3663 		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3664 			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3665 		else
3666 			octeon_dev->priv_flags = 0x0;
3667 
3668 		if (netdev->features & NETIF_F_LRO)
3669 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3670 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3671 
3672 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3673 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
3674 
3675 		if ((debug != -1) && (debug & NETIF_MSG_HW))
3676 			liquidio_set_feature(netdev,
3677 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
3678 
3679 		if (setup_link_status_change_wq(netdev))
3680 			goto setup_nic_dev_free;
3681 
3682 		if ((octeon_dev->fw_info.app_cap_flags &
3683 		     LIQUIDIO_TIME_SYNC_CAP) &&
3684 		    setup_sync_octeon_time_wq(netdev))
3685 			goto setup_nic_dev_free;
3686 
3687 		if (setup_rx_oom_poll_fn(netdev))
3688 			goto setup_nic_dev_free;
3689 
3690 		/* Register the network device with the OS */
3691 		if (register_netdev(netdev)) {
3692 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3693 			goto setup_nic_dev_free;
3694 		}
3695 
3696 		dev_dbg(&octeon_dev->pci_dev->dev,
3697 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3698 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3699 		netif_carrier_off(netdev);
3700 		lio->link_changes++;
3701 
3702 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3703 
3704 		/* Sending command to firmware to enable Rx checksum offload
3705 		 * by default at the time of setup of Liquidio driver for
3706 		 * this device
3707 		 */
3708 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3709 					    OCTNET_CMD_RXCSUM_ENABLE);
3710 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3711 				     OCTNET_CMD_TXCSUM_ENABLE);
3712 
3713 		dev_dbg(&octeon_dev->pci_dev->dev,
3714 			"NIC ifidx:%d Setup successful\n", i);
3715 
3716 		if (octeon_dev->subsystem_id ==
3717 			OCTEON_CN2350_25GB_SUBSYS_ID ||
3718 		    octeon_dev->subsystem_id ==
3719 			OCTEON_CN2360_25GB_SUBSYS_ID) {
3720 			cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3721 					     octeon_dev->fw_info.ver.min,
3722 					     octeon_dev->fw_info.ver.rev);
3723 
3724 			/* speed control unsupported in f/w older than 1.7.2 */
3725 			if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3726 				dev_info(&octeon_dev->pci_dev->dev,
3727 					 "speed setting not supported by f/w.");
3728 				octeon_dev->speed_setting = 25;
3729 				octeon_dev->no_speed_setting = 1;
3730 			} else {
3731 				liquidio_get_speed(lio);
3732 			}
3733 
3734 			if (octeon_dev->speed_setting == 0) {
3735 				octeon_dev->speed_setting = 25;
3736 				octeon_dev->no_speed_setting = 1;
3737 			}
3738 		} else {
3739 			octeon_dev->no_speed_setting = 1;
3740 			octeon_dev->speed_setting = 10;
3741 		}
3742 		octeon_dev->speed_boot = octeon_dev->speed_setting;
3743 
3744 		/* don't read FEC setting if unsupported by f/w (see above) */
3745 		if (octeon_dev->speed_boot == 25 &&
3746 		    !octeon_dev->no_speed_setting) {
3747 			liquidio_get_fec(lio);
3748 			octeon_dev->props[lio->ifidx].fec_boot =
3749 				octeon_dev->props[lio->ifidx].fec;
3750 		}
3751 	}
3752 
3753 	device_lock(&octeon_dev->pci_dev->dev);
3754 	devlink = devlink_alloc(&liquidio_devlink_ops,
3755 				sizeof(struct lio_devlink_priv),
3756 				&octeon_dev->pci_dev->dev);
3757 	if (!devlink) {
3758 		device_unlock(&octeon_dev->pci_dev->dev);
3759 		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3760 		goto setup_nic_dev_free;
3761 	}
3762 
3763 	lio_devlink = devlink_priv(devlink);
3764 	lio_devlink->oct = octeon_dev;
3765 
3766 	octeon_dev->devlink = devlink;
3767 	octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3768 	devlink_register(devlink);
3769 	device_unlock(&octeon_dev->pci_dev->dev);
3770 
3771 	return 0;
3772 
3773 setup_nic_dev_free:
3774 
3775 	while (i--) {
3776 		dev_err(&octeon_dev->pci_dev->dev,
3777 			"NIC ifidx:%d Setup failed\n", i);
3778 		liquidio_destroy_nic_device(octeon_dev, i);
3779 	}
3780 
3781 setup_nic_dev_done:
3782 
3783 	return -ENODEV;
3784 }
3785 
3786 #ifdef CONFIG_PCI_IOV
3787 static int octeon_enable_sriov(struct octeon_device *oct)
3788 {
3789 	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3790 	struct pci_dev *vfdev;
3791 	int err;
3792 	u32 u;
3793 
3794 	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3795 		err = pci_enable_sriov(oct->pci_dev,
3796 				       oct->sriov_info.num_vfs_alloced);
3797 		if (err) {
3798 			dev_err(&oct->pci_dev->dev,
3799 				"OCTEON: Failed to enable PCI sriov: %d\n",
3800 				err);
3801 			oct->sriov_info.num_vfs_alloced = 0;
3802 			return err;
3803 		}
3804 		oct->sriov_info.sriov_enabled = 1;
3805 
3806 		/* init lookup table that maps DPI ring number to VF pci_dev
3807 		 * struct pointer
3808 		 */
3809 		u = 0;
3810 		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3811 				       OCTEON_CN23XX_VF_VID, NULL);
3812 		while (vfdev) {
3813 			if (vfdev->is_virtfn &&
3814 			    (vfdev->physfn == oct->pci_dev)) {
3815 				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3816 					vfdev;
3817 				u += oct->sriov_info.rings_per_vf;
3818 			}
3819 			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3820 					       OCTEON_CN23XX_VF_VID, vfdev);
3821 		}
3822 	}
3823 
3824 	return num_vfs_alloced;
3825 }
3826 
3827 static int lio_pci_sriov_disable(struct octeon_device *oct)
3828 {
3829 	int u;
3830 
3831 	if (pci_vfs_assigned(oct->pci_dev)) {
3832 		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3833 		return -EPERM;
3834 	}
3835 
3836 	pci_disable_sriov(oct->pci_dev);
3837 
3838 	u = 0;
3839 	while (u < MAX_POSSIBLE_VFS) {
3840 		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3841 		u += oct->sriov_info.rings_per_vf;
3842 	}
3843 
3844 	oct->sriov_info.num_vfs_alloced = 0;
3845 	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3846 		 oct->pf_num);
3847 
3848 	return 0;
3849 }
3850 
3851 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3852 {
3853 	struct octeon_device *oct = pci_get_drvdata(dev);
3854 	int ret = 0;
3855 
3856 	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3857 	    (oct->sriov_info.sriov_enabled)) {
3858 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3859 			 oct->pf_num, num_vfs);
3860 		return 0;
3861 	}
3862 
3863 	if (!num_vfs) {
3864 		lio_vf_rep_destroy(oct);
3865 		ret = lio_pci_sriov_disable(oct);
3866 	} else if (num_vfs > oct->sriov_info.max_vfs) {
3867 		dev_err(&oct->pci_dev->dev,
3868 			"OCTEON: Max allowed VFs:%d user requested:%d",
3869 			oct->sriov_info.max_vfs, num_vfs);
3870 		ret = -EPERM;
3871 	} else {
3872 		oct->sriov_info.num_vfs_alloced = num_vfs;
3873 		ret = octeon_enable_sriov(oct);
3874 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3875 			 oct->pf_num, num_vfs);
3876 		ret = lio_vf_rep_create(oct);
3877 		if (ret)
3878 			dev_info(&oct->pci_dev->dev,
3879 				 "vf representor create failed");
3880 	}
3881 
3882 	return ret;
3883 }
3884 #endif
3885 
3886 /**
3887  * liquidio_init_nic_module - initialize the NIC
3888  * @oct: octeon device
3889  *
3890  * This initialization routine is called once the Octeon device application is
3891  * up and running
3892  */
3893 static int liquidio_init_nic_module(struct octeon_device *oct)
3894 {
3895 	int i, retval = 0;
3896 	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3897 
3898 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3899 
3900 	/* only default iq and oq were initialized
3901 	 * initialize the rest as well
3902 	 */
3903 	/* run port_config command for each port */
3904 	oct->ifcount = num_nic_ports;
3905 
3906 	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3907 
3908 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
3909 		oct->props[i].gmxport = -1;
3910 
3911 	retval = setup_nic_devices(oct);
3912 	if (retval) {
3913 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3914 		goto octnet_init_failure;
3915 	}
3916 
3917 	/* Call vf_rep_modinit if the firmware is switchdev capable
3918 	 * and do it from the first liquidio function probed.
3919 	 */
3920 	if (!oct->octeon_id &&
3921 	    oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3922 		retval = lio_vf_rep_modinit();
3923 		if (retval) {
3924 			liquidio_stop_nic_module(oct);
3925 			goto octnet_init_failure;
3926 		}
3927 	}
3928 
3929 	liquidio_ptp_init(oct);
3930 
3931 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3932 
3933 	return retval;
3934 
3935 octnet_init_failure:
3936 
3937 	oct->ifcount = 0;
3938 
3939 	return retval;
3940 }
3941 
3942 /**
3943  * nic_starter - finish init
3944  * @work:  work struct work_struct
3945  *
3946  * starter callback that invokes the remaining initialization work after the NIC is up and running.
3947  */
3948 static void nic_starter(struct work_struct *work)
3949 {
3950 	struct octeon_device *oct;
3951 	struct cavium_wk *wk = (struct cavium_wk *)work;
3952 
3953 	oct = (struct octeon_device *)wk->ctxptr;
3954 
3955 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3956 		return;
3957 
3958 	/* If the status of the device is CORE_OK, the core
3959 	 * application has reported its application type. Call
3960 	 * any registered handlers now and move to the RUNNING
3961 	 * state.
3962 	 */
3963 	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3964 		schedule_delayed_work(&oct->nic_poll_work.work,
3965 				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3966 		return;
3967 	}
3968 
3969 	atomic_set(&oct->status, OCT_DEV_RUNNING);
3970 
3971 	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3972 		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3973 
3974 		if (liquidio_init_nic_module(oct))
3975 			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3976 		else
3977 			handshake[oct->octeon_id].started_ok = 1;
3978 	} else {
3979 		dev_err(&oct->pci_dev->dev,
3980 			"Unexpected application running on NIC (%d). Check firmware.\n",
3981 			oct->app_mode);
3982 	}
3983 
3984 	complete(&handshake[oct->octeon_id].started);
3985 }
3986 
3987 static int
3988 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3989 {
3990 	struct octeon_device *oct = (struct octeon_device *)buf;
3991 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3992 	int i, notice, vf_idx;
3993 	bool cores_crashed;
3994 	u64 *data, vf_num;
3995 
3996 	notice = recv_pkt->rh.r.ossp;
3997 	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3998 
3999 	/* the first 64-bit word of data is the vf_num */
4000 	vf_num = data[0];
4001 	octeon_swap_8B_data(&vf_num, 1);
4002 	vf_idx = (int)vf_num - 1;
4003 
4004 	cores_crashed = READ_ONCE(oct->cores_crashed);
4005 
4006 	if (notice == VF_DRV_LOADED) {
4007 		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4008 			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4009 			dev_info(&oct->pci_dev->dev,
4010 				 "driver for VF%d was loaded\n", vf_idx);
4011 			if (!cores_crashed)
4012 				try_module_get(THIS_MODULE);
4013 		}
4014 	} else if (notice == VF_DRV_REMOVED) {
4015 		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4016 			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4017 			dev_info(&oct->pci_dev->dev,
4018 				 "driver for VF%d was removed\n", vf_idx);
4019 			if (!cores_crashed)
4020 				module_put(THIS_MODULE);
4021 		}
4022 	} else if (notice == VF_DRV_MACADDR_CHANGED) {
4023 		u8 *b = (u8 *)&data[1];
4024 
4025 		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4026 		dev_info(&oct->pci_dev->dev,
4027 			 "VF driver changed VF%d's MAC address to %pM\n",
4028 			 vf_idx, b + 2);
4029 	}
4030 
4031 	for (i = 0; i < recv_pkt->buffer_count; i++)
4032 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
4033 	octeon_free_recv_info(recv_info);
4034 
4035 	return 0;
4036 }
4037 
4038 /**
4039  * octeon_device_init - Device initialization for each Octeon device that is probed
4040  * @octeon_dev:  octeon device
4041  */
4042 static int octeon_device_init(struct octeon_device *octeon_dev)
4043 {
4044 	int j, ret;
4045 	char bootcmd[] = "\n";
4046 	char *dbg_enb = NULL;
4047 	enum lio_fw_state fw_state;
4048 	struct octeon_device_priv *oct_priv =
4049 		(struct octeon_device_priv *)octeon_dev->priv;
4050 	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4051 
4052 	/* Enable access to the octeon device and make its DMA capability
4053 	 * known to the OS.
4054 	 */
4055 	if (octeon_pci_os_setup(octeon_dev))
4056 		return 1;
4057 
4058 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4059 
4060 	/* Identify the Octeon type and map the BAR address space. */
4061 	if (octeon_chip_specific_setup(octeon_dev)) {
4062 		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4063 		return 1;
4064 	}
4065 
4066 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4067 
4068 	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4069 	 * since that is what is required for the reference to be removed
4070 	 * during de-initialization (see 'octeon_destroy_resources').
4071 	 */
4072 	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4073 			       PCI_SLOT(octeon_dev->pci_dev->devfn),
4074 			       PCI_FUNC(octeon_dev->pci_dev->devfn),
4075 			       true);
4076 
4077 	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4078 
4079 	/* CN23XX supports preloaded firmware if the following is true:
4080 	 *
4081 	 * The adapter indicates that firmware is currently running AND
4082 	 * 'fw_type' is 'auto'.
4083 	 *
4084 	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4085 	 */
4086 	if (OCTEON_CN23XX_PF(octeon_dev) &&
4087 	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4088 		atomic_cmpxchg(octeon_dev->adapter_fw_state,
4089 			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4090 	}
4091 
4092 	/* If loading firmware, only first device of adapter needs to do so. */
4093 	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4094 				  FW_NEEDS_TO_BE_LOADED,
4095 				  FW_IS_BEING_LOADED);
4096 
4097 	/* Here, [local variable] 'fw_state' is set to one of:
4098 	 *
4099 	 *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4100 	 *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4101 	 *                          firmware to the adapter.
4102 	 *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4103 	 *                          firmware to the adapter.
4104 	 */
4105 
4106 	/* Prior to f/w load, perform a soft reset of the Octeon device;
4107 	 * if error resetting, return w/error.
4108 	 */
4109 	if (fw_state == FW_NEEDS_TO_BE_LOADED)
4110 		if (octeon_dev->fn_list.soft_reset(octeon_dev))
4111 			return 1;
4112 
4113 	/* Initialize the dispatch mechanism used to push packets arriving on
4114 	 * Octeon Output queues.
4115 	 */
4116 	if (octeon_init_dispatch_list(octeon_dev))
4117 		return 1;
4118 
4119 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4120 				    OPCODE_NIC_CORE_DRV_ACTIVE,
4121 				    octeon_core_drv_init,
4122 				    octeon_dev);
4123 
4124 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4125 				    OPCODE_NIC_VF_DRV_NOTICE,
4126 				    octeon_recv_vf_drv_notice, octeon_dev);
4127 	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4128 	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4129 	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4130 			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4131 
4132 	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4133 
4134 	if (octeon_set_io_queues_off(octeon_dev)) {
4135 		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4136 		return 1;
4137 	}
4138 
4139 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4140 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4141 		if (ret) {
4142 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4143 			return ret;
4144 		}
4145 	}
4146 
4147 	/* Initialize soft command buffer pool
4148 	 */
4149 	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4150 		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4151 		return 1;
4152 	}
4153 	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4154 
4155 	/*  Setup the data structures that manage this Octeon's Input queues. */
4156 	if (octeon_setup_instr_queues(octeon_dev)) {
4157 		dev_err(&octeon_dev->pci_dev->dev,
4158 			"instruction queue initialization failed\n");
4159 		return 1;
4160 	}
4161 	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4162 
4163 	/* Initialize lists to manage the requests of different types that
4164 	 * arrive from user & kernel applications for this octeon device.
4165 	 */
4166 	if (octeon_setup_response_list(octeon_dev)) {
4167 		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4168 		return 1;
4169 	}
4170 	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4171 
4172 	if (octeon_setup_output_queues(octeon_dev)) {
4173 		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4174 		return 1;
4175 	}
4176 
4177 	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4178 
4179 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4180 		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4181 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4182 			return 1;
4183 		}
4184 		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4185 
4186 		if (octeon_allocate_ioq_vector
4187 				(octeon_dev,
4188 				 octeon_dev->sriov_info.num_pf_rings)) {
4189 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4190 			return 1;
4191 		}
4192 		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4193 
4194 	} else {
4195 		/* The input and output queue registers were setup earlier (the
4196 		 * queues were not enabled). Any additional registers
4197 		 * that need to be programmed should be done now.
4198 		 */
4199 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4200 		if (ret) {
4201 			dev_err(&octeon_dev->pci_dev->dev,
4202 				"Failed to configure device registers\n");
4203 			return ret;
4204 		}
4205 	}
4206 
4207 	/* Initialize the tasklet that handles output queue packet processing.*/
4208 	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4209 	tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
4210 
4211 	/* Setup the interrupt handler and record the INT SUM register address
4212 	 */
4213 	if (octeon_setup_interrupt(octeon_dev,
4214 				   octeon_dev->sriov_info.num_pf_rings))
4215 		return 1;
4216 
4217 	/* Enable Octeon device interrupts */
4218 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4219 
4220 	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4221 
4222 	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4223 	 * the output queue is enabled.
4224 	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4225 	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4226 	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4227 	 * before any credits have been issued, causing the ring to be reset
4228 	 * (and the f/w appear to never have started).
4229 	 */
4230 	for (j = 0; j < octeon_dev->num_oqs; j++)
4231 		writel(octeon_dev->droq[j]->max_count,
4232 		       octeon_dev->droq[j]->pkts_credit_reg);
4233 
4234 	/* Enable the input and output queues for this Octeon device */
4235 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4236 	if (ret) {
4237 		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4238 		return ret;
4239 	}
4240 
4241 	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4242 
4243 	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4244 		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4245 		if (!ddr_timeout) {
4246 			dev_info(&octeon_dev->pci_dev->dev,
4247 				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4248 		}
4249 
4250 		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4251 
4252 		/* Wait for the octeon to initialize DDR after the soft-reset.*/
4253 		while (!ddr_timeout) {
4254 			set_current_state(TASK_INTERRUPTIBLE);
4255 			if (schedule_timeout(HZ / 10)) {
4256 				/* user probably pressed Control-C */
4257 				return 1;
4258 			}
4259 		}
4260 		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4261 		if (ret) {
4262 			dev_err(&octeon_dev->pci_dev->dev,
4263 				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4264 				ret);
4265 			return 1;
4266 		}
4267 
4268 		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4269 			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4270 			return 1;
4271 		}
4272 
4273 		/* Divert uboot to take commands from host instead. */
4274 		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4275 
4276 		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4277 		ret = octeon_init_consoles(octeon_dev);
4278 		if (ret) {
4279 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4280 			return 1;
4281 		}
4282 		/* If console debug enabled, specify empty string to use default
4283 		 * enablement ELSE specify NULL string for 'disabled'.
4284 		 */
4285 		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4286 		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4287 		if (ret) {
4288 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4289 			return 1;
4290 		} else if (octeon_console_debug_enabled(0)) {
4291 			/* If console was added AND we're logging console output
4292 			 * then set our console print function.
4293 			 */
4294 			octeon_dev->console[0].print = octeon_dbg_console_print;
4295 		}
4296 
4297 		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4298 
4299 		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4300 		ret = load_firmware(octeon_dev);
4301 		if (ret) {
4302 			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4303 			return 1;
4304 		}
4305 
4306 		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4307 	}
4308 
4309 	handshake[octeon_dev->octeon_id].init_ok = 1;
4310 	complete(&handshake[octeon_dev->octeon_id].init);
4311 
4312 	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4313 	oct_priv->dev = octeon_dev;
4314 
4315 	return 0;
4316 }
4317 
4318 /**
4319  * octeon_dbg_console_print - Debug console print function
4320  * @oct:  octeon device
4321  * @console_num: console number
4322  * @prefix:      first portion of line to display
4323  * @suffix:      second portion of line to display
4324  *
4325  * The OCTEON debug console outputs entire lines (excluding '\n').
4326  * Normally, the line will be passed in the 'prefix' parameter.
4327  * However, due to buffering, it is possible for a line to be split into two
4328  * parts, in which case they will be passed as the 'prefix' parameter and
4329  * 'suffix' parameter.
4330  */
4331 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4332 				    char *prefix, char *suffix)
4333 {
4334 	if (prefix && suffix)
4335 		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4336 			 suffix);
4337 	else if (prefix)
4338 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4339 	else if (suffix)
4340 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4341 
4342 	return 0;
4343 }
4344 
4345 /**
4346  * liquidio_exit - Exits the module
4347  */
4348 static void __exit liquidio_exit(void)
4349 {
4350 	liquidio_deinit_pci();
4351 
4352 	pr_info("LiquidIO network module is now unloaded\n");
4353 }
4354 
4355 module_init(liquidio_init);
4356 module_exit(liquidio_exit);
4357