1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/firmware.h>
22 #include <net/vxlan.h>
23 #include <linux/kthread.h>
24 #include "liquidio_common.h"
25 #include "octeon_droq.h"
26 #include "octeon_iq.h"
27 #include "response_manager.h"
28 #include "octeon_device.h"
29 #include "octeon_nic.h"
30 #include "octeon_main.h"
31 #include "octeon_network.h"
32 #include "cn66xx_regs.h"
33 #include "cn66xx_device.h"
34 #include "cn68xx_device.h"
35 #include "cn23xx_pf_device.h"
36 #include "liquidio_image.h"
37 #include "lio_vf_rep.h"
38 
39 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
40 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
41 MODULE_LICENSE("GPL");
42 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
43 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
45 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
47 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
49 		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
50 
51 static int ddr_timeout = 10000;
52 module_param(ddr_timeout, int, 0644);
53 MODULE_PARM_DESC(ddr_timeout,
54 		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
55 
56 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
57 
58 static int debug = -1;
59 module_param(debug, int, 0644);
60 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
61 
62 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
63 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
64 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
65 
66 static u32 console_bitmask;
67 module_param(console_bitmask, int, 0644);
68 MODULE_PARM_DESC(console_bitmask,
69 		 "Bitmask indicating which consoles have debug output redirected to syslog.");
70 
71 /**
72  * octeon_console_debug_enabled - determines if a given console has debug enabled.
73  * @console: console to check
74  * Return:  1 = enabled. 0 otherwise
75  */
76 static int octeon_console_debug_enabled(u32 console)
77 {
78 	return (console_bitmask >> (console)) & 0x1;
79 }
80 
81 /* Polling interval for determining when NIC application is alive */
82 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
83 
84 /* runtime link query interval */
85 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
86 /* update localtime to octeon firmware every 60 seconds.
87  * make firmware to use same time reference, so that it will be easy to
88  * correlate firmware logged events/errors with host events, for debugging.
89  */
90 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
91 
92 /* time to wait for possible in-flight requests in milliseconds */
93 #define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)
94 
95 struct lio_trusted_vf_ctx {
96 	struct completion complete;
97 	int status;
98 };
99 
100 struct oct_link_status_resp {
101 	u64 rh;
102 	struct oct_link_info link_info;
103 	u64 status;
104 };
105 
106 struct oct_timestamp_resp {
107 	u64 rh;
108 	u64 timestamp;
109 	u64 status;
110 };
111 
112 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
113 
114 union tx_info {
115 	u64 u64;
116 	struct {
117 #ifdef __BIG_ENDIAN_BITFIELD
118 		u16 gso_size;
119 		u16 gso_segs;
120 		u32 reserved;
121 #else
122 		u32 reserved;
123 		u16 gso_segs;
124 		u16 gso_size;
125 #endif
126 	} s;
127 };
128 
129 /* Octeon device properties to be used by the NIC module.
130  * Each octeon device in the system will be represented
131  * by this structure in the NIC module.
132  */
133 
134 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
135 #define OCTNIC_GSO_MAX_SIZE                                                    \
136 	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
137 
138 struct handshake {
139 	struct completion init;
140 	struct completion started;
141 	struct pci_dev *pci_dev;
142 	int init_ok;
143 	int started_ok;
144 };
145 
146 #ifdef CONFIG_PCI_IOV
147 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
148 #endif
149 
150 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
151 				    char *prefix, char *suffix);
152 
153 static int octeon_device_init(struct octeon_device *);
154 static int liquidio_stop(struct net_device *netdev);
155 static void liquidio_remove(struct pci_dev *pdev);
156 static int liquidio_probe(struct pci_dev *pdev,
157 			  const struct pci_device_id *ent);
158 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
159 				      int linkstate);
160 
161 static struct handshake handshake[MAX_OCTEON_DEVICES];
162 static struct completion first_stage;
163 
164 static void octeon_droq_bh(struct tasklet_struct *t)
165 {
166 	int q_no;
167 	int reschedule = 0;
168 	struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
169 							  droq_tasklet);
170 	struct octeon_device *oct = oct_priv->dev;
171 
172 	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
173 		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
174 			continue;
175 		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
176 							  MAX_PACKET_BUDGET);
177 		lio_enable_irq(oct->droq[q_no], NULL);
178 
179 		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
180 			/* set time and cnt interrupt thresholds for this DROQ
181 			 * for NAPI
182 			 */
183 			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
184 
185 			octeon_write_csr64(
186 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
187 			    0x5700000040ULL);
188 			octeon_write_csr64(
189 			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
190 		}
191 	}
192 
193 	if (reschedule)
194 		tasklet_schedule(&oct_priv->droq_tasklet);
195 }
196 
197 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
198 {
199 	struct octeon_device_priv *oct_priv =
200 		(struct octeon_device_priv *)oct->priv;
201 	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
202 	int i;
203 
204 	do {
205 		pending_pkts = 0;
206 
207 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
208 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
209 				continue;
210 			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
211 		}
212 		if (pkt_cnt > 0) {
213 			pending_pkts += pkt_cnt;
214 			tasklet_schedule(&oct_priv->droq_tasklet);
215 		}
216 		pkt_cnt = 0;
217 		schedule_timeout_uninterruptible(1);
218 
219 	} while (retry-- && pending_pkts);
220 
221 	return pkt_cnt;
222 }
223 
224 /**
225  * force_io_queues_off - Forces all IO queues off on a given device
226  * @oct: Pointer to Octeon device
227  */
228 static void force_io_queues_off(struct octeon_device *oct)
229 {
230 	if ((oct->chip_id == OCTEON_CN66XX) ||
231 	    (oct->chip_id == OCTEON_CN68XX)) {
232 		/* Reset the Enable bits for Input Queues. */
233 		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
234 
235 		/* Reset the Enable bits for Output Queues. */
236 		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
237 	}
238 }
239 
240 /**
241  * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
242  * @oct: Pointer to Octeon device
243  */
244 static inline void pcierror_quiesce_device(struct octeon_device *oct)
245 {
246 	int i;
247 
248 	/* Disable the input and output queues now. No more packets will
249 	 * arrive from Octeon, but we should wait for all packet processing
250 	 * to finish.
251 	 */
252 	force_io_queues_off(oct);
253 
254 	/* To allow for in-flight requests */
255 	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
256 
257 	if (wait_for_pending_requests(oct))
258 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
259 
260 	/* Force all requests waiting to be fetched by OCTEON to complete. */
261 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
262 		struct octeon_instr_queue *iq;
263 
264 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
265 			continue;
266 		iq = oct->instr_queue[i];
267 
268 		if (atomic_read(&iq->instr_pending)) {
269 			spin_lock_bh(&iq->lock);
270 			iq->fill_cnt = 0;
271 			iq->octeon_read_index = iq->host_write_index;
272 			iq->stats.instr_processed +=
273 				atomic_read(&iq->instr_pending);
274 			lio_process_iq_request_list(oct, iq, 0);
275 			spin_unlock_bh(&iq->lock);
276 		}
277 	}
278 
279 	/* Force all pending ordered list requests to time out. */
280 	lio_process_ordered_list(oct, 1);
281 
282 	/* We do not need to wait for output queue packets to be processed. */
283 }
284 
285 /**
286  * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
287  * @dev: Pointer to PCI device
288  */
289 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
290 {
291 	int pos = 0x100;
292 	u32 status, mask;
293 
294 	pr_info("%s :\n", __func__);
295 
296 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
297 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
298 	if (dev->error_state == pci_channel_io_normal)
299 		status &= ~mask;        /* Clear corresponding nonfatal bits */
300 	else
301 		status &= mask;         /* Clear corresponding fatal bits */
302 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
303 }
304 
305 /**
306  * stop_pci_io - Stop all PCI IO to a given device
307  * @oct: Pointer to Octeon device
308  */
309 static void stop_pci_io(struct octeon_device *oct)
310 {
311 	/* No more instructions will be forwarded. */
312 	atomic_set(&oct->status, OCT_DEV_IN_RESET);
313 
314 	pci_disable_device(oct->pci_dev);
315 
316 	/* Disable interrupts  */
317 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
318 
319 	pcierror_quiesce_device(oct);
320 
321 	/* Release the interrupt line */
322 	free_irq(oct->pci_dev->irq, oct);
323 
324 	if (oct->flags & LIO_FLAG_MSI_ENABLED)
325 		pci_disable_msi(oct->pci_dev);
326 
327 	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
328 		lio_get_state_string(&oct->status));
329 
330 	/* making it a common function for all OCTEON models */
331 	cleanup_aer_uncorrect_error_status(oct->pci_dev);
332 }
333 
334 /**
335  * liquidio_pcie_error_detected - called when PCI error is detected
336  * @pdev: Pointer to PCI device
337  * @state: The current pci connection state
338  *
339  * This function is called after a PCI bus error affecting
340  * this device has been detected.
341  */
342 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
343 						     pci_channel_state_t state)
344 {
345 	struct octeon_device *oct = pci_get_drvdata(pdev);
346 
347 	/* Non-correctable Non-fatal errors */
348 	if (state == pci_channel_io_normal) {
349 		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
350 		cleanup_aer_uncorrect_error_status(oct->pci_dev);
351 		return PCI_ERS_RESULT_CAN_RECOVER;
352 	}
353 
354 	/* Non-correctable Fatal errors */
355 	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
356 	stop_pci_io(oct);
357 
358 	/* Always return a DISCONNECT. There is no support for recovery but only
359 	 * for a clean shutdown.
360 	 */
361 	return PCI_ERS_RESULT_DISCONNECT;
362 }
363 
364 /**
365  * liquidio_pcie_mmio_enabled - mmio handler
366  * @pdev: Pointer to PCI device
367  */
368 static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
369 {
370 	/* We should never hit this since we never ask for a reset for a Fatal
371 	 * Error. We always return DISCONNECT in io_error above.
372 	 * But play safe and return RECOVERED for now.
373 	 */
374 	return PCI_ERS_RESULT_RECOVERED;
375 }
376 
377 /**
378  * liquidio_pcie_slot_reset - called after the pci bus has been reset.
379  * @pdev: Pointer to PCI device
380  *
381  * Restart the card from scratch, as if from a cold-boot. Implementation
382  * resembles the first-half of the octeon_resume routine.
383  */
384 static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
385 {
386 	/* We should never hit this since we never ask for a reset for a Fatal
387 	 * Error. We always return DISCONNECT in io_error above.
388 	 * But play safe and return RECOVERED for now.
389 	 */
390 	return PCI_ERS_RESULT_RECOVERED;
391 }
392 
393 /**
394  * liquidio_pcie_resume - called when traffic can start flowing again.
395  * @pdev: Pointer to PCI device
396  *
397  * This callback is called when the error recovery driver tells us that
398  * its OK to resume normal operation. Implementation resembles the
399  * second-half of the octeon_resume routine.
400  */
401 static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
402 {
403 	/* Nothing to be done here. */
404 }
405 
406 #define liquidio_suspend NULL
407 #define liquidio_resume NULL
408 
409 /* For PCI-E Advanced Error Recovery (AER) Interface */
410 static const struct pci_error_handlers liquidio_err_handler = {
411 	.error_detected = liquidio_pcie_error_detected,
412 	.mmio_enabled	= liquidio_pcie_mmio_enabled,
413 	.slot_reset	= liquidio_pcie_slot_reset,
414 	.resume		= liquidio_pcie_resume,
415 };
416 
417 static const struct pci_device_id liquidio_pci_tbl[] = {
418 	{       /* 68xx */
419 		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
420 	},
421 	{       /* 66xx */
422 		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
423 	},
424 	{       /* 23xx pf */
425 		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
426 	},
427 	{
428 		0, 0, 0, 0, 0, 0, 0
429 	}
430 };
431 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
432 
433 static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);
434 
435 static struct pci_driver liquidio_pci_driver = {
436 	.name		= "LiquidIO",
437 	.id_table	= liquidio_pci_tbl,
438 	.probe		= liquidio_probe,
439 	.remove		= liquidio_remove,
440 	.err_handler	= &liquidio_err_handler,    /* For AER */
441 	.driver.pm	= &liquidio_pm_ops,
442 #ifdef CONFIG_PCI_IOV
443 	.sriov_configure = liquidio_enable_sriov,
444 #endif
445 };
446 
447 /**
448  * liquidio_init_pci - register PCI driver
449  */
450 static int liquidio_init_pci(void)
451 {
452 	return pci_register_driver(&liquidio_pci_driver);
453 }
454 
455 /**
456  * liquidio_deinit_pci - unregister PCI driver
457  */
458 static void liquidio_deinit_pci(void)
459 {
460 	pci_unregister_driver(&liquidio_pci_driver);
461 }
462 
463 /**
464  * check_txq_status - Check Tx queue status, and take appropriate action
465  * @lio: per-network private data
466  * Return: 0 if full, number of queues woken up otherwise
467  */
468 static inline int check_txq_status(struct lio *lio)
469 {
470 	int numqs = lio->netdev->real_num_tx_queues;
471 	int ret_val = 0;
472 	int q, iq;
473 
474 	/* check each sub-queue state */
475 	for (q = 0; q < numqs; q++) {
476 		iq = lio->linfo.txpciq[q %
477 			lio->oct_dev->num_iqs].s.q_no;
478 		if (octnet_iq_is_full(lio->oct_dev, iq))
479 			continue;
480 		if (__netif_subqueue_stopped(lio->netdev, q)) {
481 			netif_wake_subqueue(lio->netdev, q);
482 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
483 						  tx_restart, 1);
484 			ret_val++;
485 		}
486 	}
487 
488 	return ret_val;
489 }
490 
491 /**
492  * print_link_info -  Print link information
493  * @netdev: network device
494  */
495 static void print_link_info(struct net_device *netdev)
496 {
497 	struct lio *lio = GET_LIO(netdev);
498 
499 	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
500 	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
501 		struct oct_link_info *linfo = &lio->linfo;
502 
503 		if (linfo->link.s.link_up) {
504 			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
505 				   linfo->link.s.speed,
506 				   (linfo->link.s.duplex) ? "Full" : "Half");
507 		} else {
508 			netif_info(lio, link, lio->netdev, "Link Down\n");
509 		}
510 	}
511 }
512 
513 /**
514  * octnet_link_status_change - Routine to notify MTU change
515  * @work: work_struct data structure
516  */
517 static void octnet_link_status_change(struct work_struct *work)
518 {
519 	struct cavium_wk *wk = (struct cavium_wk *)work;
520 	struct lio *lio = (struct lio *)wk->ctxptr;
521 
522 	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
523 	 * this API is invoked only when new max-MTU of the interface is
524 	 * less than current MTU.
525 	 */
526 	rtnl_lock();
527 	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
528 	rtnl_unlock();
529 }
530 
531 /**
532  * setup_link_status_change_wq - Sets up the mtu status change work
533  * @netdev: network device
534  */
535 static inline int setup_link_status_change_wq(struct net_device *netdev)
536 {
537 	struct lio *lio = GET_LIO(netdev);
538 	struct octeon_device *oct = lio->oct_dev;
539 
540 	lio->link_status_wq.wq = alloc_workqueue("link-status",
541 						 WQ_MEM_RECLAIM, 0);
542 	if (!lio->link_status_wq.wq) {
543 		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
544 		return -1;
545 	}
546 	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
547 			  octnet_link_status_change);
548 	lio->link_status_wq.wk.ctxptr = lio;
549 
550 	return 0;
551 }
552 
553 static inline void cleanup_link_status_change_wq(struct net_device *netdev)
554 {
555 	struct lio *lio = GET_LIO(netdev);
556 
557 	if (lio->link_status_wq.wq) {
558 		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
559 		destroy_workqueue(lio->link_status_wq.wq);
560 	}
561 }
562 
563 /**
564  * update_link_status - Update link status
565  * @netdev: network device
566  * @ls: link status structure
567  *
568  * Called on receipt of a link status response from the core application to
569  * update each interface's link status.
570  */
571 static inline void update_link_status(struct net_device *netdev,
572 				      union oct_link_status *ls)
573 {
574 	struct lio *lio = GET_LIO(netdev);
575 	int changed = (lio->linfo.link.u64 != ls->u64);
576 	int current_max_mtu = lio->linfo.link.s.mtu;
577 	struct octeon_device *oct = lio->oct_dev;
578 
579 	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
580 		__func__, lio->linfo.link.u64, ls->u64);
581 	lio->linfo.link.u64 = ls->u64;
582 
583 	if ((lio->intf_open) && (changed)) {
584 		print_link_info(netdev);
585 		lio->link_changes++;
586 
587 		if (lio->linfo.link.s.link_up) {
588 			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
589 			netif_carrier_on(netdev);
590 			wake_txqs(netdev);
591 		} else {
592 			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
593 			netif_carrier_off(netdev);
594 			stop_txqs(netdev);
595 		}
596 		if (lio->linfo.link.s.mtu != current_max_mtu) {
597 			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
598 				   current_max_mtu, lio->linfo.link.s.mtu);
599 			netdev->max_mtu = lio->linfo.link.s.mtu;
600 		}
601 		if (lio->linfo.link.s.mtu < netdev->mtu) {
602 			dev_warn(&oct->pci_dev->dev,
603 				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
604 				     netdev->mtu, lio->linfo.link.s.mtu);
605 			queue_delayed_work(lio->link_status_wq.wq,
606 					   &lio->link_status_wq.wk.work, 0);
607 		}
608 	}
609 }
610 
611 /**
612  * lio_sync_octeon_time - send latest localtime to octeon firmware so that
613  * firmware will correct it's time, in case there is a time skew
614  *
615  * @work: work scheduled to send time update to octeon firmware
616  **/
617 static void lio_sync_octeon_time(struct work_struct *work)
618 {
619 	struct cavium_wk *wk = (struct cavium_wk *)work;
620 	struct lio *lio = (struct lio *)wk->ctxptr;
621 	struct octeon_device *oct = lio->oct_dev;
622 	struct octeon_soft_command *sc;
623 	struct timespec64 ts;
624 	struct lio_time *lt;
625 	int ret;
626 
627 	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
628 	if (!sc) {
629 		dev_err(&oct->pci_dev->dev,
630 			"Failed to sync time to octeon: soft command allocation failed\n");
631 		return;
632 	}
633 
634 	lt = (struct lio_time *)sc->virtdptr;
635 
636 	/* Get time of the day */
637 	ktime_get_real_ts64(&ts);
638 	lt->sec = ts.tv_sec;
639 	lt->nsec = ts.tv_nsec;
640 	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
641 
642 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
643 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
644 				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
645 
646 	init_completion(&sc->complete);
647 	sc->sc_status = OCTEON_REQUEST_PENDING;
648 
649 	ret = octeon_send_soft_command(oct, sc);
650 	if (ret == IQ_SEND_FAILED) {
651 		dev_err(&oct->pci_dev->dev,
652 			"Failed to sync time to octeon: failed to send soft command\n");
653 		octeon_free_soft_command(oct, sc);
654 	} else {
655 		WRITE_ONCE(sc->caller_is_done, true);
656 	}
657 
658 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
659 			   &lio->sync_octeon_time_wq.wk.work,
660 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
661 }
662 
663 /**
664  * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
665  *
666  * @netdev: network device which should send time update to firmware
667  **/
668 static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
669 {
670 	struct lio *lio = GET_LIO(netdev);
671 	struct octeon_device *oct = lio->oct_dev;
672 
673 	lio->sync_octeon_time_wq.wq =
674 		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
675 	if (!lio->sync_octeon_time_wq.wq) {
676 		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
677 		return -1;
678 	}
679 	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
680 			  lio_sync_octeon_time);
681 	lio->sync_octeon_time_wq.wk.ctxptr = lio;
682 	queue_delayed_work(lio->sync_octeon_time_wq.wq,
683 			   &lio->sync_octeon_time_wq.wk.work,
684 			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
685 
686 	return 0;
687 }
688 
689 /**
690  * cleanup_sync_octeon_time_wq - destroy wq
691  *
692  * @netdev: network device which should send time update to firmware
693  *
694  * Stop scheduling and destroy the work created to periodically update local
695  * time to octeon firmware.
696  **/
697 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
698 {
699 	struct lio *lio = GET_LIO(netdev);
700 	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
701 
702 	if (time_wq->wq) {
703 		cancel_delayed_work_sync(&time_wq->wk.work);
704 		destroy_workqueue(time_wq->wq);
705 	}
706 }
707 
708 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
709 {
710 	struct octeon_device *other_oct;
711 
712 	other_oct = lio_get_device(oct->octeon_id + 1);
713 
714 	if (other_oct && other_oct->pci_dev) {
715 		int oct_busnum, other_oct_busnum;
716 
717 		oct_busnum = oct->pci_dev->bus->number;
718 		other_oct_busnum = other_oct->pci_dev->bus->number;
719 
720 		if (oct_busnum == other_oct_busnum) {
721 			int oct_slot, other_oct_slot;
722 
723 			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
724 			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
725 
726 			if (oct_slot == other_oct_slot)
727 				return other_oct;
728 		}
729 	}
730 
731 	return NULL;
732 }
733 
734 static void disable_all_vf_links(struct octeon_device *oct)
735 {
736 	struct net_device *netdev;
737 	int max_vfs, vf, i;
738 
739 	if (!oct)
740 		return;
741 
742 	max_vfs = oct->sriov_info.max_vfs;
743 
744 	for (i = 0; i < oct->ifcount; i++) {
745 		netdev = oct->props[i].netdev;
746 		if (!netdev)
747 			continue;
748 
749 		for (vf = 0; vf < max_vfs; vf++)
750 			liquidio_set_vf_link_state(netdev, vf,
751 						   IFLA_VF_LINK_STATE_DISABLE);
752 	}
753 }
754 
755 static int liquidio_watchdog(void *param)
756 {
757 	bool err_msg_was_printed[LIO_MAX_CORES];
758 	u16 mask_of_crashed_or_stuck_cores = 0;
759 	bool all_vf_links_are_disabled = false;
760 	struct octeon_device *oct = param;
761 	struct octeon_device *other_oct;
762 #ifdef CONFIG_MODULE_UNLOAD
763 	long refcount, vfs_referencing_pf;
764 	u64 vfs_mask1, vfs_mask2;
765 #endif
766 	int core;
767 
768 	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
769 
770 	while (!kthread_should_stop()) {
771 		/* sleep for a couple of seconds so that we don't hog the CPU */
772 		set_current_state(TASK_INTERRUPTIBLE);
773 		schedule_timeout(msecs_to_jiffies(2000));
774 
775 		mask_of_crashed_or_stuck_cores =
776 		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
777 
778 		if (!mask_of_crashed_or_stuck_cores)
779 			continue;
780 
781 		WRITE_ONCE(oct->cores_crashed, true);
782 		other_oct = get_other_octeon_device(oct);
783 		if (other_oct)
784 			WRITE_ONCE(other_oct->cores_crashed, true);
785 
786 		for (core = 0; core < LIO_MAX_CORES; core++) {
787 			bool core_crashed_or_got_stuck;
788 
789 			core_crashed_or_got_stuck =
790 						(mask_of_crashed_or_stuck_cores
791 						 >> core) & 1;
792 
793 			if (core_crashed_or_got_stuck &&
794 			    !err_msg_was_printed[core]) {
795 				dev_err(&oct->pci_dev->dev,
796 					"ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
797 					core);
798 				err_msg_was_printed[core] = true;
799 			}
800 		}
801 
802 		if (all_vf_links_are_disabled)
803 			continue;
804 
805 		disable_all_vf_links(oct);
806 		disable_all_vf_links(other_oct);
807 		all_vf_links_are_disabled = true;
808 
809 #ifdef CONFIG_MODULE_UNLOAD
810 		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
811 		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
812 
813 		vfs_referencing_pf  = hweight64(vfs_mask1);
814 		vfs_referencing_pf += hweight64(vfs_mask2);
815 
816 		refcount = module_refcount(THIS_MODULE);
817 		if (refcount >= vfs_referencing_pf) {
818 			while (vfs_referencing_pf) {
819 				module_put(THIS_MODULE);
820 				vfs_referencing_pf--;
821 			}
822 		}
823 #endif
824 	}
825 
826 	return 0;
827 }
828 
829 /**
830  * liquidio_probe - PCI probe handler
831  * @pdev: PCI device structure
832  * @ent: unused
833  */
834 static int
835 liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
836 {
837 	struct octeon_device *oct_dev = NULL;
838 	struct handshake *hs;
839 
840 	oct_dev = octeon_allocate_device(pdev->device,
841 					 sizeof(struct octeon_device_priv));
842 	if (!oct_dev) {
843 		dev_err(&pdev->dev, "Unable to allocate device\n");
844 		return -ENOMEM;
845 	}
846 
847 	if (pdev->device == OCTEON_CN23XX_PF_VID)
848 		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
849 
850 	/* Enable PTP for 6XXX Device */
851 	if (((pdev->device == OCTEON_CN66XX) ||
852 	     (pdev->device == OCTEON_CN68XX)))
853 		oct_dev->ptp_enable = true;
854 	else
855 		oct_dev->ptp_enable = false;
856 
857 	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
858 		 (u32)pdev->vendor, (u32)pdev->device);
859 
860 	/* Assign octeon_device for this device to the private data area. */
861 	pci_set_drvdata(pdev, oct_dev);
862 
863 	/* set linux specific device pointer */
864 	oct_dev->pci_dev = (void *)pdev;
865 
866 	oct_dev->subsystem_id = pdev->subsystem_vendor |
867 		(pdev->subsystem_device << 16);
868 
869 	hs = &handshake[oct_dev->octeon_id];
870 	init_completion(&hs->init);
871 	init_completion(&hs->started);
872 	hs->pci_dev = pdev;
873 
874 	if (oct_dev->octeon_id == 0)
875 		/* first LiquidIO NIC is detected */
876 		complete(&first_stage);
877 
878 	if (octeon_device_init(oct_dev)) {
879 		complete(&hs->init);
880 		liquidio_remove(pdev);
881 		return -ENOMEM;
882 	}
883 
884 	if (OCTEON_CN23XX_PF(oct_dev)) {
885 		u8 bus, device, function;
886 
887 		if (atomic_read(oct_dev->adapter_refcount) == 1) {
888 			/* Each NIC gets one watchdog kernel thread.  The first
889 			 * PF (of each NIC) that gets pci_driver->probe()'d
890 			 * creates that thread.
891 			 */
892 			bus = pdev->bus->number;
893 			device = PCI_SLOT(pdev->devfn);
894 			function = PCI_FUNC(pdev->devfn);
895 			oct_dev->watchdog_task = kthread_run(liquidio_watchdog,
896 							     oct_dev,
897 							     "liowd/%02hhx:%02hhx.%hhx",
898 							     bus, device, function);
899 			if (IS_ERR(oct_dev->watchdog_task)) {
900 				oct_dev->watchdog_task = NULL;
901 				dev_err(&oct_dev->pci_dev->dev,
902 					"failed to create kernel_thread\n");
903 				liquidio_remove(pdev);
904 				return -1;
905 			}
906 		}
907 	}
908 
909 	oct_dev->rx_pause = 1;
910 	oct_dev->tx_pause = 1;
911 
912 	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
913 
914 	return 0;
915 }
916 
917 static bool fw_type_is_auto(void)
918 {
919 	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
920 		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
921 }
922 
923 /**
924  * octeon_pci_flr - PCI FLR for each Octeon device.
925  * @oct: octeon device
926  */
927 static void octeon_pci_flr(struct octeon_device *oct)
928 {
929 	int rc;
930 
931 	pci_save_state(oct->pci_dev);
932 
933 	pci_cfg_access_lock(oct->pci_dev);
934 
935 	/* Quiesce the device completely */
936 	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
937 			      PCI_COMMAND_INTX_DISABLE);
938 
939 	rc = __pci_reset_function_locked(oct->pci_dev);
940 
941 	if (rc != 0)
942 		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
943 			rc, oct->pf_num);
944 
945 	pci_cfg_access_unlock(oct->pci_dev);
946 
947 	pci_restore_state(oct->pci_dev);
948 }
949 
950 /**
951  * octeon_destroy_resources - Destroy resources associated with octeon device
952  * @oct: octeon device
953  */
954 static void octeon_destroy_resources(struct octeon_device *oct)
955 {
956 	int i, refcount;
957 	struct msix_entry *msix_entries;
958 	struct octeon_device_priv *oct_priv =
959 		(struct octeon_device_priv *)oct->priv;
960 
961 	struct handshake *hs;
962 
963 	switch (atomic_read(&oct->status)) {
964 	case OCT_DEV_RUNNING:
965 	case OCT_DEV_CORE_OK:
966 
967 		/* No more instructions will be forwarded. */
968 		atomic_set(&oct->status, OCT_DEV_IN_RESET);
969 
970 		oct->app_mode = CVM_DRV_INVALID_APP;
971 		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
972 			lio_get_state_string(&oct->status));
973 
974 		schedule_timeout_uninterruptible(HZ / 10);
975 
976 		fallthrough;
977 	case OCT_DEV_HOST_OK:
978 
979 	case OCT_DEV_CONSOLE_INIT_DONE:
980 		/* Remove any consoles */
981 		octeon_remove_consoles(oct);
982 
983 		fallthrough;
984 	case OCT_DEV_IO_QUEUES_DONE:
985 		if (lio_wait_for_instr_fetch(oct))
986 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
987 
988 		if (wait_for_pending_requests(oct))
989 			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
990 
991 		/* Disable the input and output queues now. No more packets will
992 		 * arrive from Octeon, but we should wait for all packet
993 		 * processing to finish.
994 		 */
995 		oct->fn_list.disable_io_queues(oct);
996 
997 		if (lio_wait_for_oq_pkts(oct))
998 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
999 
1000 		/* Force all requests waiting to be fetched by OCTEON to
1001 		 * complete.
1002 		 */
1003 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1004 			struct octeon_instr_queue *iq;
1005 
1006 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
1007 				continue;
1008 			iq = oct->instr_queue[i];
1009 
1010 			if (atomic_read(&iq->instr_pending)) {
1011 				spin_lock_bh(&iq->lock);
1012 				iq->fill_cnt = 0;
1013 				iq->octeon_read_index = iq->host_write_index;
1014 				iq->stats.instr_processed +=
1015 					atomic_read(&iq->instr_pending);
1016 				lio_process_iq_request_list(oct, iq, 0);
1017 				spin_unlock_bh(&iq->lock);
1018 			}
1019 		}
1020 
1021 		lio_process_ordered_list(oct, 1);
1022 		octeon_free_sc_done_list(oct);
1023 		octeon_free_sc_zombie_list(oct);
1024 
1025 		fallthrough;
1026 	case OCT_DEV_INTR_SET_DONE:
1027 		/* Disable interrupts  */
1028 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1029 
1030 		if (oct->msix_on) {
1031 			msix_entries = (struct msix_entry *)oct->msix_entries;
1032 			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1033 				if (oct->ioq_vector[i].vector) {
1034 					/* clear the affinity_cpumask */
1035 					irq_set_affinity_hint(
1036 							msix_entries[i].vector,
1037 							NULL);
1038 					free_irq(msix_entries[i].vector,
1039 						 &oct->ioq_vector[i]);
1040 					oct->ioq_vector[i].vector = 0;
1041 				}
1042 			}
1043 			/* non-iov vector's argument is oct struct */
1044 			free_irq(msix_entries[i].vector, oct);
1045 
1046 			pci_disable_msix(oct->pci_dev);
1047 			kfree(oct->msix_entries);
1048 			oct->msix_entries = NULL;
1049 		} else {
1050 			/* Release the interrupt line */
1051 			free_irq(oct->pci_dev->irq, oct);
1052 
1053 			if (oct->flags & LIO_FLAG_MSI_ENABLED)
1054 				pci_disable_msi(oct->pci_dev);
1055 		}
1056 
1057 		kfree(oct->irq_name_storage);
1058 		oct->irq_name_storage = NULL;
1059 
1060 		fallthrough;
1061 	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1062 		if (OCTEON_CN23XX_PF(oct))
1063 			octeon_free_ioq_vector(oct);
1064 
1065 		fallthrough;
1066 	case OCT_DEV_MBOX_SETUP_DONE:
1067 		if (OCTEON_CN23XX_PF(oct))
1068 			oct->fn_list.free_mbox(oct);
1069 
1070 		fallthrough;
1071 	case OCT_DEV_IN_RESET:
1072 	case OCT_DEV_DROQ_INIT_DONE:
1073 		/* Wait for any pending operations */
1074 		mdelay(100);
1075 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1076 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
1077 				continue;
1078 			octeon_delete_droq(oct, i);
1079 		}
1080 
1081 		/* Force any pending handshakes to complete */
1082 		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1083 			hs = &handshake[i];
1084 
1085 			if (hs->pci_dev) {
1086 				handshake[oct->octeon_id].init_ok = 0;
1087 				complete(&handshake[oct->octeon_id].init);
1088 				handshake[oct->octeon_id].started_ok = 0;
1089 				complete(&handshake[oct->octeon_id].started);
1090 			}
1091 		}
1092 
1093 		fallthrough;
1094 	case OCT_DEV_RESP_LIST_INIT_DONE:
1095 		octeon_delete_response_list(oct);
1096 
1097 		fallthrough;
1098 	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1099 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1100 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
1101 				continue;
1102 			octeon_delete_instr_queue(oct, i);
1103 		}
1104 #ifdef CONFIG_PCI_IOV
1105 		if (oct->sriov_info.sriov_enabled)
1106 			pci_disable_sriov(oct->pci_dev);
1107 #endif
1108 		fallthrough;
1109 	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1110 		octeon_free_sc_buffer_pool(oct);
1111 
1112 		fallthrough;
1113 	case OCT_DEV_DISPATCH_INIT_DONE:
1114 		octeon_delete_dispatch_list(oct);
1115 		cancel_delayed_work_sync(&oct->nic_poll_work.work);
1116 
1117 		fallthrough;
1118 	case OCT_DEV_PCI_MAP_DONE:
1119 		refcount = octeon_deregister_device(oct);
1120 
1121 		/* Soft reset the octeon device before exiting.
1122 		 * However, if fw was loaded from card (i.e. autoboot),
1123 		 * perform an FLR instead.
1124 		 * Implementation note: only soft-reset the device
1125 		 * if it is a CN6XXX OR the LAST CN23XX device.
1126 		 */
1127 		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1128 			octeon_pci_flr(oct);
1129 		else if (OCTEON_CN6XXX(oct) || !refcount)
1130 			oct->fn_list.soft_reset(oct);
1131 
1132 		octeon_unmap_pci_barx(oct, 0);
1133 		octeon_unmap_pci_barx(oct, 1);
1134 
1135 		fallthrough;
1136 	case OCT_DEV_PCI_ENABLE_DONE:
1137 		pci_clear_master(oct->pci_dev);
1138 		/* Disable the device, releasing the PCI INT */
1139 		pci_disable_device(oct->pci_dev);
1140 
1141 		fallthrough;
1142 	case OCT_DEV_BEGIN_STATE:
1143 		/* Nothing to be done here either */
1144 		break;
1145 	}                       /* end switch (oct->status) */
1146 
1147 	tasklet_kill(&oct_priv->droq_tasklet);
1148 }
1149 
1150 /**
1151  * send_rx_ctrl_cmd - Send Rx control command
1152  * @lio: per-network private data
1153  * @start_stop: whether to start or stop
1154  */
1155 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1156 {
1157 	struct octeon_soft_command *sc;
1158 	union octnet_cmd *ncmd;
1159 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1160 	int retval;
1161 
1162 	if (oct->props[lio->ifidx].rx_on == start_stop)
1163 		return 0;
1164 
1165 	sc = (struct octeon_soft_command *)
1166 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1167 					  16, 0);
1168 	if (!sc) {
1169 		netif_info(lio, rx_err, lio->netdev,
1170 			   "Failed to allocate octeon_soft_command struct\n");
1171 		return -ENOMEM;
1172 	}
1173 
1174 	ncmd = (union octnet_cmd *)sc->virtdptr;
1175 
1176 	ncmd->u64 = 0;
1177 	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1178 	ncmd->s.param1 = start_stop;
1179 
1180 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1181 
1182 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1183 
1184 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1185 				    OPCODE_NIC_CMD, 0, 0, 0);
1186 
1187 	init_completion(&sc->complete);
1188 	sc->sc_status = OCTEON_REQUEST_PENDING;
1189 
1190 	retval = octeon_send_soft_command(oct, sc);
1191 	if (retval == IQ_SEND_FAILED) {
1192 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1193 		octeon_free_soft_command(oct, sc);
1194 	} else {
1195 		/* Sleep on a wait queue till the cond flag indicates that the
1196 		 * response arrived or timed-out.
1197 		 */
1198 		retval = wait_for_sc_completion_timeout(oct, sc, 0);
1199 		if (retval)
1200 			return retval;
1201 
1202 		oct->props[lio->ifidx].rx_on = start_stop;
1203 		WRITE_ONCE(sc->caller_is_done, true);
1204 	}
1205 
1206 	return retval;
1207 }
1208 
1209 /**
1210  * liquidio_destroy_nic_device - Destroy NIC device interface
1211  * @oct: octeon device
1212  * @ifidx: which interface to destroy
1213  *
1214  * Cleanup associated with each interface for an Octeon device  when NIC
1215  * module is being unloaded or if initialization fails during load.
1216  */
1217 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1218 {
1219 	struct net_device *netdev = oct->props[ifidx].netdev;
1220 	struct octeon_device_priv *oct_priv =
1221 		(struct octeon_device_priv *)oct->priv;
1222 	struct napi_struct *napi, *n;
1223 	struct lio *lio;
1224 
1225 	if (!netdev) {
1226 		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1227 			__func__, ifidx);
1228 		return;
1229 	}
1230 
1231 	lio = GET_LIO(netdev);
1232 
1233 	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1234 
1235 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1236 		liquidio_stop(netdev);
1237 
1238 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1239 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1240 			napi_disable(napi);
1241 
1242 		oct->props[lio->ifidx].napi_enabled = 0;
1243 
1244 		if (OCTEON_CN23XX_PF(oct))
1245 			oct->droq[0]->ops.poll_mode = 0;
1246 	}
1247 
1248 	/* Delete NAPI */
1249 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1250 		netif_napi_del(napi);
1251 
1252 	tasklet_enable(&oct_priv->droq_tasklet);
1253 
1254 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1255 		unregister_netdev(netdev);
1256 
1257 	cleanup_sync_octeon_time_wq(netdev);
1258 	cleanup_link_status_change_wq(netdev);
1259 
1260 	cleanup_rx_oom_poll_fn(netdev);
1261 
1262 	lio_delete_glists(lio);
1263 
1264 	free_netdev(netdev);
1265 
1266 	oct->props[ifidx].gmxport = -1;
1267 
1268 	oct->props[ifidx].netdev = NULL;
1269 }
1270 
1271 /**
1272  * liquidio_stop_nic_module - Stop complete NIC functionality
1273  * @oct: octeon device
1274  */
1275 static int liquidio_stop_nic_module(struct octeon_device *oct)
1276 {
1277 	int i, j;
1278 	struct lio *lio;
1279 
1280 	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1281 	device_lock(&oct->pci_dev->dev);
1282 	if (oct->devlink) {
1283 		devlink_unregister(oct->devlink);
1284 		devlink_free(oct->devlink);
1285 		oct->devlink = NULL;
1286 	}
1287 	device_unlock(&oct->pci_dev->dev);
1288 
1289 	if (!oct->ifcount) {
1290 		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1291 		return 1;
1292 	}
1293 
1294 	spin_lock_bh(&oct->cmd_resp_wqlock);
1295 	oct->cmd_resp_state = OCT_DRV_OFFLINE;
1296 	spin_unlock_bh(&oct->cmd_resp_wqlock);
1297 
1298 	lio_vf_rep_destroy(oct);
1299 
1300 	for (i = 0; i < oct->ifcount; i++) {
1301 		lio = GET_LIO(oct->props[i].netdev);
1302 		for (j = 0; j < oct->num_oqs; j++)
1303 			octeon_unregister_droq_ops(oct,
1304 						   lio->linfo.rxpciq[j].s.q_no);
1305 	}
1306 
1307 	for (i = 0; i < oct->ifcount; i++)
1308 		liquidio_destroy_nic_device(oct, i);
1309 
1310 	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1311 	return 0;
1312 }
1313 
1314 /**
1315  * liquidio_remove - Cleans up resources at unload time
1316  * @pdev: PCI device structure
1317  */
1318 static void liquidio_remove(struct pci_dev *pdev)
1319 {
1320 	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1321 
1322 	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1323 
1324 	if (oct_dev->watchdog_task)
1325 		kthread_stop(oct_dev->watchdog_task);
1326 
1327 	if (!oct_dev->octeon_id &&
1328 	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1329 		lio_vf_rep_modexit();
1330 
1331 	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1332 		liquidio_stop_nic_module(oct_dev);
1333 
1334 	/* Reset the octeon device and cleanup all memory allocated for
1335 	 * the octeon device by driver.
1336 	 */
1337 	octeon_destroy_resources(oct_dev);
1338 
1339 	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1340 
1341 	/* This octeon device has been removed. Update the global
1342 	 * data structure to reflect this. Free the device structure.
1343 	 */
1344 	octeon_free_device_mem(oct_dev);
1345 }
1346 
1347 /**
1348  * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space
1349  * @oct: octeon device
1350  */
1351 static int octeon_chip_specific_setup(struct octeon_device *oct)
1352 {
1353 	u32 dev_id, rev_id;
1354 	int ret = 1;
1355 
1356 	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1357 	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1358 	oct->rev_id = rev_id & 0xff;
1359 
1360 	switch (dev_id) {
1361 	case OCTEON_CN68XX_PCIID:
1362 		oct->chip_id = OCTEON_CN68XX;
1363 		ret = lio_setup_cn68xx_octeon_device(oct);
1364 		break;
1365 
1366 	case OCTEON_CN66XX_PCIID:
1367 		oct->chip_id = OCTEON_CN66XX;
1368 		ret = lio_setup_cn66xx_octeon_device(oct);
1369 		break;
1370 
1371 	case OCTEON_CN23XX_PCIID_PF:
1372 		oct->chip_id = OCTEON_CN23XX_PF_VID;
1373 		ret = setup_cn23xx_octeon_pf_device(oct);
1374 		if (ret)
1375 			break;
1376 #ifdef CONFIG_PCI_IOV
1377 		if (!ret)
1378 			pci_sriov_set_totalvfs(oct->pci_dev,
1379 					       oct->sriov_info.max_vfs);
1380 #endif
1381 		break;
1382 
1383 	default:
1384 		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1385 			dev_id);
1386 	}
1387 
1388 	return ret;
1389 }
1390 
1391 /**
1392  * octeon_pci_os_setup - PCI initialization for each Octeon device.
1393  * @oct: octeon device
1394  */
1395 static int octeon_pci_os_setup(struct octeon_device *oct)
1396 {
1397 	/* setup PCI stuff first */
1398 	if (pci_enable_device(oct->pci_dev)) {
1399 		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1400 		return 1;
1401 	}
1402 
1403 	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1404 		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1405 		pci_disable_device(oct->pci_dev);
1406 		return 1;
1407 	}
1408 
1409 	/* Enable PCI DMA Master. */
1410 	pci_set_master(oct->pci_dev);
1411 
1412 	return 0;
1413 }
1414 
1415 /**
1416  * free_netbuf - Unmap and free network buffer
1417  * @buf: buffer
1418  */
1419 static void free_netbuf(void *buf)
1420 {
1421 	struct sk_buff *skb;
1422 	struct octnet_buf_free_info *finfo;
1423 	struct lio *lio;
1424 
1425 	finfo = (struct octnet_buf_free_info *)buf;
1426 	skb = finfo->skb;
1427 	lio = finfo->lio;
1428 
1429 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1430 			 DMA_TO_DEVICE);
1431 
1432 	tx_buffer_free(skb);
1433 }
1434 
1435 /**
1436  * free_netsgbuf - Unmap and free gather buffer
1437  * @buf: buffer
1438  */
1439 static void free_netsgbuf(void *buf)
1440 {
1441 	struct octnet_buf_free_info *finfo;
1442 	struct sk_buff *skb;
1443 	struct lio *lio;
1444 	struct octnic_gather *g;
1445 	int i, frags, iq;
1446 
1447 	finfo = (struct octnet_buf_free_info *)buf;
1448 	skb = finfo->skb;
1449 	lio = finfo->lio;
1450 	g = finfo->g;
1451 	frags = skb_shinfo(skb)->nr_frags;
1452 
1453 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1454 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1455 			 DMA_TO_DEVICE);
1456 
1457 	i = 1;
1458 	while (frags--) {
1459 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
1460 
1461 		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1462 			       g->sg[(i >> 2)].ptr[(i & 3)],
1463 			       skb_frag_size(frag), DMA_TO_DEVICE);
1464 		i++;
1465 	}
1466 
1467 	iq = skb_iq(lio->oct_dev, skb);
1468 	spin_lock(&lio->glist_lock[iq]);
1469 	list_add_tail(&g->list, &lio->glist[iq]);
1470 	spin_unlock(&lio->glist_lock[iq]);
1471 
1472 	tx_buffer_free(skb);
1473 }
1474 
1475 /**
1476  * free_netsgbuf_with_resp - Unmap and free gather buffer with response
1477  * @buf: buffer
1478  */
1479 static void free_netsgbuf_with_resp(void *buf)
1480 {
1481 	struct octeon_soft_command *sc;
1482 	struct octnet_buf_free_info *finfo;
1483 	struct sk_buff *skb;
1484 	struct lio *lio;
1485 	struct octnic_gather *g;
1486 	int i, frags, iq;
1487 
1488 	sc = (struct octeon_soft_command *)buf;
1489 	skb = (struct sk_buff *)sc->callback_arg;
1490 	finfo = (struct octnet_buf_free_info *)&skb->cb;
1491 
1492 	lio = finfo->lio;
1493 	g = finfo->g;
1494 	frags = skb_shinfo(skb)->nr_frags;
1495 
1496 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1497 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1498 			 DMA_TO_DEVICE);
1499 
1500 	i = 1;
1501 	while (frags--) {
1502 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
1503 
1504 		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1505 			       g->sg[(i >> 2)].ptr[(i & 3)],
1506 			       skb_frag_size(frag), DMA_TO_DEVICE);
1507 		i++;
1508 	}
1509 
1510 	iq = skb_iq(lio->oct_dev, skb);
1511 
1512 	spin_lock(&lio->glist_lock[iq]);
1513 	list_add_tail(&g->list, &lio->glist[iq]);
1514 	spin_unlock(&lio->glist_lock[iq]);
1515 
1516 	/* Don't free the skb yet */
1517 }
1518 
1519 /**
1520  * liquidio_ptp_adjfreq - Adjust ptp frequency
1521  * @ptp: PTP clock info
1522  * @ppb: how much to adjust by, in parts-per-billion
1523  */
1524 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1525 {
1526 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1527 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1528 	u64 comp, delta;
1529 	unsigned long flags;
1530 	bool neg_adj = false;
1531 
1532 	if (ppb < 0) {
1533 		neg_adj = true;
1534 		ppb = -ppb;
1535 	}
1536 
1537 	/* The hardware adds the clock compensation value to the
1538 	 * PTP clock on every coprocessor clock cycle, so we
1539 	 * compute the delta in terms of coprocessor clocks.
1540 	 */
1541 	delta = (u64)ppb << 32;
1542 	do_div(delta, oct->coproc_clock_rate);
1543 
1544 	spin_lock_irqsave(&lio->ptp_lock, flags);
1545 	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1546 	if (neg_adj)
1547 		comp -= delta;
1548 	else
1549 		comp += delta;
1550 	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1551 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1552 
1553 	return 0;
1554 }
1555 
1556 /**
1557  * liquidio_ptp_adjtime - Adjust ptp time
1558  * @ptp: PTP clock info
1559  * @delta: how much to adjust by, in nanosecs
1560  */
1561 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1562 {
1563 	unsigned long flags;
1564 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1565 
1566 	spin_lock_irqsave(&lio->ptp_lock, flags);
1567 	lio->ptp_adjust += delta;
1568 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1569 
1570 	return 0;
1571 }
1572 
1573 /**
1574  * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
1575  * @ptp: PTP clock info
1576  * @ts: timespec
1577  */
1578 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1579 				struct timespec64 *ts)
1580 {
1581 	u64 ns;
1582 	unsigned long flags;
1583 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1584 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1585 
1586 	spin_lock_irqsave(&lio->ptp_lock, flags);
1587 	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1588 	ns += lio->ptp_adjust;
1589 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1590 
1591 	*ts = ns_to_timespec64(ns);
1592 
1593 	return 0;
1594 }
1595 
1596 /**
1597  * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
1598  * @ptp: PTP clock info
1599  * @ts: timespec
1600  */
1601 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1602 				const struct timespec64 *ts)
1603 {
1604 	u64 ns;
1605 	unsigned long flags;
1606 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1607 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1608 
1609 	ns = timespec64_to_ns(ts);
1610 
1611 	spin_lock_irqsave(&lio->ptp_lock, flags);
1612 	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1613 	lio->ptp_adjust = 0;
1614 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1615 
1616 	return 0;
1617 }
1618 
1619 /**
1620  * liquidio_ptp_enable - Check if PTP is enabled
1621  * @ptp: PTP clock info
1622  * @rq: request
1623  * @on: is it on
1624  */
1625 static int
1626 liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
1627 		    struct ptp_clock_request __maybe_unused *rq,
1628 		    int __maybe_unused on)
1629 {
1630 	return -EOPNOTSUPP;
1631 }
1632 
1633 /**
1634  * oct_ptp_open - Open PTP clock source
1635  * @netdev: network device
1636  */
1637 static void oct_ptp_open(struct net_device *netdev)
1638 {
1639 	struct lio *lio = GET_LIO(netdev);
1640 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1641 
1642 	spin_lock_init(&lio->ptp_lock);
1643 
1644 	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1645 	lio->ptp_info.owner = THIS_MODULE;
1646 	lio->ptp_info.max_adj = 250000000;
1647 	lio->ptp_info.n_alarm = 0;
1648 	lio->ptp_info.n_ext_ts = 0;
1649 	lio->ptp_info.n_per_out = 0;
1650 	lio->ptp_info.pps = 0;
1651 	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1652 	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1653 	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1654 	lio->ptp_info.settime64 = liquidio_ptp_settime;
1655 	lio->ptp_info.enable = liquidio_ptp_enable;
1656 
1657 	lio->ptp_adjust = 0;
1658 
1659 	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1660 					     &oct->pci_dev->dev);
1661 
1662 	if (IS_ERR(lio->ptp_clock))
1663 		lio->ptp_clock = NULL;
1664 }
1665 
1666 /**
1667  * liquidio_ptp_init - Init PTP clock
1668  * @oct: octeon device
1669  */
1670 static void liquidio_ptp_init(struct octeon_device *oct)
1671 {
1672 	u64 clock_comp, cfg;
1673 
1674 	clock_comp = (u64)NSEC_PER_SEC << 32;
1675 	do_div(clock_comp, oct->coproc_clock_rate);
1676 	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1677 
1678 	/* Enable */
1679 	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1680 	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1681 }
1682 
1683 /**
1684  * load_firmware - Load firmware to device
1685  * @oct: octeon device
1686  *
1687  * Maps device to firmware filename, requests firmware, and downloads it
1688  */
1689 static int load_firmware(struct octeon_device *oct)
1690 {
1691 	int ret = 0;
1692 	const struct firmware *fw;
1693 	char fw_name[LIO_MAX_FW_FILENAME_LEN];
1694 	char *tmp_fw_type;
1695 
1696 	if (fw_type_is_auto()) {
1697 		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1698 		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1699 	} else {
1700 		tmp_fw_type = fw_type;
1701 	}
1702 
1703 	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1704 		octeon_get_conf(oct)->card_name, tmp_fw_type,
1705 		LIO_FW_NAME_SUFFIX);
1706 
1707 	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1708 	if (ret) {
1709 		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1710 			fw_name);
1711 		release_firmware(fw);
1712 		return ret;
1713 	}
1714 
1715 	ret = octeon_download_firmware(oct, fw->data, fw->size);
1716 
1717 	release_firmware(fw);
1718 
1719 	return ret;
1720 }
1721 
1722 /**
1723  * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
1724  * @work: work_struct data structure
1725  */
1726 static void octnet_poll_check_txq_status(struct work_struct *work)
1727 {
1728 	struct cavium_wk *wk = (struct cavium_wk *)work;
1729 	struct lio *lio = (struct lio *)wk->ctxptr;
1730 
1731 	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1732 		return;
1733 
1734 	check_txq_status(lio);
1735 	queue_delayed_work(lio->txq_status_wq.wq,
1736 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1737 }
1738 
1739 /**
1740  * setup_tx_poll_fn - Sets up the txq poll check
1741  * @netdev: network device
1742  */
1743 static inline int setup_tx_poll_fn(struct net_device *netdev)
1744 {
1745 	struct lio *lio = GET_LIO(netdev);
1746 	struct octeon_device *oct = lio->oct_dev;
1747 
1748 	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1749 						WQ_MEM_RECLAIM, 0);
1750 	if (!lio->txq_status_wq.wq) {
1751 		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1752 		return -1;
1753 	}
1754 	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1755 			  octnet_poll_check_txq_status);
1756 	lio->txq_status_wq.wk.ctxptr = lio;
1757 	queue_delayed_work(lio->txq_status_wq.wq,
1758 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1759 	return 0;
1760 }
1761 
1762 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1763 {
1764 	struct lio *lio = GET_LIO(netdev);
1765 
1766 	if (lio->txq_status_wq.wq) {
1767 		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1768 		destroy_workqueue(lio->txq_status_wq.wq);
1769 	}
1770 }
1771 
1772 /**
1773  * liquidio_open - Net device open for LiquidIO
1774  * @netdev: network device
1775  */
1776 static int liquidio_open(struct net_device *netdev)
1777 {
1778 	struct lio *lio = GET_LIO(netdev);
1779 	struct octeon_device *oct = lio->oct_dev;
1780 	struct octeon_device_priv *oct_priv =
1781 		(struct octeon_device_priv *)oct->priv;
1782 	struct napi_struct *napi, *n;
1783 	int ret = 0;
1784 
1785 	if (oct->props[lio->ifidx].napi_enabled == 0) {
1786 		tasklet_disable(&oct_priv->droq_tasklet);
1787 
1788 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1789 			napi_enable(napi);
1790 
1791 		oct->props[lio->ifidx].napi_enabled = 1;
1792 
1793 		if (OCTEON_CN23XX_PF(oct))
1794 			oct->droq[0]->ops.poll_mode = 1;
1795 	}
1796 
1797 	if (oct->ptp_enable)
1798 		oct_ptp_open(netdev);
1799 
1800 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
1801 
1802 	if (OCTEON_CN23XX_PF(oct)) {
1803 		if (!oct->msix_on)
1804 			if (setup_tx_poll_fn(netdev))
1805 				return -1;
1806 	} else {
1807 		if (setup_tx_poll_fn(netdev))
1808 			return -1;
1809 	}
1810 
1811 	netif_tx_start_all_queues(netdev);
1812 
1813 	/* Ready for link status updates */
1814 	lio->intf_open = 1;
1815 
1816 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1817 
1818 	/* tell Octeon to start forwarding packets to host */
1819 	ret = send_rx_ctrl_cmd(lio, 1);
1820 	if (ret)
1821 		return ret;
1822 
1823 	/* start periodical statistics fetch */
1824 	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
1825 	lio->stats_wk.ctxptr = lio;
1826 	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
1827 					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1828 
1829 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1830 		 netdev->name);
1831 
1832 	return ret;
1833 }
1834 
1835 /**
1836  * liquidio_stop - Net device stop for LiquidIO
1837  * @netdev: network device
1838  */
1839 static int liquidio_stop(struct net_device *netdev)
1840 {
1841 	struct lio *lio = GET_LIO(netdev);
1842 	struct octeon_device *oct = lio->oct_dev;
1843 	struct octeon_device_priv *oct_priv =
1844 		(struct octeon_device_priv *)oct->priv;
1845 	struct napi_struct *napi, *n;
1846 	int ret = 0;
1847 
1848 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1849 
1850 	/* Stop any link updates */
1851 	lio->intf_open = 0;
1852 
1853 	stop_txqs(netdev);
1854 
1855 	/* Inform that netif carrier is down */
1856 	netif_carrier_off(netdev);
1857 	netif_tx_disable(netdev);
1858 
1859 	lio->linfo.link.s.link_up = 0;
1860 	lio->link_changes++;
1861 
1862 	/* Tell Octeon that nic interface is down. */
1863 	ret = send_rx_ctrl_cmd(lio, 0);
1864 	if (ret)
1865 		return ret;
1866 
1867 	if (OCTEON_CN23XX_PF(oct)) {
1868 		if (!oct->msix_on)
1869 			cleanup_tx_poll_fn(netdev);
1870 	} else {
1871 		cleanup_tx_poll_fn(netdev);
1872 	}
1873 
1874 	cancel_delayed_work_sync(&lio->stats_wk.work);
1875 
1876 	if (lio->ptp_clock) {
1877 		ptp_clock_unregister(lio->ptp_clock);
1878 		lio->ptp_clock = NULL;
1879 	}
1880 
1881 	/* Wait for any pending Rx descriptors */
1882 	if (lio_wait_for_clean_oq(oct))
1883 		netif_info(lio, rx_err, lio->netdev,
1884 			   "Proceeding with stop interface after partial RX desc processing\n");
1885 
1886 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1887 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1888 			napi_disable(napi);
1889 
1890 		oct->props[lio->ifidx].napi_enabled = 0;
1891 
1892 		if (OCTEON_CN23XX_PF(oct))
1893 			oct->droq[0]->ops.poll_mode = 0;
1894 
1895 		tasklet_enable(&oct_priv->droq_tasklet);
1896 	}
1897 
1898 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1899 
1900 	return ret;
1901 }
1902 
1903 /**
1904  * get_new_flags - Converts a mask based on net device flags
1905  * @netdev: network device
1906  *
1907  * This routine generates a octnet_ifflags mask from the net device flags
1908  * received from the OS.
1909  */
1910 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1911 {
1912 	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1913 
1914 	if (netdev->flags & IFF_PROMISC)
1915 		f |= OCTNET_IFFLAG_PROMISC;
1916 
1917 	if (netdev->flags & IFF_ALLMULTI)
1918 		f |= OCTNET_IFFLAG_ALLMULTI;
1919 
1920 	if (netdev->flags & IFF_MULTICAST) {
1921 		f |= OCTNET_IFFLAG_MULTICAST;
1922 
1923 		/* Accept all multicast addresses if there are more than we
1924 		 * can handle
1925 		 */
1926 		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1927 			f |= OCTNET_IFFLAG_ALLMULTI;
1928 	}
1929 
1930 	if (netdev->flags & IFF_BROADCAST)
1931 		f |= OCTNET_IFFLAG_BROADCAST;
1932 
1933 	return f;
1934 }
1935 
1936 /**
1937  * liquidio_set_mcast_list - Net device set_multicast_list
1938  * @netdev: network device
1939  */
1940 static void liquidio_set_mcast_list(struct net_device *netdev)
1941 {
1942 	struct lio *lio = GET_LIO(netdev);
1943 	struct octeon_device *oct = lio->oct_dev;
1944 	struct octnic_ctrl_pkt nctrl;
1945 	struct netdev_hw_addr *ha;
1946 	u64 *mc;
1947 	int ret;
1948 	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1949 
1950 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1951 
1952 	/* Create a ctrl pkt command to be sent to core app. */
1953 	nctrl.ncmd.u64 = 0;
1954 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1955 	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1956 	nctrl.ncmd.s.param2 = mc_count;
1957 	nctrl.ncmd.s.more = mc_count;
1958 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1959 	nctrl.netpndev = (u64)netdev;
1960 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1961 
1962 	/* copy all the addresses into the udd */
1963 	mc = &nctrl.udd[0];
1964 	netdev_for_each_mc_addr(ha, netdev) {
1965 		*mc = 0;
1966 		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1967 		/* no need to swap bytes */
1968 
1969 		if (++mc > &nctrl.udd[mc_count])
1970 			break;
1971 	}
1972 
1973 	/* Apparently, any activity in this call from the kernel has to
1974 	 * be atomic. So we won't wait for response.
1975 	 */
1976 
1977 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1978 	if (ret) {
1979 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1980 			ret);
1981 	}
1982 }
1983 
1984 /**
1985  * liquidio_set_mac - Net device set_mac_address
1986  * @netdev: network device
1987  * @p: pointer to sockaddr
1988  */
1989 static int liquidio_set_mac(struct net_device *netdev, void *p)
1990 {
1991 	int ret = 0;
1992 	struct lio *lio = GET_LIO(netdev);
1993 	struct octeon_device *oct = lio->oct_dev;
1994 	struct sockaddr *addr = (struct sockaddr *)p;
1995 	struct octnic_ctrl_pkt nctrl;
1996 
1997 	if (!is_valid_ether_addr(addr->sa_data))
1998 		return -EADDRNOTAVAIL;
1999 
2000 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2001 
2002 	nctrl.ncmd.u64 = 0;
2003 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2004 	nctrl.ncmd.s.param1 = 0;
2005 	nctrl.ncmd.s.more = 1;
2006 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2007 	nctrl.netpndev = (u64)netdev;
2008 
2009 	nctrl.udd[0] = 0;
2010 	/* The MAC Address is presented in network byte order. */
2011 	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2012 
2013 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2014 	if (ret < 0) {
2015 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2016 		return -ENOMEM;
2017 	}
2018 
2019 	if (nctrl.sc_status) {
2020 		dev_err(&oct->pci_dev->dev,
2021 			"%s: MAC Address change failed. sc return=%x\n",
2022 			 __func__, nctrl.sc_status);
2023 		return -EIO;
2024 	}
2025 
2026 	eth_hw_addr_set(netdev, addr->sa_data);
2027 	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2028 
2029 	return 0;
2030 }
2031 
2032 static void
2033 liquidio_get_stats64(struct net_device *netdev,
2034 		     struct rtnl_link_stats64 *lstats)
2035 {
2036 	struct lio *lio = GET_LIO(netdev);
2037 	struct octeon_device *oct;
2038 	u64 pkts = 0, drop = 0, bytes = 0;
2039 	struct oct_droq_stats *oq_stats;
2040 	struct oct_iq_stats *iq_stats;
2041 	int i, iq_no, oq_no;
2042 
2043 	oct = lio->oct_dev;
2044 
2045 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2046 		return;
2047 
2048 	for (i = 0; i < oct->num_iqs; i++) {
2049 		iq_no = lio->linfo.txpciq[i].s.q_no;
2050 		iq_stats = &oct->instr_queue[iq_no]->stats;
2051 		pkts += iq_stats->tx_done;
2052 		drop += iq_stats->tx_dropped;
2053 		bytes += iq_stats->tx_tot_bytes;
2054 	}
2055 
2056 	lstats->tx_packets = pkts;
2057 	lstats->tx_bytes = bytes;
2058 	lstats->tx_dropped = drop;
2059 
2060 	pkts = 0;
2061 	drop = 0;
2062 	bytes = 0;
2063 
2064 	for (i = 0; i < oct->num_oqs; i++) {
2065 		oq_no = lio->linfo.rxpciq[i].s.q_no;
2066 		oq_stats = &oct->droq[oq_no]->stats;
2067 		pkts += oq_stats->rx_pkts_received;
2068 		drop += (oq_stats->rx_dropped +
2069 			 oq_stats->dropped_nodispatch +
2070 			 oq_stats->dropped_toomany +
2071 			 oq_stats->dropped_nomem);
2072 		bytes += oq_stats->rx_bytes_received;
2073 	}
2074 
2075 	lstats->rx_bytes = bytes;
2076 	lstats->rx_packets = pkts;
2077 	lstats->rx_dropped = drop;
2078 
2079 	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2080 	lstats->collisions = oct->link_stats.fromhost.total_collisions;
2081 
2082 	/* detailed rx_errors: */
2083 	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2084 	/* recved pkt with crc error    */
2085 	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2086 	/* recv'd frame alignment error */
2087 	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2088 	/* recv'r fifo overrun */
2089 	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2090 
2091 	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2092 		lstats->rx_frame_errors + lstats->rx_fifo_errors;
2093 
2094 	/* detailed tx_errors */
2095 	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2096 	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2097 	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2098 
2099 	lstats->tx_errors = lstats->tx_aborted_errors +
2100 		lstats->tx_carrier_errors +
2101 		lstats->tx_fifo_errors;
2102 }
2103 
2104 /**
2105  * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
2106  * @netdev: network device
2107  * @ifr: interface request
2108  */
2109 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2110 {
2111 	struct hwtstamp_config conf;
2112 	struct lio *lio = GET_LIO(netdev);
2113 
2114 	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2115 		return -EFAULT;
2116 
2117 	switch (conf.tx_type) {
2118 	case HWTSTAMP_TX_ON:
2119 	case HWTSTAMP_TX_OFF:
2120 		break;
2121 	default:
2122 		return -ERANGE;
2123 	}
2124 
2125 	switch (conf.rx_filter) {
2126 	case HWTSTAMP_FILTER_NONE:
2127 		break;
2128 	case HWTSTAMP_FILTER_ALL:
2129 	case HWTSTAMP_FILTER_SOME:
2130 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2131 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2132 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2133 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2134 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2135 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2136 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2137 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2138 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2139 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2140 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2141 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2142 	case HWTSTAMP_FILTER_NTP_ALL:
2143 		conf.rx_filter = HWTSTAMP_FILTER_ALL;
2144 		break;
2145 	default:
2146 		return -ERANGE;
2147 	}
2148 
2149 	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2150 		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2151 
2152 	else
2153 		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2154 
2155 	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2156 }
2157 
2158 /**
2159  * liquidio_ioctl - ioctl handler
2160  * @netdev: network device
2161  * @ifr: interface request
2162  * @cmd: command
2163  */
2164 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2165 {
2166 	struct lio *lio = GET_LIO(netdev);
2167 
2168 	switch (cmd) {
2169 	case SIOCSHWTSTAMP:
2170 		if (lio->oct_dev->ptp_enable)
2171 			return hwtstamp_ioctl(netdev, ifr);
2172 		fallthrough;
2173 	default:
2174 		return -EOPNOTSUPP;
2175 	}
2176 }
2177 
2178 /**
2179  * handle_timestamp - handle a Tx timestamp response
2180  * @oct: octeon device
2181  * @status: response status
2182  * @buf: pointer to skb
2183  */
2184 static void handle_timestamp(struct octeon_device *oct,
2185 			     u32 status,
2186 			     void *buf)
2187 {
2188 	struct octnet_buf_free_info *finfo;
2189 	struct octeon_soft_command *sc;
2190 	struct oct_timestamp_resp *resp;
2191 	struct lio *lio;
2192 	struct sk_buff *skb = (struct sk_buff *)buf;
2193 
2194 	finfo = (struct octnet_buf_free_info *)skb->cb;
2195 	lio = finfo->lio;
2196 	sc = finfo->sc;
2197 	oct = lio->oct_dev;
2198 	resp = (struct oct_timestamp_resp *)sc->virtrptr;
2199 
2200 	if (status != OCTEON_REQUEST_DONE) {
2201 		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2202 			CVM_CAST64(status));
2203 		resp->timestamp = 0;
2204 	}
2205 
2206 	octeon_swap_8B_data(&resp->timestamp, 1);
2207 
2208 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2209 		struct skb_shared_hwtstamps ts;
2210 		u64 ns = resp->timestamp;
2211 
2212 		netif_info(lio, tx_done, lio->netdev,
2213 			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2214 			   skb, (unsigned long long)ns);
2215 		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2216 		skb_tstamp_tx(skb, &ts);
2217 	}
2218 
2219 	octeon_free_soft_command(oct, sc);
2220 	tx_buffer_free(skb);
2221 }
2222 
2223 /**
2224  * send_nic_timestamp_pkt - Send a data packet that will be timestamped
2225  * @oct: octeon device
2226  * @ndata: pointer to network data
2227  * @finfo: pointer to private network data
2228  * @xmit_more: more is coming
2229  */
2230 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2231 					 struct octnic_data_pkt *ndata,
2232 					 struct octnet_buf_free_info *finfo,
2233 					 int xmit_more)
2234 {
2235 	int retval;
2236 	struct octeon_soft_command *sc;
2237 	struct lio *lio;
2238 	int ring_doorbell;
2239 	u32 len;
2240 
2241 	lio = finfo->lio;
2242 
2243 	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2244 					    sizeof(struct oct_timestamp_resp));
2245 	finfo->sc = sc;
2246 
2247 	if (!sc) {
2248 		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2249 		return IQ_SEND_FAILED;
2250 	}
2251 
2252 	if (ndata->reqtype == REQTYPE_NORESP_NET)
2253 		ndata->reqtype = REQTYPE_RESP_NET;
2254 	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2255 		ndata->reqtype = REQTYPE_RESP_NET_SG;
2256 
2257 	sc->callback = handle_timestamp;
2258 	sc->callback_arg = finfo->skb;
2259 	sc->iq_no = ndata->q_no;
2260 
2261 	if (OCTEON_CN23XX_PF(oct))
2262 		len = (u32)((struct octeon_instr_ih3 *)
2263 			    (&sc->cmd.cmd3.ih3))->dlengsz;
2264 	else
2265 		len = (u32)((struct octeon_instr_ih2 *)
2266 			    (&sc->cmd.cmd2.ih2))->dlengsz;
2267 
2268 	ring_doorbell = !xmit_more;
2269 
2270 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2271 				     sc, len, ndata->reqtype);
2272 
2273 	if (retval == IQ_SEND_FAILED) {
2274 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2275 			retval);
2276 		octeon_free_soft_command(oct, sc);
2277 	} else {
2278 		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2279 	}
2280 
2281 	return retval;
2282 }
2283 
2284 /**
2285  * liquidio_xmit - Transmit networks packets to the Octeon interface
2286  * @skb: skbuff struct to be passed to network layer.
2287  * @netdev: pointer to network device
2288  *
2289  * Return: whether the packet was transmitted to the device okay or not
2290  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2291  */
2292 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2293 {
2294 	struct lio *lio;
2295 	struct octnet_buf_free_info *finfo;
2296 	union octnic_cmd_setup cmdsetup;
2297 	struct octnic_data_pkt ndata;
2298 	struct octeon_device *oct;
2299 	struct oct_iq_stats *stats;
2300 	struct octeon_instr_irh *irh;
2301 	union tx_info *tx_info;
2302 	int status = 0;
2303 	int q_idx = 0, iq_no = 0;
2304 	int j, xmit_more = 0;
2305 	u64 dptr = 0;
2306 	u32 tag = 0;
2307 
2308 	lio = GET_LIO(netdev);
2309 	oct = lio->oct_dev;
2310 
2311 	q_idx = skb_iq(oct, skb);
2312 	tag = q_idx;
2313 	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2314 
2315 	stats = &oct->instr_queue[iq_no]->stats;
2316 
2317 	/* Check for all conditions in which the current packet cannot be
2318 	 * transmitted.
2319 	 */
2320 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2321 	    (!lio->linfo.link.s.link_up) ||
2322 	    (skb->len <= 0)) {
2323 		netif_info(lio, tx_err, lio->netdev,
2324 			   "Transmit failed link_status : %d\n",
2325 			   lio->linfo.link.s.link_up);
2326 		goto lio_xmit_failed;
2327 	}
2328 
2329 	/* Use space in skb->cb to store info used to unmap and
2330 	 * free the buffers.
2331 	 */
2332 	finfo = (struct octnet_buf_free_info *)skb->cb;
2333 	finfo->lio = lio;
2334 	finfo->skb = skb;
2335 	finfo->sc = NULL;
2336 
2337 	/* Prepare the attributes for the data to be passed to OSI. */
2338 	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2339 
2340 	ndata.buf = (void *)finfo;
2341 
2342 	ndata.q_no = iq_no;
2343 
2344 	if (octnet_iq_is_full(oct, ndata.q_no)) {
2345 		/* defer sending if queue is full */
2346 		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2347 			   ndata.q_no);
2348 		stats->tx_iq_busy++;
2349 		return NETDEV_TX_BUSY;
2350 	}
2351 
2352 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2353 	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2354 	 */
2355 
2356 	ndata.datasize = skb->len;
2357 
2358 	cmdsetup.u64 = 0;
2359 	cmdsetup.s.iq_no = iq_no;
2360 
2361 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2362 		if (skb->encapsulation) {
2363 			cmdsetup.s.tnl_csum = 1;
2364 			stats->tx_vxlan++;
2365 		} else {
2366 			cmdsetup.s.transport_csum = 1;
2367 		}
2368 	}
2369 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2370 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2371 		cmdsetup.s.timestamp = 1;
2372 	}
2373 
2374 	if (skb_shinfo(skb)->nr_frags == 0) {
2375 		cmdsetup.s.u.datasize = skb->len;
2376 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2377 
2378 		/* Offload checksum calculation for TCP/UDP packets */
2379 		dptr = dma_map_single(&oct->pci_dev->dev,
2380 				      skb->data,
2381 				      skb->len,
2382 				      DMA_TO_DEVICE);
2383 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2384 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2385 				__func__);
2386 			stats->tx_dmamap_fail++;
2387 			return NETDEV_TX_BUSY;
2388 		}
2389 
2390 		if (OCTEON_CN23XX_PF(oct))
2391 			ndata.cmd.cmd3.dptr = dptr;
2392 		else
2393 			ndata.cmd.cmd2.dptr = dptr;
2394 		finfo->dptr = dptr;
2395 		ndata.reqtype = REQTYPE_NORESP_NET;
2396 
2397 	} else {
2398 		int i, frags;
2399 		skb_frag_t *frag;
2400 		struct octnic_gather *g;
2401 
2402 		spin_lock(&lio->glist_lock[q_idx]);
2403 		g = (struct octnic_gather *)
2404 			lio_list_delete_head(&lio->glist[q_idx]);
2405 		spin_unlock(&lio->glist_lock[q_idx]);
2406 
2407 		if (!g) {
2408 			netif_info(lio, tx_err, lio->netdev,
2409 				   "Transmit scatter gather: glist null!\n");
2410 			goto lio_xmit_failed;
2411 		}
2412 
2413 		cmdsetup.s.gather = 1;
2414 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2415 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2416 
2417 		memset(g->sg, 0, g->sg_size);
2418 
2419 		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2420 						 skb->data,
2421 						 (skb->len - skb->data_len),
2422 						 DMA_TO_DEVICE);
2423 		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2424 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2425 				__func__);
2426 			stats->tx_dmamap_fail++;
2427 			return NETDEV_TX_BUSY;
2428 		}
2429 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2430 
2431 		frags = skb_shinfo(skb)->nr_frags;
2432 		i = 1;
2433 		while (frags--) {
2434 			frag = &skb_shinfo(skb)->frags[i - 1];
2435 
2436 			g->sg[(i >> 2)].ptr[(i & 3)] =
2437 				skb_frag_dma_map(&oct->pci_dev->dev,
2438 					         frag, 0, skb_frag_size(frag),
2439 						 DMA_TO_DEVICE);
2440 
2441 			if (dma_mapping_error(&oct->pci_dev->dev,
2442 					      g->sg[i >> 2].ptr[i & 3])) {
2443 				dma_unmap_single(&oct->pci_dev->dev,
2444 						 g->sg[0].ptr[0],
2445 						 skb->len - skb->data_len,
2446 						 DMA_TO_DEVICE);
2447 				for (j = 1; j < i; j++) {
2448 					frag = &skb_shinfo(skb)->frags[j - 1];
2449 					dma_unmap_page(&oct->pci_dev->dev,
2450 						       g->sg[j >> 2].ptr[j & 3],
2451 						       skb_frag_size(frag),
2452 						       DMA_TO_DEVICE);
2453 				}
2454 				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2455 					__func__);
2456 				return NETDEV_TX_BUSY;
2457 			}
2458 
2459 			add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
2460 				    (i & 3));
2461 			i++;
2462 		}
2463 
2464 		dptr = g->sg_dma_ptr;
2465 
2466 		if (OCTEON_CN23XX_PF(oct))
2467 			ndata.cmd.cmd3.dptr = dptr;
2468 		else
2469 			ndata.cmd.cmd2.dptr = dptr;
2470 		finfo->dptr = dptr;
2471 		finfo->g = g;
2472 
2473 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
2474 	}
2475 
2476 	if (OCTEON_CN23XX_PF(oct)) {
2477 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2478 		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2479 	} else {
2480 		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2481 		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2482 	}
2483 
2484 	if (skb_shinfo(skb)->gso_size) {
2485 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2486 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2487 		stats->tx_gso++;
2488 	}
2489 
2490 	/* HW insert VLAN tag */
2491 	if (skb_vlan_tag_present(skb)) {
2492 		irh->priority = skb_vlan_tag_get(skb) >> 13;
2493 		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2494 	}
2495 
2496 	xmit_more = netdev_xmit_more();
2497 
2498 	if (unlikely(cmdsetup.s.timestamp))
2499 		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2500 	else
2501 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2502 	if (status == IQ_SEND_FAILED)
2503 		goto lio_xmit_failed;
2504 
2505 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2506 
2507 	if (status == IQ_SEND_STOP)
2508 		netif_stop_subqueue(netdev, q_idx);
2509 
2510 	netif_trans_update(netdev);
2511 
2512 	if (tx_info->s.gso_segs)
2513 		stats->tx_done += tx_info->s.gso_segs;
2514 	else
2515 		stats->tx_done++;
2516 	stats->tx_tot_bytes += ndata.datasize;
2517 
2518 	return NETDEV_TX_OK;
2519 
2520 lio_xmit_failed:
2521 	stats->tx_dropped++;
2522 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2523 		   iq_no, stats->tx_dropped);
2524 	if (dptr)
2525 		dma_unmap_single(&oct->pci_dev->dev, dptr,
2526 				 ndata.datasize, DMA_TO_DEVICE);
2527 
2528 	octeon_ring_doorbell_locked(oct, iq_no);
2529 
2530 	tx_buffer_free(skb);
2531 	return NETDEV_TX_OK;
2532 }
2533 
2534 /**
2535  * liquidio_tx_timeout - Network device Tx timeout
2536  * @netdev:    pointer to network device
2537  * @txqueue: index of the hung transmit queue
2538  */
2539 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2540 {
2541 	struct lio *lio;
2542 
2543 	lio = GET_LIO(netdev);
2544 
2545 	netif_info(lio, tx_err, lio->netdev,
2546 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2547 		   netdev->stats.tx_dropped);
2548 	netif_trans_update(netdev);
2549 	wake_txqs(netdev);
2550 }
2551 
2552 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2553 				    __be16 proto __attribute__((unused)),
2554 				    u16 vid)
2555 {
2556 	struct lio *lio = GET_LIO(netdev);
2557 	struct octeon_device *oct = lio->oct_dev;
2558 	struct octnic_ctrl_pkt nctrl;
2559 	int ret = 0;
2560 
2561 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2562 
2563 	nctrl.ncmd.u64 = 0;
2564 	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2565 	nctrl.ncmd.s.param1 = vid;
2566 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2567 	nctrl.netpndev = (u64)netdev;
2568 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2569 
2570 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2571 	if (ret) {
2572 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2573 			ret);
2574 		if (ret > 0)
2575 			ret = -EIO;
2576 	}
2577 
2578 	return ret;
2579 }
2580 
2581 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2582 				     __be16 proto __attribute__((unused)),
2583 				     u16 vid)
2584 {
2585 	struct lio *lio = GET_LIO(netdev);
2586 	struct octeon_device *oct = lio->oct_dev;
2587 	struct octnic_ctrl_pkt nctrl;
2588 	int ret = 0;
2589 
2590 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2591 
2592 	nctrl.ncmd.u64 = 0;
2593 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2594 	nctrl.ncmd.s.param1 = vid;
2595 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2596 	nctrl.netpndev = (u64)netdev;
2597 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2598 
2599 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2600 	if (ret) {
2601 		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2602 			ret);
2603 		if (ret > 0)
2604 			ret = -EIO;
2605 	}
2606 	return ret;
2607 }
2608 
2609 /**
2610  * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload
2611  * @netdev:                pointer to network device
2612  * @command:               OCTNET_CMD_TNL_RX_CSUM_CTL
2613  * @rx_cmd:                OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
2614  * Returns:                SUCCESS or FAILURE
2615  */
2616 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2617 				       u8 rx_cmd)
2618 {
2619 	struct lio *lio = GET_LIO(netdev);
2620 	struct octeon_device *oct = lio->oct_dev;
2621 	struct octnic_ctrl_pkt nctrl;
2622 	int ret = 0;
2623 
2624 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2625 
2626 	nctrl.ncmd.u64 = 0;
2627 	nctrl.ncmd.s.cmd = command;
2628 	nctrl.ncmd.s.param1 = rx_cmd;
2629 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2630 	nctrl.netpndev = (u64)netdev;
2631 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2632 
2633 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2634 	if (ret) {
2635 		dev_err(&oct->pci_dev->dev,
2636 			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2637 			ret);
2638 		if (ret > 0)
2639 			ret = -EIO;
2640 	}
2641 	return ret;
2642 }
2643 
2644 /**
2645  * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware
2646  * @netdev:                pointer to network device
2647  * @command:               OCTNET_CMD_VXLAN_PORT_CONFIG
2648  * @vxlan_port:            VxLAN port to be added or deleted
2649  * @vxlan_cmd_bit:         OCTNET_CMD_VXLAN_PORT_ADD,
2650  *                              OCTNET_CMD_VXLAN_PORT_DEL
2651  * Return:                     SUCCESS or FAILURE
2652  */
2653 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2654 				       u16 vxlan_port, u8 vxlan_cmd_bit)
2655 {
2656 	struct lio *lio = GET_LIO(netdev);
2657 	struct octeon_device *oct = lio->oct_dev;
2658 	struct octnic_ctrl_pkt nctrl;
2659 	int ret = 0;
2660 
2661 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2662 
2663 	nctrl.ncmd.u64 = 0;
2664 	nctrl.ncmd.s.cmd = command;
2665 	nctrl.ncmd.s.more = vxlan_cmd_bit;
2666 	nctrl.ncmd.s.param1 = vxlan_port;
2667 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2668 	nctrl.netpndev = (u64)netdev;
2669 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2670 
2671 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2672 	if (ret) {
2673 		dev_err(&oct->pci_dev->dev,
2674 			"VxLAN port add/delete failed in core (ret:0x%x)\n",
2675 			ret);
2676 		if (ret > 0)
2677 			ret = -EIO;
2678 	}
2679 	return ret;
2680 }
2681 
2682 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
2683 					unsigned int table, unsigned int entry,
2684 					struct udp_tunnel_info *ti)
2685 {
2686 	return liquidio_vxlan_port_command(netdev,
2687 					   OCTNET_CMD_VXLAN_PORT_CONFIG,
2688 					   htons(ti->port),
2689 					   OCTNET_CMD_VXLAN_PORT_ADD);
2690 }
2691 
2692 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
2693 					  unsigned int table,
2694 					  unsigned int entry,
2695 					  struct udp_tunnel_info *ti)
2696 {
2697 	return liquidio_vxlan_port_command(netdev,
2698 					   OCTNET_CMD_VXLAN_PORT_CONFIG,
2699 					   htons(ti->port),
2700 					   OCTNET_CMD_VXLAN_PORT_DEL);
2701 }
2702 
2703 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
2704 	.set_port	= liquidio_udp_tunnel_set_port,
2705 	.unset_port	= liquidio_udp_tunnel_unset_port,
2706 	.tables		= {
2707 		{ .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
2708 	},
2709 };
2710 
2711 /**
2712  * liquidio_fix_features - Net device fix features
2713  * @netdev:  pointer to network device
2714  * @request: features requested
2715  * Return: updated features list
2716  */
2717 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2718 					       netdev_features_t request)
2719 {
2720 	struct lio *lio = netdev_priv(netdev);
2721 
2722 	if ((request & NETIF_F_RXCSUM) &&
2723 	    !(lio->dev_capability & NETIF_F_RXCSUM))
2724 		request &= ~NETIF_F_RXCSUM;
2725 
2726 	if ((request & NETIF_F_HW_CSUM) &&
2727 	    !(lio->dev_capability & NETIF_F_HW_CSUM))
2728 		request &= ~NETIF_F_HW_CSUM;
2729 
2730 	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2731 		request &= ~NETIF_F_TSO;
2732 
2733 	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2734 		request &= ~NETIF_F_TSO6;
2735 
2736 	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2737 		request &= ~NETIF_F_LRO;
2738 
2739 	/*Disable LRO if RXCSUM is off */
2740 	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2741 	    (lio->dev_capability & NETIF_F_LRO))
2742 		request &= ~NETIF_F_LRO;
2743 
2744 	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2745 	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2746 		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2747 
2748 	return request;
2749 }
2750 
2751 /**
2752  * liquidio_set_features - Net device set features
2753  * @netdev:  pointer to network device
2754  * @features: features to enable/disable
2755  */
2756 static int liquidio_set_features(struct net_device *netdev,
2757 				 netdev_features_t features)
2758 {
2759 	struct lio *lio = netdev_priv(netdev);
2760 
2761 	if ((features & NETIF_F_LRO) &&
2762 	    (lio->dev_capability & NETIF_F_LRO) &&
2763 	    !(netdev->features & NETIF_F_LRO))
2764 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2765 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2766 	else if (!(features & NETIF_F_LRO) &&
2767 		 (lio->dev_capability & NETIF_F_LRO) &&
2768 		 (netdev->features & NETIF_F_LRO))
2769 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2770 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2771 
2772 	/* Sending command to firmware to enable/disable RX checksum
2773 	 * offload settings using ethtool
2774 	 */
2775 	if (!(netdev->features & NETIF_F_RXCSUM) &&
2776 	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2777 	    (features & NETIF_F_RXCSUM))
2778 		liquidio_set_rxcsum_command(netdev,
2779 					    OCTNET_CMD_TNL_RX_CSUM_CTL,
2780 					    OCTNET_CMD_RXCSUM_ENABLE);
2781 	else if ((netdev->features & NETIF_F_RXCSUM) &&
2782 		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2783 		 !(features & NETIF_F_RXCSUM))
2784 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2785 					    OCTNET_CMD_RXCSUM_DISABLE);
2786 
2787 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2788 	    (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2789 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2790 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2791 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
2792 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2793 		 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2794 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2795 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2796 				     OCTNET_CMD_VLAN_FILTER_DISABLE);
2797 
2798 	return 0;
2799 }
2800 
2801 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2802 				 u8 *mac, bool is_admin_assigned)
2803 {
2804 	struct lio *lio = GET_LIO(netdev);
2805 	struct octeon_device *oct = lio->oct_dev;
2806 	struct octnic_ctrl_pkt nctrl;
2807 	int ret = 0;
2808 
2809 	if (!is_valid_ether_addr(mac))
2810 		return -EINVAL;
2811 
2812 	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2813 		return -EINVAL;
2814 
2815 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2816 
2817 	nctrl.ncmd.u64 = 0;
2818 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2819 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
2820 	nctrl.ncmd.s.param1 = vfidx + 1;
2821 	nctrl.ncmd.s.more = 1;
2822 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2823 	nctrl.netpndev = (u64)netdev;
2824 	if (is_admin_assigned) {
2825 		nctrl.ncmd.s.param2 = true;
2826 		nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2827 	}
2828 
2829 	nctrl.udd[0] = 0;
2830 	/* The MAC Address is presented in network byte order. */
2831 	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2832 
2833 	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2834 
2835 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2836 	if (ret > 0)
2837 		ret = -EIO;
2838 
2839 	return ret;
2840 }
2841 
2842 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2843 {
2844 	struct lio *lio = GET_LIO(netdev);
2845 	struct octeon_device *oct = lio->oct_dev;
2846 	int retval;
2847 
2848 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2849 		return -EINVAL;
2850 
2851 	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2852 	if (!retval)
2853 		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2854 
2855 	return retval;
2856 }
2857 
2858 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2859 				    bool enable)
2860 {
2861 	struct lio *lio = GET_LIO(netdev);
2862 	struct octeon_device *oct = lio->oct_dev;
2863 	struct octnic_ctrl_pkt nctrl;
2864 	int retval;
2865 
2866 	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2867 		netif_info(lio, drv, lio->netdev,
2868 			   "firmware does not support spoofchk\n");
2869 		return -EOPNOTSUPP;
2870 	}
2871 
2872 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2873 		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2874 		return -EINVAL;
2875 	}
2876 
2877 	if (enable) {
2878 		if (oct->sriov_info.vf_spoofchk[vfidx])
2879 			return 0;
2880 	} else {
2881 		/* Clear */
2882 		if (!oct->sriov_info.vf_spoofchk[vfidx])
2883 			return 0;
2884 	}
2885 
2886 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2887 	nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2888 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2889 	nctrl.ncmd.s.param1 =
2890 		vfidx + 1; /* vfidx is 0 based,
2891 			    * but vf_num (param1) is 1 based
2892 			    */
2893 	nctrl.ncmd.s.param2 = enable;
2894 	nctrl.ncmd.s.more = 0;
2895 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2896 	nctrl.cb_fn = NULL;
2897 
2898 	retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2899 
2900 	if (retval) {
2901 		netif_info(lio, drv, lio->netdev,
2902 			   "Failed to set VF %d spoofchk %s\n", vfidx,
2903 			enable ? "on" : "off");
2904 		return -1;
2905 	}
2906 
2907 	oct->sriov_info.vf_spoofchk[vfidx] = enable;
2908 	netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2909 		   enable ? "on" : "off");
2910 
2911 	return 0;
2912 }
2913 
2914 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2915 				u16 vlan, u8 qos, __be16 vlan_proto)
2916 {
2917 	struct lio *lio = GET_LIO(netdev);
2918 	struct octeon_device *oct = lio->oct_dev;
2919 	struct octnic_ctrl_pkt nctrl;
2920 	u16 vlantci;
2921 	int ret = 0;
2922 
2923 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2924 		return -EINVAL;
2925 
2926 	if (vlan_proto != htons(ETH_P_8021Q))
2927 		return -EPROTONOSUPPORT;
2928 
2929 	if (vlan >= VLAN_N_VID || qos > 7)
2930 		return -EINVAL;
2931 
2932 	if (vlan)
2933 		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2934 	else
2935 		vlantci = 0;
2936 
2937 	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2938 		return 0;
2939 
2940 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2941 
2942 	if (vlan)
2943 		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2944 	else
2945 		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2946 
2947 	nctrl.ncmd.s.param1 = vlantci;
2948 	nctrl.ncmd.s.param2 =
2949 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2950 	nctrl.ncmd.s.more = 0;
2951 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2952 	nctrl.cb_fn = NULL;
2953 
2954 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2955 	if (ret) {
2956 		if (ret > 0)
2957 			ret = -EIO;
2958 		return ret;
2959 	}
2960 
2961 	oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2962 
2963 	return ret;
2964 }
2965 
2966 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2967 				  struct ifla_vf_info *ivi)
2968 {
2969 	struct lio *lio = GET_LIO(netdev);
2970 	struct octeon_device *oct = lio->oct_dev;
2971 	u8 *macaddr;
2972 
2973 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2974 		return -EINVAL;
2975 
2976 	memset(ivi, 0, sizeof(struct ifla_vf_info));
2977 
2978 	ivi->vf = vfidx;
2979 	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2980 	ether_addr_copy(&ivi->mac[0], macaddr);
2981 	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2982 	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2983 	if (oct->sriov_info.trusted_vf.active &&
2984 	    oct->sriov_info.trusted_vf.id == vfidx)
2985 		ivi->trusted = true;
2986 	else
2987 		ivi->trusted = false;
2988 	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2989 	ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
2990 	ivi->max_tx_rate = lio->linfo.link.s.speed;
2991 	ivi->min_tx_rate = 0;
2992 
2993 	return 0;
2994 }
2995 
2996 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
2997 {
2998 	struct octeon_device *oct = lio->oct_dev;
2999 	struct octeon_soft_command *sc;
3000 	int retval;
3001 
3002 	sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3003 	if (!sc)
3004 		return -ENOMEM;
3005 
3006 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3007 
3008 	/* vfidx is 0 based, but vf_num (param1) is 1 based */
3009 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3010 				    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3011 				    trusted);
3012 
3013 	init_completion(&sc->complete);
3014 	sc->sc_status = OCTEON_REQUEST_PENDING;
3015 
3016 	retval = octeon_send_soft_command(oct, sc);
3017 	if (retval == IQ_SEND_FAILED) {
3018 		octeon_free_soft_command(oct, sc);
3019 		retval = -1;
3020 	} else {
3021 		/* Wait for response or timeout */
3022 		retval = wait_for_sc_completion_timeout(oct, sc, 0);
3023 		if (retval)
3024 			return (retval);
3025 
3026 		WRITE_ONCE(sc->caller_is_done, true);
3027 	}
3028 
3029 	return retval;
3030 }
3031 
3032 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3033 				 bool setting)
3034 {
3035 	struct lio *lio = GET_LIO(netdev);
3036 	struct octeon_device *oct = lio->oct_dev;
3037 
3038 	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3039 		/* trusted vf is not supported by firmware older than 1.7.1 */
3040 		return -EOPNOTSUPP;
3041 	}
3042 
3043 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3044 		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3045 		return -EINVAL;
3046 	}
3047 
3048 	if (setting) {
3049 		/* Set */
3050 
3051 		if (oct->sriov_info.trusted_vf.active &&
3052 		    oct->sriov_info.trusted_vf.id == vfidx)
3053 			return 0;
3054 
3055 		if (oct->sriov_info.trusted_vf.active) {
3056 			netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3057 			return -EPERM;
3058 		}
3059 	} else {
3060 		/* Clear */
3061 
3062 		if (!oct->sriov_info.trusted_vf.active)
3063 			return 0;
3064 	}
3065 
3066 	if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3067 		if (setting) {
3068 			oct->sriov_info.trusted_vf.id = vfidx;
3069 			oct->sriov_info.trusted_vf.active = true;
3070 		} else {
3071 			oct->sriov_info.trusted_vf.active = false;
3072 		}
3073 
3074 		netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3075 			   setting ? "" : "not ");
3076 	} else {
3077 		netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3078 		return -1;
3079 	}
3080 
3081 	return 0;
3082 }
3083 
3084 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3085 				      int linkstate)
3086 {
3087 	struct lio *lio = GET_LIO(netdev);
3088 	struct octeon_device *oct = lio->oct_dev;
3089 	struct octnic_ctrl_pkt nctrl;
3090 	int ret = 0;
3091 
3092 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3093 		return -EINVAL;
3094 
3095 	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3096 		return 0;
3097 
3098 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3099 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3100 	nctrl.ncmd.s.param1 =
3101 	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3102 	nctrl.ncmd.s.param2 = linkstate;
3103 	nctrl.ncmd.s.more = 0;
3104 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3105 	nctrl.cb_fn = NULL;
3106 
3107 	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3108 
3109 	if (!ret)
3110 		oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3111 	else if (ret > 0)
3112 		ret = -EIO;
3113 
3114 	return ret;
3115 }
3116 
3117 static int
3118 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3119 {
3120 	struct lio_devlink_priv *priv;
3121 	struct octeon_device *oct;
3122 
3123 	priv = devlink_priv(devlink);
3124 	oct = priv->oct;
3125 
3126 	*mode = oct->eswitch_mode;
3127 
3128 	return 0;
3129 }
3130 
3131 static int
3132 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3133 			  struct netlink_ext_ack *extack)
3134 {
3135 	struct lio_devlink_priv *priv;
3136 	struct octeon_device *oct;
3137 	int ret = 0;
3138 
3139 	priv = devlink_priv(devlink);
3140 	oct = priv->oct;
3141 
3142 	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3143 		return -EINVAL;
3144 
3145 	if (oct->eswitch_mode == mode)
3146 		return 0;
3147 
3148 	switch (mode) {
3149 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3150 		oct->eswitch_mode = mode;
3151 		ret = lio_vf_rep_create(oct);
3152 		break;
3153 
3154 	case DEVLINK_ESWITCH_MODE_LEGACY:
3155 		lio_vf_rep_destroy(oct);
3156 		oct->eswitch_mode = mode;
3157 		break;
3158 
3159 	default:
3160 		ret = -EINVAL;
3161 	}
3162 
3163 	return ret;
3164 }
3165 
3166 static const struct devlink_ops liquidio_devlink_ops = {
3167 	.eswitch_mode_get = liquidio_eswitch_mode_get,
3168 	.eswitch_mode_set = liquidio_eswitch_mode_set,
3169 };
3170 
3171 static int
3172 liquidio_get_port_parent_id(struct net_device *dev,
3173 			    struct netdev_phys_item_id *ppid)
3174 {
3175 	struct lio *lio = GET_LIO(dev);
3176 	struct octeon_device *oct = lio->oct_dev;
3177 
3178 	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3179 		return -EOPNOTSUPP;
3180 
3181 	ppid->id_len = ETH_ALEN;
3182 	ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3183 
3184 	return 0;
3185 }
3186 
3187 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3188 				 struct ifla_vf_stats *vf_stats)
3189 {
3190 	struct lio *lio = GET_LIO(netdev);
3191 	struct octeon_device *oct = lio->oct_dev;
3192 	struct oct_vf_stats stats;
3193 	int ret;
3194 
3195 	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3196 		return -EINVAL;
3197 
3198 	memset(&stats, 0, sizeof(struct oct_vf_stats));
3199 	ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3200 	if (!ret) {
3201 		vf_stats->rx_packets = stats.rx_packets;
3202 		vf_stats->tx_packets = stats.tx_packets;
3203 		vf_stats->rx_bytes = stats.rx_bytes;
3204 		vf_stats->tx_bytes = stats.tx_bytes;
3205 		vf_stats->broadcast = stats.broadcast;
3206 		vf_stats->multicast = stats.multicast;
3207 	}
3208 
3209 	return ret;
3210 }
3211 
3212 static const struct net_device_ops lionetdevops = {
3213 	.ndo_open		= liquidio_open,
3214 	.ndo_stop		= liquidio_stop,
3215 	.ndo_start_xmit		= liquidio_xmit,
3216 	.ndo_get_stats64	= liquidio_get_stats64,
3217 	.ndo_set_mac_address	= liquidio_set_mac,
3218 	.ndo_set_rx_mode	= liquidio_set_mcast_list,
3219 	.ndo_tx_timeout		= liquidio_tx_timeout,
3220 
3221 	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3222 	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3223 	.ndo_change_mtu		= liquidio_change_mtu,
3224 	.ndo_eth_ioctl		= liquidio_ioctl,
3225 	.ndo_fix_features	= liquidio_fix_features,
3226 	.ndo_set_features	= liquidio_set_features,
3227 	.ndo_set_vf_mac		= liquidio_set_vf_mac,
3228 	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
3229 	.ndo_get_vf_config	= liquidio_get_vf_config,
3230 	.ndo_set_vf_spoofchk	= liquidio_set_vf_spoofchk,
3231 	.ndo_set_vf_trust	= liquidio_set_vf_trust,
3232 	.ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3233 	.ndo_get_vf_stats	= liquidio_get_vf_stats,
3234 	.ndo_get_port_parent_id	= liquidio_get_port_parent_id,
3235 };
3236 
3237 /**
3238  * liquidio_init - Entry point for the liquidio module
3239  */
3240 static int __init liquidio_init(void)
3241 {
3242 	int i;
3243 	struct handshake *hs;
3244 
3245 	init_completion(&first_stage);
3246 
3247 	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3248 
3249 	if (liquidio_init_pci())
3250 		return -EINVAL;
3251 
3252 	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3253 
3254 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3255 		hs = &handshake[i];
3256 		if (hs->pci_dev) {
3257 			wait_for_completion(&hs->init);
3258 			if (!hs->init_ok) {
3259 				/* init handshake failed */
3260 				dev_err(&hs->pci_dev->dev,
3261 					"Failed to init device\n");
3262 				liquidio_deinit_pci();
3263 				return -EIO;
3264 			}
3265 		}
3266 	}
3267 
3268 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3269 		hs = &handshake[i];
3270 		if (hs->pci_dev) {
3271 			wait_for_completion_timeout(&hs->started,
3272 						    msecs_to_jiffies(30000));
3273 			if (!hs->started_ok) {
3274 				/* starter handshake failed */
3275 				dev_err(&hs->pci_dev->dev,
3276 					"Firmware failed to start\n");
3277 				liquidio_deinit_pci();
3278 				return -EIO;
3279 			}
3280 		}
3281 	}
3282 
3283 	return 0;
3284 }
3285 
3286 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3287 {
3288 	struct octeon_device *oct = (struct octeon_device *)buf;
3289 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3290 	int gmxport = 0;
3291 	union oct_link_status *ls;
3292 	int i;
3293 
3294 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3295 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3296 			recv_pkt->buffer_size[0],
3297 			recv_pkt->rh.r_nic_info.gmxport);
3298 		goto nic_info_err;
3299 	}
3300 
3301 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
3302 	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3303 		OCT_DROQ_INFO_SIZE);
3304 
3305 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3306 	for (i = 0; i < oct->ifcount; i++) {
3307 		if (oct->props[i].gmxport == gmxport) {
3308 			update_link_status(oct->props[i].netdev, ls);
3309 			break;
3310 		}
3311 	}
3312 
3313 nic_info_err:
3314 	for (i = 0; i < recv_pkt->buffer_count; i++)
3315 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3316 	octeon_free_recv_info(recv_info);
3317 	return 0;
3318 }
3319 
3320 /**
3321  * setup_nic_devices - Setup network interfaces
3322  * @octeon_dev:  octeon device
3323  *
3324  * Called during init time for each device. It assumes the NIC
3325  * is already up and running.  The link information for each
3326  * interface is passed in link_info.
3327  */
3328 static int setup_nic_devices(struct octeon_device *octeon_dev)
3329 {
3330 	struct lio *lio = NULL;
3331 	struct net_device *netdev;
3332 	u8 mac[6], i, j, *fw_ver, *micro_ver;
3333 	unsigned long micro;
3334 	u32 cur_ver;
3335 	struct octeon_soft_command *sc;
3336 	struct liquidio_if_cfg_resp *resp;
3337 	struct octdev_props *props;
3338 	int retval, num_iqueues, num_oqueues;
3339 	int max_num_queues = 0;
3340 	union oct_nic_if_cfg if_cfg;
3341 	unsigned int base_queue;
3342 	unsigned int gmx_port_id;
3343 	u32 resp_size, data_size;
3344 	u32 ifidx_or_pfnum;
3345 	struct lio_version *vdata;
3346 	struct devlink *devlink;
3347 	struct lio_devlink_priv *lio_devlink;
3348 
3349 	/* This is to handle link status changes */
3350 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3351 				    OPCODE_NIC_INFO,
3352 				    lio_nic_info, octeon_dev);
3353 
3354 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3355 	 * They are handled directly.
3356 	 */
3357 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3358 					free_netbuf);
3359 
3360 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3361 					free_netsgbuf);
3362 
3363 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3364 					free_netsgbuf_with_resp);
3365 
3366 	for (i = 0; i < octeon_dev->ifcount; i++) {
3367 		resp_size = sizeof(struct liquidio_if_cfg_resp);
3368 		data_size = sizeof(struct lio_version);
3369 		sc = (struct octeon_soft_command *)
3370 			octeon_alloc_soft_command(octeon_dev, data_size,
3371 						  resp_size, 0);
3372 		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3373 		vdata = (struct lio_version *)sc->virtdptr;
3374 
3375 		*((u64 *)vdata) = 0;
3376 		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3377 		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3378 		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3379 
3380 		if (OCTEON_CN23XX_PF(octeon_dev)) {
3381 			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3382 			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3383 			base_queue = octeon_dev->sriov_info.pf_srn;
3384 
3385 			gmx_port_id = octeon_dev->pf_num;
3386 			ifidx_or_pfnum = octeon_dev->pf_num;
3387 		} else {
3388 			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3389 						octeon_get_conf(octeon_dev), i);
3390 			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3391 						octeon_get_conf(octeon_dev), i);
3392 			base_queue = CFG_GET_BASE_QUE_NIC_IF(
3393 						octeon_get_conf(octeon_dev), i);
3394 			gmx_port_id = CFG_GET_GMXID_NIC_IF(
3395 						octeon_get_conf(octeon_dev), i);
3396 			ifidx_or_pfnum = i;
3397 		}
3398 
3399 		dev_dbg(&octeon_dev->pci_dev->dev,
3400 			"requesting config for interface %d, iqs %d, oqs %d\n",
3401 			ifidx_or_pfnum, num_iqueues, num_oqueues);
3402 
3403 		if_cfg.u64 = 0;
3404 		if_cfg.s.num_iqueues = num_iqueues;
3405 		if_cfg.s.num_oqueues = num_oqueues;
3406 		if_cfg.s.base_queue = base_queue;
3407 		if_cfg.s.gmx_port_id = gmx_port_id;
3408 
3409 		sc->iq_no = 0;
3410 
3411 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3412 					    OPCODE_NIC_IF_CFG, 0,
3413 					    if_cfg.u64, 0);
3414 
3415 		init_completion(&sc->complete);
3416 		sc->sc_status = OCTEON_REQUEST_PENDING;
3417 
3418 		retval = octeon_send_soft_command(octeon_dev, sc);
3419 		if (retval == IQ_SEND_FAILED) {
3420 			dev_err(&octeon_dev->pci_dev->dev,
3421 				"iq/oq config failed status: %x\n",
3422 				retval);
3423 			/* Soft instr is freed by driver in case of failure. */
3424 			octeon_free_soft_command(octeon_dev, sc);
3425 			return(-EIO);
3426 		}
3427 
3428 		/* Sleep on a wait queue till the cond flag indicates that the
3429 		 * response arrived or timed-out.
3430 		 */
3431 		retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3432 		if (retval)
3433 			return retval;
3434 
3435 		retval = resp->status;
3436 		if (retval) {
3437 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3438 			WRITE_ONCE(sc->caller_is_done, true);
3439 			goto setup_nic_dev_done;
3440 		}
3441 		snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3442 			 32, "%s",
3443 			 resp->cfg_info.liquidio_firmware_version);
3444 
3445 		/* Verify f/w version (in case of 'auto' loading from flash) */
3446 		fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3447 		if (memcmp(LIQUIDIO_BASE_VERSION,
3448 			   fw_ver,
3449 			   strlen(LIQUIDIO_BASE_VERSION))) {
3450 			dev_err(&octeon_dev->pci_dev->dev,
3451 				"Unmatched firmware version. Expected %s.x, got %s.\n",
3452 				LIQUIDIO_BASE_VERSION, fw_ver);
3453 			WRITE_ONCE(sc->caller_is_done, true);
3454 			goto setup_nic_dev_done;
3455 		} else if (atomic_read(octeon_dev->adapter_fw_state) ==
3456 			   FW_IS_PRELOADED) {
3457 			dev_info(&octeon_dev->pci_dev->dev,
3458 				 "Using auto-loaded firmware version %s.\n",
3459 				 fw_ver);
3460 		}
3461 
3462 		/* extract micro version field; point past '<maj>.<min>.' */
3463 		micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3464 		if (kstrtoul(micro_ver, 10, &micro) != 0)
3465 			micro = 0;
3466 		octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3467 		octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3468 		octeon_dev->fw_info.ver.rev = micro;
3469 
3470 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3471 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3472 
3473 		num_iqueues = hweight64(resp->cfg_info.iqmask);
3474 		num_oqueues = hweight64(resp->cfg_info.oqmask);
3475 
3476 		if (!(num_iqueues) || !(num_oqueues)) {
3477 			dev_err(&octeon_dev->pci_dev->dev,
3478 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3479 				resp->cfg_info.iqmask,
3480 				resp->cfg_info.oqmask);
3481 			WRITE_ONCE(sc->caller_is_done, true);
3482 			goto setup_nic_dev_done;
3483 		}
3484 
3485 		if (OCTEON_CN6XXX(octeon_dev)) {
3486 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3487 								    cn6xxx));
3488 		} else if (OCTEON_CN23XX_PF(octeon_dev)) {
3489 			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3490 								    cn23xx_pf));
3491 		}
3492 
3493 		dev_dbg(&octeon_dev->pci_dev->dev,
3494 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3495 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3496 			num_iqueues, num_oqueues, max_num_queues);
3497 		netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3498 
3499 		if (!netdev) {
3500 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3501 			WRITE_ONCE(sc->caller_is_done, true);
3502 			goto setup_nic_dev_done;
3503 		}
3504 
3505 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3506 
3507 		/* Associate the routines that will handle different
3508 		 * netdev tasks.
3509 		 */
3510 		netdev->netdev_ops = &lionetdevops;
3511 
3512 		retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3513 		if (retval) {
3514 			dev_err(&octeon_dev->pci_dev->dev,
3515 				"setting real number rx failed\n");
3516 			WRITE_ONCE(sc->caller_is_done, true);
3517 			goto setup_nic_dev_free;
3518 		}
3519 
3520 		retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3521 		if (retval) {
3522 			dev_err(&octeon_dev->pci_dev->dev,
3523 				"setting real number tx failed\n");
3524 			WRITE_ONCE(sc->caller_is_done, true);
3525 			goto setup_nic_dev_free;
3526 		}
3527 
3528 		lio = GET_LIO(netdev);
3529 
3530 		memset(lio, 0, sizeof(struct lio));
3531 
3532 		lio->ifidx = ifidx_or_pfnum;
3533 
3534 		props = &octeon_dev->props[i];
3535 		props->gmxport = resp->cfg_info.linfo.gmxport;
3536 		props->netdev = netdev;
3537 
3538 		lio->linfo.num_rxpciq = num_oqueues;
3539 		lio->linfo.num_txpciq = num_iqueues;
3540 		for (j = 0; j < num_oqueues; j++) {
3541 			lio->linfo.rxpciq[j].u64 =
3542 				resp->cfg_info.linfo.rxpciq[j].u64;
3543 		}
3544 		for (j = 0; j < num_iqueues; j++) {
3545 			lio->linfo.txpciq[j].u64 =
3546 				resp->cfg_info.linfo.txpciq[j].u64;
3547 		}
3548 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3549 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3550 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3551 
3552 		WRITE_ONCE(sc->caller_is_done, true);
3553 
3554 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3555 
3556 		if (OCTEON_CN23XX_PF(octeon_dev) ||
3557 		    OCTEON_CN6XXX(octeon_dev)) {
3558 			lio->dev_capability = NETIF_F_HIGHDMA
3559 					      | NETIF_F_IP_CSUM
3560 					      | NETIF_F_IPV6_CSUM
3561 					      | NETIF_F_SG | NETIF_F_RXCSUM
3562 					      | NETIF_F_GRO
3563 					      | NETIF_F_TSO | NETIF_F_TSO6
3564 					      | NETIF_F_LRO;
3565 		}
3566 		netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3567 
3568 		/*  Copy of transmit encapsulation capabilities:
3569 		 *  TSO, TSO6, Checksums for this device
3570 		 */
3571 		lio->enc_dev_capability = NETIF_F_IP_CSUM
3572 					  | NETIF_F_IPV6_CSUM
3573 					  | NETIF_F_GSO_UDP_TUNNEL
3574 					  | NETIF_F_HW_CSUM | NETIF_F_SG
3575 					  | NETIF_F_RXCSUM
3576 					  | NETIF_F_TSO | NETIF_F_TSO6
3577 					  | NETIF_F_LRO;
3578 
3579 		netdev->hw_enc_features = (lio->enc_dev_capability &
3580 					   ~NETIF_F_LRO);
3581 
3582 		netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
3583 
3584 		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3585 
3586 		netdev->vlan_features = lio->dev_capability;
3587 		/* Add any unchangeable hw features */
3588 		lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3589 					NETIF_F_HW_VLAN_CTAG_RX |
3590 					NETIF_F_HW_VLAN_CTAG_TX;
3591 
3592 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3593 
3594 		netdev->hw_features = lio->dev_capability;
3595 		/*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
3596 		netdev->hw_features = netdev->hw_features &
3597 			~NETIF_F_HW_VLAN_CTAG_RX;
3598 
3599 		/* MTU range: 68 - 16000 */
3600 		netdev->min_mtu = LIO_MIN_MTU_SIZE;
3601 		netdev->max_mtu = LIO_MAX_MTU_SIZE;
3602 
3603 		/* Point to the  properties for octeon device to which this
3604 		 * interface belongs.
3605 		 */
3606 		lio->oct_dev = octeon_dev;
3607 		lio->octprops = props;
3608 		lio->netdev = netdev;
3609 
3610 		dev_dbg(&octeon_dev->pci_dev->dev,
3611 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
3612 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3613 
3614 		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3615 			u8 vfmac[ETH_ALEN];
3616 
3617 			eth_random_addr(vfmac);
3618 			if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3619 				dev_err(&octeon_dev->pci_dev->dev,
3620 					"Error setting VF%d MAC address\n",
3621 					j);
3622 				goto setup_nic_dev_free;
3623 			}
3624 		}
3625 
3626 		/* 64-bit swap required on LE machines */
3627 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3628 		for (j = 0; j < 6; j++)
3629 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3630 
3631 		/* Copy MAC Address to OS network device structure */
3632 
3633 		eth_hw_addr_set(netdev, mac);
3634 
3635 		/* By default all interfaces on a single Octeon uses the same
3636 		 * tx and rx queues
3637 		 */
3638 		lio->txq = lio->linfo.txpciq[0].s.q_no;
3639 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3640 		if (liquidio_setup_io_queues(octeon_dev, i,
3641 					     lio->linfo.num_txpciq,
3642 					     lio->linfo.num_rxpciq)) {
3643 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3644 			goto setup_nic_dev_free;
3645 		}
3646 
3647 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3648 
3649 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3650 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3651 
3652 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3653 			dev_err(&octeon_dev->pci_dev->dev,
3654 				"Gather list allocation failed\n");
3655 			goto setup_nic_dev_free;
3656 		}
3657 
3658 		/* Register ethtool support */
3659 		liquidio_set_ethtool_ops(netdev);
3660 		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3661 			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3662 		else
3663 			octeon_dev->priv_flags = 0x0;
3664 
3665 		if (netdev->features & NETIF_F_LRO)
3666 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3667 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3668 
3669 		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3670 				     OCTNET_CMD_VLAN_FILTER_ENABLE);
3671 
3672 		if ((debug != -1) && (debug & NETIF_MSG_HW))
3673 			liquidio_set_feature(netdev,
3674 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
3675 
3676 		if (setup_link_status_change_wq(netdev))
3677 			goto setup_nic_dev_free;
3678 
3679 		if ((octeon_dev->fw_info.app_cap_flags &
3680 		     LIQUIDIO_TIME_SYNC_CAP) &&
3681 		    setup_sync_octeon_time_wq(netdev))
3682 			goto setup_nic_dev_free;
3683 
3684 		if (setup_rx_oom_poll_fn(netdev))
3685 			goto setup_nic_dev_free;
3686 
3687 		/* Register the network device with the OS */
3688 		if (register_netdev(netdev)) {
3689 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3690 			goto setup_nic_dev_free;
3691 		}
3692 
3693 		dev_dbg(&octeon_dev->pci_dev->dev,
3694 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3695 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3696 		netif_carrier_off(netdev);
3697 		lio->link_changes++;
3698 
3699 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3700 
3701 		/* Sending command to firmware to enable Rx checksum offload
3702 		 * by default at the time of setup of Liquidio driver for
3703 		 * this device
3704 		 */
3705 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3706 					    OCTNET_CMD_RXCSUM_ENABLE);
3707 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3708 				     OCTNET_CMD_TXCSUM_ENABLE);
3709 
3710 		dev_dbg(&octeon_dev->pci_dev->dev,
3711 			"NIC ifidx:%d Setup successful\n", i);
3712 
3713 		if (octeon_dev->subsystem_id ==
3714 			OCTEON_CN2350_25GB_SUBSYS_ID ||
3715 		    octeon_dev->subsystem_id ==
3716 			OCTEON_CN2360_25GB_SUBSYS_ID) {
3717 			cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3718 					     octeon_dev->fw_info.ver.min,
3719 					     octeon_dev->fw_info.ver.rev);
3720 
3721 			/* speed control unsupported in f/w older than 1.7.2 */
3722 			if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3723 				dev_info(&octeon_dev->pci_dev->dev,
3724 					 "speed setting not supported by f/w.");
3725 				octeon_dev->speed_setting = 25;
3726 				octeon_dev->no_speed_setting = 1;
3727 			} else {
3728 				liquidio_get_speed(lio);
3729 			}
3730 
3731 			if (octeon_dev->speed_setting == 0) {
3732 				octeon_dev->speed_setting = 25;
3733 				octeon_dev->no_speed_setting = 1;
3734 			}
3735 		} else {
3736 			octeon_dev->no_speed_setting = 1;
3737 			octeon_dev->speed_setting = 10;
3738 		}
3739 		octeon_dev->speed_boot = octeon_dev->speed_setting;
3740 
3741 		/* don't read FEC setting if unsupported by f/w (see above) */
3742 		if (octeon_dev->speed_boot == 25 &&
3743 		    !octeon_dev->no_speed_setting) {
3744 			liquidio_get_fec(lio);
3745 			octeon_dev->props[lio->ifidx].fec_boot =
3746 				octeon_dev->props[lio->ifidx].fec;
3747 		}
3748 	}
3749 
3750 	device_lock(&octeon_dev->pci_dev->dev);
3751 	devlink = devlink_alloc(&liquidio_devlink_ops,
3752 				sizeof(struct lio_devlink_priv),
3753 				&octeon_dev->pci_dev->dev);
3754 	if (!devlink) {
3755 		device_unlock(&octeon_dev->pci_dev->dev);
3756 		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3757 		goto setup_nic_dev_free;
3758 	}
3759 
3760 	lio_devlink = devlink_priv(devlink);
3761 	lio_devlink->oct = octeon_dev;
3762 
3763 	octeon_dev->devlink = devlink;
3764 	octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3765 	devlink_register(devlink);
3766 	device_unlock(&octeon_dev->pci_dev->dev);
3767 
3768 	return 0;
3769 
3770 setup_nic_dev_free:
3771 
3772 	while (i--) {
3773 		dev_err(&octeon_dev->pci_dev->dev,
3774 			"NIC ifidx:%d Setup failed\n", i);
3775 		liquidio_destroy_nic_device(octeon_dev, i);
3776 	}
3777 
3778 setup_nic_dev_done:
3779 
3780 	return -ENODEV;
3781 }
3782 
3783 #ifdef CONFIG_PCI_IOV
3784 static int octeon_enable_sriov(struct octeon_device *oct)
3785 {
3786 	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3787 	struct pci_dev *vfdev;
3788 	int err;
3789 	u32 u;
3790 
3791 	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3792 		err = pci_enable_sriov(oct->pci_dev,
3793 				       oct->sriov_info.num_vfs_alloced);
3794 		if (err) {
3795 			dev_err(&oct->pci_dev->dev,
3796 				"OCTEON: Failed to enable PCI sriov: %d\n",
3797 				err);
3798 			oct->sriov_info.num_vfs_alloced = 0;
3799 			return err;
3800 		}
3801 		oct->sriov_info.sriov_enabled = 1;
3802 
3803 		/* init lookup table that maps DPI ring number to VF pci_dev
3804 		 * struct pointer
3805 		 */
3806 		u = 0;
3807 		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3808 				       OCTEON_CN23XX_VF_VID, NULL);
3809 		while (vfdev) {
3810 			if (vfdev->is_virtfn &&
3811 			    (vfdev->physfn == oct->pci_dev)) {
3812 				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3813 					vfdev;
3814 				u += oct->sriov_info.rings_per_vf;
3815 			}
3816 			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3817 					       OCTEON_CN23XX_VF_VID, vfdev);
3818 		}
3819 	}
3820 
3821 	return num_vfs_alloced;
3822 }
3823 
3824 static int lio_pci_sriov_disable(struct octeon_device *oct)
3825 {
3826 	int u;
3827 
3828 	if (pci_vfs_assigned(oct->pci_dev)) {
3829 		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3830 		return -EPERM;
3831 	}
3832 
3833 	pci_disable_sriov(oct->pci_dev);
3834 
3835 	u = 0;
3836 	while (u < MAX_POSSIBLE_VFS) {
3837 		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3838 		u += oct->sriov_info.rings_per_vf;
3839 	}
3840 
3841 	oct->sriov_info.num_vfs_alloced = 0;
3842 	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3843 		 oct->pf_num);
3844 
3845 	return 0;
3846 }
3847 
3848 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3849 {
3850 	struct octeon_device *oct = pci_get_drvdata(dev);
3851 	int ret = 0;
3852 
3853 	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3854 	    (oct->sriov_info.sriov_enabled)) {
3855 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3856 			 oct->pf_num, num_vfs);
3857 		return 0;
3858 	}
3859 
3860 	if (!num_vfs) {
3861 		lio_vf_rep_destroy(oct);
3862 		ret = lio_pci_sriov_disable(oct);
3863 	} else if (num_vfs > oct->sriov_info.max_vfs) {
3864 		dev_err(&oct->pci_dev->dev,
3865 			"OCTEON: Max allowed VFs:%d user requested:%d",
3866 			oct->sriov_info.max_vfs, num_vfs);
3867 		ret = -EPERM;
3868 	} else {
3869 		oct->sriov_info.num_vfs_alloced = num_vfs;
3870 		ret = octeon_enable_sriov(oct);
3871 		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3872 			 oct->pf_num, num_vfs);
3873 		ret = lio_vf_rep_create(oct);
3874 		if (ret)
3875 			dev_info(&oct->pci_dev->dev,
3876 				 "vf representor create failed");
3877 	}
3878 
3879 	return ret;
3880 }
3881 #endif
3882 
3883 /**
3884  * liquidio_init_nic_module - initialize the NIC
3885  * @oct: octeon device
3886  *
3887  * This initialization routine is called once the Octeon device application is
3888  * up and running
3889  */
3890 static int liquidio_init_nic_module(struct octeon_device *oct)
3891 {
3892 	int i, retval = 0;
3893 	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3894 
3895 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3896 
3897 	/* only default iq and oq were initialized
3898 	 * initialize the rest as well
3899 	 */
3900 	/* run port_config command for each port */
3901 	oct->ifcount = num_nic_ports;
3902 
3903 	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3904 
3905 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
3906 		oct->props[i].gmxport = -1;
3907 
3908 	retval = setup_nic_devices(oct);
3909 	if (retval) {
3910 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3911 		goto octnet_init_failure;
3912 	}
3913 
3914 	/* Call vf_rep_modinit if the firmware is switchdev capable
3915 	 * and do it from the first liquidio function probed.
3916 	 */
3917 	if (!oct->octeon_id &&
3918 	    oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3919 		retval = lio_vf_rep_modinit();
3920 		if (retval) {
3921 			liquidio_stop_nic_module(oct);
3922 			goto octnet_init_failure;
3923 		}
3924 	}
3925 
3926 	liquidio_ptp_init(oct);
3927 
3928 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3929 
3930 	return retval;
3931 
3932 octnet_init_failure:
3933 
3934 	oct->ifcount = 0;
3935 
3936 	return retval;
3937 }
3938 
3939 /**
3940  * nic_starter - finish init
3941  * @work:  work struct work_struct
3942  *
3943  * starter callback that invokes the remaining initialization work after the NIC is up and running.
3944  */
3945 static void nic_starter(struct work_struct *work)
3946 {
3947 	struct octeon_device *oct;
3948 	struct cavium_wk *wk = (struct cavium_wk *)work;
3949 
3950 	oct = (struct octeon_device *)wk->ctxptr;
3951 
3952 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3953 		return;
3954 
3955 	/* If the status of the device is CORE_OK, the core
3956 	 * application has reported its application type. Call
3957 	 * any registered handlers now and move to the RUNNING
3958 	 * state.
3959 	 */
3960 	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3961 		schedule_delayed_work(&oct->nic_poll_work.work,
3962 				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3963 		return;
3964 	}
3965 
3966 	atomic_set(&oct->status, OCT_DEV_RUNNING);
3967 
3968 	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3969 		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3970 
3971 		if (liquidio_init_nic_module(oct))
3972 			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3973 		else
3974 			handshake[oct->octeon_id].started_ok = 1;
3975 	} else {
3976 		dev_err(&oct->pci_dev->dev,
3977 			"Unexpected application running on NIC (%d). Check firmware.\n",
3978 			oct->app_mode);
3979 	}
3980 
3981 	complete(&handshake[oct->octeon_id].started);
3982 }
3983 
3984 static int
3985 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3986 {
3987 	struct octeon_device *oct = (struct octeon_device *)buf;
3988 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3989 	int i, notice, vf_idx;
3990 	bool cores_crashed;
3991 	u64 *data, vf_num;
3992 
3993 	notice = recv_pkt->rh.r.ossp;
3994 	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3995 
3996 	/* the first 64-bit word of data is the vf_num */
3997 	vf_num = data[0];
3998 	octeon_swap_8B_data(&vf_num, 1);
3999 	vf_idx = (int)vf_num - 1;
4000 
4001 	cores_crashed = READ_ONCE(oct->cores_crashed);
4002 
4003 	if (notice == VF_DRV_LOADED) {
4004 		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4005 			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4006 			dev_info(&oct->pci_dev->dev,
4007 				 "driver for VF%d was loaded\n", vf_idx);
4008 			if (!cores_crashed)
4009 				try_module_get(THIS_MODULE);
4010 		}
4011 	} else if (notice == VF_DRV_REMOVED) {
4012 		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4013 			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4014 			dev_info(&oct->pci_dev->dev,
4015 				 "driver for VF%d was removed\n", vf_idx);
4016 			if (!cores_crashed)
4017 				module_put(THIS_MODULE);
4018 		}
4019 	} else if (notice == VF_DRV_MACADDR_CHANGED) {
4020 		u8 *b = (u8 *)&data[1];
4021 
4022 		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4023 		dev_info(&oct->pci_dev->dev,
4024 			 "VF driver changed VF%d's MAC address to %pM\n",
4025 			 vf_idx, b + 2);
4026 	}
4027 
4028 	for (i = 0; i < recv_pkt->buffer_count; i++)
4029 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
4030 	octeon_free_recv_info(recv_info);
4031 
4032 	return 0;
4033 }
4034 
4035 /**
4036  * octeon_device_init - Device initialization for each Octeon device that is probed
4037  * @octeon_dev:  octeon device
4038  */
4039 static int octeon_device_init(struct octeon_device *octeon_dev)
4040 {
4041 	int j, ret;
4042 	char bootcmd[] = "\n";
4043 	char *dbg_enb = NULL;
4044 	enum lio_fw_state fw_state;
4045 	struct octeon_device_priv *oct_priv =
4046 		(struct octeon_device_priv *)octeon_dev->priv;
4047 	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4048 
4049 	/* Enable access to the octeon device and make its DMA capability
4050 	 * known to the OS.
4051 	 */
4052 	if (octeon_pci_os_setup(octeon_dev))
4053 		return 1;
4054 
4055 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4056 
4057 	/* Identify the Octeon type and map the BAR address space. */
4058 	if (octeon_chip_specific_setup(octeon_dev)) {
4059 		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4060 		return 1;
4061 	}
4062 
4063 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4064 
4065 	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4066 	 * since that is what is required for the reference to be removed
4067 	 * during de-initialization (see 'octeon_destroy_resources').
4068 	 */
4069 	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4070 			       PCI_SLOT(octeon_dev->pci_dev->devfn),
4071 			       PCI_FUNC(octeon_dev->pci_dev->devfn),
4072 			       true);
4073 
4074 	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4075 
4076 	/* CN23XX supports preloaded firmware if the following is true:
4077 	 *
4078 	 * The adapter indicates that firmware is currently running AND
4079 	 * 'fw_type' is 'auto'.
4080 	 *
4081 	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4082 	 */
4083 	if (OCTEON_CN23XX_PF(octeon_dev) &&
4084 	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4085 		atomic_cmpxchg(octeon_dev->adapter_fw_state,
4086 			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4087 	}
4088 
4089 	/* If loading firmware, only first device of adapter needs to do so. */
4090 	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4091 				  FW_NEEDS_TO_BE_LOADED,
4092 				  FW_IS_BEING_LOADED);
4093 
4094 	/* Here, [local variable] 'fw_state' is set to one of:
4095 	 *
4096 	 *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4097 	 *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4098 	 *                          firmware to the adapter.
4099 	 *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4100 	 *                          firmware to the adapter.
4101 	 */
4102 
4103 	/* Prior to f/w load, perform a soft reset of the Octeon device;
4104 	 * if error resetting, return w/error.
4105 	 */
4106 	if (fw_state == FW_NEEDS_TO_BE_LOADED)
4107 		if (octeon_dev->fn_list.soft_reset(octeon_dev))
4108 			return 1;
4109 
4110 	/* Initialize the dispatch mechanism used to push packets arriving on
4111 	 * Octeon Output queues.
4112 	 */
4113 	if (octeon_init_dispatch_list(octeon_dev))
4114 		return 1;
4115 
4116 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4117 				    OPCODE_NIC_CORE_DRV_ACTIVE,
4118 				    octeon_core_drv_init,
4119 				    octeon_dev);
4120 
4121 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4122 				    OPCODE_NIC_VF_DRV_NOTICE,
4123 				    octeon_recv_vf_drv_notice, octeon_dev);
4124 	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4125 	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4126 	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4127 			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4128 
4129 	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4130 
4131 	if (octeon_set_io_queues_off(octeon_dev)) {
4132 		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4133 		return 1;
4134 	}
4135 
4136 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4137 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4138 		if (ret) {
4139 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4140 			return ret;
4141 		}
4142 	}
4143 
4144 	/* Initialize soft command buffer pool
4145 	 */
4146 	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4147 		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4148 		return 1;
4149 	}
4150 	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4151 
4152 	/*  Setup the data structures that manage this Octeon's Input queues. */
4153 	if (octeon_setup_instr_queues(octeon_dev)) {
4154 		dev_err(&octeon_dev->pci_dev->dev,
4155 			"instruction queue initialization failed\n");
4156 		return 1;
4157 	}
4158 	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4159 
4160 	/* Initialize lists to manage the requests of different types that
4161 	 * arrive from user & kernel applications for this octeon device.
4162 	 */
4163 	if (octeon_setup_response_list(octeon_dev)) {
4164 		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4165 		return 1;
4166 	}
4167 	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4168 
4169 	if (octeon_setup_output_queues(octeon_dev)) {
4170 		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4171 		return 1;
4172 	}
4173 
4174 	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4175 
4176 	if (OCTEON_CN23XX_PF(octeon_dev)) {
4177 		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4178 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4179 			return 1;
4180 		}
4181 		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4182 
4183 		if (octeon_allocate_ioq_vector
4184 				(octeon_dev,
4185 				 octeon_dev->sriov_info.num_pf_rings)) {
4186 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4187 			return 1;
4188 		}
4189 		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4190 
4191 	} else {
4192 		/* The input and output queue registers were setup earlier (the
4193 		 * queues were not enabled). Any additional registers
4194 		 * that need to be programmed should be done now.
4195 		 */
4196 		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4197 		if (ret) {
4198 			dev_err(&octeon_dev->pci_dev->dev,
4199 				"Failed to configure device registers\n");
4200 			return ret;
4201 		}
4202 	}
4203 
4204 	/* Initialize the tasklet that handles output queue packet processing.*/
4205 	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4206 	tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
4207 
4208 	/* Setup the interrupt handler and record the INT SUM register address
4209 	 */
4210 	if (octeon_setup_interrupt(octeon_dev,
4211 				   octeon_dev->sriov_info.num_pf_rings))
4212 		return 1;
4213 
4214 	/* Enable Octeon device interrupts */
4215 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4216 
4217 	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4218 
4219 	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4220 	 * the output queue is enabled.
4221 	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4222 	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4223 	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4224 	 * before any credits have been issued, causing the ring to be reset
4225 	 * (and the f/w appear to never have started).
4226 	 */
4227 	for (j = 0; j < octeon_dev->num_oqs; j++)
4228 		writel(octeon_dev->droq[j]->max_count,
4229 		       octeon_dev->droq[j]->pkts_credit_reg);
4230 
4231 	/* Enable the input and output queues for this Octeon device */
4232 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4233 	if (ret) {
4234 		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4235 		return ret;
4236 	}
4237 
4238 	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4239 
4240 	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4241 		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4242 		if (!ddr_timeout) {
4243 			dev_info(&octeon_dev->pci_dev->dev,
4244 				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4245 		}
4246 
4247 		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4248 
4249 		/* Wait for the octeon to initialize DDR after the soft-reset.*/
4250 		while (!ddr_timeout) {
4251 			set_current_state(TASK_INTERRUPTIBLE);
4252 			if (schedule_timeout(HZ / 10)) {
4253 				/* user probably pressed Control-C */
4254 				return 1;
4255 			}
4256 		}
4257 		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4258 		if (ret) {
4259 			dev_err(&octeon_dev->pci_dev->dev,
4260 				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4261 				ret);
4262 			return 1;
4263 		}
4264 
4265 		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4266 			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4267 			return 1;
4268 		}
4269 
4270 		/* Divert uboot to take commands from host instead. */
4271 		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4272 
4273 		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4274 		ret = octeon_init_consoles(octeon_dev);
4275 		if (ret) {
4276 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4277 			return 1;
4278 		}
4279 		/* If console debug enabled, specify empty string to use default
4280 		 * enablement ELSE specify NULL string for 'disabled'.
4281 		 */
4282 		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4283 		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4284 		if (ret) {
4285 			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4286 			return 1;
4287 		} else if (octeon_console_debug_enabled(0)) {
4288 			/* If console was added AND we're logging console output
4289 			 * then set our console print function.
4290 			 */
4291 			octeon_dev->console[0].print = octeon_dbg_console_print;
4292 		}
4293 
4294 		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4295 
4296 		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4297 		ret = load_firmware(octeon_dev);
4298 		if (ret) {
4299 			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4300 			return 1;
4301 		}
4302 
4303 		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4304 	}
4305 
4306 	handshake[octeon_dev->octeon_id].init_ok = 1;
4307 	complete(&handshake[octeon_dev->octeon_id].init);
4308 
4309 	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4310 	oct_priv->dev = octeon_dev;
4311 
4312 	return 0;
4313 }
4314 
4315 /**
4316  * octeon_dbg_console_print - Debug console print function
4317  * @oct:  octeon device
4318  * @console_num: console number
4319  * @prefix:      first portion of line to display
4320  * @suffix:      second portion of line to display
4321  *
4322  * The OCTEON debug console outputs entire lines (excluding '\n').
4323  * Normally, the line will be passed in the 'prefix' parameter.
4324  * However, due to buffering, it is possible for a line to be split into two
4325  * parts, in which case they will be passed as the 'prefix' parameter and
4326  * 'suffix' parameter.
4327  */
4328 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4329 				    char *prefix, char *suffix)
4330 {
4331 	if (prefix && suffix)
4332 		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4333 			 suffix);
4334 	else if (prefix)
4335 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4336 	else if (suffix)
4337 		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4338 
4339 	return 0;
4340 }
4341 
4342 /**
4343  * liquidio_exit - Exits the module
4344  */
4345 static void __exit liquidio_exit(void)
4346 {
4347 	liquidio_deinit_pci();
4348 
4349 	pr_info("LiquidIO network module is now unloaded\n");
4350 }
4351 
4352 module_init(liquidio_init);
4353 module_exit(liquidio_exit);
4354