/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn23xx_vf_device.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
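
/* Illustrative sketch (not part of the driver): the #ifdef above keeps
 * gso_size in the most-significant 16 bits of the u64 and gso_segs in the
 * next 16 bits on both big- and little-endian hosts, so the device sees
 * the same 64-bit layout either way. Values below are hypothetical.
 */
#if 0
static void tx_info_layout_demo(void)
{
	union tx_info ti = { .u64 = 0 };

	ti.s.gso_size = 1448;	/* lands in bits 63:48 of ti.u64 */
	ti.s.gso_segs = 4;	/* lands in bits 47:32 of ti.u64 */
	/* ti.s.reserved occupies the low 32 bits on both endiannesses */
}
#endif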

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
		(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
static int octeon_device_init(struct octeon_device *oct);
static int liquidio_stop(struct net_device *netdev);

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;
	int retry = MAX_IO_PENDING_PKT_COUNT;
	int pkt_cnt = 0, pending_pkts;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
			    atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	u32 status, mask;
	int pos = 0x100;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
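
/* Illustrative sketch (not part of the driver): the function above assumes
 * the AER extended capability sits at config offset 0x100, which holds for
 * this device but is not guaranteed in general. A position-independent
 * variant might look the capability up first:
 */
#if 0
static void cleanup_aer_uncorrect_error_status_generic(struct pci_dev *dev)
{
	u32 status, mask;
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

	if (!pos)
		return;	/* device does not advertise AER */

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask;
	else
		status &= mask;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
#endif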

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	struct msix_entry *msix_entries;
	int i;

	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	for (i = 0; i < oct->ifcount; i++)
		netif_device_detach(oct->props[i].netdev);

	/* Disable interrupts  */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);
	if (oct->msix_on) {
		msix_entries = (struct msix_entry *)oct->msix_entries;
		for (i = 0; i < oct->num_msix_irqs; i++) {
			/* clear the affinity_cpumask */
			irq_set_affinity_hint(msix_entries[i].vector,
					      NULL);
			free_irq(msix_entries[i].vector,
				 &oct->ioq_vector[i]);
		}
		pci_disable_msix(oct->pci_dev);
		kfree(oct->msix_entries);
		oct->msix_entries = NULL;
		octeon_free_ioq_vector(oct);
	}
	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);

	pci_disable_device(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	return PCI_ERS_RESULT_DISCONNECT;
}

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_vf_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
};
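
/* Illustrative sketch (not part of the driver): this VF driver implements
 * only .error_detected, so a fatal error always ends in DISCONNECT. A
 * fuller AER implementation would also supply .slot_reset and .resume
 * callbacks, roughly shaped like the hypothetical handlers below.
 */
#if 0
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
{
	/* Re-enable the device after the link has been reset. */
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void liquidio_pcie_resume(struct pci_dev *pdev)
{
	/* Normal operation may resume; re-attach netdevs here. */
}
#endif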

static const struct pci_device_id liquidio_vf_pci_tbl[] = {
	{
		PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);

static struct pci_driver liquidio_vf_pci_driver = {
	.name		= "LiquidIO_VF",
	.id_table	= liquidio_vf_pci_tbl,
	.probe		= liquidio_vf_probe,
	.remove		= liquidio_vf_remove,
	.err_handler	= &liquidio_vf_err_handler,    /* For AER */
};

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
	 * this API is invoked only when new max-MTU of the interface is
	 * less than current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static void update_link_status(struct net_device *netdev,
			       union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
		lio->linfo.link.u64 = ls->u64;

		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}

		if (lio->linfo.link.s.mtu != current_max_mtu) {
			dev_info(&oct->pci_dev->dev,
				 "Max MTU Changed from %d to %d\n",
				 current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}

		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_vf_probe(struct pci_dev *pdev,
		  const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));

	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}
	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	if (octeon_device_init(oct_dev)) {
		liquidio_vf_remove(pdev);
		return -ENOMEM;
	}

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	pcie_flr(oct->pci_dev);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	struct msix_entry *msix_entries;
	int i;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:
		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
		/* fall through */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts  */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs; i++) {
				if (oct->ioq_vector[i].vector) {
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
		}
		/* Soft reset the octeon device before exiting */
		if (oct->pci_dev->reset_fn)
			octeon_pci_flr(oct);
		else
			cn23xx_vf_ask_pf_to_do_flr(oct);

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}
}

/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
			CVM_CAST64(status));
	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}
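
/* Illustrative sketch (not part of the driver): the WRITE_ONCE()/wmb()/
 * wake_up_interruptible() sequence above pairs with a sleeper that checks
 * ctx->cond. Assuming sleep_cond() wraps something like the snippet below
 * (an assumption; see the real helper in the octeon headers), the barrier
 * guarantees the response is fully visible before the waiter proceeds.
 */
#if 0
static int wait_for_rx_ctl_resp(struct liquidio_rx_ctl_context *ctx)
{
	/* Sleep until the callback sets ctx->cond or a signal arrives. */
	return wait_event_interruptible(ctx->wc, READ_ONCE(ctx->cond));
}
#endif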

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
	struct liquidio_rx_ctl_context *ctx;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, ctx_size);

	ncmd = (union octnet_cmd *)sc->virtdptr;
	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
			return;
		oct->props[lio->ifidx].rx_on = start_stop;
	}

	octeon_free_soft_command(oct, sc);
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device  when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct napi_struct *napi, *n;
	struct lio *lio;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	cleanup_link_status_change_wq(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	struct lio *lio;
	int i, j;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_vf_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->app_mode == CVM_DRV_NIC_APP)
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
#ifdef CONFIG_PCI_IOV
	/* setup PCI stuff first */
	if (!oct->pci_dev->physfn)
		octeon_pci_flr(oct);
#endif

	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octnic_gather *g;
	struct sk_buff *skb;
	int i, frags, iq;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}
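
/* Worked example (not part of the driver): each octeon gather entry holds
 * four pointers, so buffer i of an skb lands at g->sg[i >> 2].ptr[i & 3].
 * With the linear data in slot 0:
 *
 *   i = 0 (skb->data) -> sg[0].ptr[0]
 *   i = 1 (frag 0)    -> sg[0].ptr[1]
 *   i = 3 (frag 2)    -> sg[0].ptr[3]
 *   i = 4 (frag 3)    -> sg[1].ptr[0]
 *
 * The same indexing appears in the mapping path of liquidio_xmit() below.
 */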

/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct octnic_gather *g;
	struct sk_buff *skb;
	int i, frags, iq;
	struct lio *lio;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}

/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (!oct->props[lio->ifidx].napi_enabled) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		oct->droq[0]->ops.poll_mode = 1;
	}

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
	start_txqs(netdev);

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);

	return 0;
}

/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	/* tell Octeon to stop forwarding packets to host */
	send_rx_ctrl_cmd(lio, 0);

	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
	/* Inform that netif carrier is down */
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;

	netif_carrier_off(netdev);
	lio->link_changes++;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	stop_txqs(netdev);

	/* Wait for any pending Rx descriptors */
	if (lio_wait_for_clean_oq(oct))
		netif_info(lio, rx_err, lio->netdev,
			   "Proceeding with stop interface after partial RX desc processing\n");

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		oct->droq[0]->ops.poll_mode = 0;
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}

/**
 * \brief Converts a mask based on net device flags
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}
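
/* Worked example (not part of the driver): a netdev with
 * IFF_BROADCAST | IFF_MULTICAST set and a multicast list that fits within
 * MAX_OCTEON_MULTICAST_ADDR yields
 *
 *   OCTNET_IFFLAG_UNICAST | OCTNET_IFFLAG_MULTICAST |
 *   OCTNET_IFFLAG_BROADCAST
 *
 * while overflowing the list additionally sets OCTNET_IFFLAG_ALLMULTI.
 */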

static void liquidio_set_uc_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mac;

	if (lio->netdev_uc_count == netdev_uc_count(netdev))
		return;

	if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
		dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
		return;
	}

	lio->netdev_uc_count = netdev_uc_count(netdev);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
	nctrl.ncmd.s.more = lio->netdev_uc_count;
	nctrl.ncmd.s.param1 = oct->vf_num;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mac = &nctrl.udd[0];
	netdev_for_each_uc_addr(ha, netdev) {
		ether_addr_copy(((u8 *)mac) + 2, ha->addr);
		mac++;
	}

	octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
}

/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		ether_addr_copy(((u8 *)mc) + 2, ha->addr);
		/* no need to swap bytes */
		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	nctrl.wait_time = 0;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}

	liquidio_set_uc_list(netdev);
}

/**
 * \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = (struct sockaddr *)p;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	if (lio->linfo.macaddr_is_admin_asgnd)
		return -EPERM;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);

	return 0;
}
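
/* Illustrative sketch (not part of the driver): the "+ 2" in the
 * ether_addr_copy() calls above places the six MAC bytes in bytes 2..7 of
 * a zeroed u64, i.e. the layout used for each udd[] word:
 *
 *   byte:  0  1  2  3  4  5  6  7
 *          0  0  m0 m1 m2 m3 m4 m5   (m0..m5 in network byte order)
 *
 * A minimal reproduction, with a made-up helper name:
 */
#if 0
static u64 pack_mac_for_udd(const u8 *mac)
{
	u64 udd = 0;

	ether_addr_copy((u8 *)&udd + 2, mac);
	return udd;
}
#endif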

static void
liquidio_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *lstats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	lstats->tx_packets = pkts;
	lstats->tx_bytes = bytes;
	lstats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	lstats->rx_bytes = bytes;
	lstats->rx_packets = pkts;
	lstats->rx_dropped = drop;

	octnet_get_link_stats(netdev);
	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;

	/* detailed rx_errors: */
	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
	/* received packet with CRC error */
	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
	/* received frame alignment error */
	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;

	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
			    lstats->rx_frame_errors;

	/* detailed tx_errors */
	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;

	lstats->tx_errors = lstats->tx_aborted_errors +
		lstats->tx_carrier_errors;
}

/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct lio *lio = GET_LIO(netdev);
	struct hwtstamp_config conf;

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
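
/* Illustrative sketch (not part of the driver): userspace reaches the
 * handler above via SIOCSHWTSTAMP on any socket bound to the interface.
 * "ifname" is a placeholder; error handling is minimal.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_hw_timestamps(const char *ifname)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return ret;
}
#endif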

/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return hwtstamp_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
{
	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octnet_buf_free_info *finfo;
	struct oct_timestamp_resp *resp;
	struct octeon_soft_command *sc;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}

/** \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 */
static int send_nic_timestamp_pkt(struct octeon_device *oct,
				  struct octnic_data_pkt *ndata,
				  struct octnet_buf_free_info *finfo,
				  int xmit_more)
{
	struct octeon_soft_command *sc;
	int ring_doorbell;
	struct lio *lio;
	int retval;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;

	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}

/** \brief Transmit network packets to the Octeon interface
 * @param skb      skbuff struct to be passed to network layer.
 * @param netdev   pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_instr_irh *irh;
	struct oct_iq_stats *stats;
	struct octeon_device *oct;
	int q_idx = 0, iq_no = 0;
	union tx_info *tx_info;
	int xmit_more = 0;
	struct lio *lio;
	int status = 0;
	u64 dptr = 0;
	u32 tag = 0;
	int j;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	q_idx = skb_iq(lio->oct_dev, skb);
	tag = q_idx;
	iq_no = lio->linfo.txpciq[q_idx].s.q_no;

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;
	finfo->sc = NULL;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = finfo;

	ndata.q_no = iq_no;

	if (octnet_iq_is_full(oct, ndata.q_no)) {
		/* defer sending if queue is full */
		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
			   ndata.q_no);
		stats->tx_iq_busy++;
		return NETDEV_TX_BUSY;
	}

	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (!skb_shinfo(skb)->nr_frags) {
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
		/* Offload checksum calculation for TCP/UDP packets */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			return NETDEV_TX_BUSY;
		}

		ndata.cmd.cmd3.dptr = dptr;
		finfo->dptr = dptr;
		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		struct skb_frag_struct *frag;
		struct octnic_gather *g;
		int i, frags;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			lio_list_delete_head(&lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				dma_map_page(&oct->pci_dev->dev,
					     frag->page.p,
					     frag->page_offset,
					     frag->size,
					     DMA_TO_DEVICE);
			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       frag->size,
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		ndata.cmd.cmd3.dptr = dptr;
		finfo->dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
	tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];

	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
	}

	xmit_more = skb->xmit_more;

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
	else
		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP) {
		dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
			iq_no);
		netif_stop_subqueue(netdev, q_idx);
	}

	netif_trans_update(netdev);

	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += ndata.datasize;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);

	octeon_ring_doorbell_locked(oct, iq_no);

	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}

/** \brief Network device Tx timeout
 * @param netdev    pointer to network device
 */
static void liquidio_tx_timeout(struct net_device *netdev)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	wake_txqs(netdev);
}

static int
liquidio_vlan_rx_add_vid(struct net_device *netdev,
			 __be16 proto __attribute__((unused)), u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct completion compl;
	u16 response_code;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	init_completion(&compl);
	nctrl.completion = &compl;
	nctrl.response_code = &response_code;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
		return -EIO;
	}

	if (!wait_for_completion_timeout(&compl,
					 msecs_to_jiffies(nctrl.wait_time)))
		return -EPERM;

	if (READ_ONCE(response_code))
		return -EPERM;

	return 0;
}

static int
liquidio_vlan_rx_kill_vid(struct net_device *netdev,
			  __be16 proto __attribute__((unused)), u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}
	return ret;
}

/** Sending command to enable/disable RX checksum offload
 * @param netdev                pointer to network device
 * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd_bit            OCTNET_CMD_RXCSUM_ENABLE/
 *                              OCTNET_CMD_RXCSUM_DISABLE
 * @returns                     SUCCESS or FAILURE
 */
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				       u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
			ret);
	}
	return ret;
}

/** Sending command to add/delete VxLAN UDP port to firmware
 * @param netdev                pointer to network device
 * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port            VxLAN port to be added or deleted
 * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
 *                              OCTNET_CMD_VXLAN_PORT_DEL
 * @returns                     SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
			ret);
	}
	return ret;
}

/** \brief Net device fix features
 * @param netdev  pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	return request;
}

/** \brief Net device set features
 * @param netdev  pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if (!((netdev->features ^ features) & NETIF_F_LRO))
		return 0;

	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	return 0;
}

static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}

static const struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats64	= liquidio_get_stats64,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,
	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
};
1880 
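/** \brief Dispatch handler for NIC_INFO packets (link status changes)
 * @param recv_info  receive information for the dispatched packet
 * @param buf        octeon device pointer passed at dispatch registration
 */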
1881 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
1882 {
1883 	struct octeon_device *oct = (struct octeon_device *)buf;
1884 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1885 	union oct_link_status *ls;
1886 	int gmxport = 0;
1887 	int i;
1888 
1889 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
1890 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1891 			recv_pkt->buffer_size[0],
1892 			recv_pkt->rh.r_nic_info.gmxport);
1893 		goto nic_info_err;
1894 	}
1895 
1896 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
1897 	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
1898 		OCT_DROQ_INFO_SIZE);
1899 
1900 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
1901 
1902 	for (i = 0; i < oct->ifcount; i++) {
1903 		if (oct->props[i].gmxport == gmxport) {
1904 			update_link_status(oct->props[i].netdev, ls);
1905 			break;
1906 		}
1907 	}
1908 
1909 nic_info_err:
1910 	for (i = 0; i < recv_pkt->buffer_count; i++)
1911 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
1912 	octeon_free_recv_info(recv_info);
1913 	return 0;
1914 }
1915 
1916 /**
1917  * \brief Setup network interfaces
1918  * @param octeon_dev  octeon device
1919  *
1920  * Called during init time for each device. It assumes the NIC
1921  * is already up and running.  The link information for each
1922  * interface is passed in link_info.
1923  */
1924 static int setup_nic_devices(struct octeon_device *octeon_dev)
1925 {
1926 	int retval, num_iqueues, num_oqueues;
1927 	struct liquidio_if_cfg_context *ctx;
1928 	u32 resp_size, ctx_size, data_size;
1929 	struct liquidio_if_cfg_resp *resp;
1930 	struct octeon_soft_command *sc;
1931 	union oct_nic_if_cfg if_cfg;
1932 	struct octdev_props *props;
1933 	struct net_device *netdev;
1934 	struct lio_version *vdata;
1935 	struct lio *lio = NULL;
1936 	u8 mac[ETH_ALEN], i, j;
1937 	u32 ifidx_or_pfnum;
1938 
1939 	ifidx_or_pfnum = octeon_dev->pf_num;
1940 
1941 	/* This is to handle link status changes */
1942 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
1943 				    lio_nic_info, octeon_dev);
1944 
1945 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
1946 	 * They are handled directly.
1947 	 */
1948 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
1949 					free_netbuf);
1950 
1951 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
1952 					free_netsgbuf);
1953 
1954 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
1955 					free_netsgbuf_with_resp);
1956 
1957 	for (i = 0; i < octeon_dev->ifcount; i++) {
1958 		resp_size = sizeof(struct liquidio_if_cfg_resp);
1959 		ctx_size = sizeof(struct liquidio_if_cfg_context);
1960 		data_size = sizeof(struct lio_version);
1961 		sc = (struct octeon_soft_command *)
1962 			octeon_alloc_soft_command(octeon_dev, data_size,
1963 						  resp_size, ctx_size);
		if (!sc) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to allocate soft command\n");
			goto setup_nic_wait_intr;
		}
1964 		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1965 		ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
1966 		vdata = (struct lio_version *)sc->virtdptr;
1967 
1968 		*((u64 *)vdata) = 0;
1969 		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1970 		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1971 		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1972 
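		/* Initialize the context used to sleep until the firmware's
		 * IF_CFG response arrives.
		 */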
1973 		WRITE_ONCE(ctx->cond, 0);
1974 		ctx->octeon_id = lio_get_device_id(octeon_dev);
1975 		init_waitqueue_head(&ctx->wc);
1976 
1977 		if_cfg.u64 = 0;
1978 
1979 		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
1980 		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
1981 		if_cfg.s.base_queue = 0;
1982 
1983 		sc->iq_no = 0;
1984 
1985 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
1986 					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
1987 					    0);
1988 
1989 		sc->callback = lio_if_cfg_callback;
1990 		sc->callback_arg = sc;
1991 		sc->wait_time = 5000;
1992 
1993 		retval = octeon_send_soft_command(octeon_dev, sc);
1994 		if (retval == IQ_SEND_FAILED) {
1995 			dev_err(&octeon_dev->pci_dev->dev,
1996 				"iq/oq config failed status: %x\n", retval);
1997 			/* Soft instr is freed by driver in case of failure. */
1998 			goto setup_nic_dev_fail;
1999 		}
2000 
2001 		/* Sleep on a wait queue until the cond flag indicates that the
2002 		 * response arrived or the wait timed out.
2003 		 */
2004 		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
2005 			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
2006 			goto setup_nic_wait_intr;
2007 		}
2008 
2009 		retval = resp->status;
2010 		if (retval) {
2011 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
2012 			goto setup_nic_dev_fail;
2013 		}
2014 
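		/* The response is in Octeon (big-endian) byte order; a 64-bit
		 * swap is required on LE machines.
		 */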
2015 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2016 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
2017 
2018 		num_iqueues = hweight64(resp->cfg_info.iqmask);
2019 		num_oqueues = hweight64(resp->cfg_info.oqmask);
2020 
2021 		if (!num_iqueues || !num_oqueues) {
2022 			dev_err(&octeon_dev->pci_dev->dev,
2023 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2024 				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2025 			goto setup_nic_dev_fail;
2026 		}
2027 		dev_dbg(&octeon_dev->pci_dev->dev,
2028 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2029 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2030 			num_iqueues, num_oqueues);
2031 
2032 		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2033 
2034 		if (!netdev) {
2035 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2036 			goto setup_nic_dev_fail;
2037 		}
2038 
2039 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2040 
2041 		/* Associate the routines that will handle different
2042 		 * netdev tasks.
2043 		 */
2044 		netdev->netdev_ops = &lionetdevops;
2045 
2046 		lio = GET_LIO(netdev);
2047 
2048 		memset(lio, 0, sizeof(struct lio));
2049 
2050 		lio->ifidx = ifidx_or_pfnum;
2051 
2052 		props = &octeon_dev->props[i];
2053 		props->gmxport = resp->cfg_info.linfo.gmxport;
2054 		props->netdev = netdev;
2055 
2056 		lio->linfo.num_rxpciq = num_oqueues;
2057 		lio->linfo.num_txpciq = num_iqueues;
2058 
2059 		for (j = 0; j < num_oqueues; j++) {
2060 			lio->linfo.rxpciq[j].u64 =
2061 			    resp->cfg_info.linfo.rxpciq[j].u64;
2062 		}
2063 		for (j = 0; j < num_iqueues; j++) {
2064 			lio->linfo.txpciq[j].u64 =
2065 			    resp->cfg_info.linfo.txpciq[j].u64;
2066 		}
2067 
2068 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2069 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2070 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2071 		lio->linfo.macaddr_is_admin_asgnd =
2072 			resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2073 
2074 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2075 
2076 		lio->dev_capability = NETIF_F_HIGHDMA
2077 				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2078 				      | NETIF_F_SG | NETIF_F_RXCSUM
2079 				      | NETIF_F_TSO | NETIF_F_TSO6
2080 				      | NETIF_F_GRO
2081 				      | NETIF_F_LRO;
2082 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2083 
2084 		/* Copy of the transmit encapsulation capabilities
2085 		 * (TSO, TSO6, checksums) for this device
2086 		 */
2087 		lio->enc_dev_capability = NETIF_F_IP_CSUM
2088 					  | NETIF_F_IPV6_CSUM
2089 					  | NETIF_F_GSO_UDP_TUNNEL
2090 					  | NETIF_F_HW_CSUM | NETIF_F_SG
2091 					  | NETIF_F_RXCSUM
2092 					  | NETIF_F_TSO | NETIF_F_TSO6
2093 					  | NETIF_F_LRO;
2094 
2095 		netdev->hw_enc_features =
2096 		    (lio->enc_dev_capability & ~NETIF_F_LRO);
2097 		netdev->vlan_features = lio->dev_capability;
2098 		/* Add any unchangeable hw features */
2099 		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2100 				       NETIF_F_HW_VLAN_CTAG_RX |
2101 				       NETIF_F_HW_VLAN_CTAG_TX;
2102 
2103 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2104 
2105 		netdev->hw_features = lio->dev_capability;
2106 		netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2107 
2108 		/* MTU range: 68 - 16000 */
2109 		netdev->min_mtu = LIO_MIN_MTU_SIZE;
2110 		netdev->max_mtu = LIO_MAX_MTU_SIZE;
2111 
2112 		/* Point to the properties of the octeon device to which this
2113 		 * interface belongs.
2114 		 */
2115 		lio->oct_dev = octeon_dev;
2116 		lio->octprops = props;
2117 		lio->netdev = netdev;
2118 
2119 		dev_dbg(&octeon_dev->pci_dev->dev,
2120 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
2121 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2122 
2123 		/* 64-bit swap required on LE machines */
2124 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
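		/* After the swap, the 6-byte MAC occupies bytes 2..7 of the
		 * 8-byte hw_addr field.
		 */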
2125 		for (j = 0; j < ETH_ALEN; j++)
2126 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2127 
2128 		/* Copy MAC Address to OS network device structure */
2129 		ether_addr_copy(netdev->dev_addr, mac);
2130 
2131 		if (liquidio_setup_io_queues(octeon_dev, i,
2132 					     lio->linfo.num_txpciq,
2133 					     lio->linfo.num_rxpciq)) {
2134 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2135 			goto setup_nic_dev_fail;
2136 		}
2137 
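		/* The DROQ handlers for this interface are now in place */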
2138 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2139 
2140 		/* For VFs, enable Octeon device interrupts here,
2141 		 * as this is contingent upon IO queue setup
2142 		 */
2143 		octeon_dev->fn_list.enable_interrupt(octeon_dev,
2144 						     OCTEON_ALL_INTR);
2145 
2146 		/* By default all interfaces on a single Octeon use the same
2147 		 * tx and rx queues.
2148 		 */
2149 		lio->txq = lio->linfo.txpciq[0].s.q_no;
2150 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2151 
2152 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2153 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2154 
2155 		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2156 			dev_err(&octeon_dev->pci_dev->dev,
2157 				"Gather list allocation failed\n");
2158 			goto setup_nic_dev_fail;
2159 		}
2160 
2161 		/* Register ethtool support */
2162 		liquidio_set_ethtool_ops(netdev);
2163 		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2164 			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2165 		else
2166 			octeon_dev->priv_flags = 0x0;
2167 
2168 		if (netdev->features & NETIF_F_LRO)
2169 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2170 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2171 
2172 		if (setup_link_status_change_wq(netdev))
2173 			goto setup_nic_dev_fail;
2174 
2175 		if (setup_rx_oom_poll_fn(netdev))
2176 			goto setup_nic_dev_fail;
2177 
2178 		/* Register the network device with the OS */
2179 		if (register_netdev(netdev)) {
2180 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2181 			goto setup_nic_dev_fail;
2182 		}
2183 
2184 		dev_dbg(&octeon_dev->pci_dev->dev,
2185 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2186 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2187 		netif_carrier_off(netdev);
2188 		lio->link_changes++;
2189 
2190 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2191 
2192 		/* Send a command to the firmware to enable Rx checksum
2193 		 * offload by default at the time this device is set up by
2194 		 * the LiquidIO driver.
2195 		 */
2196 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2197 					    OCTNET_CMD_RXCSUM_ENABLE);
2198 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2199 				     OCTNET_CMD_TXCSUM_ENABLE);
2200 
2201 		dev_dbg(&octeon_dev->pci_dev->dev,
2202 			"NIC ifidx:%d Setup successful\n", i);
2203 
2204 		octeon_free_soft_command(octeon_dev, sc);
2205 
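		/* Link speed is not configurable on VF interfaces */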
2206 		octeon_dev->no_speed_setting = 1;
2207 	}
2208 
2209 	return 0;
2210 
2211 setup_nic_dev_fail:
2212 
2213 	octeon_free_soft_command(octeon_dev, sc);
2214 
2215 setup_nic_wait_intr:
2216 
2217 	while (i--) {
2218 		dev_err(&octeon_dev->pci_dev->dev,
2219 			"NIC ifidx:%d Setup failed\n", i);
2220 		liquidio_destroy_nic_device(octeon_dev, i);
2221 	}
2222 	return -ENODEV;
2223 }
2224 
2225 /**
2226  * \brief initialize the NIC
2227  * @param oct octeon device
2228  *
2229  * This initialization routine is called once the Octeon device application is
2230  * up and running
2231  */
2232 static int liquidio_init_nic_module(struct octeon_device *oct)
2233 {
2234 	int num_nic_ports = 1;
2235 	int i, retval = 0;
2236 
2237 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2238 
2239 	/* Only the default iq and oq were initialized.  Initialize the
2240 	 * rest as well, and run the port_config command for each port.
2241 	 */
2242 	oct->ifcount = num_nic_ports;
2243 	memset(oct->props, 0,
2244 	       sizeof(struct octdev_props) * num_nic_ports);
2245 
2246 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
2247 		oct->props[i].gmxport = -1;
2248 
2249 	retval = setup_nic_devices(oct);
2250 	if (retval) {
2251 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2252 		goto octnet_init_failure;
2253 	}
2254 
2255 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2256 
2257 	return retval;
2258 
2259 octnet_init_failure:
2260 
2261 	oct->ifcount = 0;
2262 
2263 	return retval;
2264 }
2265 
2266 /**
2267  * \brief Device initialization for each Octeon device that is probed
2268  * @param oct  octeon device
2269  */
2270 static int octeon_device_init(struct octeon_device *oct)
2271 {
2272 	u32 rev_id;
2273 	int j;
2274 
2275 	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2276 
2277 	/* Enable access to the octeon device and make its DMA capability
2278 	 * known to the OS.
2279 	 */
2280 	if (octeon_pci_os_setup(oct))
2281 		return 1;
2282 	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2283 
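	/* The low byte of the dword at PCI config offset 8 holds the
	 * revision ID (the class code occupies the upper bytes).
	 */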
2284 	oct->chip_id = OCTEON_CN23XX_VF_VID;
2285 	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
2286 	oct->rev_id = rev_id & 0xff;
2287 
2288 	if (cn23xx_setup_octeon_vf_device(oct))
2289 		return 1;
2290 
2291 	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2292 
2293 	oct->app_mode = CVM_DRV_NIC_APP;
2294 
2295 	/* Initialize the dispatch mechanism used to push packets arriving on
2296 	 * Octeon Output queues.
2297 	 */
2298 	if (octeon_init_dispatch_list(oct))
2299 		return 1;
2300 
2301 	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2302 
2303 	if (octeon_set_io_queues_off(oct)) {
2304 		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2305 		return 1;
2306 	}
2307 
2308 	if (oct->fn_list.setup_device_regs(oct)) {
2309 		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2310 		return 1;
2311 	}
2312 
2313 	/* Initialize soft command buffer pool */
2314 	if (octeon_setup_sc_buffer_pool(oct)) {
2315 		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2316 		return 1;
2317 	}
2318 	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2319 
2320 	/* Setup the data structures that manage this Octeon's Input queues. */
2321 	if (octeon_setup_instr_queues(oct)) {
2322 		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2323 		return 1;
2324 	}
2325 	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2326 
2327 	/* Initialize lists to manage the requests of different types that
2328 	 * arrive from user & kernel applications for this octeon device.
2329 	 */
2330 	if (octeon_setup_response_list(oct)) {
2331 		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2332 		return 1;
2333 	}
2334 	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2335 
2336 	if (octeon_setup_output_queues(oct)) {
2337 		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2338 		return 1;
2339 	}
2340 	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2341 
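	/* Set up the mailbox used for PF<-->VF communication */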
2342 	if (oct->fn_list.setup_mbox(oct)) {
2343 		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2344 		return 1;
2345 	}
2346 	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2347 
2348 	if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
2349 		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2350 		return 1;
2351 	}
2352 	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2353 
2354 	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
2355 		 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
2356 
2357 	/* Set up the interrupt handler and record the INT SUM register address */
2358 	if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2359 		return 1;
2360 
2361 	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2362 
2363 	/* ***************************************************************
2364 	 * The interrupts need to be enabled for the PF<-->VF handshake.
2365 	 * They are [re]-enabled after the PF<-->VF handshake so that the
2366 	 * correct OQ tick value is used (i.e. the value retrieved from
2367 	 * the PF as part of the handshake).
2368 	 */
2369 
2370 	/* Enable Octeon device interrupts */
2371 	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2372 
2373 	if (cn23xx_octeon_pfvf_handshake(oct))
2374 		return 1;
2375 
2376 	/* Here we [re]-enable the interrupts so that the correct OQ tick value
2377 	 * is used (i.e. the value that was retrieved during the handshake)
2378 	 */
2379 
2380 	/* Enable Octeon device interrupts */
2381 	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2382 	/* *************************************************************** */
2383 
2384 	/* Enable the input and output queues for this Octeon device */
2385 	if (oct->fn_list.enable_io_queues(oct)) {
2386 		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2387 		return 1;
2388 	}
2389 
2390 	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2391 
2392 	atomic_set(&oct->status, OCT_DEV_HOST_OK);
2393 
2394 	/* Send Credit for Octeon Output queues. Credits are always sent after
2395 	 * the output queue is enabled.
2396 	 */
2397 	for (j = 0; j < oct->num_oqs; j++)
2398 		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2399 
2400 	/* Packets can start arriving on the output queues from this point. */
2401 
2402 	atomic_set(&oct->status, OCT_DEV_CORE_OK);
2403 
2404 	atomic_set(&oct->status, OCT_DEV_RUNNING);
2405 
2406 	if (liquidio_init_nic_module(oct))
2407 		return 1;
2408 
2409 	return 0;
2410 }
2411 
2412 static int __init liquidio_vf_init(void)
2413 {
2414 	octeon_init_device_list(0);
2415 	return pci_register_driver(&liquidio_vf_pci_driver);
2416 }
2417 
2418 static void __exit liquidio_vf_exit(void)
2419 {
2420 	pci_unregister_driver(&liquidio_vf_pci_driver);
2421 
2422 	pr_info("LiquidIO_VF network module is now unloaded\n");
2423 }
2424 
2425 module_init(liquidio_vf_init);
2426 module_exit(liquidio_vf_exit);
2427