1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <net/vxlan.h>
21 #include "liquidio_common.h"
22 #include "octeon_droq.h"
23 #include "octeon_iq.h"
24 #include "response_manager.h"
25 #include "octeon_device.h"
26 #include "octeon_nic.h"
27 #include "octeon_main.h"
28 #include "octeon_network.h"
29 #include "cn23xx_vf_device.h"
30 
31 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
32 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
33 MODULE_LICENSE("GPL");
34 MODULE_VERSION(LIQUIDIO_VERSION);
35 
36 static int debug = -1;
37 module_param(debug, int, 0644);
38 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
39 
40 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
41 
42 /* Bit mask values for lio->ifstate */
43 #define   LIO_IFSTATE_DROQ_OPS             0x01
44 #define   LIO_IFSTATE_REGISTERED           0x02
45 #define   LIO_IFSTATE_RUNNING              0x04
46 #define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
47 
48 struct liquidio_if_cfg_context {
49 	int octeon_id;
50 
51 	wait_queue_head_t wc;
52 
53 	int cond;
54 };
55 
56 struct liquidio_if_cfg_resp {
57 	u64 rh;
58 	struct liquidio_if_cfg_info cfg_info;
59 	u64 status;
60 };
61 
62 struct liquidio_rx_ctl_context {
63 	int octeon_id;
64 
65 	wait_queue_head_t wc;
66 
67 	int cond;
68 };
69 
70 struct oct_timestamp_resp {
71 	u64 rh;
72 	u64 timestamp;
73 	u64 status;
74 };
75 
76 union tx_info {
77 	u64 u64;
78 	struct {
79 #ifdef __BIG_ENDIAN_BITFIELD
80 		u16 gso_size;
81 		u16 gso_segs;
82 		u32 reserved;
83 #else
84 		u32 reserved;
85 		u16 gso_segs;
86 		u16 gso_size;
87 #endif
88 	} s;
89 };
90 
91 #define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)
92 
93 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
94 #define OCTNIC_GSO_MAX_SIZE \
95 		(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
96 
97 struct octnic_gather {
98 	/* List manipulation. Next and prev pointers. */
99 	struct list_head list;
100 
101 	/* Size of the gather component at sg in bytes. */
102 	int sg_size;
103 
104 	/* Number of bytes that sg was adjusted to make it 8B-aligned. */
105 	int adjust;
106 
107 	/* Gather component that can accommodate max sized fragment list
108 	 * received from the IP layer.
109 	 */
110 	struct octeon_sg_entry *sg;
111 };
112 
113 struct octeon_device_priv {
114 	/* Tasklet structures for this device. */
115 	struct tasklet_struct droq_tasklet;
116 	unsigned long napi_mask;
117 };
118 
119 static int
120 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
121 static void liquidio_vf_remove(struct pci_dev *pdev);
122 static int octeon_device_init(struct octeon_device *oct);
123 static int liquidio_stop(struct net_device *netdev);
124 
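/**
 * \brief Wait for all pending output queue packets to be processed
 * @param oct Pointer to Octeon device
 *
 * Schedules the DROQ tasklet while packets keep arriving, giving up after
 * MAX_VF_IP_OP_PENDING_PKT_COUNT retries. Called during shutdown.
 */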
125 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
126 {
127 	struct octeon_device_priv *oct_priv =
128 	    (struct octeon_device_priv *)oct->priv;
129 	int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT;
130 	int pkt_cnt = 0, pending_pkts;
131 	int i;
132 
133 	do {
134 		pending_pkts = 0;
135 
136 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
137 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
138 				continue;
139 			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
140 		}
141 		if (pkt_cnt > 0) {
142 			pending_pkts += pkt_cnt;
143 			tasklet_schedule(&oct_priv->droq_tasklet);
144 		}
145 		pkt_cnt = 0;
146 		schedule_timeout_uninterruptible(1);
147 
148 	} while (retry-- && pending_pkts);
149 
150 	return pkt_cnt;
151 }
152 
153 /**
154  * \brief wait for all pending requests to complete
155  * @param oct Pointer to Octeon device
156  *
157  * Called during shutdown sequence
158  */
159 static int wait_for_pending_requests(struct octeon_device *oct)
160 {
161 	int i, pcount = 0;
162 
163 	for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) {
164 		pcount = atomic_read(
165 		    &oct->response_list[OCTEON_ORDERED_SC_LIST]
166 			 .pending_req_count);
167 		if (pcount)
168 			schedule_timeout_uninterruptible(HZ / 10);
169 		else
170 			break;
171 	}
172 
173 	if (pcount)
174 		return 1;
175 
176 	return 0;
177 }
178 
179 /**
180  * \brief Cause device to go quiet so it can be safely removed/reset/etc
181  * @param oct Pointer to Octeon device
182  */
183 static void pcierror_quiesce_device(struct octeon_device *oct)
184 {
185 	int i;
186 
187 	/* Disable the input and output queues now. No more packets will
188 	 * arrive from Octeon, but we should wait for all packet processing
189 	 * to finish.
190 	 */
191 
192 	/* To allow for in-flight requests */
193 	schedule_timeout_uninterruptible(100);
194 
195 	if (wait_for_pending_requests(oct))
196 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
197 
198 	/* Force all requests waiting to be fetched by OCTEON to complete. */
199 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
200 		struct octeon_instr_queue *iq;
201 
202 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
203 			continue;
204 		iq = oct->instr_queue[i];
205 
206 		if (atomic_read(&iq->instr_pending)) {
207 			spin_lock_bh(&iq->lock);
208 			iq->fill_cnt = 0;
209 			iq->octeon_read_index = iq->host_write_index;
210 			iq->stats.instr_processed +=
211 			    atomic_read(&iq->instr_pending);
212 			lio_process_iq_request_list(oct, iq, 0);
213 			spin_unlock_bh(&iq->lock);
214 		}
215 	}
216 
217 	/* Force all pending ordered list requests to time out. */
218 	lio_process_ordered_list(oct, 1);
219 
220 	/* We do not need to wait for output queue packets to be processed. */
221 }
222 
223 /**
224  * \brief Cleanup PCI AER uncorrectable error status
225  * @param dev Pointer to PCI device
226  */
227 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
228 {
229 	u32 status, mask;
230 	int pos = 0x100;
231 
232 	pr_info("%s :\n", __func__);
233 
234 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
235 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
236 	if (dev->error_state == pci_channel_io_normal)
237 		status &= ~mask; /* Clear corresponding nonfatal bits */
238 	else
239 		status &= mask; /* Clear corresponding fatal bits */
240 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
241 }
242 
243 /**
244  * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
246  */
247 static void stop_pci_io(struct octeon_device *oct)
248 {
249 	struct msix_entry *msix_entries;
250 	int i;
251 
252 	/* No more instructions will be forwarded. */
253 	atomic_set(&oct->status, OCT_DEV_IN_RESET);
254 
255 	for (i = 0; i < oct->ifcount; i++)
256 		netif_device_detach(oct->props[i].netdev);
257 
258 	/* Disable interrupts  */
259 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
260 
261 	pcierror_quiesce_device(oct);
262 	if (oct->msix_on) {
263 		msix_entries = (struct msix_entry *)oct->msix_entries;
264 		for (i = 0; i < oct->num_msix_irqs; i++) {
265 			/* clear the affinity_cpumask */
266 			irq_set_affinity_hint(msix_entries[i].vector,
267 					      NULL);
268 			free_irq(msix_entries[i].vector,
269 				 &oct->ioq_vector[i]);
270 		}
271 		pci_disable_msix(oct->pci_dev);
272 		kfree(oct->msix_entries);
273 		oct->msix_entries = NULL;
274 		octeon_free_ioq_vector(oct);
275 	}
276 	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
277 		lio_get_state_string(&oct->status));
278 
279 	/* making it a common function for all OCTEON models */
280 	cleanup_aer_uncorrect_error_status(oct->pci_dev);
281 
282 	pci_disable_device(oct->pci_dev);
283 }
284 
285 /**
286  * \brief called when PCI error is detected
287  * @param pdev Pointer to PCI device
288  * @param state The current pci connection state
289  *
290  * This function is called after a PCI bus error affecting
291  * this device has been detected.
292  */
293 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
294 						     pci_channel_state_t state)
295 {
296 	struct octeon_device *oct = pci_get_drvdata(pdev);
297 
298 	/* Non-correctable Non-fatal errors */
299 	if (state == pci_channel_io_normal) {
300 		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
301 		cleanup_aer_uncorrect_error_status(oct->pci_dev);
302 		return PCI_ERS_RESULT_CAN_RECOVER;
303 	}
304 
305 	/* Non-correctable Fatal errors */
306 	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
307 	stop_pci_io(oct);
308 
309 	return PCI_ERS_RESULT_DISCONNECT;
310 }
311 
312 /* For PCI-E Advanced Error Recovery (AER) Interface */
313 static const struct pci_error_handlers liquidio_vf_err_handler = {
314 	.error_detected = liquidio_pcie_error_detected,
315 };
316 
317 static const struct pci_device_id liquidio_vf_pci_tbl[] = {
318 	{
319 		PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
320 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
321 	},
322 	{
323 		0, 0, 0, 0, 0, 0, 0
324 	}
325 };
326 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
327 
328 static struct pci_driver liquidio_vf_pci_driver = {
329 	.name		= "LiquidIO_VF",
330 	.id_table	= liquidio_vf_pci_tbl,
331 	.probe		= liquidio_vf_probe,
332 	.remove		= liquidio_vf_remove,
333 	.err_handler	= &liquidio_vf_err_handler,    /* For AER */
334 };
335 
336 /**
337  * \brief check interface state
338  * @param lio per-network private data
339  * @param state_flag flag state to check
340  */
341 static int ifstate_check(struct lio *lio, int state_flag)
342 {
343 	return atomic_read(&lio->ifstate) & state_flag;
344 }
345 
346 /**
347  * \brief set interface state
348  * @param lio per-network private data
349  * @param state_flag flag state to set
350  */
351 static void ifstate_set(struct lio *lio, int state_flag)
352 {
353 	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
354 }
355 
356 /**
357  * \brief clear interface state
358  * @param lio per-network private data
359  * @param state_flag flag state to clear
360  */
361 static void ifstate_reset(struct lio *lio, int state_flag)
362 {
363 	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
364 }
365 
366 /**
367  * \brief Stop Tx queues
368  * @param netdev network device
369  */
370 static void txqs_stop(struct net_device *netdev)
371 {
372 	if (netif_is_multiqueue(netdev)) {
373 		int i;
374 
375 		for (i = 0; i < netdev->num_tx_queues; i++)
376 			netif_stop_subqueue(netdev, i);
377 	} else {
378 		netif_stop_queue(netdev);
379 	}
380 }
381 
382 /**
383  * \brief Start Tx queues
384  * @param netdev network device
385  */
386 static void txqs_start(struct net_device *netdev)
387 {
388 	if (netif_is_multiqueue(netdev)) {
389 		int i;
390 
391 		for (i = 0; i < netdev->num_tx_queues; i++)
392 			netif_start_subqueue(netdev, i);
393 	} else {
394 		netif_start_queue(netdev);
395 	}
396 }
397 
398 /**
399  * \brief Wake Tx queues
400  * @param netdev network device
401  */
402 static void txqs_wake(struct net_device *netdev)
403 {
404 	struct lio *lio = GET_LIO(netdev);
405 
406 	if (netif_is_multiqueue(netdev)) {
407 		int i;
408 
409 		for (i = 0; i < netdev->num_tx_queues; i++) {
410 			int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)]
411 				      .s.q_no;
412 			if (__netif_subqueue_stopped(netdev, i)) {
413 				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
414 							  tx_restart, 1);
415 				netif_wake_subqueue(netdev, i);
416 			}
417 		}
418 	} else {
419 		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
420 					  tx_restart, 1);
421 		netif_wake_queue(netdev);
422 	}
423 }
424 
425 /**
426  * \brief Start Tx queue
427  * @param netdev network device
428  */
429 static void start_txq(struct net_device *netdev)
430 {
431 	struct lio *lio = GET_LIO(netdev);
432 
433 	if (lio->linfo.link.s.link_up) {
434 		txqs_start(netdev);
435 		return;
436 	}
437 }
438 
439 /**
440  * \brief Wake a queue
441  * @param netdev network device
442  * @param q which queue to wake
443  */
444 static void wake_q(struct net_device *netdev, int q)
445 {
446 	if (netif_is_multiqueue(netdev))
447 		netif_wake_subqueue(netdev, q);
448 	else
449 		netif_wake_queue(netdev);
450 }
451 
452 /**
453  * \brief Stop a queue
454  * @param netdev network device
455  * @param q which queue to stop
456  */
457 static void stop_q(struct net_device *netdev, int q)
458 {
459 	if (netif_is_multiqueue(netdev))
460 		netif_stop_subqueue(netdev, q);
461 	else
462 		netif_stop_queue(netdev);
463 }
464 
465 /**
 * Remove and return the node at the head of the list; returns NULL if
 * the list is empty.
468  */
469 static struct list_head *list_delete_head(struct list_head *root)
470 {
471 	struct list_head *node;
472 
473 	if ((root->prev == root) && (root->next == root))
474 		node = NULL;
475 	else
476 		node = root->next;
477 
478 	if (node)
479 		list_del(node);
480 
481 	return node;
482 }
483 
484 /**
485  * \brief Delete gather lists
486  * @param lio per-network private data
487  */
488 static void delete_glists(struct lio *lio)
489 {
490 	struct octnic_gather *g;
491 	int i;
492 
493 	if (!lio->glist)
494 		return;
495 
496 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
497 		do {
498 			g = (struct octnic_gather *)
499 			    list_delete_head(&lio->glist[i]);
500 			if (g) {
501 				if (g->sg)
502 					kfree((void *)((unsigned long)g->sg -
503 							g->adjust));
504 				kfree(g);
505 			}
506 		} while (g);
507 	}
508 
509 	kfree(lio->glist);
510 	kfree(lio->glist_lock);
511 }
512 
513 /**
514  * \brief Setup gather lists
 * @param lio per-network private data
 * @param num_iqs number of input (Tx) queues needing gather lists
516  */
517 static int setup_glists(struct lio *lio, int num_iqs)
518 {
519 	struct octnic_gather *g;
520 	int i, j;
521 
522 	lio->glist_lock =
523 	    kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
524 	if (!lio->glist_lock)
525 		return 1;
526 
527 	lio->glist =
528 	    kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
529 	if (!lio->glist) {
530 		kfree(lio->glist_lock);
531 		return 1;
532 	}
533 
534 	for (i = 0; i < num_iqs; i++) {
535 		spin_lock_init(&lio->glist_lock[i]);
536 
537 		INIT_LIST_HEAD(&lio->glist[i]);
538 
539 		for (j = 0; j < lio->tx_qsize; j++) {
540 			g = kzalloc(sizeof(*g), GFP_KERNEL);
541 			if (!g)
542 				break;
543 
544 			g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
545 				      OCT_SG_ENTRY_SIZE);
546 
547 			g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
548 			if (!g->sg) {
549 				kfree(g);
550 				break;
551 			}
552 
553 			/* The gather component should be aligned on 64-bit
554 			 * boundary
555 			 */
556 			if (((unsigned long)g->sg) & 7) {
557 				g->adjust = 8 - (((unsigned long)g->sg) & 7);
558 				g->sg = (struct octeon_sg_entry *)
559 					((unsigned long)g->sg + g->adjust);
560 			}
561 			list_add_tail(&g->list, &lio->glist[i]);
562 		}
563 
564 		if (j != lio->tx_qsize) {
565 			delete_glists(lio);
566 			return 1;
567 		}
568 	}
569 
570 	return 0;
571 }
572 
573 /**
574  * \brief Print link information
575  * @param netdev network device
576  */
577 static void print_link_info(struct net_device *netdev)
578 {
579 	struct lio *lio = GET_LIO(netdev);
580 
581 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
582 		struct oct_link_info *linfo = &lio->linfo;
583 
584 		if (linfo->link.s.link_up) {
585 			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
586 				   linfo->link.s.speed,
587 				   (linfo->link.s.duplex) ? "Full" : "Half");
588 		} else {
589 			netif_info(lio, link, lio->netdev, "Link Down\n");
590 		}
591 	}
592 }
593 
594 /**
595  * \brief Routine to notify MTU change
596  * @param work work_struct data structure
597  */
598 static void octnet_link_status_change(struct work_struct *work)
599 {
600 	struct cavium_wk *wk = (struct cavium_wk *)work;
601 	struct lio *lio = (struct lio *)wk->ctxptr;
602 
603 	rtnl_lock();
604 	call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
605 	rtnl_unlock();
606 }
607 
608 /**
609  * \brief Sets up the mtu status change work
610  * @param netdev network device
611  */
612 static int setup_link_status_change_wq(struct net_device *netdev)
613 {
614 	struct lio *lio = GET_LIO(netdev);
615 	struct octeon_device *oct = lio->oct_dev;
616 
617 	lio->link_status_wq.wq = alloc_workqueue("link-status",
618 						 WQ_MEM_RECLAIM, 0);
619 	if (!lio->link_status_wq.wq) {
620 		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
621 		return -1;
622 	}
623 	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
624 			  octnet_link_status_change);
625 	lio->link_status_wq.wk.ctxptr = lio;
626 
627 	return 0;
628 }
629 
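/**
 * \brief Tear down the link status change work queue
 * @param netdev network device
 */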
630 static void cleanup_link_status_change_wq(struct net_device *netdev)
631 {
632 	struct lio *lio = GET_LIO(netdev);
633 
634 	if (lio->link_status_wq.wq) {
635 		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
636 		destroy_workqueue(lio->link_status_wq.wq);
637 	}
638 }
639 
640 /**
641  * \brief Update link status
642  * @param netdev network device
643  * @param ls link status structure
644  *
645  * Called on receipt of a link status response from the core application to
646  * update each interface's link status.
647  */
648 static void update_link_status(struct net_device *netdev,
649 			       union oct_link_status *ls)
650 {
651 	struct lio *lio = GET_LIO(netdev);
652 	struct octeon_device *oct = lio->oct_dev;
653 
654 	if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
655 		lio->linfo.link.u64 = ls->u64;
656 
657 		print_link_info(netdev);
658 		lio->link_changes++;
659 
660 		if (lio->linfo.link.s.link_up) {
661 			netif_carrier_on(netdev);
662 			txqs_wake(netdev);
663 		} else {
664 			netif_carrier_off(netdev);
665 			txqs_stop(netdev);
666 		}
667 
668 		if (lio->linfo.link.s.mtu < netdev->mtu) {
669 			dev_warn(&oct->pci_dev->dev,
670 				 "PF has changed the MTU for gmx port. Reducing the mtu from %d to %d\n",
671 				 netdev->mtu, lio->linfo.link.s.mtu);
672 			lio->mtu = lio->linfo.link.s.mtu;
673 			netdev->mtu = lio->linfo.link.s.mtu;
674 			queue_delayed_work(lio->link_status_wq.wq,
675 					   &lio->link_status_wq.wk.work, 0);
676 		}
677 	}
678 }
679 
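/**
 * \brief Wake the Tx queue backing an instruction queue once it has room
 * @param oct octeon device
 * @param iq_num which instruction queue
 */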
680 static void update_txq_status(struct octeon_device *oct, int iq_num)
681 {
682 	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
683 	struct net_device *netdev;
684 	struct lio *lio;
685 
686 	netdev = oct->props[iq->ifidx].netdev;
687 	lio = GET_LIO(netdev);
688 	if (netif_is_multiqueue(netdev)) {
689 		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
690 		    lio->linfo.link.s.link_up &&
691 		    (!octnet_iq_is_full(oct, iq_num))) {
692 			netif_wake_subqueue(netdev, iq->q_index);
693 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
694 						  tx_restart, 1);
695 		} else {
696 			if (!octnet_iq_is_full(oct, lio->txq)) {
697 				INCR_INSTRQUEUE_PKT_COUNT(
698 				    lio->oct_dev, lio->txq, tx_restart, 1);
699 				wake_q(netdev, lio->txq);
700 			}
701 		}
702 	}
703 }
704 
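/**
 * \brief Schedule droq packet processing for an MSI-X interrupt
 * @param droq output queue on which the interrupt was received
 * @param ret interrupt status returned by the hardware-specific handler
 */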
705 static
706 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
707 {
708 	struct octeon_device *oct = droq->oct_dev;
709 	struct octeon_device_priv *oct_priv =
710 	    (struct octeon_device_priv *)oct->priv;
711 
712 	if (droq->ops.poll_mode) {
713 		droq->ops.napi_fn(droq);
714 	} else {
715 		if (ret & MSIX_PO_INT) {
716 			dev_err(&oct->pci_dev->dev,
				"unexpected Rx interrupt while poll mode is disabled for VF\n");
718 			tasklet_schedule(&oct_priv->droq_tasklet);
719 			return 1;
720 		}
		/* the IQ will be flushed periodically by the IQ doorbell check */
722 		if (ret & MSIX_PI_INT)
723 			return 0;
724 	}
725 	return 0;
726 }
727 
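/**
 * \brief MSI-X interrupt handler
 * @param irq unused
 * @param dev pointer to the IOQ vector that raised the interrupt
 */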
728 static irqreturn_t
729 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
730 {
731 	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
732 	struct octeon_device *oct = ioq_vector->oct_dev;
733 	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
734 	u64 ret;
735 
736 	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
737 
738 	if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
739 		liquidio_schedule_msix_droq_pkt_handler(droq, ret);
740 
741 	return IRQ_HANDLED;
742 }
743 
744 /**
745  * \brief Setup interrupt for octeon device
746  * @param oct octeon device
747  *
748  *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
749  */
750 static int octeon_setup_interrupt(struct octeon_device *oct)
751 {
752 	struct msix_entry *msix_entries;
753 	int num_alloc_ioq_vectors;
754 	int num_ioq_vectors;
755 	int irqret;
756 	int i;
757 
758 	if (oct->msix_on) {
759 		oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
760 
761 		oct->msix_entries = kcalloc(
762 		    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
763 		if (!oct->msix_entries)
764 			return 1;
765 
766 		msix_entries = (struct msix_entry *)oct->msix_entries;
767 
768 		for (i = 0; i < oct->num_msix_irqs; i++)
769 			msix_entries[i].entry = i;
770 		num_alloc_ioq_vectors = pci_enable_msix_range(
771 						oct->pci_dev, msix_entries,
772 						oct->num_msix_irqs,
773 						oct->num_msix_irqs);
774 		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "Unable to allocate MSI-X interrupts\n");
776 			kfree(oct->msix_entries);
777 			oct->msix_entries = NULL;
778 			return 1;
779 		}
780 		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
781 
782 		num_ioq_vectors = oct->num_msix_irqs;
783 
784 		for (i = 0; i < num_ioq_vectors; i++) {
785 			irqret = request_irq(msix_entries[i].vector,
786 					     liquidio_msix_intr_handler, 0,
787 					     "octeon", &oct->ioq_vector[i]);
788 			if (irqret) {
789 				dev_err(&oct->pci_dev->dev,
					"OCTEON: request_irq failed for MSI-X interrupt, error: %d\n",
791 					irqret);
792 
793 				while (i) {
794 					i--;
795 					irq_set_affinity_hint(
796 					    msix_entries[i].vector, NULL);
797 					free_irq(msix_entries[i].vector,
798 						 &oct->ioq_vector[i]);
799 				}
800 				pci_disable_msix(oct->pci_dev);
801 				kfree(oct->msix_entries);
802 				oct->msix_entries = NULL;
803 				return 1;
804 			}
805 			oct->ioq_vector[i].vector = msix_entries[i].vector;
806 			/* assign the cpu mask for this msix interrupt vector */
807 			irq_set_affinity_hint(
808 			    msix_entries[i].vector,
809 			    (&oct->ioq_vector[i].affinity_mask));
810 		}
811 		dev_dbg(&oct->pci_dev->dev,
812 			"OCTEON[%d]: MSI-X enabled\n", oct->octeon_id);
813 	}
814 	return 0;
815 }
816 
817 /**
818  * \brief PCI probe handler
819  * @param pdev PCI device structure
820  * @param ent unused
821  */
822 static int
823 liquidio_vf_probe(struct pci_dev *pdev,
824 		  const struct pci_device_id *ent __attribute__((unused)))
825 {
826 	struct octeon_device *oct_dev = NULL;
827 
828 	oct_dev = octeon_allocate_device(pdev->device,
829 					 sizeof(struct octeon_device_priv));
830 
831 	if (!oct_dev) {
832 		dev_err(&pdev->dev, "Unable to allocate device\n");
833 		return -ENOMEM;
834 	}
835 	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
836 
837 	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
838 		 (u32)pdev->vendor, (u32)pdev->device);
839 
840 	/* Assign octeon_device for this device to the private data area. */
841 	pci_set_drvdata(pdev, oct_dev);
842 
843 	/* set linux specific device pointer */
844 	oct_dev->pci_dev = pdev;
845 
846 	if (octeon_device_init(oct_dev)) {
847 		liquidio_vf_remove(pdev);
848 		return -ENOMEM;
849 	}
850 
851 	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
852 
853 	return 0;
854 }
855 
856 /**
857  * \brief PCI FLR for each Octeon device.
858  * @param oct octeon device
859  */
860 static void octeon_pci_flr(struct octeon_device *oct)
861 {
862 	u16 status;
863 
864 	pci_save_state(oct->pci_dev);
865 
866 	pci_cfg_access_lock(oct->pci_dev);
867 
868 	/* Quiesce the device completely */
869 	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
870 			      PCI_COMMAND_INTX_DISABLE);
871 
	/* Wait for the Transaction Pending bit to clear */
873 	msleep(100);
874 	pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status);
875 	if (status & PCI_EXP_DEVSTA_TRPND) {
876 		dev_info(&oct->pci_dev->dev, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
877 		ssleep(5);
878 		pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA,
879 					  &status);
880 		if (status & PCI_EXP_DEVSTA_TRPND)
881 			dev_info(&oct->pci_dev->dev, "Function reset still incomplete after 5s, reset anyway\n");
882 	}
883 	pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL,
884 				 PCI_EXP_DEVCTL_BCR_FLR);
885 	mdelay(100);
886 
887 	pci_cfg_access_unlock(oct->pci_dev);
888 
889 	pci_restore_state(oct->pci_dev);
890 }
891 
892 /**
 * \brief Destroy resources associated with an octeon device
 * @param oct octeon device
896  */
897 static void octeon_destroy_resources(struct octeon_device *oct)
898 {
899 	struct msix_entry *msix_entries;
900 	int i;
901 
902 	switch (atomic_read(&oct->status)) {
903 	case OCT_DEV_RUNNING:
904 	case OCT_DEV_CORE_OK:
905 		/* No more instructions will be forwarded. */
906 		atomic_set(&oct->status, OCT_DEV_IN_RESET);
907 
908 		oct->app_mode = CVM_DRV_INVALID_APP;
909 		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
910 			lio_get_state_string(&oct->status));
911 
912 		schedule_timeout_uninterruptible(HZ / 10);
913 
914 		/* fallthrough */
915 	case OCT_DEV_HOST_OK:
916 		/* fallthrough */
917 	case OCT_DEV_IO_QUEUES_DONE:
918 		if (wait_for_pending_requests(oct))
919 			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
920 
921 		if (lio_wait_for_instr_fetch(oct))
922 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
923 
924 		/* Disable the input and output queues now. No more packets will
925 		 * arrive from Octeon, but we should wait for all packet
926 		 * processing to finish.
927 		 */
928 		oct->fn_list.disable_io_queues(oct);
929 
930 		if (lio_wait_for_oq_pkts(oct))
931 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
932 
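		/* fallthrough */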
933 	case OCT_DEV_INTR_SET_DONE:
934 		/* Disable interrupts  */
935 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
936 
937 		if (oct->msix_on) {
938 			msix_entries = (struct msix_entry *)oct->msix_entries;
939 			for (i = 0; i < oct->num_msix_irqs; i++) {
940 				irq_set_affinity_hint(msix_entries[i].vector,
941 						      NULL);
942 				free_irq(msix_entries[i].vector,
943 					 &oct->ioq_vector[i]);
944 			}
945 			pci_disable_msix(oct->pci_dev);
946 			kfree(oct->msix_entries);
947 			oct->msix_entries = NULL;
948 		}
		/* Reset the octeon device (via FLR) before exiting */
950 		if (oct->pci_dev->reset_fn)
951 			octeon_pci_flr(oct);
952 		else
953 			cn23xx_vf_ask_pf_to_do_flr(oct);
954 
955 		/* fallthrough */
956 	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
957 		octeon_free_ioq_vector(oct);
958 
959 		/* fallthrough */
960 	case OCT_DEV_MBOX_SETUP_DONE:
961 		oct->fn_list.free_mbox(oct);
962 
963 		/* fallthrough */
964 	case OCT_DEV_IN_RESET:
965 	case OCT_DEV_DROQ_INIT_DONE:
966 		mdelay(100);
967 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
968 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
969 				continue;
970 			octeon_delete_droq(oct, i);
971 		}
972 
973 		/* fallthrough */
974 	case OCT_DEV_RESP_LIST_INIT_DONE:
975 		octeon_delete_response_list(oct);
976 
977 		/* fallthrough */
978 	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
979 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
980 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
981 				continue;
982 			octeon_delete_instr_queue(oct, i);
983 		}
984 
985 		/* fallthrough */
986 	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
987 		octeon_free_sc_buffer_pool(oct);
988 
989 		/* fallthrough */
990 	case OCT_DEV_DISPATCH_INIT_DONE:
991 		octeon_delete_dispatch_list(oct);
992 		cancel_delayed_work_sync(&oct->nic_poll_work.work);
993 
994 		/* fallthrough */
995 	case OCT_DEV_PCI_MAP_DONE:
996 		octeon_unmap_pci_barx(oct, 0);
997 		octeon_unmap_pci_barx(oct, 1);
998 
999 		/* fallthrough */
1000 	case OCT_DEV_PCI_ENABLE_DONE:
1001 		pci_clear_master(oct->pci_dev);
1002 		/* Disable the device, releasing the PCI INT */
1003 		pci_disable_device(oct->pci_dev);
1004 
1005 		/* fallthrough */
1006 	case OCT_DEV_BEGIN_STATE:
1007 		/* Nothing to be done here either */
1008 		break;
1009 	}
1010 }
1011 
1012 /**
1013  * \brief Callback for rx ctrl
 * @param oct octeon device
 * @param status status of request
 * @param buf pointer to the soft command
1016  */
1017 static void rx_ctl_callback(struct octeon_device *oct,
1018 			    u32 status, void *buf)
1019 {
1020 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1021 	struct liquidio_rx_ctl_context *ctx;
1022 
1023 	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1024 
1025 	oct = lio_get_device(ctx->octeon_id);
1026 	if (status)
1027 		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
1028 			CVM_CAST64(status));
1029 	WRITE_ONCE(ctx->cond, 1);
1030 
1031 	/* This barrier is required to be sure that the response has been
1032 	 * written fully before waking up the handler
1033 	 */
1034 	wmb();
1035 
1036 	wake_up_interruptible(&ctx->wc);
1037 }
1038 
1039 /**
1040  * \brief Send Rx control command
1041  * @param lio per-network private data
1042  * @param start_stop whether to start or stop
1043  */
1044 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1045 {
1046 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1047 	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
1048 	struct liquidio_rx_ctl_context *ctx;
1049 	struct octeon_soft_command *sc;
1050 	union octnet_cmd *ncmd;
1051 	int retval;
1052 
1053 	if (oct->props[lio->ifidx].rx_on == start_stop)
1054 		return;
1055 
1056 	sc = (struct octeon_soft_command *)
1057 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1058 					  16, ctx_size);
1059 
1060 	ncmd = (union octnet_cmd *)sc->virtdptr;
1061 	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1062 
1063 	WRITE_ONCE(ctx->cond, 0);
1064 	ctx->octeon_id = lio_get_device_id(oct);
1065 	init_waitqueue_head(&ctx->wc);
1066 
1067 	ncmd->u64 = 0;
1068 	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1069 	ncmd->s.param1 = start_stop;
1070 
1071 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1072 
1073 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1074 
1075 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1076 				    OPCODE_NIC_CMD, 0, 0, 0);
1077 
1078 	sc->callback = rx_ctl_callback;
1079 	sc->callback_arg = sc;
1080 	sc->wait_time = 5000;
1081 
1082 	retval = octeon_send_soft_command(oct, sc);
1083 	if (retval == IQ_SEND_FAILED) {
1084 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1085 	} else {
1086 		/* Sleep on a wait queue till the cond flag indicates that the
1087 		 * response arrived or timed-out.
1088 		 */
1089 		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
1090 			return;
1091 		oct->props[lio->ifidx].rx_on = start_stop;
1092 	}
1093 
1094 	octeon_free_soft_command(oct, sc);
1095 }
1096 
1097 /**
1098  * \brief Destroy NIC device interface
1099  * @param oct octeon device
1100  * @param ifidx which interface to destroy
1101  *
 * Cleanup associated with each interface of an Octeon device when the NIC
 * module is being unloaded or if initialization fails during load.
1104  */
1105 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1106 {
1107 	struct net_device *netdev = oct->props[ifidx].netdev;
1108 	struct napi_struct *napi, *n;
1109 	struct lio *lio;
1110 
1111 	if (!netdev) {
1112 		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1113 			__func__, ifidx);
1114 		return;
1115 	}
1116 
1117 	lio = GET_LIO(netdev);
1118 
1119 	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1120 
1121 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1122 		liquidio_stop(netdev);
1123 
1124 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1125 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1126 			napi_disable(napi);
1127 
1128 		oct->props[lio->ifidx].napi_enabled = 0;
1129 
1130 		oct->droq[0]->ops.poll_mode = 0;
1131 	}
1132 
1133 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1134 		unregister_netdev(netdev);
1135 
1136 	cleanup_link_status_change_wq(netdev);
1137 
1138 	delete_glists(lio);
1139 
1140 	free_netdev(netdev);
1141 
1142 	oct->props[ifidx].gmxport = -1;
1143 
1144 	oct->props[ifidx].netdev = NULL;
1145 }
1146 
1147 /**
1148  * \brief Stop complete NIC functionality
1149  * @param oct octeon device
1150  */
1151 static int liquidio_stop_nic_module(struct octeon_device *oct)
1152 {
1153 	struct lio *lio;
1154 	int i, j;
1155 
1156 	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1157 	if (!oct->ifcount) {
1158 		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1159 		return 1;
1160 	}
1161 
1162 	spin_lock_bh(&oct->cmd_resp_wqlock);
1163 	oct->cmd_resp_state = OCT_DRV_OFFLINE;
1164 	spin_unlock_bh(&oct->cmd_resp_wqlock);
1165 
1166 	for (i = 0; i < oct->ifcount; i++) {
1167 		lio = GET_LIO(oct->props[i].netdev);
1168 		for (j = 0; j < lio->linfo.num_rxpciq; j++)
1169 			octeon_unregister_droq_ops(oct,
1170 						   lio->linfo.rxpciq[j].s.q_no);
1171 	}
1172 
1173 	for (i = 0; i < oct->ifcount; i++)
1174 		liquidio_destroy_nic_device(oct, i);
1175 
1176 	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1177 	return 0;
1178 }
1179 
1180 /**
1181  * \brief Cleans up resources at unload time
1182  * @param pdev PCI device structure
1183  */
1184 static void liquidio_vf_remove(struct pci_dev *pdev)
1185 {
1186 	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1187 
1188 	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1189 
1190 	if (oct_dev->app_mode == CVM_DRV_NIC_APP)
1191 		liquidio_stop_nic_module(oct_dev);
1192 
1193 	/* Reset the octeon device and cleanup all memory allocated for
1194 	 * the octeon device by driver.
1195 	 */
1196 	octeon_destroy_resources(oct_dev);
1197 
1198 	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1199 
1200 	/* This octeon device has been removed. Update the global
1201 	 * data structure to reflect this. Free the device structure.
1202 	 */
1203 	octeon_free_device_mem(oct_dev);
1204 }
1205 
1206 /**
1207  * \brief PCI initialization for each Octeon device.
1208  * @param oct octeon device
1209  */
1210 static int octeon_pci_os_setup(struct octeon_device *oct)
1211 {
1212 #ifdef CONFIG_PCI_IOV
1213 	/* setup PCI stuff first */
1214 	if (!oct->pci_dev->physfn)
1215 		octeon_pci_flr(oct);
1216 #endif
1217 
1218 	if (pci_enable_device(oct->pci_dev)) {
1219 		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1220 		return 1;
1221 	}
1222 
1223 	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1224 		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1225 		pci_disable_device(oct->pci_dev);
1226 		return 1;
1227 	}
1228 
1229 	/* Enable PCI DMA Master. */
1230 	pci_set_master(oct->pci_dev);
1231 
1232 	return 0;
1233 }
1234 
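/**
 * \brief Select the Tx queue (IQ index) for a given network buffer
 * @param lio per-network private data
 * @param skb network buffer
 */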
1235 static int skb_iq(struct lio *lio, struct sk_buff *skb)
1236 {
1237 	int q = 0;
1238 
1239 	if (netif_is_multiqueue(lio->netdev))
1240 		q = skb->queue_mapping % lio->linfo.num_txpciq;
1241 
1242 	return q;
1243 }
1244 
1245 /**
1246  * \brief Check Tx queue state for a given network buffer
1247  * @param lio per-network private data
1248  * @param skb network buffer
1249  */
1250 static int check_txq_state(struct lio *lio, struct sk_buff *skb)
1251 {
1252 	int q = 0, iq = 0;
1253 
1254 	if (netif_is_multiqueue(lio->netdev)) {
1255 		q = skb->queue_mapping;
1256 		iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
1257 	} else {
1258 		iq = lio->txq;
1259 		q = iq;
1260 	}
1261 
1262 	if (octnet_iq_is_full(lio->oct_dev, iq))
1263 		return 0;
1264 
1265 	if (__netif_subqueue_stopped(lio->netdev, q)) {
1266 		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
1267 		wake_q(lio->netdev, q);
1268 	}
1269 
1270 	return 1;
1271 }
1272 
1273 /**
1274  * \brief Unmap and free network buffer
1275  * @param buf buffer
1276  */
1277 static void free_netbuf(void *buf)
1278 {
1279 	struct octnet_buf_free_info *finfo;
1280 	struct sk_buff *skb;
1281 	struct lio *lio;
1282 
1283 	finfo = (struct octnet_buf_free_info *)buf;
1284 	skb = finfo->skb;
1285 	lio = finfo->lio;
1286 
1287 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1288 			 DMA_TO_DEVICE);
1289 
1290 	check_txq_state(lio, skb);
1291 
1292 	tx_buffer_free(skb);
1293 }
1294 
1295 /**
1296  * \brief Unmap and free gather buffer
1297  * @param buf buffer
1298  */
1299 static void free_netsgbuf(void *buf)
1300 {
1301 	struct octnet_buf_free_info *finfo;
1302 	struct octnic_gather *g;
1303 	struct sk_buff *skb;
1304 	int i, frags, iq;
1305 	struct lio *lio;
1306 
1307 	finfo = (struct octnet_buf_free_info *)buf;
1308 	skb = finfo->skb;
1309 	lio = finfo->lio;
1310 	g = finfo->g;
1311 	frags = skb_shinfo(skb)->nr_frags;
1312 
1313 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1314 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1315 			 DMA_TO_DEVICE);
1316 
1317 	i = 1;
1318 	while (frags--) {
1319 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1320 
1321 		pci_unmap_page((lio->oct_dev)->pci_dev,
1322 			       g->sg[(i >> 2)].ptr[(i & 3)],
1323 			       frag->size, DMA_TO_DEVICE);
1324 		i++;
1325 	}
1326 
1327 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1328 			 finfo->dptr, g->sg_size,
1329 			 DMA_TO_DEVICE);
1330 
1331 	iq = skb_iq(lio, skb);
1332 
1333 	spin_lock(&lio->glist_lock[iq]);
1334 	list_add_tail(&g->list, &lio->glist[iq]);
1335 	spin_unlock(&lio->glist_lock[iq]);
1336 
1337 	check_txq_state(lio, skb); /* mq support: sub-queue state check */
1338 
1339 	tx_buffer_free(skb);
1340 }
1341 
1342 /**
1343  * \brief Unmap and free gather buffer with response
1344  * @param buf buffer
1345  */
1346 static void free_netsgbuf_with_resp(void *buf)
1347 {
1348 	struct octnet_buf_free_info *finfo;
1349 	struct octeon_soft_command *sc;
1350 	struct octnic_gather *g;
1351 	struct sk_buff *skb;
1352 	int i, frags, iq;
1353 	struct lio *lio;
1354 
1355 	sc = (struct octeon_soft_command *)buf;
1356 	skb = (struct sk_buff *)sc->callback_arg;
1357 	finfo = (struct octnet_buf_free_info *)&skb->cb;
1358 
1359 	lio = finfo->lio;
1360 	g = finfo->g;
1361 	frags = skb_shinfo(skb)->nr_frags;
1362 
1363 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1364 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1365 			 DMA_TO_DEVICE);
1366 
1367 	i = 1;
1368 	while (frags--) {
1369 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1370 
1371 		pci_unmap_page((lio->oct_dev)->pci_dev,
1372 			       g->sg[(i >> 2)].ptr[(i & 3)],
1373 			       frag->size, DMA_TO_DEVICE);
1374 		i++;
1375 	}
1376 
1377 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1378 			 finfo->dptr, g->sg_size,
1379 			 DMA_TO_DEVICE);
1380 
1381 	iq = skb_iq(lio, skb);
1382 
1383 	spin_lock(&lio->glist_lock[iq]);
1384 	list_add_tail(&g->list, &lio->glist[iq]);
1385 	spin_unlock(&lio->glist_lock[iq]);
1386 
1387 	/* Don't free the skb yet */
1388 
1389 	check_txq_state(lio, skb);
1390 }
1391 
1392 /**
1393  * \brief Setup output queue
1394  * @param oct octeon device
1395  * @param q_no which queue
1396  * @param num_descs how many descriptors
1397  * @param desc_size size of each descriptor
1398  * @param app_ctx application context
1399  */
1400 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1401 			     int desc_size, void *app_ctx)
1402 {
1403 	int ret_val;
1404 
1405 	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1406 	/* droq creation and local register settings. */
1407 	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
1408 	if (ret_val < 0)
1409 		return ret_val;
1410 
1411 	if (ret_val == 1) {
1412 		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
1413 		return 0;
1414 	}
1415 
1416 	/* Enable the droq queues */
1417 	octeon_set_droq_pkt_op(oct, q_no, 1);
1418 
1419 	/* Send Credit for Octeon Output queues. Credits are always
1420 	 * sent after the output queue is enabled.
1421 	 */
1422 	writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
1423 
1424 	return ret_val;
1425 }
1426 
1427 /**
1428  * \brief Callback for getting interface configuration
 * @param oct octeon device
 * @param status status of request (unused)
 * @param buf pointer to the soft command
1431  */
1432 static void if_cfg_callback(struct octeon_device *oct,
1433 			    u32 status __attribute__((unused)), void *buf)
1434 {
1435 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1436 	struct liquidio_if_cfg_context *ctx;
1437 	struct liquidio_if_cfg_resp *resp;
1438 
1439 	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1440 	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1441 
1442 	oct = lio_get_device(ctx->octeon_id);
1443 	if (resp->status)
1444 		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
1445 			CVM_CAST64(resp->status));
1446 	WRITE_ONCE(ctx->cond, 1);
1447 
1448 	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1449 		 resp->cfg_info.liquidio_firmware_version);
1450 
1451 	/* This barrier is required to be sure that the response has been
1452 	 * written fully before waking up the handler
1453 	 */
1454 	wmb();
1455 
1456 	wake_up_interruptible(&ctx->wc);
1457 }
1458 
/** Routine to push packets arriving on the Octeon interface up to the network layer.
1460  * @param oct_id   - octeon device id.
1461  * @param skbuff   - skbuff struct to be passed to network layer.
1462  * @param len      - size of total data received.
1463  * @param rh       - Control header associated with the packet
1464  * @param param    - additional control data with the packet
1465  * @param arg      - farg registered in droq_ops
1466  */
1467 static void
1468 liquidio_push_packet(u32 octeon_id __attribute__((unused)),
1469 		     void *skbuff,
1470 		     u32 len,
1471 		     union octeon_rh *rh,
1472 		     void *param,
1473 		     void *arg)
1474 {
1475 	struct napi_struct *napi = param;
1476 	struct octeon_droq *droq =
1477 		container_of(param, struct octeon_droq, napi);
1478 	struct net_device *netdev = (struct net_device *)arg;
1479 	struct sk_buff *skb = (struct sk_buff *)skbuff;
1480 	u16 vtag = 0;
1481 	u32 r_dh_off;
1482 
1483 	if (netdev) {
1484 		struct lio *lio = GET_LIO(netdev);
1485 		int packet_was_received;
1486 
1487 		/* Do not proceed if the interface is not in RUNNING state. */
1488 		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
1489 			recv_buffer_free(skb);
1490 			droq->stats.rx_dropped++;
1491 			return;
1492 		}
1493 
1494 		skb->dev = netdev;
1495 
1496 		skb_record_rx_queue(skb, droq->q_no);
1497 		if (likely(len > MIN_SKB_SIZE)) {
1498 			struct octeon_skb_page_info *pg_info;
1499 			unsigned char *va;
1500 
1501 			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
1502 			if (pg_info->page) {
1503 				/* For Paged allocation use the frags */
1504 				va = page_address(pg_info->page) +
1505 					pg_info->page_offset;
1506 				memcpy(skb->data, va, MIN_SKB_SIZE);
1507 				skb_put(skb, MIN_SKB_SIZE);
1508 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1509 						pg_info->page,
1510 						pg_info->page_offset +
1511 						MIN_SKB_SIZE,
1512 						len - MIN_SKB_SIZE,
1513 						LIO_RXBUFFER_SZ);
1514 			}
1515 		} else {
1516 			struct octeon_skb_page_info *pg_info =
1517 				((struct octeon_skb_page_info *)(skb->cb));
1518 			skb_copy_to_linear_data(skb,
1519 						page_address(pg_info->page) +
1520 						pg_info->page_offset, len);
1521 			skb_put(skb, len);
1522 			put_page(pg_info->page);
1523 		}
1524 
1525 		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
1526 
1527 		if (rh->r_dh.has_hwtstamp)
1528 			r_dh_off -= BYTES_PER_DHLEN_UNIT;
1529 
1530 		if (rh->r_dh.has_hash) {
1531 			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
1532 			u32 hash = be32_to_cpu(*hash_be);
1533 
1534 			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
1535 			r_dh_off -= BYTES_PER_DHLEN_UNIT;
1536 		}
1537 
1538 		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
1539 		skb->protocol = eth_type_trans(skb, skb->dev);
1540 
1541 		if ((netdev->features & NETIF_F_RXCSUM) &&
1542 		    (((rh->r_dh.encap_on) &&
1543 		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
1544 		     (!(rh->r_dh.encap_on) &&
1545 		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
1546 			/* checksum has already been verified */
1547 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1548 		else
1549 			skb->ip_summed = CHECKSUM_NONE;
1550 
		/* Set the encapsulation field on the basis of the status received
1552 		 * from the firmware
1553 		 */
1554 		if (rh->r_dh.encap_on) {
1555 			skb->encapsulation = 1;
1556 			skb->csum_level = 1;
1557 			droq->stats.rx_vxlan++;
1558 		}
1559 
1560 		/* inbound VLAN tag */
1561 		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1562 		    rh->r_dh.vlan) {
1563 			u16 priority = rh->r_dh.priority;
1564 			u16 vid = rh->r_dh.vlan;
1565 
1566 			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
1567 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
1568 		}
1569 
1570 		packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);
1571 
1572 		if (packet_was_received) {
1573 			droq->stats.rx_bytes_received += len;
1574 			droq->stats.rx_pkts_received++;
1575 		} else {
1576 			droq->stats.rx_dropped++;
1577 			netif_info(lio, rx_err, lio->netdev,
1578 				   "droq:%d  error rx_dropped:%llu\n",
1579 				   droq->q_no, droq->stats.rx_dropped);
1580 		}
1581 
1582 	} else {
1583 		recv_buffer_free(skb);
1584 	}
1585 }
1586 
1587 /**
1588  * \brief callback when receive interrupt occurs and we are in NAPI mode
1589  * @param arg pointer to octeon output queue
1590  */
1591 static void liquidio_vf_napi_drv_callback(void *arg)
1592 {
1593 	struct octeon_droq *droq = arg;
1594 
1595 	napi_schedule_irqoff(&droq->napi);
1596 }
1597 
1598 /**
1599  * \brief Entry point for NAPI polling
1600  * @param napi NAPI structure
1601  * @param budget maximum number of items to process
1602  */
1603 static int liquidio_napi_poll(struct napi_struct *napi, int budget)
1604 {
1605 	struct octeon_instr_queue *iq;
1606 	struct octeon_device *oct;
1607 	struct octeon_droq *droq;
1608 	int tx_done = 0, iq_no;
1609 	int work_done;
1610 
1611 	droq = container_of(napi, struct octeon_droq, napi);
1612 	oct = droq->oct_dev;
1613 	iq_no = droq->q_no;
1614 
1615 	/* Handle Droq descriptors */
1616 	work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
1617 						 POLL_EVENT_PROCESS_PKTS,
1618 						 budget);
1619 
1620 	/* Flush the instruction queue */
1621 	iq = oct->instr_queue[iq_no];
1622 	if (iq) {
		/* Process iq buffers within the budget limit */
1624 		tx_done = octeon_flush_iq(oct, iq, budget);
		/* Update the iq read index rather than waiting for the next
		 * interrupt; tx_done decides the poll return value below.
1627 		 */
1628 		update_txq_status(oct, iq_no);
1629 	} else {
1630 		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
1631 			__func__, iq_no);
1632 	}
1633 
1634 	/* force enable interrupt if reg cnts are high to avoid wraparound */
1635 	if ((work_done < budget && tx_done) ||
1636 	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
1637 	    (droq->pkt_count >= MAX_REG_CNT)) {
1638 		tx_done = 1;
1639 		napi_complete_done(napi, work_done);
1640 		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
1641 					     POLL_EVENT_ENABLE_INTR, 0);
1642 		return 0;
1643 	}
1644 
1645 	return (!tx_done) ? (budget) : (work_done);
1646 }
1647 
1648 /**
1649  * \brief Setup input and output queues
1650  * @param octeon_dev octeon device
1651  * @param ifidx Interface index
1652  *
1653  * Note: Queues are with respect to the octeon device. Thus
1654  * an input queue is for egress packets, and output queues
1655  * are for ingress packets.
1656  */
1657 static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
1658 {
1659 	struct octeon_droq_ops droq_ops;
1660 	struct net_device *netdev;
1661 	static int cpu_id_modulus;
1662 	struct octeon_droq *droq;
1663 	struct napi_struct *napi;
1664 	static int cpu_id;
1665 	int num_tx_descs;
1666 	struct lio *lio;
1667 	int retval = 0;
1668 	int q, q_no;
1669 
1670 	netdev = octeon_dev->props[ifidx].netdev;
1671 
1672 	lio = GET_LIO(netdev);
1673 
1674 	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
1675 
1676 	droq_ops.fptr = liquidio_push_packet;
1677 	droq_ops.farg = netdev;
1678 
1679 	droq_ops.poll_mode = 1;
1680 	droq_ops.napi_fn = liquidio_vf_napi_drv_callback;
1681 	cpu_id = 0;
1682 	cpu_id_modulus = num_present_cpus();
1683 
1684 	/* set up DROQs. */
1685 	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
1686 		q_no = lio->linfo.rxpciq[q].s.q_no;
1687 
1688 		retval = octeon_setup_droq(
1689 		    octeon_dev, q_no,
1690 		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
1691 						lio->ifidx),
1692 		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
1693 						   lio->ifidx),
1694 		    NULL);
1695 		if (retval) {
1696 			dev_err(&octeon_dev->pci_dev->dev,
1697 				"%s : Runtime DROQ(RxQ) creation failed.\n",
1698 				__func__);
1699 			return 1;
1700 		}
1701 
1702 		droq = octeon_dev->droq[q_no];
1703 		napi = &droq->napi;
1704 		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
1705 
1706 		/* designate a CPU for this droq */
1707 		droq->cpu_id = cpu_id;
1708 		cpu_id++;
1709 		if (cpu_id >= cpu_id_modulus)
1710 			cpu_id = 0;
1711 
1712 		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
1713 	}
1714 
1715 	/* 23XX VF can send/recv control messages (via the first VF-owned
1716 	 * droq) from the firmware even if the ethX interface is down,
	 * which is why poll_mode must be off for the first droq.
1718 	 */
1719 	octeon_dev->droq[0]->ops.poll_mode = 0;
1720 
1721 	/* set up IQs. */
1722 	for (q = 0; q < lio->linfo.num_txpciq; q++) {
1723 		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
1724 		    octeon_get_conf(octeon_dev), lio->ifidx);
1725 		retval = octeon_setup_iq(octeon_dev, ifidx, q,
1726 					 lio->linfo.txpciq[q], num_tx_descs,
1727 					 netdev_get_tx_queue(netdev, q));
1728 		if (retval) {
1729 			dev_err(&octeon_dev->pci_dev->dev,
1730 				" %s : Runtime IQ(TxQ) creation failed.\n",
1731 				__func__);
1732 			return 1;
1733 		}
1734 	}
1735 
1736 	return 0;
1737 }
1738 
1739 /**
1740  * \brief Net device open for LiquidIO
1741  * @param netdev network device
1742  */
1743 static int liquidio_open(struct net_device *netdev)
1744 {
1745 	struct lio *lio = GET_LIO(netdev);
1746 	struct octeon_device *oct = lio->oct_dev;
1747 	struct napi_struct *napi, *n;
1748 
1749 	if (!oct->props[lio->ifidx].napi_enabled) {
1750 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1751 			napi_enable(napi);
1752 
1753 		oct->props[lio->ifidx].napi_enabled = 1;
1754 
1755 		oct->droq[0]->ops.poll_mode = 1;
1756 	}
1757 
1758 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
1759 
1760 	/* Ready for link status updates */
1761 	lio->intf_open = 1;
1762 
1763 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1764 	start_txq(netdev);
1765 
1766 	/* tell Octeon to start forwarding packets to host */
1767 	send_rx_ctrl_cmd(lio, 1);
1768 
1769 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
1770 
1771 	return 0;
1772 }
1773 
1774 /**
1775  * \brief Net device stop for LiquidIO
1776  * @param netdev network device
1777  */
1778 static int liquidio_stop(struct net_device *netdev)
1779 {
1780 	struct lio *lio = GET_LIO(netdev);
1781 	struct octeon_device *oct = lio->oct_dev;
1782 
1783 	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
1784 	/* Inform that netif carrier is down */
1785 	lio->intf_open = 0;
1786 	lio->linfo.link.s.link_up = 0;
1787 
1788 	netif_carrier_off(netdev);
1789 	lio->link_changes++;
1790 
1791 	/* tell Octeon to stop forwarding packets to host */
1792 	send_rx_ctrl_cmd(lio, 0);
1793 
1794 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1795 
1796 	txqs_stop(netdev);
1797 
1798 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1799 
1800 	return 0;
1801 }
1802 
1803 /**
1804  * \brief Converts a mask based on net device flags
1805  * @param netdev network device
1806  *
 * This routine generates an octnet_ifflags mask from the net device flags
1808  * received from the OS.
1809  */
1810 static enum octnet_ifflags get_new_flags(struct net_device *netdev)
1811 {
1812 	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1813 
1814 	if (netdev->flags & IFF_PROMISC)
1815 		f |= OCTNET_IFFLAG_PROMISC;
1816 
1817 	if (netdev->flags & IFF_ALLMULTI)
1818 		f |= OCTNET_IFFLAG_ALLMULTI;
1819 
1820 	if (netdev->flags & IFF_MULTICAST) {
1821 		f |= OCTNET_IFFLAG_MULTICAST;
1822 
1823 		/* Accept all multicast addresses if there are more than we
1824 		 * can handle
1825 		 */
1826 		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1827 			f |= OCTNET_IFFLAG_ALLMULTI;
1828 	}
1829 
1830 	if (netdev->flags & IFF_BROADCAST)
1831 		f |= OCTNET_IFFLAG_BROADCAST;
1832 
1833 	return f;
1834 }
1835 
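/**
 * \brief Push the netdev's unicast address list to the firmware
 * @param netdev network device
 */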
1836 static void liquidio_set_uc_list(struct net_device *netdev)
1837 {
1838 	struct lio *lio = GET_LIO(netdev);
1839 	struct octeon_device *oct = lio->oct_dev;
1840 	struct octnic_ctrl_pkt nctrl;
1841 	struct netdev_hw_addr *ha;
1842 	u64 *mac;
1843 
1844 	if (lio->netdev_uc_count == netdev_uc_count(netdev))
1845 		return;
1846 
1847 	if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
1848 		dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
1849 		return;
1850 	}
1851 
1852 	lio->netdev_uc_count = netdev_uc_count(netdev);
1853 
1854 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1855 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
1856 	nctrl.ncmd.s.more = lio->netdev_uc_count;
1857 	nctrl.ncmd.s.param1 = oct->vf_num;
1858 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1859 	nctrl.netpndev = (u64)netdev;
1860 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1861 
1862 	/* copy all the addresses into the udd */
1863 	mac = &nctrl.udd[0];
1864 	netdev_for_each_uc_addr(ha, netdev) {
1865 		ether_addr_copy(((u8 *)mac) + 2, ha->addr);
1866 		mac++;
1867 	}
1868 
1869 	octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1870 }
1871 
1872 /**
1873  * \brief Net device set_multicast_list
1874  * @param netdev network device
1875  */
1876 static void liquidio_set_mcast_list(struct net_device *netdev)
1877 {
1878 	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1879 	struct lio *lio = GET_LIO(netdev);
1880 	struct octeon_device *oct = lio->oct_dev;
1881 	struct octnic_ctrl_pkt nctrl;
1882 	struct netdev_hw_addr *ha;
1883 	u64 *mc;
1884 	int ret;
1885 
1886 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1887 
1888 	/* Create a ctrl pkt command to be sent to core app. */
1889 	nctrl.ncmd.u64 = 0;
1890 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1891 	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1892 	nctrl.ncmd.s.param2 = mc_count;
1893 	nctrl.ncmd.s.more = mc_count;
1894 	nctrl.netpndev = (u64)netdev;
1895 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1896 
1897 	/* copy all the addresses into the udd */
1898 	mc = &nctrl.udd[0];
1899 	netdev_for_each_mc_addr(ha, netdev) {
1900 		*mc = 0;
1901 		ether_addr_copy(((u8 *)mc) + 2, ha->addr);
1902 		/* no need to swap bytes */
1903 		if (++mc > &nctrl.udd[mc_count])
1904 			break;
1905 	}
1906 
1907 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1908 
	/* This can be called from the kernel in atomic context, so we do
	 * not wait for a response.
1911 	 */
1912 	nctrl.wait_time = 0;
1913 
1914 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1915 	if (ret < 0) {
1916 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1917 			ret);
1918 	}
1919 
1920 	liquidio_set_uc_list(netdev);
1921 }
1922 
1923 /**
1924  * \brief Net device set_mac_address
1925  * @param netdev network device
1926  */
1927 static int liquidio_set_mac(struct net_device *netdev, void *p)
1928 {
1929 	struct sockaddr *addr = (struct sockaddr *)p;
1930 	struct lio *lio = GET_LIO(netdev);
1931 	struct octeon_device *oct = lio->oct_dev;
1932 	struct octnic_ctrl_pkt nctrl;
1933 	int ret = 0;
1934 
1935 	if (!is_valid_ether_addr(addr->sa_data))
1936 		return -EADDRNOTAVAIL;
1937 
1938 	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
1939 		return 0;
1940 
1941 	if (lio->linfo.macaddr_is_admin_asgnd)
1942 		return -EPERM;
1943 
1944 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1945 
1946 	nctrl.ncmd.u64 = 0;
1947 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
1948 	nctrl.ncmd.s.param1 = 0;
1949 	nctrl.ncmd.s.more = 1;
1950 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1951 	nctrl.netpndev = (u64)netdev;
1952 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1953 	nctrl.wait_time = 100;
1954 
1955 	nctrl.udd[0] = 0;
1956 	/* The MAC Address is presented in network byte order. */
1957 	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);
1958 
1959 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1960 	if (ret < 0) {
1961 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
1962 		return -ENOMEM;
1963 	}
1964 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1965 	ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
1966 
1967 	return 0;
1968 }
1969 
1970 /**
1971  * \brief Net device get_stats
1972  * @param netdev network device
1973  */
1974 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
1975 {
1976 	struct lio *lio = GET_LIO(netdev);
1977 	struct net_device_stats *stats = &netdev->stats;
1978 	u64 pkts = 0, drop = 0, bytes = 0;
1979 	struct oct_droq_stats *oq_stats;
1980 	struct oct_iq_stats *iq_stats;
1981 	struct octeon_device *oct;
1982 	int i, iq_no, oq_no;
1983 
1984 	oct = lio->oct_dev;
1985 
1986 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
1987 		iq_no = lio->linfo.txpciq[i].s.q_no;
1988 		iq_stats = &oct->instr_queue[iq_no]->stats;
1989 		pkts += iq_stats->tx_done;
1990 		drop += iq_stats->tx_dropped;
1991 		bytes += iq_stats->tx_tot_bytes;
1992 	}
1993 
1994 	stats->tx_packets = pkts;
1995 	stats->tx_bytes = bytes;
1996 	stats->tx_dropped = drop;
1997 
1998 	pkts = 0;
1999 	drop = 0;
2000 	bytes = 0;
2001 
2002 	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
2003 		oq_no = lio->linfo.rxpciq[i].s.q_no;
2004 		oq_stats = &oct->droq[oq_no]->stats;
2005 		pkts += oq_stats->rx_pkts_received;
2006 		drop += (oq_stats->rx_dropped +
2007 			 oq_stats->dropped_nodispatch +
2008 			 oq_stats->dropped_toomany +
2009 			 oq_stats->dropped_nomem);
2010 		bytes += oq_stats->rx_bytes_received;
2011 	}
2012 
2013 	stats->rx_bytes = bytes;
2014 	stats->rx_packets = pkts;
2015 	stats->rx_dropped = drop;
2016 
2017 	return stats;
2018 }
2019 
2020 /**
2021  * \brief Net device change_mtu
2022  * @param netdev network device
2023  */
2024 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2025 {
2026 	struct lio *lio = GET_LIO(netdev);
2027 	struct octeon_device *oct = lio->oct_dev;
2028 
2029 	lio->mtu = new_mtu;
2030 
2031 	netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
2032 		   netdev->mtu, new_mtu);
2033 	dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
2034 		 netdev->name, netdev->mtu, new_mtu);
2035 
2036 	netdev->mtu = new_mtu;
2037 
2038 	return 0;
2039 }
2040 
2041 /**
2042  * \brief Handler for SIOCSHWTSTAMP ioctl
2043  * @param netdev network device
2044  * @param ifr interface request
2046  */
2047 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2048 {
2049 	struct lio *lio = GET_LIO(netdev);
2050 	struct hwtstamp_config conf;
2051 
2052 	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2053 		return -EFAULT;
2054 
2055 	if (conf.flags)
2056 		return -EINVAL;
2057 
2058 	switch (conf.tx_type) {
2059 	case HWTSTAMP_TX_ON:
2060 	case HWTSTAMP_TX_OFF:
2061 		break;
2062 	default:
2063 		return -ERANGE;
2064 	}
2065 
2066 	switch (conf.rx_filter) {
2067 	case HWTSTAMP_FILTER_NONE:
2068 		break;
2069 	case HWTSTAMP_FILTER_ALL:
2070 	case HWTSTAMP_FILTER_SOME:
2071 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2072 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2073 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2074 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2075 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2076 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2077 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2078 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2079 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2080 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2081 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2082 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2083 		conf.rx_filter = HWTSTAMP_FILTER_ALL;
2084 		break;
2085 	default:
2086 		return -ERANGE;
2087 	}
2088 
	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2094 
2095 	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2096 }
2097 
2098 /**
2099  * \brief ioctl handler
2100  * @param netdev network device
2101  * @param ifr interface request
2102  * @param cmd command
2103  */
2104 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2105 {
2106 	switch (cmd) {
2107 	case SIOCSHWTSTAMP:
2108 		return hwtstamp_ioctl(netdev, ifr);
2109 	default:
2110 		return -EOPNOTSUPP;
2111 	}
2112 }
2113 
2114 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
2115 {
2116 	struct sk_buff *skb = (struct sk_buff *)buf;
2117 	struct octnet_buf_free_info *finfo;
2118 	struct oct_timestamp_resp *resp;
2119 	struct octeon_soft_command *sc;
2120 	struct lio *lio;
2121 
2122 	finfo = (struct octnet_buf_free_info *)skb->cb;
2123 	lio = finfo->lio;
2124 	sc = finfo->sc;
2125 	oct = lio->oct_dev;
2126 	resp = (struct oct_timestamp_resp *)sc->virtrptr;
2127 
2128 	if (status != OCTEON_REQUEST_DONE) {
2129 		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2130 			CVM_CAST64(status));
2131 		resp->timestamp = 0;
2132 	}
2133 
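	/* Convert the 8-byte timestamp from device byte order to host order */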
2134 	octeon_swap_8B_data(&resp->timestamp, 1);
2135 
2136 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2137 		struct skb_shared_hwtstamps ts;
2138 		u64 ns = resp->timestamp;
2139 
2140 		netif_info(lio, tx_done, lio->netdev,
2141 			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2142 			   skb, (unsigned long long)ns);
2143 		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2144 		skb_tstamp_tx(skb, &ts);
2145 	}
2146 
2147 	octeon_free_soft_command(oct, sc);
2148 	tx_buffer_free(skb);
2149 }
2150 
/** \brief Send a data packet that will be timestamped
2152  * @param oct octeon device
2153  * @param ndata pointer to network data
2154  * @param finfo pointer to private network data
2155  */
2156 static int send_nic_timestamp_pkt(struct octeon_device *oct,
2157 				  struct octnic_data_pkt *ndata,
2158 				  struct octnet_buf_free_info *finfo)
2159 {
2160 	struct octeon_soft_command *sc;
2161 	int ring_doorbell;
2162 	struct lio *lio;
2163 	int retval;
2164 	u32 len;
2165 
2166 	lio = finfo->lio;
2167 
2168 	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2169 					    sizeof(struct oct_timestamp_resp));
2170 	finfo->sc = sc;
2171 
2172 	if (!sc) {
2173 		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2174 		return IQ_SEND_FAILED;
2175 	}
2176 
2177 	if (ndata->reqtype == REQTYPE_NORESP_NET)
2178 		ndata->reqtype = REQTYPE_RESP_NET;
2179 	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2180 		ndata->reqtype = REQTYPE_RESP_NET_SG;
2181 
2182 	sc->callback = handle_timestamp;
2183 	sc->callback_arg = finfo->skb;
2184 	sc->iq_no = ndata->q_no;
2185 
2186 	len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
2187 
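	/* Ring the doorbell so the device picks up this command immediately */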
2188 	ring_doorbell = 1;
2189 
2190 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2191 				     sc, len, ndata->reqtype);
2192 
2193 	if (retval == IQ_SEND_FAILED) {
2194 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2195 			retval);
2196 		octeon_free_soft_command(oct, sc);
2197 	} else {
2198 		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2199 	}
2200 
2201 	return retval;
2202 }
2203 
/** \brief Transmit network packets to the Octeon interface
 * @param skb      skbuff struct to be transmitted
2206  * @param netdev   pointer to network device
2207  * @returns whether the packet was transmitted to the device okay or not
2208  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2209  */
2210 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2211 {
2212 	struct octnet_buf_free_info *finfo;
2213 	union octnic_cmd_setup cmdsetup;
2214 	struct octnic_data_pkt ndata;
2215 	struct octeon_instr_irh *irh;
2216 	struct oct_iq_stats *stats;
2217 	struct octeon_device *oct;
2218 	int q_idx = 0, iq_no = 0;
2219 	union tx_info *tx_info;
2220 	struct lio *lio;
2221 	int status = 0;
2222 	u64 dptr = 0;
2223 	u32 tag = 0;
2224 	int j;
2225 
2226 	lio = GET_LIO(netdev);
2227 	oct = lio->oct_dev;
2228 
2229 	if (netif_is_multiqueue(netdev)) {
2230 		q_idx = skb->queue_mapping;
2231 		q_idx = (q_idx % (lio->linfo.num_txpciq));
2232 		tag = q_idx;
2233 		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2234 	} else {
2235 		iq_no = lio->txq;
2236 	}
2237 
2238 	stats = &oct->instr_queue[iq_no]->stats;
2239 
2240 	/* Check for all conditions in which the current packet cannot be
2241 	 * transmitted.
2242 	 */
2243 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2244 	    (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
2245 		netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
2246 			   lio->linfo.link.s.link_up);
2247 		goto lio_xmit_failed;
2248 	}
2249 
2250 	/* Use space in skb->cb to store info used to unmap and
2251 	 * free the buffers.
2252 	 */
2253 	finfo = (struct octnet_buf_free_info *)skb->cb;
2254 	finfo->lio = lio;
2255 	finfo->skb = skb;
2256 	finfo->sc = NULL;
2257 
2258 	/* Prepare the attributes for the data to be passed to OSI. */
2259 	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2260 
2261 	ndata.buf = finfo;
2262 
2263 	ndata.q_no = iq_no;
2264 
2265 	if (netif_is_multiqueue(netdev)) {
2266 		if (octnet_iq_is_full(oct, ndata.q_no)) {
2267 			/* defer sending if queue is full */
2268 			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2269 				   ndata.q_no);
2270 			stats->tx_iq_busy++;
2271 			return NETDEV_TX_BUSY;
2272 		}
2273 	} else {
2274 		if (octnet_iq_is_full(oct, lio->txq)) {
2275 			/* defer sending if queue is full */
2276 			stats->tx_iq_busy++;
2277 			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2278 				   ndata.q_no);
2279 			return NETDEV_TX_BUSY;
2280 		}
2281 	}
2282 
2283 	ndata.datasize = skb->len;
2284 
2285 	cmdsetup.u64 = 0;
2286 	cmdsetup.s.iq_no = iq_no;
2287 
2288 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2289 		if (skb->encapsulation) {
2290 			cmdsetup.s.tnl_csum = 1;
2291 			stats->tx_vxlan++;
2292 		} else {
2293 			cmdsetup.s.transport_csum = 1;
2294 		}
2295 	}
2296 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2297 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2298 		cmdsetup.s.timestamp = 1;
2299 	}
2300 
2301 	if (!skb_shinfo(skb)->nr_frags) {
2302 		cmdsetup.s.u.datasize = skb->len;
2303 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
		/* Map the linear skb data for DMA to the device */
2305 		dptr = dma_map_single(&oct->pci_dev->dev,
2306 				      skb->data,
2307 				      skb->len,
2308 				      DMA_TO_DEVICE);
2309 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2310 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2311 				__func__);
2312 			return NETDEV_TX_BUSY;
2313 		}
2314 
2315 		ndata.cmd.cmd3.dptr = dptr;
2316 		finfo->dptr = dptr;
2317 		ndata.reqtype = REQTYPE_NORESP_NET;
2318 
2319 	} else {
2320 		struct skb_frag_struct *frag;
2321 		struct octnic_gather *g;
2322 		int i, frags;
2323 
2324 		spin_lock(&lio->glist_lock[q_idx]);
2325 		g = (struct octnic_gather *)list_delete_head(
2326 		    &lio->glist[q_idx]);
2327 		spin_unlock(&lio->glist_lock[q_idx]);
2328 
2329 		if (!g) {
2330 			netif_info(lio, tx_err, lio->netdev,
2331 				   "Transmit scatter gather: glist null!\n");
2332 			goto lio_xmit_failed;
2333 		}
2334 
2335 		cmdsetup.s.gather = 1;
2336 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2337 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2338 
2339 		memset(g->sg, 0, g->sg_size);
2340 
2341 		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2342 						 skb->data,
2343 						 (skb->len - skb->data_len),
2344 						 DMA_TO_DEVICE);
2345 		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2346 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2347 				__func__);
2348 			return NETDEV_TX_BUSY;
2349 		}
2350 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2351 
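		/* Gather entries hold four buffer pointers each, so fragment i
		 * lands in entry (i >> 2), slot (i & 3).
		 */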
2352 		frags = skb_shinfo(skb)->nr_frags;
2353 		i = 1;
2354 		while (frags--) {
2355 			frag = &skb_shinfo(skb)->frags[i - 1];
2356 
2357 			g->sg[(i >> 2)].ptr[(i & 3)] =
2358 				dma_map_page(&oct->pci_dev->dev,
2359 					     frag->page.p,
2360 					     frag->page_offset,
2361 					     frag->size,
2362 					     DMA_TO_DEVICE);
2363 			if (dma_mapping_error(&oct->pci_dev->dev,
2364 					      g->sg[i >> 2].ptr[i & 3])) {
2365 				dma_unmap_single(&oct->pci_dev->dev,
2366 						 g->sg[0].ptr[0],
2367 						 skb->len - skb->data_len,
2368 						 DMA_TO_DEVICE);
2369 				for (j = 1; j < i; j++) {
2370 					frag = &skb_shinfo(skb)->frags[j - 1];
2371 					dma_unmap_page(&oct->pci_dev->dev,
2372 						       g->sg[j >> 2].ptr[j & 3],
2373 						       frag->size,
2374 						       DMA_TO_DEVICE);
2375 				}
2376 				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2377 					__func__);
2378 				return NETDEV_TX_BUSY;
2379 			}
2380 
2381 			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2382 			i++;
2383 		}
2384 
2385 		dptr = dma_map_single(&oct->pci_dev->dev,
2386 				      g->sg, g->sg_size,
2387 				      DMA_TO_DEVICE);
2388 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2389 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
2390 				__func__);
2391 			dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
2392 					 skb->len - skb->data_len,
2393 					 DMA_TO_DEVICE);
2394 			for (j = 1; j <= frags; j++) {
2395 				frag = &skb_shinfo(skb)->frags[j - 1];
2396 				dma_unmap_page(&oct->pci_dev->dev,
2397 					       g->sg[j >> 2].ptr[j & 3],
2398 					       frag->size, DMA_TO_DEVICE);
2399 			}
2400 			return NETDEV_TX_BUSY;
2401 		}
2402 
2403 		ndata.cmd.cmd3.dptr = dptr;
2404 		finfo->dptr = dptr;
2405 		finfo->g = g;
2406 
2407 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
2408 	}
2409 
2410 	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2411 	tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2412 
2413 	if (skb_shinfo(skb)->gso_size) {
2414 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2415 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2416 	}
2417 
2418 	/* HW insert VLAN tag */
2419 	if (skb_vlan_tag_present(skb)) {
2420 		irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
2421 		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
2422 	}
2423 
2424 	if (unlikely(cmdsetup.s.timestamp))
2425 		status = send_nic_timestamp_pkt(oct, &ndata, finfo);
2426 	else
2427 		status = octnet_send_nic_data_pkt(oct, &ndata);
2428 	if (status == IQ_SEND_FAILED)
2429 		goto lio_xmit_failed;
2430 
2431 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2432 
2433 	if (status == IQ_SEND_STOP) {
2434 		dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
2435 			iq_no);
2436 		stop_q(lio->netdev, q_idx);
2437 	}
2438 
2439 	netif_trans_update(netdev);
2440 
2441 	if (tx_info->s.gso_segs)
2442 		stats->tx_done += tx_info->s.gso_segs;
2443 	else
2444 		stats->tx_done++;
2445 	stats->tx_tot_bytes += ndata.datasize;
2446 
2447 	return NETDEV_TX_OK;
2448 
2449 lio_xmit_failed:
2450 	stats->tx_dropped++;
2451 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2452 		   iq_no, stats->tx_dropped);
2453 	if (dptr)
2454 		dma_unmap_single(&oct->pci_dev->dev, dptr,
2455 				 ndata.datasize, DMA_TO_DEVICE);
2456 	tx_buffer_free(skb);
2457 	return NETDEV_TX_OK;
2458 }
2459 
2460 /** \brief Network device Tx timeout
2461  * @param netdev    pointer to network device
2462  */
2463 static void liquidio_tx_timeout(struct net_device *netdev)
2464 {
2465 	struct lio *lio;
2466 
2467 	lio = GET_LIO(netdev);
2468 
2469 	netif_info(lio, tx_err, lio->netdev,
2470 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2471 		   netdev->stats.tx_dropped);
2472 	netif_trans_update(netdev);
2473 	txqs_wake(netdev);
2474 }
2475 
2476 static int
2477 liquidio_vlan_rx_add_vid(struct net_device *netdev,
2478 			 __be16 proto __attribute__((unused)), u16 vid)
2479 {
2480 	struct lio *lio = GET_LIO(netdev);
2481 	struct octeon_device *oct = lio->oct_dev;
2482 	struct octnic_ctrl_pkt nctrl;
2483 	int ret = 0;
2484 
2485 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2486 
2487 	nctrl.ncmd.u64 = 0;
2488 	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2489 	nctrl.ncmd.s.param1 = vid;
2490 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2491 	nctrl.wait_time = 100;
2492 	nctrl.netpndev = (u64)netdev;
2493 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2494 
2495 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2496 	if (ret < 0) {
2497 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2498 			ret);
2499 	}
2500 
2501 	return ret;
2502 }
2503 
2504 static int
2505 liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2506 			  __be16 proto __attribute__((unused)), u16 vid)
2507 {
2508 	struct lio *lio = GET_LIO(netdev);
2509 	struct octeon_device *oct = lio->oct_dev;
2510 	struct octnic_ctrl_pkt nctrl;
2511 	int ret = 0;
2512 
2513 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2514 
2515 	nctrl.ncmd.u64 = 0;
2516 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2517 	nctrl.ncmd.s.param1 = vid;
2518 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2519 	nctrl.wait_time = 100;
2520 	nctrl.netpndev = (u64)netdev;
2521 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2522 
2523 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2524 	if (ret < 0) {
2525 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2526 			ret);
2527 	}
2528 	return ret;
2529 }
2530 
/** Send a command to the firmware to enable or disable RX checksum offload
 * @param netdev                pointer to network device
 * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
 *                              OCTNET_CMD_RXCSUM_DISABLE
2536  * @returns                     SUCCESS or FAILURE
2537  */
2538 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2539 				       u8 rx_cmd)
2540 {
2541 	struct lio *lio = GET_LIO(netdev);
2542 	struct octeon_device *oct = lio->oct_dev;
2543 	struct octnic_ctrl_pkt nctrl;
2544 	int ret = 0;
2545 
	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.u64 = 0;
2547 	nctrl.ncmd.s.cmd = command;
2548 	nctrl.ncmd.s.param1 = rx_cmd;
2549 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2550 	nctrl.wait_time = 100;
2551 	nctrl.netpndev = (u64)netdev;
2552 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2553 
2554 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2555 	if (ret < 0) {
2556 		dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
2557 			ret);
2558 	}
2559 	return ret;
2560 }
2561 
/** Send a command to the firmware to add or delete a VxLAN UDP port
2563  * @param netdev                pointer to network device
2564  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2565  * @param vxlan_port            VxLAN port to be added or deleted
2566  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
2567  *                              OCTNET_CMD_VXLAN_PORT_DEL
2568  * @returns                     SUCCESS or FAILURE
2569  */
2570 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2571 				       u16 vxlan_port, u8 vxlan_cmd_bit)
2572 {
2573 	struct lio *lio = GET_LIO(netdev);
2574 	struct octeon_device *oct = lio->oct_dev;
2575 	struct octnic_ctrl_pkt nctrl;
2576 	int ret = 0;
2577 
	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.u64 = 0;
2579 	nctrl.ncmd.s.cmd = command;
2580 	nctrl.ncmd.s.more = vxlan_cmd_bit;
2581 	nctrl.ncmd.s.param1 = vxlan_port;
2582 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2583 	nctrl.wait_time = 100;
2584 	nctrl.netpndev = (u64)netdev;
2585 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2586 
2587 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2588 	if (ret < 0) {
2589 		dev_err(&oct->pci_dev->dev,
2590 			"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
2591 			ret);
2592 	}
2593 	return ret;
2594 }
2595 
2596 /** \brief Net device fix features
2597  * @param netdev  pointer to network device
2598  * @param request features requested
2599  * @returns updated features list
2600  */
2601 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2602 					       netdev_features_t request)
2603 {
2604 	struct lio *lio = netdev_priv(netdev);
2605 
2606 	if ((request & NETIF_F_RXCSUM) &&
2607 	    !(lio->dev_capability & NETIF_F_RXCSUM))
2608 		request &= ~NETIF_F_RXCSUM;
2609 
2610 	if ((request & NETIF_F_HW_CSUM) &&
2611 	    !(lio->dev_capability & NETIF_F_HW_CSUM))
2612 		request &= ~NETIF_F_HW_CSUM;
2613 
2614 	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2615 		request &= ~NETIF_F_TSO;
2616 
2617 	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2618 		request &= ~NETIF_F_TSO6;
2619 
2620 	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2621 		request &= ~NETIF_F_LRO;
2622 
2623 	/* Disable LRO if RXCSUM is off */
2624 	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2625 	    (lio->dev_capability & NETIF_F_LRO))
2626 		request &= ~NETIF_F_LRO;
2627 
2628 	return request;
2629 }
2630 
2631 /** \brief Net device set features
2632  * @param netdev  pointer to network device
2633  * @param features features to enable/disable
2634  */
2635 static int liquidio_set_features(struct net_device *netdev,
2636 				 netdev_features_t features)
2637 {
2638 	struct lio *lio = netdev_priv(netdev);
2639 
2640 	if (!((netdev->features ^ features) & NETIF_F_LRO))
2641 		return 0;
2642 
2643 	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
2644 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2645 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2646 	else if (!(features & NETIF_F_LRO) &&
2647 		 (lio->dev_capability & NETIF_F_LRO))
2648 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2649 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2650 	if (!(netdev->features & NETIF_F_RXCSUM) &&
2651 	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2652 	    (features & NETIF_F_RXCSUM))
2653 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2654 					    OCTNET_CMD_RXCSUM_ENABLE);
2655 	else if ((netdev->features & NETIF_F_RXCSUM) &&
2656 		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2657 		 !(features & NETIF_F_RXCSUM))
2658 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2659 					    OCTNET_CMD_RXCSUM_DISABLE);
2660 
2661 	return 0;
2662 }
2663 
2664 static void liquidio_add_vxlan_port(struct net_device *netdev,
2665 				    struct udp_tunnel_info *ti)
2666 {
2667 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2668 		return;
2669 
2670 	liquidio_vxlan_port_command(netdev,
2671 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2672 				    htons(ti->port),
2673 				    OCTNET_CMD_VXLAN_PORT_ADD);
2674 }
2675 
2676 static void liquidio_del_vxlan_port(struct net_device *netdev,
2677 				    struct udp_tunnel_info *ti)
2678 {
2679 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2680 		return;
2681 
2682 	liquidio_vxlan_port_command(netdev,
2683 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2684 				    htons(ti->port),
2685 				    OCTNET_CMD_VXLAN_PORT_DEL);
2686 }
2687 
2688 static const struct net_device_ops lionetdevops = {
2689 	.ndo_open		= liquidio_open,
2690 	.ndo_stop		= liquidio_stop,
2691 	.ndo_start_xmit		= liquidio_xmit,
2692 	.ndo_get_stats		= liquidio_get_stats,
2693 	.ndo_set_mac_address	= liquidio_set_mac,
2694 	.ndo_set_rx_mode	= liquidio_set_mcast_list,
2695 	.ndo_tx_timeout		= liquidio_tx_timeout,
2696 	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
2697 	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
2698 	.ndo_change_mtu		= liquidio_change_mtu,
2699 	.ndo_do_ioctl		= liquidio_ioctl,
2700 	.ndo_fix_features	= liquidio_fix_features,
2701 	.ndo_set_features	= liquidio_set_features,
2702 	.ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
2703 	.ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
2704 };
2705 
2706 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
2707 {
2708 	struct octeon_device *oct = (struct octeon_device *)buf;
2709 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
2710 	union oct_link_status *ls;
2711 	int gmxport = 0;
2712 	int i;
2713 
2714 	if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
2715 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
2716 			recv_pkt->buffer_size[0],
2717 			recv_pkt->rh.r_nic_info.gmxport);
2718 		goto nic_info_err;
2719 	}
2720 
2721 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
2722 	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
2723 
2724 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
2725 
2726 	for (i = 0; i < oct->ifcount; i++) {
2727 		if (oct->props[i].gmxport == gmxport) {
2728 			update_link_status(oct->props[i].netdev, ls);
2729 			break;
2730 		}
2731 	}
2732 
2733 nic_info_err:
2734 	for (i = 0; i < recv_pkt->buffer_count; i++)
2735 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
2736 	octeon_free_recv_info(recv_info);
2737 	return 0;
2738 }
2739 
2740 /**
2741  * \brief Setup network interfaces
2742  * @param octeon_dev  octeon device
2743  *
2744  * Called during init time for each device. It assumes the NIC
2745  * is already up and running.  The link information for each
2746  * interface is passed in link_info.
2747  */
2748 static int setup_nic_devices(struct octeon_device *octeon_dev)
2749 {
2750 	int retval, num_iqueues, num_oqueues;
2751 	struct liquidio_if_cfg_context *ctx;
2752 	u32 resp_size, ctx_size, data_size;
2753 	struct liquidio_if_cfg_resp *resp;
2754 	struct octeon_soft_command *sc;
2755 	union oct_nic_if_cfg if_cfg;
2756 	struct octdev_props *props;
2757 	struct net_device *netdev;
2758 	struct lio_version *vdata;
2759 	struct lio *lio = NULL;
2760 	u8 mac[ETH_ALEN], i, j;
2761 	u32 ifidx_or_pfnum;
2762 
2763 	ifidx_or_pfnum = octeon_dev->pf_num;
2764 
2765 	/* This is to handle link status changes */
2766 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
2767 				    lio_nic_info, octeon_dev);
2768 
2769 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
2770 	 * They are handled directly.
2771 	 */
2772 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
2773 					free_netbuf);
2774 
2775 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
2776 					free_netsgbuf);
2777 
2778 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
2779 					free_netsgbuf_with_resp);
2780 
2781 	for (i = 0; i < octeon_dev->ifcount; i++) {
2782 		resp_size = sizeof(struct liquidio_if_cfg_resp);
2783 		ctx_size = sizeof(struct liquidio_if_cfg_context);
2784 		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, ctx_size);
		if (!sc) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Soft command allocation failed\n");
			goto setup_nic_wait_intr;
		}

		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;
2791 
2792 		*((u64 *)vdata) = 0;
2793 		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
2794 		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
2795 		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
2796 
2797 		WRITE_ONCE(ctx->cond, 0);
2798 		ctx->octeon_id = lio_get_device_id(octeon_dev);
2799 		init_waitqueue_head(&ctx->wc);
2800 
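		/* Request as many input and output queues as rings assigned
		 * to this VF.
		 */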
2801 		if_cfg.u64 = 0;
2802 
2803 		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
2804 		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
2805 		if_cfg.s.base_queue = 0;
2806 
2807 		sc->iq_no = 0;
2808 
2809 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
2810 					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
2811 					    0);
2812 
2813 		sc->callback = if_cfg_callback;
2814 		sc->callback_arg = sc;
2815 		sc->wait_time = 5000;
2816 
2817 		retval = octeon_send_soft_command(octeon_dev, sc);
2818 		if (retval == IQ_SEND_FAILED) {
2819 			dev_err(&octeon_dev->pci_dev->dev,
2820 				"iq/oq config failed status: %x\n", retval);
2821 			/* Soft instr is freed by driver in case of failure. */
2822 			goto setup_nic_dev_fail;
2823 		}
2824 
2825 		/* Sleep on a wait queue till the cond flag indicates that the
2826 		 * response arrived or timed-out.
2827 		 */
2828 		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
2829 			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
2830 			goto setup_nic_wait_intr;
2831 		}
2832 
2833 		retval = resp->status;
2834 		if (retval) {
2835 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
2836 			goto setup_nic_dev_fail;
2837 		}
2838 
2839 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2840 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
2841 
2842 		num_iqueues = hweight64(resp->cfg_info.iqmask);
2843 		num_oqueues = hweight64(resp->cfg_info.oqmask);
2844 
2845 		if (!(num_iqueues) || !(num_oqueues)) {
2846 			dev_err(&octeon_dev->pci_dev->dev,
2847 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2848 				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2849 			goto setup_nic_dev_fail;
2850 		}
2851 		dev_dbg(&octeon_dev->pci_dev->dev,
2852 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2853 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2854 			num_iqueues, num_oqueues);
2855 
2856 		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2857 
2858 		if (!netdev) {
2859 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2860 			goto setup_nic_dev_fail;
2861 		}
2862 
2863 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2864 
2865 		/* Associate the routines that will handle different
2866 		 * netdev tasks.
2867 		 */
2868 		netdev->netdev_ops = &lionetdevops;
2869 
2870 		lio = GET_LIO(netdev);
2871 
2872 		memset(lio, 0, sizeof(struct lio));
2873 
2874 		lio->ifidx = ifidx_or_pfnum;
2875 
2876 		props = &octeon_dev->props[i];
2877 		props->gmxport = resp->cfg_info.linfo.gmxport;
2878 		props->netdev = netdev;
2879 
2880 		lio->linfo.num_rxpciq = num_oqueues;
2881 		lio->linfo.num_txpciq = num_iqueues;
2882 
2883 		for (j = 0; j < num_oqueues; j++) {
2884 			lio->linfo.rxpciq[j].u64 =
2885 			    resp->cfg_info.linfo.rxpciq[j].u64;
2886 		}
2887 		for (j = 0; j < num_iqueues; j++) {
2888 			lio->linfo.txpciq[j].u64 =
2889 			    resp->cfg_info.linfo.txpciq[j].u64;
2890 		}
2891 
2892 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2893 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2894 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2895 		lio->linfo.macaddr_is_admin_asgnd =
2896 			resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2897 
2898 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2899 
2900 		lio->dev_capability = NETIF_F_HIGHDMA
2901 				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2902 				      | NETIF_F_SG | NETIF_F_RXCSUM
2903 				      | NETIF_F_TSO | NETIF_F_TSO6
2904 				      | NETIF_F_GRO
2905 				      | NETIF_F_LRO;
2906 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2907 
2908 		/* Copy of transmit encapsulation capabilities:
2909 		 * TSO, TSO6, Checksums for this device
2910 		 */
2911 		lio->enc_dev_capability = NETIF_F_IP_CSUM
2912 					  | NETIF_F_IPV6_CSUM
2913 					  | NETIF_F_GSO_UDP_TUNNEL
2914 					  | NETIF_F_HW_CSUM | NETIF_F_SG
2915 					  | NETIF_F_RXCSUM
2916 					  | NETIF_F_TSO | NETIF_F_TSO6
2917 					  | NETIF_F_LRO;
2918 
2919 		netdev->hw_enc_features =
2920 		    (lio->enc_dev_capability & ~NETIF_F_LRO);
2921 		netdev->vlan_features = lio->dev_capability;
2922 		/* Add any unchangeable hw features */
2923 		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2924 				       NETIF_F_HW_VLAN_CTAG_RX |
2925 				       NETIF_F_HW_VLAN_CTAG_TX;
2926 
2927 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2928 
2929 		netdev->hw_features = lio->dev_capability;
2930 
2931 		/* MTU range: 68 - 16000 */
2932 		netdev->min_mtu = LIO_MIN_MTU_SIZE;
2933 		netdev->max_mtu = LIO_MAX_MTU_SIZE;
2934 
		/* Point to the properties for the octeon device to which this
2936 		 * interface belongs.
2937 		 */
2938 		lio->oct_dev = octeon_dev;
2939 		lio->octprops = props;
2940 		lio->netdev = netdev;
2941 
2942 		dev_dbg(&octeon_dev->pci_dev->dev,
2943 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
2944 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2945 
2946 		/* 64-bit swap required on LE machines */
2947 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
2948 		for (j = 0; j < ETH_ALEN; j++)
2949 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2950 
2951 		/* Copy MAC Address to OS network device structure */
2952 		ether_addr_copy(netdev->dev_addr, mac);
2953 
2954 		if (setup_io_queues(octeon_dev, i)) {
2955 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2956 			goto setup_nic_dev_fail;
2957 		}
2958 
2959 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2960 
2961 		/* For VFs, enable Octeon device interrupts here,
2962 		 * as this is contingent upon IO queue setup
2963 		 */
2964 		octeon_dev->fn_list.enable_interrupt(octeon_dev,
2965 						     OCTEON_ALL_INTR);
2966 
		/* By default all interfaces on a single Octeon use the same
2968 		 * tx and rx queues
2969 		 */
2970 		lio->txq = lio->linfo.txpciq[0].s.q_no;
2971 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2972 
2973 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2974 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2975 
2976 		if (setup_glists(lio, num_iqueues)) {
2977 			dev_err(&octeon_dev->pci_dev->dev,
2978 				"Gather list allocation failed\n");
2979 			goto setup_nic_dev_fail;
2980 		}
2981 
2982 		/* Register ethtool support */
2983 		liquidio_set_ethtool_ops(netdev);
2984 		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2985 			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2986 		else
2987 			octeon_dev->priv_flags = 0x0;
2988 
2989 		if (netdev->features & NETIF_F_LRO)
2990 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2991 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2992 
2993 		if ((debug != -1) && (debug & NETIF_MSG_HW))
2994 			liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE,
2995 					     0);
2996 
2997 		if (setup_link_status_change_wq(netdev))
2998 			goto setup_nic_dev_fail;
2999 
3000 		/* Register the network device with the OS */
3001 		if (register_netdev(netdev)) {
3002 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3003 			goto setup_nic_dev_fail;
3004 		}
3005 
3006 		dev_dbg(&octeon_dev->pci_dev->dev,
3007 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3008 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3009 		netif_carrier_off(netdev);
3010 		lio->link_changes++;
3011 
3012 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3013 
3014 		/* Sending command to firmware to enable Rx checksum offload
3015 		 * by default at the time of setup of Liquidio driver for
3016 		 * this device
3017 		 */
3018 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3019 					    OCTNET_CMD_RXCSUM_ENABLE);
3020 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3021 				     OCTNET_CMD_TXCSUM_ENABLE);
3022 
3023 		dev_dbg(&octeon_dev->pci_dev->dev,
3024 			"NIC ifidx:%d Setup successful\n", i);
3025 
3026 		octeon_free_soft_command(octeon_dev, sc);
3027 	}
3028 
3029 	return 0;
3030 
3031 setup_nic_dev_fail:
3032 
3033 	octeon_free_soft_command(octeon_dev, sc);
3034 
3035 setup_nic_wait_intr:
3036 
3037 	while (i--) {
3038 		dev_err(&octeon_dev->pci_dev->dev,
3039 			"NIC ifidx:%d Setup failed\n", i);
3040 		liquidio_destroy_nic_device(octeon_dev, i);
3041 	}
3042 	return -ENODEV;
3043 }
3044 
3045 /**
3046  * \brief initialize the NIC
3047  * @param oct octeon device
3048  *
3049  * This initialization routine is called once the Octeon device application is
3050  * up and running
3051  */
3052 static int liquidio_init_nic_module(struct octeon_device *oct)
3053 {
3054 	struct oct_intrmod_cfg *intrmod_cfg;
3055 	int num_nic_ports = 1;
3056 	int i, retval = 0;
3057 
3058 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3059 
	/* Only the default iq and oq were initialized earlier. Initialize
	 * the rest as well, and run the port_config command for each port.
3062 	 */
3063 	oct->ifcount = num_nic_ports;
3064 	memset(oct->props, 0,
3065 	       sizeof(struct octdev_props) * num_nic_ports);
3066 
3067 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
3068 		oct->props[i].gmxport = -1;
3069 
3070 	retval = setup_nic_devices(oct);
3071 	if (retval) {
3072 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3073 		goto octnet_init_failure;
3074 	}
3075 
3076 	/* Initialize interrupt moderation params */
	intrmod_cfg = &oct->intrmod;
3078 	intrmod_cfg->rx_enable = 1;
3079 	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
3080 	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
3081 	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
3082 	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
3083 	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
3084 	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
3085 	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
3086 	intrmod_cfg->tx_enable = 1;
3087 	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
3088 	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
3089 	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
3090 	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
3091 	intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
3092 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3093 
3094 	return retval;
3095 
3096 octnet_init_failure:
3097 
3098 	oct->ifcount = 0;
3099 
3100 	return retval;
3101 }
3102 
3103 /**
3104  * \brief Device initialization for each Octeon device that is probed
3105  * @param octeon_dev  octeon device
3106  */
3107 static int octeon_device_init(struct octeon_device *oct)
3108 {
3109 	u32 rev_id;
3110 	int j;
3111 
3112 	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
3113 
3114 	/* Enable access to the octeon device and make its DMA capability
3115 	 * known to the OS.
3116 	 */
3117 	if (octeon_pci_os_setup(oct))
3118 		return 1;
3119 	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
3120 
3121 	oct->chip_id = OCTEON_CN23XX_VF_VID;
3122 	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
3123 	oct->rev_id = rev_id & 0xff;
3124 
3125 	if (cn23xx_setup_octeon_vf_device(oct))
3126 		return 1;
3127 
3128 	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
3129 
3130 	oct->app_mode = CVM_DRV_NIC_APP;
3131 
3132 	/* Initialize the dispatch mechanism used to push packets arriving on
3133 	 * Octeon Output queues.
3134 	 */
3135 	if (octeon_init_dispatch_list(oct))
3136 		return 1;
3137 
3138 	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
3139 
3140 	if (octeon_set_io_queues_off(oct)) {
3141 		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
3142 		return 1;
3143 	}
3144 
3145 	if (oct->fn_list.setup_device_regs(oct)) {
3146 		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
3147 		return 1;
3148 	}
3149 
3150 	/* Initialize soft command buffer pool */
3151 	if (octeon_setup_sc_buffer_pool(oct)) {
3152 		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
3153 		return 1;
3154 	}
3155 	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
3156 
3157 	/* Setup the data structures that manage this Octeon's Input queues. */
3158 	if (octeon_setup_instr_queues(oct)) {
3159 		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
3160 		return 1;
3161 	}
3162 	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
3163 
3164 	/* Initialize lists to manage the requests of different types that
3165 	 * arrive from user & kernel applications for this octeon device.
3166 	 */
3167 	if (octeon_setup_response_list(oct)) {
3168 		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
3169 		return 1;
3170 	}
3171 	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
3172 
3173 	if (octeon_setup_output_queues(oct)) {
3174 		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
3175 		return 1;
3176 	}
3177 	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
3178 
3179 	if (oct->fn_list.setup_mbox(oct)) {
3180 		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
3181 		return 1;
3182 	}
3183 	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
3184 
3185 	if (octeon_allocate_ioq_vector(oct)) {
3186 		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
3187 		return 1;
3188 	}
3189 	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
3190 
3191 	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
3192 		 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
3193 
	/* Set up the interrupt handler and record the INT SUM register address */
3195 	if (octeon_setup_interrupt(oct))
3196 		return 1;
3197 
3198 	if (cn23xx_octeon_pfvf_handshake(oct))
3199 		return 1;
3200 
3201 	/* Enable Octeon device interrupts */
3202 	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
3203 
3204 	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
3205 
3206 	/* Enable the input and output queues for this Octeon device */
3207 	if (oct->fn_list.enable_io_queues(oct)) {
3208 		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
3209 		return 1;
3210 	}
3211 
3212 	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
3213 
3214 	atomic_set(&oct->status, OCT_DEV_HOST_OK);
3215 
3216 	/* Send Credit for Octeon Output queues. Credits are always sent after
3217 	 * the output queue is enabled.
3218 	 */
3219 	for (j = 0; j < oct->num_oqs; j++)
3220 		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
3221 
3222 	/* Packets can start arriving on the output queues from this point. */
3223 
3224 	atomic_set(&oct->status, OCT_DEV_CORE_OK);
3225 
3226 	atomic_set(&oct->status, OCT_DEV_RUNNING);
3227 
3228 	if (liquidio_init_nic_module(oct))
3229 		return 1;
3230 
3231 	return 0;
3232 }
3233 
3234 static int __init liquidio_vf_init(void)
3235 {
3236 	octeon_init_device_list(0);
3237 	return pci_register_driver(&liquidio_vf_pci_driver);
3238 }
3239 
3240 static void __exit liquidio_vf_exit(void)
3241 {
3242 	pci_unregister_driver(&liquidio_vf_pci_driver);
3243 
3244 	pr_info("LiquidIO_VF network module is now unloaded\n");
3245 }
3246 
3247 module_init(liquidio_vf_init);
3248 module_exit(liquidio_vf_exit);
3249