xref: /openbmc/linux/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c (revision 5ef12cb4a3a78ffb331c03a795a15eea4ae35155)
1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <net/vxlan.h>
22 #include "liquidio_common.h"
23 #include "octeon_droq.h"
24 #include "octeon_iq.h"
25 #include "response_manager.h"
26 #include "octeon_device.h"
27 #include "octeon_nic.h"
28 #include "octeon_main.h"
29 #include "octeon_network.h"
30 #include "cn23xx_vf_device.h"
31 
32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
34 MODULE_LICENSE("GPL");
35 MODULE_VERSION(LIQUIDIO_VERSION);
36 
37 static int debug = -1;
38 module_param(debug, int, 0644);
39 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
40 
41 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
42 
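/* Context shared between send_rx_ctrl_cmd() and its completion callback:
 * the callback sets 'cond' and wakes 'wc', on which the sender sleeps via
 * sleep_cond() until the response arrives or the wait times out.
 */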
43 struct liquidio_rx_ctl_context {
44 	int octeon_id;
45 
46 	wait_queue_head_t wc;
47 
48 	int cond;
49 };
50 
51 struct oct_timestamp_resp {
52 	u64 rh;
53 	u64 timestamp;
54 	u64 status;
55 };
56 
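/* Per-packet TSO metadata placed in ossp[0] of the PCI command; the
 * bitfield order is flipped under __BIG_ENDIAN_BITFIELD so the packed
 * 64-bit value has the same layout regardless of host endianness.
 */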
57 union tx_info {
58 	u64 u64;
59 	struct {
60 #ifdef __BIG_ENDIAN_BITFIELD
61 		u16 gso_size;
62 		u16 gso_segs;
63 		u32 reserved;
64 #else
65 		u32 reserved;
66 		u16 gso_segs;
67 		u16 gso_size;
68 #endif
69 	} s;
70 };
71 
72 #define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)
73 
74 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
75 #define OCTNIC_GSO_MAX_SIZE \
76 		(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
77 
78 struct octnic_gather {
79 	/* List manipulation. Next and prev pointers. */
80 	struct list_head list;
81 
82 	/* Size of the gather component at sg in bytes. */
83 	int sg_size;
84 
85 	/* Number of bytes that sg was adjusted to make it 8B-aligned. */
86 	int adjust;
87 
88 	/* Gather component that can accommodate max sized fragment list
89 	 * received from the IP layer.
90 	 */
91 	struct octeon_sg_entry *sg;
92 
93 	dma_addr_t sg_dma_ptr;
94 };
95 
96 static int
97 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
98 static void liquidio_vf_remove(struct pci_dev *pdev);
99 static int octeon_device_init(struct octeon_device *oct);
100 static int liquidio_stop(struct net_device *netdev);
101 
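/* Poll the active output queues, rescheduling the DROQ tasklet while
 * packets are still pending, until the queues drain or the retry budget
 * (MAX_IO_PENDING_PKT_COUNT) is exhausted.
 */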
102 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
103 {
104 	struct octeon_device_priv *oct_priv =
105 	    (struct octeon_device_priv *)oct->priv;
106 	int retry = MAX_IO_PENDING_PKT_COUNT;
107 	int pkt_cnt = 0, pending_pkts;
108 	int i;
109 
110 	do {
111 		pending_pkts = 0;
112 
113 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
114 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
115 				continue;
116 			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
117 		}
118 		if (pkt_cnt > 0) {
119 			pending_pkts += pkt_cnt;
120 			tasklet_schedule(&oct_priv->droq_tasklet);
121 		}
122 		pkt_cnt = 0;
123 		schedule_timeout_uninterruptible(1);
124 
125 	} while (retry-- && pending_pkts);
126 
127 	return pkt_cnt;
128 }
129 
130 /**
131  * \brief Cause device to go quiet so it can be safely removed/reset/etc
132  * @param oct Pointer to Octeon device
133  */
134 static void pcierror_quiesce_device(struct octeon_device *oct)
135 {
136 	int i;
137 
138 	/* Disable the input and output queues now. No more packets will
139 	 * arrive from Octeon, but we should wait for all packet processing
140 	 * to finish.
141 	 */
142 
143 	/* To allow for in-flight requests */
144 	schedule_timeout_uninterruptible(100);
145 
146 	if (wait_for_pending_requests(oct))
147 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
148 
149 	/* Force all requests waiting to be fetched by OCTEON to complete. */
150 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
151 		struct octeon_instr_queue *iq;
152 
153 		if (!(oct->io_qmask.iq & BIT_ULL(i)))
154 			continue;
155 		iq = oct->instr_queue[i];
156 
157 		if (atomic_read(&iq->instr_pending)) {
158 			spin_lock_bh(&iq->lock);
159 			iq->fill_cnt = 0;
160 			iq->octeon_read_index = iq->host_write_index;
161 			iq->stats.instr_processed +=
162 			    atomic_read(&iq->instr_pending);
163 			lio_process_iq_request_list(oct, iq, 0);
164 			spin_unlock_bh(&iq->lock);
165 		}
166 	}
167 
168 	/* Force all pending ordered list requests to time out. */
169 	lio_process_ordered_list(oct, 1);
170 
171 	/* We do not need to wait for output queue packets to be processed. */
172 }
173 
174 /**
175  * \brief Cleanup PCI AER uncorrectable error status
176  * @param dev Pointer to PCI device
177  */
178 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
179 {
180 	u32 status, mask;
181 	int pos = 0x100;
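	/* 0x100 is where the first PCIe extended capability (AER, in the
	 * common case) begins; a more robust lookup would use
	 * pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR).
	 */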
182 
183 	pr_info("%s :\n", __func__);
184 
185 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
186 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
187 	if (dev->error_state == pci_channel_io_normal)
188 		status &= ~mask; /* Clear corresponding nonfatal bits */
189 	else
190 		status &= mask; /* Clear corresponding fatal bits */
191 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
192 }
193 
194 /**
195  * \brief Stop all PCI IO to a given device
 196  * @param oct Pointer to Octeon device
197  */
198 static void stop_pci_io(struct octeon_device *oct)
199 {
200 	struct msix_entry *msix_entries;
201 	int i;
202 
203 	/* No more instructions will be forwarded. */
204 	atomic_set(&oct->status, OCT_DEV_IN_RESET);
205 
206 	for (i = 0; i < oct->ifcount; i++)
207 		netif_device_detach(oct->props[i].netdev);
208 
209 	/* Disable interrupts  */
210 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
211 
212 	pcierror_quiesce_device(oct);
213 	if (oct->msix_on) {
214 		msix_entries = (struct msix_entry *)oct->msix_entries;
215 		for (i = 0; i < oct->num_msix_irqs; i++) {
216 			/* clear the affinity_cpumask */
217 			irq_set_affinity_hint(msix_entries[i].vector,
218 					      NULL);
219 			free_irq(msix_entries[i].vector,
220 				 &oct->ioq_vector[i]);
221 		}
222 		pci_disable_msix(oct->pci_dev);
223 		kfree(oct->msix_entries);
224 		oct->msix_entries = NULL;
225 		octeon_free_ioq_vector(oct);
226 	}
227 	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
228 		lio_get_state_string(&oct->status));
229 
230 	/* making it a common function for all OCTEON models */
231 	cleanup_aer_uncorrect_error_status(oct->pci_dev);
232 
233 	pci_disable_device(oct->pci_dev);
234 }
235 
236 /**
237  * \brief called when PCI error is detected
238  * @param pdev Pointer to PCI device
239  * @param state The current pci connection state
240  *
241  * This function is called after a PCI bus error affecting
242  * this device has been detected.
243  */
244 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
245 						     pci_channel_state_t state)
246 {
247 	struct octeon_device *oct = pci_get_drvdata(pdev);
248 
249 	/* Non-correctable Non-fatal errors */
250 	if (state == pci_channel_io_normal) {
251 		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
252 		cleanup_aer_uncorrect_error_status(oct->pci_dev);
253 		return PCI_ERS_RESULT_CAN_RECOVER;
254 	}
255 
256 	/* Non-correctable Fatal errors */
257 	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
258 	stop_pci_io(oct);
259 
260 	return PCI_ERS_RESULT_DISCONNECT;
261 }
262 
263 /* For PCI-E Advanced Error Recovery (AER) Interface */
264 static const struct pci_error_handlers liquidio_vf_err_handler = {
265 	.error_detected = liquidio_pcie_error_detected,
266 };
267 
268 static const struct pci_device_id liquidio_vf_pci_tbl[] = {
269 	{
270 		PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
271 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
272 	},
273 	{
274 		0, 0, 0, 0, 0, 0, 0
275 	}
276 };
277 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
278 
279 static struct pci_driver liquidio_vf_pci_driver = {
280 	.name		= "LiquidIO_VF",
281 	.id_table	= liquidio_vf_pci_tbl,
282 	.probe		= liquidio_vf_probe,
283 	.remove		= liquidio_vf_remove,
284 	.err_handler	= &liquidio_vf_err_handler,    /* For AER */
285 };
286 
287 /**
 288  * Remove and return the node at the head of the list, or return NULL
 289  * if the list is empty.
290  */
291 static struct list_head *list_delete_head(struct list_head *root)
292 {
293 	struct list_head *node;
294 
295 	if ((root->prev == root) && (root->next == root))
296 		node = NULL;
297 	else
298 		node = root->next;
299 
300 	if (node)
301 		list_del(node);
302 
303 	return node;
304 }
305 
306 /**
307  * \brief Delete gather lists
308  * @param lio per-network private data
309  */
310 static void delete_glists(struct lio *lio)
311 {
312 	struct octnic_gather *g;
313 	int i;
314 
315 	kfree(lio->glist_lock);
316 	lio->glist_lock = NULL;
317 
318 	if (!lio->glist)
319 		return;
320 
321 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
322 		do {
323 			g = (struct octnic_gather *)
324 			    list_delete_head(&lio->glist[i]);
325 			kfree(g);
326 		} while (g);
327 
328 		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
329 		    lio->glists_dma_base && lio->glists_dma_base[i]) {
330 			lio_dma_free(lio->oct_dev,
331 				     lio->glist_entry_size * lio->tx_qsize,
332 				     lio->glists_virt_base[i],
333 				     lio->glists_dma_base[i]);
334 		}
335 	}
336 
337 	kfree(lio->glists_virt_base);
338 	lio->glists_virt_base = NULL;
339 
340 	kfree(lio->glists_dma_base);
341 	lio->glists_dma_base = NULL;
342 
343 	kfree(lio->glist);
344 	lio->glist = NULL;
345 }
346 
347 /**
348  * \brief Setup gather lists
349  * @param lio per-network private data
350  */
351 static int setup_glists(struct lio *lio, int num_iqs)
352 {
353 	struct octnic_gather *g;
354 	int i, j;
355 
356 	lio->glist_lock =
357 	    kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
358 	if (!lio->glist_lock)
359 		return -ENOMEM;
360 
361 	lio->glist =
362 	    kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
363 	if (!lio->glist) {
364 		kfree(lio->glist_lock);
365 		lio->glist_lock = NULL;
366 		return -ENOMEM;
367 	}
368 
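	/* Size one gather component: OCTNIC_MAX_SG fragment pointers packed
	 * four per octeon_sg_entry (hence ROUNDUP4 and >> 2), padded out to
	 * an 8-byte boundary.
	 */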
369 	lio->glist_entry_size =
370 		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
371 
372 	/* allocate memory to store virtual and dma base address of
373 	 * per glist consistent memory
374 	 */
375 	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
376 					GFP_KERNEL);
377 	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
378 				       GFP_KERNEL);
379 
380 	if (!lio->glists_virt_base || !lio->glists_dma_base) {
381 		delete_glists(lio);
382 		return -ENOMEM;
383 	}
384 
385 	for (i = 0; i < num_iqs; i++) {
386 		spin_lock_init(&lio->glist_lock[i]);
387 
388 		INIT_LIST_HEAD(&lio->glist[i]);
389 
390 		lio->glists_virt_base[i] =
391 			lio_dma_alloc(lio->oct_dev,
392 				      lio->glist_entry_size * lio->tx_qsize,
393 				      &lio->glists_dma_base[i]);
394 
395 		if (!lio->glists_virt_base[i]) {
396 			delete_glists(lio);
397 			return -ENOMEM;
398 		}
399 
400 		for (j = 0; j < lio->tx_qsize; j++) {
401 			g = kzalloc(sizeof(*g), GFP_KERNEL);
402 			if (!g)
403 				break;
404 
405 			g->sg = lio->glists_virt_base[i] +
406 				(j * lio->glist_entry_size);
407 
408 			g->sg_dma_ptr = lio->glists_dma_base[i] +
409 					(j * lio->glist_entry_size);
410 
411 			list_add_tail(&g->list, &lio->glist[i]);
412 		}
413 
414 		if (j != lio->tx_qsize) {
415 			delete_glists(lio);
416 			return -ENOMEM;
417 		}
418 	}
419 
420 	return 0;
421 }
422 
423 /**
424  * \brief Print link information
425  * @param netdev network device
426  */
427 static void print_link_info(struct net_device *netdev)
428 {
429 	struct lio *lio = GET_LIO(netdev);
430 
431 	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
432 	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
433 		struct oct_link_info *linfo = &lio->linfo;
434 
435 		if (linfo->link.s.link_up) {
436 			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
437 				   linfo->link.s.speed,
438 				   (linfo->link.s.duplex) ? "Full" : "Half");
439 		} else {
440 			netif_info(lio, link, lio->netdev, "Link Down\n");
441 		}
442 	}
443 }
444 
445 /**
446  * \brief Routine to notify MTU change
447  * @param work work_struct data structure
448  */
449 static void octnet_link_status_change(struct work_struct *work)
450 {
451 	struct cavium_wk *wk = (struct cavium_wk *)work;
452 	struct lio *lio = (struct lio *)wk->ctxptr;
453 
 454 	/* lio->linfo.link.s.mtu always contains the max MTU of the lio
 455 	 * interface. This work is queued only when the new max MTU is less
 456 	 * than the interface's current MTU, so the MTU must be reduced.
 457 	 */
458 	rtnl_lock();
459 	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
460 	rtnl_unlock();
461 }
462 
463 /**
464  * \brief Sets up the mtu status change work
465  * @param netdev network device
466  */
467 static int setup_link_status_change_wq(struct net_device *netdev)
468 {
469 	struct lio *lio = GET_LIO(netdev);
470 	struct octeon_device *oct = lio->oct_dev;
471 
472 	lio->link_status_wq.wq = alloc_workqueue("link-status",
473 						 WQ_MEM_RECLAIM, 0);
474 	if (!lio->link_status_wq.wq) {
475 		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
476 		return -1;
477 	}
478 	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
479 			  octnet_link_status_change);
480 	lio->link_status_wq.wk.ctxptr = lio;
481 
482 	return 0;
483 }
484 
485 static void cleanup_link_status_change_wq(struct net_device *netdev)
486 {
487 	struct lio *lio = GET_LIO(netdev);
488 
489 	if (lio->link_status_wq.wq) {
490 		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
491 		destroy_workqueue(lio->link_status_wq.wq);
492 	}
493 }
494 
495 /**
496  * \brief Update link status
497  * @param netdev network device
498  * @param ls link status structure
499  *
500  * Called on receipt of a link status response from the core application to
501  * update each interface's link status.
502  */
503 static void update_link_status(struct net_device *netdev,
504 			       union oct_link_status *ls)
505 {
506 	struct lio *lio = GET_LIO(netdev);
507 	int current_max_mtu = lio->linfo.link.s.mtu;
508 	struct octeon_device *oct = lio->oct_dev;
509 
510 	if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
511 		lio->linfo.link.u64 = ls->u64;
512 
513 		print_link_info(netdev);
514 		lio->link_changes++;
515 
516 		if (lio->linfo.link.s.link_up) {
517 			netif_carrier_on(netdev);
518 			wake_txqs(netdev);
519 		} else {
520 			netif_carrier_off(netdev);
521 			stop_txqs(netdev);
522 		}
523 
524 		if (lio->linfo.link.s.mtu != current_max_mtu) {
525 			dev_info(&oct->pci_dev->dev,
526 				 "Max MTU Changed from %d to %d\n",
527 				 current_max_mtu, lio->linfo.link.s.mtu);
528 			netdev->max_mtu = lio->linfo.link.s.mtu;
529 		}
530 
531 		if (lio->linfo.link.s.mtu < netdev->mtu) {
532 			dev_warn(&oct->pci_dev->dev,
533 				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
534 				 netdev->mtu, lio->linfo.link.s.mtu);
535 			queue_delayed_work(lio->link_status_wq.wq,
536 					   &lio->link_status_wq.wk.work, 0);
537 		}
538 	}
539 }
540 
541 /**
542  * \brief PCI probe handler
543  * @param pdev PCI device structure
544  * @param ent unused
545  */
546 static int
547 liquidio_vf_probe(struct pci_dev *pdev,
548 		  const struct pci_device_id *ent __attribute__((unused)))
549 {
550 	struct octeon_device *oct_dev = NULL;
551 
552 	oct_dev = octeon_allocate_device(pdev->device,
553 					 sizeof(struct octeon_device_priv));
554 
555 	if (!oct_dev) {
556 		dev_err(&pdev->dev, "Unable to allocate device\n");
557 		return -ENOMEM;
558 	}
559 	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
560 
561 	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
562 		 (u32)pdev->vendor, (u32)pdev->device);
563 
564 	/* Assign octeon_device for this device to the private data area. */
565 	pci_set_drvdata(pdev, oct_dev);
566 
567 	/* set linux specific device pointer */
568 	oct_dev->pci_dev = pdev;
569 
570 	if (octeon_device_init(oct_dev)) {
571 		liquidio_vf_remove(pdev);
572 		return -ENOMEM;
573 	}
574 
575 	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
576 
577 	return 0;
578 }
579 
580 /**
581  * \brief PCI FLR for each Octeon device.
582  * @param oct octeon device
583  */
584 static void octeon_pci_flr(struct octeon_device *oct)
585 {
586 	pci_save_state(oct->pci_dev);
587 
588 	pci_cfg_access_lock(oct->pci_dev);
589 
590 	/* Quiesce the device completely */
591 	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
592 			      PCI_COMMAND_INTX_DISABLE);
593 
594 	pcie_flr(oct->pci_dev);
595 
596 	pci_cfg_access_unlock(oct->pci_dev);
597 
598 	pci_restore_state(oct->pci_dev);
599 }
600 
601 /**
 602  * \brief Destroy resources associated with octeon device
 603  * @param oct octeon device
605  */
606 static void octeon_destroy_resources(struct octeon_device *oct)
607 {
608 	struct msix_entry *msix_entries;
609 	int i;
610 
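	/* Teardown is keyed to how far device init progressed: each case
	 * undoes one init stage and falls through to the next, so a device
	 * is unwound from exactly the state it reached.
	 */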
611 	switch (atomic_read(&oct->status)) {
612 	case OCT_DEV_RUNNING:
613 	case OCT_DEV_CORE_OK:
614 		/* No more instructions will be forwarded. */
615 		atomic_set(&oct->status, OCT_DEV_IN_RESET);
616 
617 		oct->app_mode = CVM_DRV_INVALID_APP;
618 		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
619 			lio_get_state_string(&oct->status));
620 
621 		schedule_timeout_uninterruptible(HZ / 10);
622 
623 		/* fallthrough */
624 	case OCT_DEV_HOST_OK:
625 		/* fallthrough */
626 	case OCT_DEV_IO_QUEUES_DONE:
627 		if (wait_for_pending_requests(oct))
628 			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
629 
630 		if (lio_wait_for_instr_fetch(oct))
631 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
632 
633 		/* Disable the input and output queues now. No more packets will
634 		 * arrive from Octeon, but we should wait for all packet
635 		 * processing to finish.
636 		 */
637 		oct->fn_list.disable_io_queues(oct);
638 
639 		if (lio_wait_for_oq_pkts(oct))
640 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
641 		/* fall through */
642 	case OCT_DEV_INTR_SET_DONE:
643 		/* Disable interrupts  */
644 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
645 
646 		if (oct->msix_on) {
647 			msix_entries = (struct msix_entry *)oct->msix_entries;
648 			for (i = 0; i < oct->num_msix_irqs; i++) {
649 				if (oct->ioq_vector[i].vector) {
650 					irq_set_affinity_hint(
651 							msix_entries[i].vector,
652 							NULL);
653 					free_irq(msix_entries[i].vector,
654 						 &oct->ioq_vector[i]);
655 					oct->ioq_vector[i].vector = 0;
656 				}
657 			}
658 			pci_disable_msix(oct->pci_dev);
659 			kfree(oct->msix_entries);
660 			oct->msix_entries = NULL;
661 			kfree(oct->irq_name_storage);
662 			oct->irq_name_storage = NULL;
663 		}
664 		/* Soft reset the octeon device before exiting */
665 		if (oct->pci_dev->reset_fn)
666 			octeon_pci_flr(oct);
667 		else
668 			cn23xx_vf_ask_pf_to_do_flr(oct);
669 
670 		/* fallthrough */
671 	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
672 		octeon_free_ioq_vector(oct);
673 
674 		/* fallthrough */
675 	case OCT_DEV_MBOX_SETUP_DONE:
676 		oct->fn_list.free_mbox(oct);
677 
678 		/* fallthrough */
679 	case OCT_DEV_IN_RESET:
680 	case OCT_DEV_DROQ_INIT_DONE:
681 		mdelay(100);
682 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
683 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
684 				continue;
685 			octeon_delete_droq(oct, i);
686 		}
687 
688 		/* fallthrough */
689 	case OCT_DEV_RESP_LIST_INIT_DONE:
690 		octeon_delete_response_list(oct);
691 
692 		/* fallthrough */
693 	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
694 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
695 			if (!(oct->io_qmask.iq & BIT_ULL(i)))
696 				continue;
697 			octeon_delete_instr_queue(oct, i);
698 		}
699 
700 		/* fallthrough */
701 	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
702 		octeon_free_sc_buffer_pool(oct);
703 
704 		/* fallthrough */
705 	case OCT_DEV_DISPATCH_INIT_DONE:
706 		octeon_delete_dispatch_list(oct);
707 		cancel_delayed_work_sync(&oct->nic_poll_work.work);
708 
709 		/* fallthrough */
710 	case OCT_DEV_PCI_MAP_DONE:
711 		octeon_unmap_pci_barx(oct, 0);
712 		octeon_unmap_pci_barx(oct, 1);
713 
714 		/* fallthrough */
715 	case OCT_DEV_PCI_ENABLE_DONE:
716 		pci_clear_master(oct->pci_dev);
717 		/* Disable the device, releasing the PCI INT */
718 		pci_disable_device(oct->pci_dev);
719 
720 		/* fallthrough */
721 	case OCT_DEV_BEGIN_STATE:
722 		/* Nothing to be done here either */
723 		break;
724 	}
725 }
726 
727 /**
728  * \brief Callback for rx ctrl
729  * @param status status of request
730  * @param buf pointer to resp structure
731  */
732 static void rx_ctl_callback(struct octeon_device *oct,
733 			    u32 status, void *buf)
734 {
735 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
736 	struct liquidio_rx_ctl_context *ctx;
737 
738 	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
739 
740 	oct = lio_get_device(ctx->octeon_id);
741 	if (status)
742 		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
743 			CVM_CAST64(status));
744 	WRITE_ONCE(ctx->cond, 1);
745 
746 	/* This barrier is required to be sure that the response has been
747 	 * written fully before waking up the handler
748 	 */
749 	wmb();
750 
751 	wake_up_interruptible(&ctx->wc);
752 }
753 
754 /**
755  * \brief Send Rx control command
756  * @param lio per-network private data
757  * @param start_stop whether to start or stop
758  */
759 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
760 {
761 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
762 	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
763 	struct liquidio_rx_ctl_context *ctx;
764 	struct octeon_soft_command *sc;
765 	union octnet_cmd *ncmd;
766 	int retval;
767 
768 	if (oct->props[lio->ifidx].rx_on == start_stop)
769 		return;
770 
771 	sc = (struct octeon_soft_command *)
772 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
773 					  16, ctx_size);
774 
775 	ncmd = (union octnet_cmd *)sc->virtdptr;
776 	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
777 
778 	WRITE_ONCE(ctx->cond, 0);
779 	ctx->octeon_id = lio_get_device_id(oct);
780 	init_waitqueue_head(&ctx->wc);
781 
782 	ncmd->u64 = 0;
783 	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
784 	ncmd->s.param1 = start_stop;
785 
786 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
787 
788 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
789 
790 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
791 				    OPCODE_NIC_CMD, 0, 0, 0);
792 
793 	sc->callback = rx_ctl_callback;
794 	sc->callback_arg = sc;
795 	sc->wait_time = 5000;
796 
797 	retval = octeon_send_soft_command(oct, sc);
798 	if (retval == IQ_SEND_FAILED) {
799 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
800 	} else {
801 		/* Sleep on a wait queue till the cond flag indicates that the
802 		 * response arrived or timed-out.
803 		 */
804 		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
805 			return;
806 		oct->props[lio->ifidx].rx_on = start_stop;
807 	}
808 
809 	octeon_free_soft_command(oct, sc);
810 }
811 
812 /**
813  * \brief Destroy NIC device interface
814  * @param oct octeon device
815  * @param ifidx which interface to destroy
816  *
 817  * Cleanup associated with each interface for an Octeon device when NIC
818  * module is being unloaded or if initialization fails during load.
819  */
820 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
821 {
822 	struct net_device *netdev = oct->props[ifidx].netdev;
823 	struct napi_struct *napi, *n;
824 	struct lio *lio;
825 
826 	if (!netdev) {
827 		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
828 			__func__, ifidx);
829 		return;
830 	}
831 
832 	lio = GET_LIO(netdev);
833 
834 	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
835 
836 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
837 		liquidio_stop(netdev);
838 
839 	if (oct->props[lio->ifidx].napi_enabled == 1) {
840 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
841 			napi_disable(napi);
842 
843 		oct->props[lio->ifidx].napi_enabled = 0;
844 
845 		oct->droq[0]->ops.poll_mode = 0;
846 	}
847 
848 	/* Delete NAPI */
849 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
850 		netif_napi_del(napi);
851 
852 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
853 		unregister_netdev(netdev);
854 
855 	cleanup_rx_oom_poll_fn(netdev);
856 
857 	cleanup_link_status_change_wq(netdev);
858 
859 	delete_glists(lio);
860 
861 	free_netdev(netdev);
862 
863 	oct->props[ifidx].gmxport = -1;
864 
865 	oct->props[ifidx].netdev = NULL;
866 }
867 
868 /**
869  * \brief Stop complete NIC functionality
870  * @param oct octeon device
871  */
872 static int liquidio_stop_nic_module(struct octeon_device *oct)
873 {
874 	struct lio *lio;
875 	int i, j;
876 
877 	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
878 	if (!oct->ifcount) {
879 		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
880 		return 1;
881 	}
882 
883 	spin_lock_bh(&oct->cmd_resp_wqlock);
884 	oct->cmd_resp_state = OCT_DRV_OFFLINE;
885 	spin_unlock_bh(&oct->cmd_resp_wqlock);
886 
887 	for (i = 0; i < oct->ifcount; i++) {
888 		lio = GET_LIO(oct->props[i].netdev);
889 		for (j = 0; j < oct->num_oqs; j++)
890 			octeon_unregister_droq_ops(oct,
891 						   lio->linfo.rxpciq[j].s.q_no);
892 	}
893 
894 	for (i = 0; i < oct->ifcount; i++)
895 		liquidio_destroy_nic_device(oct, i);
896 
897 	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
898 	return 0;
899 }
900 
901 /**
902  * \brief Cleans up resources at unload time
903  * @param pdev PCI device structure
904  */
905 static void liquidio_vf_remove(struct pci_dev *pdev)
906 {
907 	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
908 
909 	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
910 
911 	if (oct_dev->app_mode == CVM_DRV_NIC_APP)
912 		liquidio_stop_nic_module(oct_dev);
913 
914 	/* Reset the octeon device and cleanup all memory allocated for
915 	 * the octeon device by driver.
916 	 */
917 	octeon_destroy_resources(oct_dev);
918 
919 	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
920 
921 	/* This octeon device has been removed. Update the global
922 	 * data structure to reflect this. Free the device structure.
923 	 */
924 	octeon_free_device_mem(oct_dev);
925 }
926 
927 /**
928  * \brief PCI initialization for each Octeon device.
929  * @param oct octeon device
930  */
931 static int octeon_pci_os_setup(struct octeon_device *oct)
932 {
933 #ifdef CONFIG_PCI_IOV
934 	/* setup PCI stuff first */
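	/* No visible PF (e.g. the VF is passed through to a guest): reset
	 * the function locally rather than asking the PF to do it.
	 */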
935 	if (!oct->pci_dev->physfn)
936 		octeon_pci_flr(oct);
937 #endif
938 
939 	if (pci_enable_device(oct->pci_dev)) {
940 		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
941 		return 1;
942 	}
943 
944 	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
945 		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
946 		pci_disable_device(oct->pci_dev);
947 		return 1;
948 	}
949 
950 	/* Enable PCI DMA Master. */
951 	pci_set_master(oct->pci_dev);
952 
953 	return 0;
954 }
955 
956 /**
957  * \brief Unmap and free network buffer
958  * @param buf buffer
959  */
960 static void free_netbuf(void *buf)
961 {
962 	struct octnet_buf_free_info *finfo;
963 	struct sk_buff *skb;
964 	struct lio *lio;
965 
966 	finfo = (struct octnet_buf_free_info *)buf;
967 	skb = finfo->skb;
968 	lio = finfo->lio;
969 
970 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
971 			 DMA_TO_DEVICE);
972 
973 	tx_buffer_free(skb);
974 }
975 
976 /**
977  * \brief Unmap and free gather buffer
978  * @param buf buffer
979  */
980 static void free_netsgbuf(void *buf)
981 {
982 	struct octnet_buf_free_info *finfo;
983 	struct octnic_gather *g;
984 	struct sk_buff *skb;
985 	int i, frags, iq;
986 	struct lio *lio;
987 
988 	finfo = (struct octnet_buf_free_info *)buf;
989 	skb = finfo->skb;
990 	lio = finfo->lio;
991 	g = finfo->g;
992 	frags = skb_shinfo(skb)->nr_frags;
993 
994 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
995 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
996 			 DMA_TO_DEVICE);
997 
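	/* Entry 0, slot 0 held the linear part unmapped above; fragment n
	 * was mapped into sg entry n/4, slot n%4.
	 */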
998 	i = 1;
999 	while (frags--) {
1000 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1001 
1002 		pci_unmap_page((lio->oct_dev)->pci_dev,
1003 			       g->sg[(i >> 2)].ptr[(i & 3)],
1004 			       frag->size, DMA_TO_DEVICE);
1005 		i++;
1006 	}
1007 
1008 	iq = skb_iq(lio, skb);
1009 
1010 	spin_lock(&lio->glist_lock[iq]);
1011 	list_add_tail(&g->list, &lio->glist[iq]);
1012 	spin_unlock(&lio->glist_lock[iq]);
1013 
1014 	tx_buffer_free(skb);
1015 }
1016 
1017 /**
1018  * \brief Unmap and free gather buffer with response
1019  * @param buf buffer
1020  */
1021 static void free_netsgbuf_with_resp(void *buf)
1022 {
1023 	struct octnet_buf_free_info *finfo;
1024 	struct octeon_soft_command *sc;
1025 	struct octnic_gather *g;
1026 	struct sk_buff *skb;
1027 	int i, frags, iq;
1028 	struct lio *lio;
1029 
1030 	sc = (struct octeon_soft_command *)buf;
1031 	skb = (struct sk_buff *)sc->callback_arg;
1032 	finfo = (struct octnet_buf_free_info *)&skb->cb;
1033 
1034 	lio = finfo->lio;
1035 	g = finfo->g;
1036 	frags = skb_shinfo(skb)->nr_frags;
1037 
1038 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1039 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1040 			 DMA_TO_DEVICE);
1041 
1042 	i = 1;
1043 	while (frags--) {
1044 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1045 
1046 		pci_unmap_page((lio->oct_dev)->pci_dev,
1047 			       g->sg[(i >> 2)].ptr[(i & 3)],
1048 			       frag->size, DMA_TO_DEVICE);
1049 		i++;
1050 	}
1051 
1052 	iq = skb_iq(lio, skb);
1053 
1054 	spin_lock(&lio->glist_lock[iq]);
1055 	list_add_tail(&g->list, &lio->glist[iq]);
1056 	spin_unlock(&lio->glist_lock[iq]);
1057 
1058 	/* Don't free the skb yet */
1059 }
1060 
1061 /**
1062  * \brief Callback for getting interface configuration
1063  * @param status status of request
1064  * @param buf pointer to resp structure
1065  */
1066 static void if_cfg_callback(struct octeon_device *oct,
1067 			    u32 status __attribute__((unused)), void *buf)
1068 {
1069 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1070 	struct liquidio_if_cfg_context *ctx;
1071 	struct liquidio_if_cfg_resp *resp;
1072 
1073 	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1074 	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1075 
1076 	oct = lio_get_device(ctx->octeon_id);
1077 	if (resp->status)
1078 		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
1079 			CVM_CAST64(resp->status));
1080 	WRITE_ONCE(ctx->cond, 1);
1081 
1082 	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1083 		 resp->cfg_info.liquidio_firmware_version);
1084 
1085 	/* This barrier is required to be sure that the response has been
1086 	 * written fully before waking up the handler
1087 	 */
1088 	wmb();
1089 
1090 	wake_up_interruptible(&ctx->wc);
1091 }
1092 
1093 /**
1094  * \brief Net device open for LiquidIO
1095  * @param netdev network device
1096  */
1097 static int liquidio_open(struct net_device *netdev)
1098 {
1099 	struct lio *lio = GET_LIO(netdev);
1100 	struct octeon_device *oct = lio->oct_dev;
1101 	struct napi_struct *napi, *n;
1102 
1103 	if (!oct->props[lio->ifidx].napi_enabled) {
1104 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1105 			napi_enable(napi);
1106 
1107 		oct->props[lio->ifidx].napi_enabled = 1;
1108 
1109 		oct->droq[0]->ops.poll_mode = 1;
1110 	}
1111 
1112 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
1113 
1114 	/* Ready for link status updates */
1115 	lio->intf_open = 1;
1116 
1117 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1118 	start_txqs(netdev);
1119 
1120 	/* tell Octeon to start forwarding packets to host */
1121 	send_rx_ctrl_cmd(lio, 1);
1122 
1123 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
1124 
1125 	return 0;
1126 }
1127 
1128 /**
1129  * \brief Net device stop for LiquidIO
1130  * @param netdev network device
1131  */
1132 static int liquidio_stop(struct net_device *netdev)
1133 {
1134 	struct lio *lio = GET_LIO(netdev);
1135 	struct octeon_device *oct = lio->oct_dev;
1136 	struct napi_struct *napi, *n;
1137 
1138 	/* tell Octeon to stop forwarding packets to host */
1139 	send_rx_ctrl_cmd(lio, 0);
1140 
1141 	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
1142 	/* Inform that netif carrier is down */
1143 	lio->intf_open = 0;
1144 	lio->linfo.link.s.link_up = 0;
1145 
1146 	netif_carrier_off(netdev);
1147 	lio->link_changes++;
1148 
1149 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1150 
1151 	stop_txqs(netdev);
1152 
1153 	/* Wait for any pending Rx descriptors */
1154 	if (lio_wait_for_clean_oq(oct))
1155 		netif_info(lio, rx_err, lio->netdev,
1156 			   "Proceeding with stop interface after partial RX desc processing\n");
1157 
1158 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1159 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1160 			napi_disable(napi);
1161 
1162 		oct->props[lio->ifidx].napi_enabled = 0;
1163 
1164 		oct->droq[0]->ops.poll_mode = 0;
1165 	}
1166 
1167 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1168 
1169 	return 0;
1170 }
1171 
1172 /**
1173  * \brief Converts a mask based on net device flags
1174  * @param netdev network device
1175  *
 1176  * This routine generates an octnet_ifflags mask from the net device flags
1177  * received from the OS.
1178  */
1179 static enum octnet_ifflags get_new_flags(struct net_device *netdev)
1180 {
1181 	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1182 
1183 	if (netdev->flags & IFF_PROMISC)
1184 		f |= OCTNET_IFFLAG_PROMISC;
1185 
1186 	if (netdev->flags & IFF_ALLMULTI)
1187 		f |= OCTNET_IFFLAG_ALLMULTI;
1188 
1189 	if (netdev->flags & IFF_MULTICAST) {
1190 		f |= OCTNET_IFFLAG_MULTICAST;
1191 
1192 		/* Accept all multicast addresses if there are more than we
1193 		 * can handle
1194 		 */
1195 		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1196 			f |= OCTNET_IFFLAG_ALLMULTI;
1197 	}
1198 
1199 	if (netdev->flags & IFF_BROADCAST)
1200 		f |= OCTNET_IFFLAG_BROADCAST;
1201 
1202 	return f;
1203 }
1204 
1205 static void liquidio_set_uc_list(struct net_device *netdev)
1206 {
1207 	struct lio *lio = GET_LIO(netdev);
1208 	struct octeon_device *oct = lio->oct_dev;
1209 	struct octnic_ctrl_pkt nctrl;
1210 	struct netdev_hw_addr *ha;
1211 	u64 *mac;
1212 
1213 	if (lio->netdev_uc_count == netdev_uc_count(netdev))
1214 		return;
1215 
1216 	if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
1217 		dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
1218 		return;
1219 	}
1220 
1221 	lio->netdev_uc_count = netdev_uc_count(netdev);
1222 
1223 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1224 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
1225 	nctrl.ncmd.s.more = lio->netdev_uc_count;
1226 	nctrl.ncmd.s.param1 = oct->vf_num;
1227 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1228 	nctrl.netpndev = (u64)netdev;
1229 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1230 
1231 	/* copy all the addresses into the udd */
1232 	mac = &nctrl.udd[0];
1233 	netdev_for_each_uc_addr(ha, netdev) {
1234 		ether_addr_copy(((u8 *)mac) + 2, ha->addr);
1235 		mac++;
1236 	}
1237 
1238 	octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1239 }
1240 
1241 /**
1242  * \brief Net device set_multicast_list
1243  * @param netdev network device
1244  */
1245 static void liquidio_set_mcast_list(struct net_device *netdev)
1246 {
1247 	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1248 	struct lio *lio = GET_LIO(netdev);
1249 	struct octeon_device *oct = lio->oct_dev;
1250 	struct octnic_ctrl_pkt nctrl;
1251 	struct netdev_hw_addr *ha;
1252 	u64 *mc;
1253 	int ret;
1254 
1255 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1256 
1257 	/* Create a ctrl pkt command to be sent to core app. */
1258 	nctrl.ncmd.u64 = 0;
1259 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1260 	nctrl.ncmd.s.param1 = get_new_flags(netdev);
1261 	nctrl.ncmd.s.param2 = mc_count;
1262 	nctrl.ncmd.s.more = mc_count;
1263 	nctrl.netpndev = (u64)netdev;
1264 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1265 
1266 	/* copy all the addresses into the udd */
1267 	mc = &nctrl.udd[0];
1268 	netdev_for_each_mc_addr(ha, netdev) {
1269 		*mc = 0;
1270 		ether_addr_copy(((u8 *)mc) + 2, ha->addr);
1271 		/* no need to swap bytes */
1272 		if (++mc > &nctrl.udd[mc_count])
1273 			break;
1274 	}
1275 
1276 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1277 
1278 	/* Apparently, any activity in this call from the kernel has to
1279 	 * be atomic. So we won't wait for response.
1280 	 */
1281 	nctrl.wait_time = 0;
1282 
1283 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1284 	if (ret < 0) {
1285 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1286 			ret);
1287 	}
1288 
1289 	liquidio_set_uc_list(netdev);
1290 }
1291 
1292 /**
1293  * \brief Net device set_mac_address
1294  * @param netdev network device
1295  */
1296 static int liquidio_set_mac(struct net_device *netdev, void *p)
1297 {
1298 	struct sockaddr *addr = (struct sockaddr *)p;
1299 	struct lio *lio = GET_LIO(netdev);
1300 	struct octeon_device *oct = lio->oct_dev;
1301 	struct octnic_ctrl_pkt nctrl;
1302 	int ret = 0;
1303 
1304 	if (!is_valid_ether_addr(addr->sa_data))
1305 		return -EADDRNOTAVAIL;
1306 
1307 	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
1308 		return 0;
1309 
1310 	if (lio->linfo.macaddr_is_admin_asgnd)
1311 		return -EPERM;
1312 
1313 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1314 
1315 	nctrl.ncmd.u64 = 0;
1316 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
1317 	nctrl.ncmd.s.param1 = 0;
1318 	nctrl.ncmd.s.more = 1;
1319 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1320 	nctrl.netpndev = (u64)netdev;
1321 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1322 	nctrl.wait_time = 100;
1323 
1324 	nctrl.udd[0] = 0;
1325 	/* The MAC Address is presented in network byte order. */
1326 	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);
1327 
1328 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1329 	if (ret < 0) {
1330 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
1331 		return -ENOMEM;
1332 	}
1333 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1334 	ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
1335 
1336 	return 0;
1337 }
1338 
1339 /**
1340  * \brief Net device get_stats
1341  * @param netdev network device
1342  */
1343 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
1344 {
1345 	struct lio *lio = GET_LIO(netdev);
1346 	struct net_device_stats *stats = &netdev->stats;
1347 	u64 pkts = 0, drop = 0, bytes = 0;
1348 	struct oct_droq_stats *oq_stats;
1349 	struct oct_iq_stats *iq_stats;
1350 	struct octeon_device *oct;
1351 	int i, iq_no, oq_no;
1352 
1353 	oct = lio->oct_dev;
1354 
1355 	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1356 		return stats;
1357 
1358 	for (i = 0; i < oct->num_iqs; i++) {
1359 		iq_no = lio->linfo.txpciq[i].s.q_no;
1360 		iq_stats = &oct->instr_queue[iq_no]->stats;
1361 		pkts += iq_stats->tx_done;
1362 		drop += iq_stats->tx_dropped;
1363 		bytes += iq_stats->tx_tot_bytes;
1364 	}
1365 
1366 	stats->tx_packets = pkts;
1367 	stats->tx_bytes = bytes;
1368 	stats->tx_dropped = drop;
1369 
1370 	pkts = 0;
1371 	drop = 0;
1372 	bytes = 0;
1373 
1374 	for (i = 0; i < oct->num_oqs; i++) {
1375 		oq_no = lio->linfo.rxpciq[i].s.q_no;
1376 		oq_stats = &oct->droq[oq_no]->stats;
1377 		pkts += oq_stats->rx_pkts_received;
1378 		drop += (oq_stats->rx_dropped +
1379 			 oq_stats->dropped_nodispatch +
1380 			 oq_stats->dropped_toomany +
1381 			 oq_stats->dropped_nomem);
1382 		bytes += oq_stats->rx_bytes_received;
1383 	}
1384 
1385 	stats->rx_bytes = bytes;
1386 	stats->rx_packets = pkts;
1387 	stats->rx_dropped = drop;
1388 
1389 	return stats;
1390 }
1391 
1392 /**
1393  * \brief Handler for SIOCSHWTSTAMP ioctl
1394  * @param netdev network device
1395  * @param ifr interface request
1397  */
1398 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
1399 {
1400 	struct lio *lio = GET_LIO(netdev);
1401 	struct hwtstamp_config conf;
1402 
1403 	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
1404 		return -EFAULT;
1405 
1406 	if (conf.flags)
1407 		return -EINVAL;
1408 
1409 	switch (conf.tx_type) {
1410 	case HWTSTAMP_TX_ON:
1411 	case HWTSTAMP_TX_OFF:
1412 		break;
1413 	default:
1414 		return -ERANGE;
1415 	}
1416 
1417 	switch (conf.rx_filter) {
1418 	case HWTSTAMP_FILTER_NONE:
1419 		break;
1420 	case HWTSTAMP_FILTER_ALL:
1421 	case HWTSTAMP_FILTER_SOME:
1422 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1423 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1424 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1425 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1426 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1427 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1428 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1429 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1430 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1431 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1432 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1433 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1434 	case HWTSTAMP_FILTER_NTP_ALL:
1435 		conf.rx_filter = HWTSTAMP_FILTER_ALL;
1436 		break;
1437 	default:
1438 		return -ERANGE;
1439 	}
1440 
1441 	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
1442 		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1443 
1444 	else
1445 		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1446 
1447 	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
1448 }
1449 
1450 /**
1451  * \brief ioctl handler
1452  * @param netdev network device
1453  * @param ifr interface request
1454  * @param cmd command
1455  */
1456 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1457 {
1458 	switch (cmd) {
1459 	case SIOCSHWTSTAMP:
1460 		return hwtstamp_ioctl(netdev, ifr);
1461 	default:
1462 		return -EOPNOTSUPP;
1463 	}
1464 }
1465 
1466 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
1467 {
1468 	struct sk_buff *skb = (struct sk_buff *)buf;
1469 	struct octnet_buf_free_info *finfo;
1470 	struct oct_timestamp_resp *resp;
1471 	struct octeon_soft_command *sc;
1472 	struct lio *lio;
1473 
1474 	finfo = (struct octnet_buf_free_info *)skb->cb;
1475 	lio = finfo->lio;
1476 	sc = finfo->sc;
1477 	oct = lio->oct_dev;
1478 	resp = (struct oct_timestamp_resp *)sc->virtrptr;
1479 
1480 	if (status != OCTEON_REQUEST_DONE) {
1481 		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
1482 			CVM_CAST64(status));
1483 		resp->timestamp = 0;
1484 	}
1485 
1486 	octeon_swap_8B_data(&resp->timestamp, 1);
1487 
1488 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
1489 		struct skb_shared_hwtstamps ts;
1490 		u64 ns = resp->timestamp;
1491 
1492 		netif_info(lio, tx_done, lio->netdev,
1493 			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
1494 			   skb, (unsigned long long)ns);
1495 		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
1496 		skb_tstamp_tx(skb, &ts);
1497 	}
1498 
1499 	octeon_free_soft_command(oct, sc);
1500 	tx_buffer_free(skb);
1501 }
1502 
 1503 /** \brief Send a data packet that will be timestamped
1504  * @param oct octeon device
1505  * @param ndata pointer to network data
1506  * @param finfo pointer to private network data
1507  */
1508 static int send_nic_timestamp_pkt(struct octeon_device *oct,
1509 				  struct octnic_data_pkt *ndata,
1510 				  struct octnet_buf_free_info *finfo,
1511 				  int xmit_more)
1512 {
1513 	struct octeon_soft_command *sc;
1514 	int ring_doorbell;
1515 	struct lio *lio;
1516 	int retval;
1517 	u32 len;
1518 
1519 	lio = finfo->lio;
1520 
1521 	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
1522 					    sizeof(struct oct_timestamp_resp));
1523 	finfo->sc = sc;
1524 
1525 	if (!sc) {
1526 		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
1527 		return IQ_SEND_FAILED;
1528 	}
1529 
1530 	if (ndata->reqtype == REQTYPE_NORESP_NET)
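	/* Promote the request to a response type so the firmware returns a
	 * completion that carries the TX timestamp, consumed later by
	 * handle_timestamp().
	 */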
1531 		ndata->reqtype = REQTYPE_RESP_NET;
1532 	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
1533 		ndata->reqtype = REQTYPE_RESP_NET_SG;
1534 
1535 	sc->callback = handle_timestamp;
1536 	sc->callback_arg = finfo->skb;
1537 	sc->iq_no = ndata->q_no;
1538 
1539 	len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
1540 
1541 	ring_doorbell = !xmit_more;
1542 
1543 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
1544 				     sc, len, ndata->reqtype);
1545 
1546 	if (retval == IQ_SEND_FAILED) {
1547 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
1548 			retval);
1549 		octeon_free_soft_command(oct, sc);
1550 	} else {
1551 		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
1552 	}
1553 
1554 	return retval;
1555 }
1556 
 1557 /** \brief Transmit network packets to the Octeon interface
1558  * @param skbuff   skbuff struct to be passed to network layer.
1559  * @param netdev   pointer to network device
1560  * @returns whether the packet was transmitted to the device okay or not
1561  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
1562  */
1563 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
1564 {
1565 	struct octnet_buf_free_info *finfo;
1566 	union octnic_cmd_setup cmdsetup;
1567 	struct octnic_data_pkt ndata;
1568 	struct octeon_instr_irh *irh;
1569 	struct oct_iq_stats *stats;
1570 	struct octeon_device *oct;
1571 	int q_idx = 0, iq_no = 0;
1572 	union tx_info *tx_info;
1573 	int xmit_more = 0;
1574 	struct lio *lio;
1575 	int status = 0;
1576 	u64 dptr = 0;
1577 	u32 tag = 0;
1578 	int j;
1579 
1580 	lio = GET_LIO(netdev);
1581 	oct = lio->oct_dev;
1582 
1583 	q_idx = skb_iq(lio, skb);
1584 	tag = q_idx;
1585 	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
1586 
1587 	stats = &oct->instr_queue[iq_no]->stats;
1588 
1589 	/* Check for all conditions in which the current packet cannot be
1590 	 * transmitted.
1591 	 */
1592 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
1593 	    (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
1594 		netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
1595 			   lio->linfo.link.s.link_up);
1596 		goto lio_xmit_failed;
1597 	}
1598 
1599 	/* Use space in skb->cb to store info used to unmap and
1600 	 * free the buffers.
1601 	 */
1602 	finfo = (struct octnet_buf_free_info *)skb->cb;
1603 	finfo->lio = lio;
1604 	finfo->skb = skb;
1605 	finfo->sc = NULL;
1606 
1607 	/* Prepare the attributes for the data to be passed to OSI. */
1608 	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
1609 
1610 	ndata.buf = finfo;
1611 
1612 	ndata.q_no = iq_no;
1613 
1614 	if (octnet_iq_is_full(oct, ndata.q_no)) {
1615 		/* defer sending if queue is full */
1616 		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
1617 			   ndata.q_no);
1618 		stats->tx_iq_busy++;
1619 		return NETDEV_TX_BUSY;
1620 	}
1621 
1622 	ndata.datasize = skb->len;
1623 
1624 	cmdsetup.u64 = 0;
1625 	cmdsetup.s.iq_no = iq_no;
1626 
1627 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1628 		if (skb->encapsulation) {
1629 			cmdsetup.s.tnl_csum = 1;
1630 			stats->tx_vxlan++;
1631 		} else {
1632 			cmdsetup.s.transport_csum = 1;
1633 		}
1634 	}
1635 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1636 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1637 		cmdsetup.s.timestamp = 1;
1638 	}
1639 
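	/* Linear skbs are DMA-mapped directly into the command; fragmented
	 * skbs borrow a pre-allocated gather list from this queue's glist
	 * and map the linear part plus each page fragment into it.
	 */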
1640 	if (!skb_shinfo(skb)->nr_frags) {
1641 		cmdsetup.s.u.datasize = skb->len;
1642 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
 1643 		/* Map the packet's linear data for DMA to the device */
1644 		dptr = dma_map_single(&oct->pci_dev->dev,
1645 				      skb->data,
1646 				      skb->len,
1647 				      DMA_TO_DEVICE);
1648 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
1649 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
1650 				__func__);
1651 			return NETDEV_TX_BUSY;
1652 		}
1653 
1654 		ndata.cmd.cmd3.dptr = dptr;
1655 		finfo->dptr = dptr;
1656 		ndata.reqtype = REQTYPE_NORESP_NET;
1657 
1658 	} else {
1659 		struct skb_frag_struct *frag;
1660 		struct octnic_gather *g;
1661 		int i, frags;
1662 
1663 		spin_lock(&lio->glist_lock[q_idx]);
1664 		g = (struct octnic_gather *)list_delete_head(
1665 		    &lio->glist[q_idx]);
1666 		spin_unlock(&lio->glist_lock[q_idx]);
1667 
1668 		if (!g) {
1669 			netif_info(lio, tx_err, lio->netdev,
1670 				   "Transmit scatter gather: glist null!\n");
1671 			goto lio_xmit_failed;
1672 		}
1673 
1674 		cmdsetup.s.gather = 1;
1675 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
1676 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1677 
1678 		memset(g->sg, 0, g->sg_size);
1679 
1680 		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
1681 						 skb->data,
1682 						 (skb->len - skb->data_len),
1683 						 DMA_TO_DEVICE);
1684 		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
1685 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
1686 				__func__);
1687 			return NETDEV_TX_BUSY;
1688 		}
1689 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
1690 
1691 		frags = skb_shinfo(skb)->nr_frags;
1692 		i = 1;
1693 		while (frags--) {
1694 			frag = &skb_shinfo(skb)->frags[i - 1];
1695 
1696 			g->sg[(i >> 2)].ptr[(i & 3)] =
1697 				dma_map_page(&oct->pci_dev->dev,
1698 					     frag->page.p,
1699 					     frag->page_offset,
1700 					     frag->size,
1701 					     DMA_TO_DEVICE);
1702 			if (dma_mapping_error(&oct->pci_dev->dev,
1703 					      g->sg[i >> 2].ptr[i & 3])) {
1704 				dma_unmap_single(&oct->pci_dev->dev,
1705 						 g->sg[0].ptr[0],
1706 						 skb->len - skb->data_len,
1707 						 DMA_TO_DEVICE);
1708 				for (j = 1; j < i; j++) {
1709 					frag = &skb_shinfo(skb)->frags[j - 1];
1710 					dma_unmap_page(&oct->pci_dev->dev,
1711 						       g->sg[j >> 2].ptr[j & 3],
1712 						       frag->size,
1713 						       DMA_TO_DEVICE);
1714 				}
1715 				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
1716 					__func__);
1717 				return NETDEV_TX_BUSY;
1718 			}
1719 
1720 			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
1721 			i++;
1722 		}
1723 
1724 		dptr = g->sg_dma_ptr;
1725 
1726 		ndata.cmd.cmd3.dptr = dptr;
1727 		finfo->dptr = dptr;
1728 		finfo->g = g;
1729 
1730 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
1731 	}
1732 
1733 	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
1734 	tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
1735 
1736 	if (skb_shinfo(skb)->gso_size) {
1737 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
1738 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
1739 	}
1740 
1741 	/* HW insert VLAN tag */
1742 	if (skb_vlan_tag_present(skb)) {
1743 		irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
1744 		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
1745 	}
1746 
1747 	xmit_more = skb->xmit_more;
1748 
1749 	if (unlikely(cmdsetup.s.timestamp))
1750 		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
1751 	else
1752 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
1753 	if (status == IQ_SEND_FAILED)
1754 		goto lio_xmit_failed;
1755 
1756 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
1757 
1758 	if (status == IQ_SEND_STOP) {
1759 		dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
1760 			iq_no);
1761 		netif_stop_subqueue(netdev, q_idx);
1762 	}
1763 
1764 	netif_trans_update(netdev);
1765 
1766 	if (tx_info->s.gso_segs)
1767 		stats->tx_done += tx_info->s.gso_segs;
1768 	else
1769 		stats->tx_done++;
1770 	stats->tx_tot_bytes += ndata.datasize;
1771 
1772 	return NETDEV_TX_OK;
1773 
1774 lio_xmit_failed:
1775 	stats->tx_dropped++;
1776 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
1777 		   iq_no, stats->tx_dropped);
1778 	if (dptr)
1779 		dma_unmap_single(&oct->pci_dev->dev, dptr,
1780 				 ndata.datasize, DMA_TO_DEVICE);
1781 
1782 	octeon_ring_doorbell_locked(oct, iq_no);
1783 
1784 	tx_buffer_free(skb);
1785 	return NETDEV_TX_OK;
1786 }
1787 
1788 /** \brief Network device Tx timeout
1789  * @param netdev    pointer to network device
1790  */
1791 static void liquidio_tx_timeout(struct net_device *netdev)
1792 {
1793 	struct lio *lio;
1794 
1795 	lio = GET_LIO(netdev);
1796 
1797 	netif_info(lio, tx_err, lio->netdev,
1798 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
1799 		   netdev->stats.tx_dropped);
1800 	netif_trans_update(netdev);
1801 	wake_txqs(netdev);
1802 }
1803 
1804 static int
1805 liquidio_vlan_rx_add_vid(struct net_device *netdev,
1806 			 __be16 proto __attribute__((unused)), u16 vid)
1807 {
1808 	struct lio *lio = GET_LIO(netdev);
1809 	struct octeon_device *oct = lio->oct_dev;
1810 	struct octnic_ctrl_pkt nctrl;
1811 	struct completion compl;
1812 	u16 response_code;
1813 	int ret = 0;
1814 
1815 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1816 
1817 	nctrl.ncmd.u64 = 0;
1818 	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
1819 	nctrl.ncmd.s.param1 = vid;
1820 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1821 	nctrl.wait_time = 100;
1822 	nctrl.netpndev = (u64)netdev;
1823 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1824 	init_completion(&compl);
1825 	nctrl.completion = &compl;
1826 	nctrl.response_code = &response_code;
1827 
1828 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1829 	if (ret < 0) {
1830 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
1831 			ret);
1832 		return -EIO;
1833 	}
1834 
1835 	if (!wait_for_completion_timeout(&compl,
1836 					 msecs_to_jiffies(nctrl.wait_time)))
1837 		return -EPERM;
1838 
1839 	if (READ_ONCE(response_code))
1840 		return -EPERM;
1841 
1842 	return 0;
1843 }
1844 
1845 static int
1846 liquidio_vlan_rx_kill_vid(struct net_device *netdev,
1847 			  __be16 proto __attribute__((unused)), u16 vid)
1848 {
1849 	struct lio *lio = GET_LIO(netdev);
1850 	struct octeon_device *oct = lio->oct_dev;
1851 	struct octnic_ctrl_pkt nctrl;
1852 	int ret = 0;
1853 
1854 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1855 
1856 	nctrl.ncmd.u64 = 0;
1857 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
1858 	nctrl.ncmd.s.param1 = vid;
1859 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1860 	nctrl.wait_time = 100;
1861 	nctrl.netpndev = (u64)netdev;
1862 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1863 
1864 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1865 	if (ret < 0) {
 1866 		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
1867 			ret);
1868 	}
1869 	return ret;
1870 }
1871 
1872 /** Sending command to enable/disable RX checksum offload
1873  * @param netdev                pointer to network device
1874  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
 1875  * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
1876  *                              OCTNET_CMD_RXCSUM_DISABLE
1877  * @returns                     SUCCESS or FAILURE
1878  */
1879 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
1880 				       u8 rx_cmd)
1881 {
1882 	struct lio *lio = GET_LIO(netdev);
1883 	struct octeon_device *oct = lio->oct_dev;
1884 	struct octnic_ctrl_pkt nctrl;
1885 	int ret = 0;
1886 
1887 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1888 
1889 	nctrl.ncmd.u64 = 0;
1890 	nctrl.ncmd.s.cmd = command;
1891 	nctrl.ncmd.s.param1 = rx_cmd;
1892 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1893 	nctrl.wait_time = 100;
1894 	nctrl.netpndev = (u64)netdev;
1895 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1896 
1897 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1898 	if (ret < 0) {
1899 		dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
1900 			ret);
1901 	}
1902 	return ret;
1903 }
1904 
1905 /** Send a command to add or delete a VxLAN UDP port in firmware
1906  * @param netdev                pointer to network device
1907  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
1908  * @param vxlan_port            VxLAN port to be added or deleted
1909  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD or
1910  *                              OCTNET_CMD_VXLAN_PORT_DEL
1911  * @returns                     SUCCESS or FAILURE
1912  */
1913 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
1914 				       u16 vxlan_port, u8 vxlan_cmd_bit)
1915 {
1916 	struct lio *lio = GET_LIO(netdev);
1917 	struct octeon_device *oct = lio->oct_dev;
1918 	struct octnic_ctrl_pkt nctrl;
1919 	int ret = 0;
1920 
1921 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1922 
1923 	nctrl.ncmd.u64 = 0;
1924 	nctrl.ncmd.s.cmd = command;
1925 	nctrl.ncmd.s.more = vxlan_cmd_bit;
1926 	nctrl.ncmd.s.param1 = vxlan_port;
1927 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1928 	nctrl.wait_time = 100;
1929 	nctrl.netpndev = (u64)netdev;
1930 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1931 
1932 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1933 	if (ret < 0) {
1934 		dev_err(&oct->pci_dev->dev,
1935 			"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
1936 			ret);
1937 	}
1938 	return ret;
1939 }
1940 
1941 /** \brief Net device fix features
1942  * @param netdev  pointer to network device
1943  * @param request features requested
1944  * @returns updated features list
1945  */
1946 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
1947 					       netdev_features_t request)
1948 {
1949 	struct lio *lio = netdev_priv(netdev);
1950 
1951 	if ((request & NETIF_F_RXCSUM) &&
1952 	    !(lio->dev_capability & NETIF_F_RXCSUM))
1953 		request &= ~NETIF_F_RXCSUM;
1954 
1955 	if ((request & NETIF_F_HW_CSUM) &&
1956 	    !(lio->dev_capability & NETIF_F_HW_CSUM))
1957 		request &= ~NETIF_F_HW_CSUM;
1958 
1959 	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
1960 		request &= ~NETIF_F_TSO;
1961 
1962 	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
1963 		request &= ~NETIF_F_TSO6;
1964 
1965 	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
1966 		request &= ~NETIF_F_LRO;
1967 
1968 	/* Disable LRO if RXCSUM is off */
1969 	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
1970 	    (lio->dev_capability & NETIF_F_LRO))
1971 		request &= ~NETIF_F_LRO;
1972 
1973 	return request;
1974 }
1975 
1976 /** \brief Net device set features
1977  * @param netdev  pointer to network device
1978  * @param features features to enable/disable
1979  */
1980 static int liquidio_set_features(struct net_device *netdev,
1981 				 netdev_features_t features)
1982 {
1983 	struct lio *lio = netdev_priv(netdev);
1984 
1985 	if (!((netdev->features ^ features) & NETIF_F_LRO))
1986 		return 0;
1987 
1988 	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
1989 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
1990 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1991 	else if (!(features & NETIF_F_LRO) &&
1992 		 (lio->dev_capability & NETIF_F_LRO))
1993 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
1994 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1995 	if (!(netdev->features & NETIF_F_RXCSUM) &&
1996 	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1997 	    (features & NETIF_F_RXCSUM))
1998 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1999 					    OCTNET_CMD_RXCSUM_ENABLE);
2000 	else if ((netdev->features & NETIF_F_RXCSUM) &&
2001 		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2002 		 !(features & NETIF_F_RXCSUM))
2003 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2004 					    OCTNET_CMD_RXCSUM_DISABLE);
2005 
2006 	return 0;
2007 }
2008 
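/* ndo_udp_tunnel_add/del handlers: notify the firmware when the stack adds
 * or removes a VxLAN UDP port.  Tunnel types other than VxLAN are ignored.
 */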
2009 static void liquidio_add_vxlan_port(struct net_device *netdev,
2010 				    struct udp_tunnel_info *ti)
2011 {
2012 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2013 		return;
2014 
2015 	liquidio_vxlan_port_command(netdev,
2016 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2017 				    htons(ti->port),
2018 				    OCTNET_CMD_VXLAN_PORT_ADD);
2019 }
2020 
2021 static void liquidio_del_vxlan_port(struct net_device *netdev,
2022 				    struct udp_tunnel_info *ti)
2023 {
2024 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2025 		return;
2026 
2027 	liquidio_vxlan_port_command(netdev,
2028 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
2029 				    htons(ti->port),
2030 				    OCTNET_CMD_VXLAN_PORT_DEL);
2031 }
2032 
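/* Netdev callbacks for this VF interface; installed as netdev->netdev_ops
 * in setup_nic_devices().
 */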
2033 static const struct net_device_ops lionetdevops = {
2034 	.ndo_open		= liquidio_open,
2035 	.ndo_stop		= liquidio_stop,
2036 	.ndo_start_xmit		= liquidio_xmit,
2037 	.ndo_get_stats		= liquidio_get_stats,
2038 	.ndo_set_mac_address	= liquidio_set_mac,
2039 	.ndo_set_rx_mode	= liquidio_set_mcast_list,
2040 	.ndo_tx_timeout		= liquidio_tx_timeout,
2041 	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
2042 	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
2043 	.ndo_change_mtu		= liquidio_change_mtu,
2044 	.ndo_do_ioctl		= liquidio_ioctl,
2045 	.ndo_fix_features	= liquidio_fix_features,
2046 	.ndo_set_features	= liquidio_set_features,
2047 	.ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
2048 	.ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
2049 };
2050 
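/** \brief Dispatch handler for OPCODE_NIC_INFO packets from the firmware
 * @param recv_info  received packet info
 * @param buf        octeon device passed as the dispatch argument
 *
 * Validates the payload size, byte-swaps the embedded link status and
 * updates the netdev whose gmxport matches; the receive buffers are always
 * freed before returning.
 */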
2051 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
2052 {
2053 	struct octeon_device *oct = (struct octeon_device *)buf;
2054 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
2055 	union oct_link_status *ls;
2056 	int gmxport = 0;
2057 	int i;
2058 
2059 	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
2060 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
2061 			recv_pkt->buffer_size[0],
2062 			recv_pkt->rh.r_nic_info.gmxport);
2063 		goto nic_info_err;
2064 	}
2065 
2066 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
2067 	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
2068 		OCT_DROQ_INFO_SIZE);
2069 
2070 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
2071 
2072 	for (i = 0; i < oct->ifcount; i++) {
2073 		if (oct->props[i].gmxport == gmxport) {
2074 			update_link_status(oct->props[i].netdev, ls);
2075 			break;
2076 		}
2077 	}
2078 
2079 nic_info_err:
2080 	for (i = 0; i < recv_pkt->buffer_count; i++)
2081 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
2082 	octeon_free_recv_info(recv_info);
2083 	return 0;
2084 }
2085 
2086 /**
2087  * \brief Setup network interfaces
2088  * @param octeon_dev  octeon device
2089  *
2090  * Called during init time for each device. It assumes the NIC
2091  * is already up and running.  The link information for each
2092  * interface is passed in link_info.
2093  */
2094 static int setup_nic_devices(struct octeon_device *octeon_dev)
2095 {
2096 	int retval, num_iqueues, num_oqueues;
2097 	struct liquidio_if_cfg_context *ctx;
2098 	u32 resp_size, ctx_size, data_size;
2099 	struct liquidio_if_cfg_resp *resp;
2100 	struct octeon_soft_command *sc;
2101 	union oct_nic_if_cfg if_cfg;
2102 	struct octdev_props *props;
2103 	struct net_device *netdev;
2104 	struct lio_version *vdata;
2105 	struct lio *lio = NULL;
2106 	u8 mac[ETH_ALEN], i, j;
2107 	u32 ifidx_or_pfnum;
2108 
2109 	ifidx_or_pfnum = octeon_dev->pf_num;
2110 
2111 	/* This is to handle link status changes */
2112 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
2113 				    lio_nic_info, octeon_dev);
2114 
2115 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
2116 	 * They are handled directly.
2117 	 */
2118 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
2119 					free_netbuf);
2120 
2121 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
2122 					free_netsgbuf);
2123 
2124 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
2125 					free_netsgbuf_with_resp);
2126 
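	/* For each interface: send an OPCODE_NIC_IF_CFG soft command to the
	 * firmware, then use the returned queue masks and link info to
	 * allocate, configure and register a netdev with its IO queues.
	 */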
2127 	for (i = 0; i < octeon_dev->ifcount; i++) {
2128 		resp_size = sizeof(struct liquidio_if_cfg_resp);
2129 		ctx_size = sizeof(struct liquidio_if_cfg_context);
2130 		data_size = sizeof(struct lio_version);
2131 		sc = (struct octeon_soft_command *)
2132 			octeon_alloc_soft_command(octeon_dev, data_size,
2133 						  resp_size, ctx_size);
2134 		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
2135 		ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
2136 		vdata = (struct lio_version *)sc->virtdptr;
2137 
2138 		*((u64 *)vdata) = 0;
2139 		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
2140 		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
2141 		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
2142 
2143 		WRITE_ONCE(ctx->cond, 0);
2144 		ctx->octeon_id = lio_get_device_id(octeon_dev);
2145 		init_waitqueue_head(&ctx->wc);
2146 
2147 		if_cfg.u64 = 0;
2148 
2149 		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
2150 		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
2151 		if_cfg.s.base_queue = 0;
2152 
2153 		sc->iq_no = 0;
2154 
2155 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
2156 					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
2157 					    0);
2158 
2159 		sc->callback = if_cfg_callback;
2160 		sc->callback_arg = sc;
2161 		sc->wait_time = 5000;
2162 
2163 		retval = octeon_send_soft_command(octeon_dev, sc);
2164 		if (retval == IQ_SEND_FAILED) {
2165 			dev_err(&octeon_dev->pci_dev->dev,
2166 				"iq/oq config failed status: %x\n", retval);
2167 			/* Soft instr is freed by driver in case of failure. */
2168 			goto setup_nic_dev_fail;
2169 		}
2170 
2171 		/* Sleep on a wait queue until the cond flag indicates that the
2172 		 * response arrived or the wait timed out.
2173 		 */
2174 		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
2175 			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
2176 			goto setup_nic_wait_intr;
2177 		}
2178 
2179 		retval = resp->status;
2180 		if (retval) {
2181 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
2182 			goto setup_nic_dev_fail;
2183 		}
2184 
2185 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2186 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
2187 
2188 		num_iqueues = hweight64(resp->cfg_info.iqmask);
2189 		num_oqueues = hweight64(resp->cfg_info.oqmask);
2190 
2191 		if (!(num_iqueues) || !(num_oqueues)) {
2192 			dev_err(&octeon_dev->pci_dev->dev,
2193 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2194 				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2195 			goto setup_nic_dev_fail;
2196 		}
2197 		dev_dbg(&octeon_dev->pci_dev->dev,
2198 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2199 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2200 			num_iqueues, num_oqueues);
2201 
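		/* Allocate a multiqueue net device sized for the lio private
		 * area, with one TX queue per input queue granted by firmware.
		 */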
2202 		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2203 
2204 		if (!netdev) {
2205 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2206 			goto setup_nic_dev_fail;
2207 		}
2208 
2209 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2210 
2211 		/* Associate the routines that will handle different
2212 		 * netdev tasks.
2213 		 */
2214 		netdev->netdev_ops = &lionetdevops;
2215 
2216 		lio = GET_LIO(netdev);
2217 
2218 		memset(lio, 0, sizeof(struct lio));
2219 
2220 		lio->ifidx = ifidx_or_pfnum;
2221 
2222 		props = &octeon_dev->props[i];
2223 		props->gmxport = resp->cfg_info.linfo.gmxport;
2224 		props->netdev = netdev;
2225 
2226 		lio->linfo.num_rxpciq = num_oqueues;
2227 		lio->linfo.num_txpciq = num_iqueues;
2228 
2229 		for (j = 0; j < num_oqueues; j++) {
2230 			lio->linfo.rxpciq[j].u64 =
2231 			    resp->cfg_info.linfo.rxpciq[j].u64;
2232 		}
2233 		for (j = 0; j < num_iqueues; j++) {
2234 			lio->linfo.txpciq[j].u64 =
2235 			    resp->cfg_info.linfo.txpciq[j].u64;
2236 		}
2237 
2238 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2239 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2240 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2241 		lio->linfo.macaddr_is_admin_asgnd =
2242 			resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2243 
2244 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2245 
2246 		lio->dev_capability = NETIF_F_HIGHDMA
2247 				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2248 				      | NETIF_F_SG | NETIF_F_RXCSUM
2249 				      | NETIF_F_TSO | NETIF_F_TSO6
2250 				      | NETIF_F_GRO
2251 				      | NETIF_F_LRO;
2252 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2253 
2254 		/* Copy of transmit encapsulation capabilities:
2255 		 * TSO, TSO6, Checksums for this device
2256 		 */
2257 		lio->enc_dev_capability = NETIF_F_IP_CSUM
2258 					  | NETIF_F_IPV6_CSUM
2259 					  | NETIF_F_GSO_UDP_TUNNEL
2260 					  | NETIF_F_HW_CSUM | NETIF_F_SG
2261 					  | NETIF_F_RXCSUM
2262 					  | NETIF_F_TSO | NETIF_F_TSO6
2263 					  | NETIF_F_LRO;
2264 
2265 		netdev->hw_enc_features =
2266 		    (lio->enc_dev_capability & ~NETIF_F_LRO);
2267 		netdev->vlan_features = lio->dev_capability;
2268 		/* Add any unchangeable hw features */
2269 		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2270 				       NETIF_F_HW_VLAN_CTAG_RX |
2271 				       NETIF_F_HW_VLAN_CTAG_TX;
2272 
2273 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2274 
2275 		netdev->hw_features = lio->dev_capability;
2276 
2277 		/* MTU range: 68 - 16000 */
2278 		netdev->min_mtu = LIO_MIN_MTU_SIZE;
2279 		netdev->max_mtu = LIO_MAX_MTU_SIZE;
2280 
2281 		/* Point to the properties for the octeon device to which this
2282 		 * interface belongs.
2283 		 */
2284 		lio->oct_dev = octeon_dev;
2285 		lio->octprops = props;
2286 		lio->netdev = netdev;
2287 
2288 		dev_dbg(&octeon_dev->pci_dev->dev,
2289 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
2290 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2291 
2292 		/* 64-bit swap required on LE machines */
2293 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
2294 		for (j = 0; j < ETH_ALEN; j++)
2295 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2296 
2297 		/* Copy MAC Address to OS network device structure */
2298 		ether_addr_copy(netdev->dev_addr, mac);
2299 
2300 		if (liquidio_setup_io_queues(octeon_dev, i,
2301 					     lio->linfo.num_txpciq,
2302 					     lio->linfo.num_rxpciq)) {
2303 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2304 			goto setup_nic_dev_fail;
2305 		}
2306 
2307 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2308 
2309 		/* For VFs, enable Octeon device interrupts here,
2310 		 * as this is contingent upon IO queue setup
2311 		 */
2312 		octeon_dev->fn_list.enable_interrupt(octeon_dev,
2313 						     OCTEON_ALL_INTR);
2314 
2315 		/* By default all interfaces on a single Octeon use the same
2316 		 * tx and rx queues
2317 		 */
2318 		lio->txq = lio->linfo.txpciq[0].s.q_no;
2319 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2320 
2321 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2322 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2323 
2324 		if (setup_glists(lio, num_iqueues)) {
2325 			dev_err(&octeon_dev->pci_dev->dev,
2326 				"Gather list allocation failed\n");
2327 			goto setup_nic_dev_fail;
2328 		}
2329 
2330 		/* Register ethtool support */
2331 		liquidio_set_ethtool_ops(netdev);
2332 		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2333 			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2334 		else
2335 			octeon_dev->priv_flags = 0x0;
2336 
2337 		if (netdev->features & NETIF_F_LRO)
2338 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2339 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2340 
2341 		if (setup_link_status_change_wq(netdev))
2342 			goto setup_nic_dev_fail;
2343 
2344 		if (setup_rx_oom_poll_fn(netdev))
2345 			goto setup_nic_dev_fail;
2346 
2347 		/* Register the network device with the OS */
2348 		if (register_netdev(netdev)) {
2349 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2350 			goto setup_nic_dev_fail;
2351 		}
2352 
2353 		dev_dbg(&octeon_dev->pci_dev->dev,
2354 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2355 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2356 		netif_carrier_off(netdev);
2357 		lio->link_changes++;
2358 
2359 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2360 
2361 		/* Send a command to firmware to enable Rx checksum offload
2362 		 * by default when the LiquidIO driver sets up this device.
2364 		 */
2365 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2366 					    OCTNET_CMD_RXCSUM_ENABLE);
2367 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2368 				     OCTNET_CMD_TXCSUM_ENABLE);
2369 
2370 		dev_dbg(&octeon_dev->pci_dev->dev,
2371 			"NIC ifidx:%d Setup successful\n", i);
2372 
2373 		octeon_free_soft_command(octeon_dev, sc);
2374 	}
2375 
2376 	return 0;
2377 
2378 setup_nic_dev_fail:
2379 
2380 	octeon_free_soft_command(octeon_dev, sc);
2381 
2382 setup_nic_wait_intr:
2383 
2384 	while (i--) {
2385 		dev_err(&octeon_dev->pci_dev->dev,
2386 			"NIC ifidx:%d Setup failed\n", i);
2387 		liquidio_destroy_nic_device(octeon_dev, i);
2388 	}
2389 	return -ENODEV;
2390 }
2391 
2392 /**
2393  * \brief initialize the NIC
2394  * @param oct octeon device
2395  *
2396  * This initialization routine is called once the Octeon device application is
2397  * up and running
2398  */
2399 static int liquidio_init_nic_module(struct octeon_device *oct)
2400 {
2401 	int num_nic_ports = 1;
2402 	int i, retval = 0;
2403 
2404 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2405 
2406 	/* Only the default iq and oq were initialized; initialize the rest
2407 	 * as well and run the port_config command for each port.
2408 	 */
2409 	oct->ifcount = num_nic_ports;
2410 	memset(oct->props, 0,
2411 	       sizeof(struct octdev_props) * num_nic_ports);
2412 
2413 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
2414 		oct->props[i].gmxport = -1;
2415 
2416 	retval = setup_nic_devices(oct);
2417 	if (retval) {
2418 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2419 		goto octnet_init_failure;
2420 	}
2421 
2422 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2423 
2424 	return retval;
2425 
2426 octnet_init_failure:
2427 
2428 	oct->ifcount = 0;
2429 
2430 	return retval;
2431 }
2432 
2433 /**
2434  * \brief Device initialization for each Octeon device that is probed
2435  * @param octeon_dev  octeon device
2436  */
2437 static int octeon_device_init(struct octeon_device *oct)
2438 {
2439 	u32 rev_id;
2440 	int j;
2441 
2442 	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2443 
2444 	/* Enable access to the octeon device and make its DMA capability
2445 	 * known to the OS.
2446 	 */
2447 	if (octeon_pci_os_setup(oct))
2448 		return 1;
2449 	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2450 
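	/* The VF is always treated as a CN23XX device.  The revision ID is
	 * the low byte of the PCI class/revision dword at config offset 8.
	 */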
2451 	oct->chip_id = OCTEON_CN23XX_VF_VID;
2452 	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
2453 	oct->rev_id = rev_id & 0xff;
2454 
2455 	if (cn23xx_setup_octeon_vf_device(oct))
2456 		return 1;
2457 
2458 	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2459 
2460 	oct->app_mode = CVM_DRV_NIC_APP;
2461 
2462 	/* Initialize the dispatch mechanism used to push packets arriving on
2463 	 * Octeon Output queues.
2464 	 */
2465 	if (octeon_init_dispatch_list(oct))
2466 		return 1;
2467 
2468 	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2469 
2470 	if (octeon_set_io_queues_off(oct)) {
2471 		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2472 		return 1;
2473 	}
2474 
2475 	if (oct->fn_list.setup_device_regs(oct)) {
2476 		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2477 		return 1;
2478 	}
2479 
2480 	/* Initialize soft command buffer pool */
2481 	if (octeon_setup_sc_buffer_pool(oct)) {
2482 		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2483 		return 1;
2484 	}
2485 	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2486 
2487 	/* Setup the data structures that manage this Octeon's Input queues. */
2488 	if (octeon_setup_instr_queues(oct)) {
2489 		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2490 		return 1;
2491 	}
2492 	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2493 
2494 	/* Initialize lists to manage the requests of different types that
2495 	 * arrive from user & kernel applications for this octeon device.
2496 	 */
2497 	if (octeon_setup_response_list(oct)) {
2498 		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2499 		return 1;
2500 	}
2501 	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2502 
2503 	if (octeon_setup_output_queues(oct)) {
2504 		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2505 		return 1;
2506 	}
2507 	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2508 
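	/* Set up the mailbox used for VF<->PF communication. */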
2509 	if (oct->fn_list.setup_mbox(oct)) {
2510 		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2511 		return 1;
2512 	}
2513 	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2514 
2515 	if (octeon_allocate_ioq_vector(oct)) {
2516 		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2517 		return 1;
2518 	}
2519 	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2520 
2521 	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
2522 		 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
2523 
2524 	/* Setup the interrupt handler and record the INT SUM register address */
2525 	if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2526 		return 1;
2527 
2528 	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2529 
2530 	/* ***************************************************************
2531 	 * The interrupts need to be enabled for the PF<-->VF handshake.
2532 	 * They are [re]-enabled after the PF<-->VF handshake so that the
2533 	 * correct OQ tick value is used (i.e. the value retrieved from
2534 	 * the PF as part of the handshake).
2535 	 */
2536 
2537 	/* Enable Octeon device interrupts */
2538 	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2539 
2540 	if (cn23xx_octeon_pfvf_handshake(oct))
2541 		return 1;
2542 
2543 	/* Here we [re]-enable the interrupts so that the correct OQ tick value
2544 	 * is used (i.e. the value that was retrieved during the handshake)
2545 	 */
2546 
2547 	/* Enable Octeon device interrupts */
2548 	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2549 	/* *************************************************************** */
2550 
2551 	/* Enable the input and output queues for this Octeon device */
2552 	if (oct->fn_list.enable_io_queues(oct)) {
2553 		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2554 		return 1;
2555 	}
2556 
2557 	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2558 
2559 	atomic_set(&oct->status, OCT_DEV_HOST_OK);
2560 
2561 	/* Send Credit for Octeon Output queues. Credits are always sent after
2562 	 * the output queue is enabled.
2563 	 */
2564 	for (j = 0; j < oct->num_oqs; j++)
2565 		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2566 
2567 	/* Packets can start arriving on the output queues from this point. */
2568 
2569 	atomic_set(&oct->status, OCT_DEV_CORE_OK);
2570 
2571 	atomic_set(&oct->status, OCT_DEV_RUNNING);
2572 
2573 	if (liquidio_init_nic_module(oct))
2574 		return 1;
2575 
2576 	return 0;
2577 }
2578 
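/** \brief Module entry point: initialize the octeon device list and
 * register the LiquidIO VF PCI driver.
 */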
2579 static int __init liquidio_vf_init(void)
2580 {
2581 	octeon_init_device_list(0);
2582 	return pci_register_driver(&liquidio_vf_pci_driver);
2583 }
2584 
2585 static void __exit liquidio_vf_exit(void)
2586 {
2587 	pci_unregister_driver(&liquidio_vf_pci_driver);
2588 
2589 	pr_info("LiquidIO_VF network module is now unloaded\n");
2590 }
2591 
2592 module_init(liquidio_vf_init);
2593 module_exit(liquidio_vf_exit);
2594