1 /**********************************************************************
2 * Author: Cavium, Inc.
3 *
4 * Contact: support@cavium.com
5 *          Please include "LiquidIO" in the subject.
6 *
7 * Copyright (c) 2003-2015 Cavium, Inc.
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT.  See the GNU General Public License for more
17 * details.
18 *
19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information
21 **********************************************************************/
22 #include <linux/version.h>
23 #include <linux/pci.h>
24 #include <linux/net_tstamp.h>
25 #include <linux/if_vlan.h>
26 #include <linux/firmware.h>
27 #include <linux/ptp_clock_kernel.h>
28 #include <net/vxlan.h>
29 #include "liquidio_common.h"
30 #include "octeon_droq.h"
31 #include "octeon_iq.h"
32 #include "response_manager.h"
33 #include "octeon_device.h"
34 #include "octeon_nic.h"
35 #include "octeon_main.h"
36 #include "octeon_network.h"
37 #include "cn66xx_regs.h"
38 #include "cn66xx_device.h"
39 #include "cn68xx_device.h"
40 #include "liquidio_image.h"
41 
42 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
43 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(LIQUIDIO_VERSION);
46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
47 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
49 
50 static int ddr_timeout = 10000;
51 module_param(ddr_timeout, int, 0644);
52 MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to a non-zero value before checking begins");
54 
55 static u32 console_bitmask;
module_param(console_bitmask, uint, 0644);
57 MODULE_PARM_DESC(console_bitmask,
58 		 "Bitmask indicating which consoles have debug output redirected to syslog.");
59 
60 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
61 
62 #define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
63 	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
64 
65 static int debug = -1;
66 module_param(debug, int, 0644);
67 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
68 
69 static char fw_type[LIO_MAX_FW_TYPE_LEN];
70 module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
71 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
72 
73 static int conf_type;
74 module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "Select Octeon configuration: 0 (default), 1 (OVS)");
76 
77 static int ptp_enable = 1;
78 
79 /* Bit mask values for lio->ifstate */
80 #define   LIO_IFSTATE_DROQ_OPS             0x01
81 #define   LIO_IFSTATE_REGISTERED           0x02
82 #define   LIO_IFSTATE_RUNNING              0x04
83 #define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
84 
85 /* Polling interval for determining when NIC application is alive */
86 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
87 
88 /* runtime link query interval */
89 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
90 
91 struct liquidio_if_cfg_context {
92 	int octeon_id;
93 
94 	wait_queue_head_t wc;
95 
96 	int cond;
97 };
98 
99 struct liquidio_if_cfg_resp {
100 	u64 rh;
101 	struct liquidio_if_cfg_info cfg_info;
102 	u64 status;
103 };
104 
105 struct oct_link_status_resp {
106 	u64 rh;
107 	struct oct_link_info link_info;
108 	u64 status;
109 };
110 
111 struct oct_timestamp_resp {
112 	u64 rh;
113 	u64 timestamp;
114 	u64 status;
115 };
116 
117 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
118 
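/** GSO parameters (segment size and count) packed into a single 64-bit
 * word for use on the transmit path; field order depends on host
 * endianness.
 */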
119 union tx_info {
120 	u64 u64;
121 	struct {
122 #ifdef __BIG_ENDIAN_BITFIELD
123 		u16 gso_size;
124 		u16 gso_segs;
125 		u32 reserved;
126 #else
127 		u32 reserved;
128 		u16 gso_segs;
129 		u16 gso_size;
130 #endif
131 	} s;
132 };
133 
134 /** Octeon device properties to be used by the NIC module.
135  * Each octeon device in the system will be represented
136  * by this structure in the NIC module.
137  */
138 
139 #define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)
140 
141 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
142 #define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
143 
/** Structure of a node in the list of gather components maintained by the
 * NIC driver for each network device.
 */
147 struct octnic_gather {
148 	/** List manipulation. Next and prev pointers. */
149 	struct list_head list;
150 
151 	/** Size of the gather component at sg in bytes. */
152 	int sg_size;
153 
154 	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
155 	int adjust;
156 
157 	/** Gather component that can accommodate max sized fragment list
158 	 *  received from the IP layer.
159 	 */
160 	struct octeon_sg_entry *sg;
161 
162 	u64 sg_dma_ptr;
163 };
164 
/** This structure is used by the NIC driver to store the information required
 * to free the sk_buff once the packet has been fetched by Octeon.
 * Byte offsets below assume a 64-bit system.
 */
169 struct octnet_buf_free_info {
170 	/** Bytes 1-8.  Pointer to network device private structure. */
171 	struct lio *lio;
172 
173 	/** Bytes 9-16.  Pointer to sk_buff. */
174 	struct sk_buff *skb;
175 
176 	/** Bytes 17-24.  Pointer to gather list. */
177 	struct octnic_gather *g;
178 
179 	/** Bytes 25-32. Physical address of skb->data or gather list. */
180 	u64 dptr;
181 
182 	/** Bytes 33-47. Piggybacked soft command, if any */
183 	struct octeon_soft_command *sc;
184 };
185 
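/** Handshake state, one per Octeon device. The init and started completions
 * let the module-init path wait for low-level device initialization and for
 * the NIC application on the card to come up; the *_ok flags record the
 * results.
 */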
186 struct handshake {
187 	struct completion init;
188 	struct completion started;
189 	struct pci_dev *pci_dev;
190 	int init_ok;
191 	int started_ok;
192 };
193 
194 struct octeon_device_priv {
195 	/** Tasklet structures for this device. */
196 	struct tasklet_struct droq_tasklet;
197 	unsigned long napi_mask;
198 };
199 
200 static int octeon_device_init(struct octeon_device *);
201 static void liquidio_remove(struct pci_dev *pdev);
202 static int liquidio_probe(struct pci_dev *pdev,
203 			  const struct pci_device_id *ent);
204 
205 static struct handshake handshake[MAX_OCTEON_DEVICES];
206 static struct completion first_stage;
207 
208 static void octeon_droq_bh(unsigned long pdev)
209 {
210 	int q_no;
211 	int reschedule = 0;
212 	struct octeon_device *oct = (struct octeon_device *)pdev;
213 	struct octeon_device_priv *oct_priv =
214 		(struct octeon_device_priv *)oct->priv;
215 
216 	/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
217 	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
218 		if (!(oct->io_qmask.oq & (1ULL << q_no)))
219 			continue;
220 		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
221 							  MAX_PACKET_BUDGET);
222 	}
223 
224 	if (reschedule)
225 		tasklet_schedule(&oct_priv->droq_tasklet);
226 }
227 
228 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
229 {
230 	struct octeon_device_priv *oct_priv =
231 		(struct octeon_device_priv *)oct->priv;
232 	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
233 	int i;
234 
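	/* Poll each enabled output queue for packets still pending in
	 * hardware; if any are found, kick the DROQ tasklet to drain them
	 * and retry (up to 'retry' times, one tick apart).
	 */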
235 	do {
236 		pending_pkts = 0;
237 
238 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
239 			if (!(oct->io_qmask.oq & (1ULL << i)))
240 				continue;
241 			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
242 		}
243 		if (pkt_cnt > 0) {
244 			pending_pkts += pkt_cnt;
245 			tasklet_schedule(&oct_priv->droq_tasklet);
246 		}
247 		pkt_cnt = 0;
248 		schedule_timeout_uninterruptible(1);
249 
250 	} while (retry-- && pending_pkts);
251 
252 	return pkt_cnt;
253 }
254 
255 void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
256 					unsigned int bytes_compl)
257 {
258 	struct netdev_queue *netdev_queue = txq;
259 
260 	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
261 }
262 
263 void octeon_update_tx_completion_counters(void *buf, int reqtype,
264 					  unsigned int *pkts_compl,
265 					  unsigned int *bytes_compl)
266 {
267 	struct octnet_buf_free_info *finfo;
268 	struct sk_buff *skb = NULL;
269 	struct octeon_soft_command *sc;
270 
271 	switch (reqtype) {
272 	case REQTYPE_NORESP_NET:
273 	case REQTYPE_NORESP_NET_SG:
274 		finfo = buf;
275 		skb = finfo->skb;
276 		break;
277 
278 	case REQTYPE_RESP_NET_SG:
279 	case REQTYPE_RESP_NET:
280 		sc = buf;
281 		skb = sc->callback_arg;
282 		break;
283 
284 	default:
285 		return;
286 	}
287 
288 	(*pkts_compl)++;
289 	*bytes_compl += skb->len;
290 }
291 
292 void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
293 {
294 	struct octnet_buf_free_info *finfo;
295 	struct sk_buff *skb;
296 	struct octeon_soft_command *sc;
297 	struct netdev_queue *txq;
298 
299 	switch (reqtype) {
300 	case REQTYPE_NORESP_NET:
301 	case REQTYPE_NORESP_NET_SG:
302 		finfo = buf;
303 		skb = finfo->skb;
304 		break;
305 
306 	case REQTYPE_RESP_NET_SG:
307 	case REQTYPE_RESP_NET:
308 		sc = buf;
309 		skb = sc->callback_arg;
310 		break;
311 
312 	default:
313 		return;
314 	}
315 
316 	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
317 	netdev_tx_sent_queue(txq, skb->len);
318 }
319 
320 int octeon_console_debug_enabled(u32 console)
321 {
322 	return (console_bitmask >> (console)) & 0x1;
323 }
324 
325 /**
326  * \brief Forces all IO queues off on a given device
327  * @param oct Pointer to Octeon device
328  */
329 static void force_io_queues_off(struct octeon_device *oct)
330 {
331 	if ((oct->chip_id == OCTEON_CN66XX) ||
332 	    (oct->chip_id == OCTEON_CN68XX)) {
333 		/* Reset the Enable bits for Input Queues. */
334 		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
335 
336 		/* Reset the Enable bits for Output Queues. */
337 		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
338 	}
339 }
340 
341 /**
 * \brief Wait for all pending requests to complete
343  * @param oct Pointer to Octeon device
344  *
345  * Called during shutdown sequence
346  */
347 static int wait_for_pending_requests(struct octeon_device *oct)
348 {
349 	int i, pcount = 0;
350 
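	/* Poll the ordered soft-command response list for outstanding
	 * requests, sleeping HZ/10 between checks, for up to 100 iterations
	 * (roughly ten seconds).
	 */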
351 	for (i = 0; i < 100; i++) {
352 		pcount =
353 			atomic_read(&oct->response_list
354 				[OCTEON_ORDERED_SC_LIST].pending_req_count);
355 		if (pcount)
356 			schedule_timeout_uninterruptible(HZ / 10);
357 		else
358 			break;
359 	}
360 
361 	if (pcount)
362 		return 1;
363 
364 	return 0;
365 }
366 
367 /**
368  * \brief Cause device to go quiet so it can be safely removed/reset/etc
369  * @param oct Pointer to Octeon device
370  */
371 static inline void pcierror_quiesce_device(struct octeon_device *oct)
372 {
373 	int i;
374 
375 	/* Disable the input and output queues now. No more packets will
376 	 * arrive from Octeon, but we should wait for all packet processing
377 	 * to finish.
378 	 */
379 	force_io_queues_off(oct);
380 
381 	/* To allow for in-flight requests */
382 	schedule_timeout_uninterruptible(100);
383 
384 	if (wait_for_pending_requests(oct))
385 		dev_err(&oct->pci_dev->dev, "There were pending requests\n");
386 
387 	/* Force all requests waiting to be fetched by OCTEON to complete. */
388 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
389 		struct octeon_instr_queue *iq;
390 
391 		if (!(oct->io_qmask.iq & (1ULL << i)))
392 			continue;
393 		iq = oct->instr_queue[i];
394 
395 		if (atomic_read(&iq->instr_pending)) {
396 			spin_lock_bh(&iq->lock);
397 			iq->fill_cnt = 0;
398 			iq->octeon_read_index = iq->host_write_index;
399 			iq->stats.instr_processed +=
400 				atomic_read(&iq->instr_pending);
401 			lio_process_iq_request_list(oct, iq, 0);
402 			spin_unlock_bh(&iq->lock);
403 		}
404 	}
405 
406 	/* Force all pending ordered list requests to time out. */
407 	lio_process_ordered_list(oct, 1);
408 
409 	/* We do not need to wait for output queue packets to be processed. */
410 }
411 
412 /**
413  * \brief Cleanup PCI AER uncorrectable error status
414  * @param dev Pointer to PCI device
415  */
416 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
417 {
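	/* Assumes the AER registers live at the first PCIe extended
	 * capability offset (0x100) instead of locating them with
	 * pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR).
	 */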
418 	int pos = 0x100;
419 	u32 status, mask;
420 
421 	pr_info("%s :\n", __func__);
422 
423 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
424 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
425 	if (dev->error_state == pci_channel_io_normal)
426 		status &= ~mask;        /* Clear corresponding nonfatal bits */
427 	else
428 		status &= mask;         /* Clear corresponding fatal bits */
429 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
430 }
431 
432 /**
433  * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
435  */
436 static void stop_pci_io(struct octeon_device *oct)
437 {
438 	/* No more instructions will be forwarded. */
439 	atomic_set(&oct->status, OCT_DEV_IN_RESET);
440 
441 	pci_disable_device(oct->pci_dev);
442 
443 	/* Disable interrupts  */
444 	oct->fn_list.disable_interrupt(oct->chip);
445 
446 	pcierror_quiesce_device(oct);
447 
448 	/* Release the interrupt line */
449 	free_irq(oct->pci_dev->irq, oct);
450 
451 	if (oct->flags & LIO_FLAG_MSI_ENABLED)
452 		pci_disable_msi(oct->pci_dev);
453 
454 	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
455 		lio_get_state_string(&oct->status));
456 
457 	/* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
458 	/* making it a common function for all OCTEON models */
459 	cleanup_aer_uncorrect_error_status(oct->pci_dev);
460 }
461 
462 /**
463  * \brief called when PCI error is detected
464  * @param pdev Pointer to PCI device
465  * @param state The current pci connection state
466  *
467  * This function is called after a PCI bus error affecting
468  * this device has been detected.
469  */
470 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
471 						     pci_channel_state_t state)
472 {
473 	struct octeon_device *oct = pci_get_drvdata(pdev);
474 
475 	/* Non-correctable Non-fatal errors */
476 	if (state == pci_channel_io_normal) {
477 		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
478 		cleanup_aer_uncorrect_error_status(oct->pci_dev);
479 		return PCI_ERS_RESULT_CAN_RECOVER;
480 	}
481 
482 	/* Non-correctable Fatal errors */
483 	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
484 	stop_pci_io(oct);
485 
	/* Always return DISCONNECT. There is no support for recovery, only
487 	 * for a clean shutdown.
488 	 */
489 	return PCI_ERS_RESULT_DISCONNECT;
490 }
491 
492 /**
 * \brief Called when MMIO access to the device is re-enabled after a PCI bus error
494  * @param pdev Pointer to PCI device
495  */
496 static pci_ers_result_t liquidio_pcie_mmio_enabled(
497 				struct pci_dev *pdev __attribute__((unused)))
498 {
499 	/* We should never hit this since we never ask for a reset for a Fatal
500 	 * Error. We always return DISCONNECT in io_error above.
	 * But play it safe and return RECOVERED for now.
502 	 */
503 	return PCI_ERS_RESULT_RECOVERED;
504 }
505 
506 /**
507  * \brief called after the pci bus has been reset.
508  * @param pdev Pointer to PCI device
509  *
510  * Restart the card from scratch, as if from a cold-boot. Implementation
511  * resembles the first-half of the octeon_resume routine.
512  */
513 static pci_ers_result_t liquidio_pcie_slot_reset(
514 				struct pci_dev *pdev __attribute__((unused)))
515 {
516 	/* We should never hit this since we never ask for a reset for a Fatal
517 	 * Error. We always return DISCONNECT in io_error above.
	 * But play it safe and return RECOVERED for now.
519 	 */
520 	return PCI_ERS_RESULT_RECOVERED;
521 }
522 
523 /**
524  * \brief called when traffic can start flowing again.
525  * @param pdev Pointer to PCI device
526  *
527  * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation. Implementation resembles the
529  * second-half of the octeon_resume routine.
530  */
531 static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
532 {
533 	/* Nothing to be done here. */
534 }
535 
536 #ifdef CONFIG_PM
537 /**
538  * \brief called when suspending
539  * @param pdev Pointer to PCI device
540  * @param state state to suspend to
541  */
542 static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
543 			    pm_message_t state __attribute__((unused)))
544 {
545 	return 0;
546 }
547 
548 /**
549  * \brief called when resuming
550  * @param pdev Pointer to PCI device
551  */
552 static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
553 {
554 	return 0;
555 }
556 #endif
557 
558 /* For PCI-E Advanced Error Recovery (AER) Interface */
559 static const struct pci_error_handlers liquidio_err_handler = {
560 	.error_detected = liquidio_pcie_error_detected,
561 	.mmio_enabled	= liquidio_pcie_mmio_enabled,
562 	.slot_reset	= liquidio_pcie_slot_reset,
563 	.resume		= liquidio_pcie_resume,
564 };
565 
566 static const struct pci_device_id liquidio_pci_tbl[] = {
567 	{       /* 68xx */
568 		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
569 	},
570 	{       /* 66xx */
571 		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
572 	},
573 	{
574 		0, 0, 0, 0, 0, 0, 0
575 	}
576 };
577 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
578 
579 static struct pci_driver liquidio_pci_driver = {
580 	.name		= "LiquidIO",
581 	.id_table	= liquidio_pci_tbl,
582 	.probe		= liquidio_probe,
583 	.remove		= liquidio_remove,
584 	.err_handler	= &liquidio_err_handler,    /* For AER */
585 
586 #ifdef CONFIG_PM
587 	.suspend	= liquidio_suspend,
588 	.resume		= liquidio_resume,
589 #endif
590 
591 };
592 
593 /**
594  * \brief register PCI driver
595  */
596 static int liquidio_init_pci(void)
597 {
598 	return pci_register_driver(&liquidio_pci_driver);
599 }
600 
601 /**
602  * \brief unregister PCI driver
603  */
604 static void liquidio_deinit_pci(void)
605 {
606 	pci_unregister_driver(&liquidio_pci_driver);
607 }
608 
609 /**
610  * \brief check interface state
611  * @param lio per-network private data
612  * @param state_flag flag state to check
613  */
614 static inline int ifstate_check(struct lio *lio, int state_flag)
615 {
616 	return atomic_read(&lio->ifstate) & state_flag;
617 }
618 
619 /**
620  * \brief set interface state
621  * @param lio per-network private data
622  * @param state_flag flag state to set
623  */
624 static inline void ifstate_set(struct lio *lio, int state_flag)
625 {
626 	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
627 }
628 
629 /**
630  * \brief clear interface state
631  * @param lio per-network private data
632  * @param state_flag flag state to clear
633  */
634 static inline void ifstate_reset(struct lio *lio, int state_flag)
635 {
636 	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
637 }
638 
639 /**
640  * \brief Stop Tx queues
641  * @param netdev network device
642  */
643 static inline void txqs_stop(struct net_device *netdev)
644 {
645 	if (netif_is_multiqueue(netdev)) {
646 		int i;
647 
648 		for (i = 0; i < netdev->num_tx_queues; i++)
649 			netif_stop_subqueue(netdev, i);
650 	} else {
651 		netif_stop_queue(netdev);
652 	}
653 }
654 
655 /**
656  * \brief Start Tx queues
657  * @param netdev network device
658  */
659 static inline void txqs_start(struct net_device *netdev)
660 {
661 	if (netif_is_multiqueue(netdev)) {
662 		int i;
663 
664 		for (i = 0; i < netdev->num_tx_queues; i++)
665 			netif_start_subqueue(netdev, i);
666 	} else {
667 		netif_start_queue(netdev);
668 	}
669 }
670 
671 /**
672  * \brief Wake Tx queues
673  * @param netdev network device
674  */
675 static inline void txqs_wake(struct net_device *netdev)
676 {
677 	struct lio *lio = GET_LIO(netdev);
678 
679 	if (netif_is_multiqueue(netdev)) {
680 		int i;
681 
682 		for (i = 0; i < netdev->num_tx_queues; i++) {
683 			int qno = lio->linfo.txpciq[i %
684 				(lio->linfo.num_txpciq)].s.q_no;
685 
686 			if (__netif_subqueue_stopped(netdev, i)) {
687 				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
688 							  tx_restart, 1);
689 				netif_wake_subqueue(netdev, i);
690 			}
691 		}
692 	} else {
693 		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
694 					  tx_restart, 1);
695 		netif_wake_queue(netdev);
696 	}
697 }
698 
699 /**
700  * \brief Stop Tx queue
701  * @param netdev network device
702  */
703 static void stop_txq(struct net_device *netdev)
704 {
705 	txqs_stop(netdev);
706 }
707 
708 /**
709  * \brief Start Tx queue
710  * @param netdev network device
711  */
712 static void start_txq(struct net_device *netdev)
713 {
714 	struct lio *lio = GET_LIO(netdev);
715 
716 	if (lio->linfo.link.s.link_up) {
717 		txqs_start(netdev);
718 		return;
719 	}
720 }
721 
722 /**
723  * \brief Wake a queue
724  * @param netdev network device
725  * @param q which queue to wake
726  */
727 static inline void wake_q(struct net_device *netdev, int q)
728 {
729 	if (netif_is_multiqueue(netdev))
730 		netif_wake_subqueue(netdev, q);
731 	else
732 		netif_wake_queue(netdev);
733 }
734 
735 /**
736  * \brief Stop a queue
737  * @param netdev network device
738  * @param q which queue to stop
739  */
740 static inline void stop_q(struct net_device *netdev, int q)
741 {
742 	if (netif_is_multiqueue(netdev))
743 		netif_stop_subqueue(netdev, q);
744 	else
745 		netif_stop_queue(netdev);
746 }
747 
748 /**
749  * \brief Check Tx queue status, and take appropriate action
750  * @param lio per-network private data
751  * @returns 0 if full, number of queues woken up otherwise
752  */
753 static inline int check_txq_status(struct lio *lio)
754 {
755 	int ret_val = 0;
756 
757 	if (netif_is_multiqueue(lio->netdev)) {
758 		int numqs = lio->netdev->num_tx_queues;
759 		int q, iq = 0;
760 
761 		/* check each sub-queue state */
762 		for (q = 0; q < numqs; q++) {
763 			iq = lio->linfo.txpciq[q %
764 				(lio->linfo.num_txpciq)].s.q_no;
765 			if (octnet_iq_is_full(lio->oct_dev, iq))
766 				continue;
767 			if (__netif_subqueue_stopped(lio->netdev, q)) {
768 				wake_q(lio->netdev, q);
769 				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
770 							  tx_restart, 1);
771 				ret_val++;
772 			}
773 		}
774 	} else {
775 		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
776 			return 0;
777 		wake_q(lio->netdev, lio->txq);
778 		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
779 					  tx_restart, 1);
780 		ret_val = 1;
781 	}
782 	return ret_val;
783 }
784 
785 /**
 * Remove and return the node at the head of the list, or NULL if the
 * list is empty.
788  */
789 static inline struct list_head *list_delete_head(struct list_head *root)
790 {
791 	struct list_head *node;
792 
793 	if ((root->prev == root) && (root->next == root))
794 		node = NULL;
795 	else
796 		node = root->next;
797 
798 	if (node)
799 		list_del(node);
800 
801 	return node;
802 }
803 
804 /**
805  * \brief Delete gather lists
806  * @param lio per-network private data
807  */
808 static void delete_glists(struct lio *lio)
809 {
810 	struct octnic_gather *g;
811 	int i;
812 
813 	if (!lio->glist)
814 		return;
815 
816 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
817 		do {
818 			g = (struct octnic_gather *)
819 				list_delete_head(&lio->glist[i]);
820 			if (g) {
821 				if (g->sg) {
822 					dma_unmap_single(&lio->oct_dev->
823 							 pci_dev->dev,
824 							 g->sg_dma_ptr,
825 							 g->sg_size,
826 							 DMA_TO_DEVICE);
827 					kfree((void *)((unsigned long)g->sg -
828 						       g->adjust));
829 				}
830 				kfree(g);
831 			}
832 		} while (g);
833 	}
834 
835 	kfree((void *)lio->glist);
836 }
837 
838 /**
839  * \brief Setup gather lists
 * @param oct octeon device
 * @param lio per-network private data
 * @param num_iqs number of input (Tx) queues
841  */
842 static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
843 {
844 	int i, j;
845 	struct octnic_gather *g;
846 
847 	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
848 				  GFP_KERNEL);
849 	if (!lio->glist_lock)
850 		return 1;
851 
852 	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
853 			     GFP_KERNEL);
854 	if (!lio->glist) {
855 		kfree((void *)lio->glist_lock);
856 		return 1;
857 	}
858 
859 	for (i = 0; i < num_iqs; i++) {
860 		int numa_node = cpu_to_node(i % num_online_cpus());
861 
862 		spin_lock_init(&lio->glist_lock[i]);
863 
864 		INIT_LIST_HEAD(&lio->glist[i]);
865 
866 		for (j = 0; j < lio->tx_qsize; j++) {
867 			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
868 					 numa_node);
869 			if (!g)
870 				g = kzalloc(sizeof(*g), GFP_KERNEL);
871 			if (!g)
872 				break;
873 
874 			g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
875 				      OCT_SG_ENTRY_SIZE);
876 
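			/* Allocate 8 extra bytes so the scatter/gather area
			 * can be shifted to an 8-byte boundary below if
			 * kmalloc returns an unaligned buffer.
			 */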
877 			g->sg = kmalloc_node(g->sg_size + 8,
878 					     GFP_KERNEL, numa_node);
879 			if (!g->sg)
880 				g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
881 			if (!g->sg) {
882 				kfree(g);
883 				break;
884 			}
885 
886 			/* The gather component should be aligned on 64-bit
887 			 * boundary
888 			 */
889 			if (((unsigned long)g->sg) & 7) {
890 				g->adjust = 8 - (((unsigned long)g->sg) & 7);
891 				g->sg = (struct octeon_sg_entry *)
892 					((unsigned long)g->sg + g->adjust);
893 			}
894 			g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
895 						       g->sg, g->sg_size,
896 						       DMA_TO_DEVICE);
897 			if (dma_mapping_error(&oct->pci_dev->dev,
898 					      g->sg_dma_ptr)) {
899 				kfree((void *)((unsigned long)g->sg -
900 					       g->adjust));
901 				kfree(g);
902 				break;
903 			}
904 
905 			list_add_tail(&g->list, &lio->glist[i]);
906 		}
907 
908 		if (j != lio->tx_qsize) {
909 			delete_glists(lio);
910 			return 1;
911 		}
912 	}
913 
914 	return 0;
915 }
916 
917 /**
918  * \brief Print link information
919  * @param netdev network device
920  */
921 static void print_link_info(struct net_device *netdev)
922 {
923 	struct lio *lio = GET_LIO(netdev);
924 
925 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
926 		struct oct_link_info *linfo = &lio->linfo;
927 
928 		if (linfo->link.s.link_up) {
929 			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
930 				   linfo->link.s.speed,
931 				   (linfo->link.s.duplex) ? "Full" : "Half");
932 		} else {
933 			netif_info(lio, link, lio->netdev, "Link Down\n");
934 		}
935 	}
936 }
937 
938 /**
939  * \brief Update link status
940  * @param netdev network device
941  * @param ls link status structure
942  *
943  * Called on receipt of a link status response from the core application to
944  * update each interface's link status.
945  */
946 static inline void update_link_status(struct net_device *netdev,
947 				      union oct_link_status *ls)
948 {
949 	struct lio *lio = GET_LIO(netdev);
950 	int changed = (lio->linfo.link.u64 != ls->u64);
951 
952 	lio->linfo.link.u64 = ls->u64;
953 
954 	if ((lio->intf_open) && (changed)) {
955 		print_link_info(netdev);
956 		lio->link_changes++;
957 
958 		if (lio->linfo.link.s.link_up) {
959 			netif_carrier_on(netdev);
960 			/* start_txq(netdev); */
961 			txqs_wake(netdev);
962 		} else {
963 			netif_carrier_off(netdev);
964 			stop_txq(netdev);
965 		}
966 	}
967 }
968 
969 /* Runs in interrupt context. */
970 static void update_txq_status(struct octeon_device *oct, int iq_num)
971 {
972 	struct net_device *netdev;
973 	struct lio *lio;
974 	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
975 
976 	/*octeon_update_iq_read_idx(oct, iq);*/
977 
978 	netdev = oct->props[iq->ifidx].netdev;
979 
980 	/* This is needed because the first IQ does not have
981 	 * a netdev associated with it.
982 	 */
983 	if (!netdev)
984 		return;
985 
986 	lio = GET_LIO(netdev);
987 	if (netif_is_multiqueue(netdev)) {
988 		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
989 		    lio->linfo.link.s.link_up &&
990 		    (!octnet_iq_is_full(oct, iq_num))) {
991 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
992 						  tx_restart, 1);
993 			netif_wake_subqueue(netdev, iq->q_index);
994 		} else {
995 			if (!octnet_iq_is_full(oct, lio->txq)) {
996 				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
997 							  lio->txq,
998 							  tx_restart, 1);
999 				wake_q(netdev, lio->txq);
1000 			}
1001 		}
1002 	}
1003 }
1004 
1005 /**
 * \brief Droq packet processor scheduler
1007  * @param oct octeon device
1008  */
1009 static
1010 void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
1011 {
1012 	struct octeon_device_priv *oct_priv =
1013 		(struct octeon_device_priv *)oct->priv;
1014 	u64 oq_no;
1015 	struct octeon_droq *droq;
1016 
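	/* For each output queue that raised the interrupt: queues running in
	 * NAPI (poll) mode get their napi handler invoked and are noted in
	 * napi_mask; all others are drained by the DROQ tasklet.
	 */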
1017 	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
1018 		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
1019 		     oq_no++) {
1020 			if (!(oct->droq_intr & (1ULL << oq_no)))
1021 				continue;
1022 
1023 			droq = oct->droq[oq_no];
1024 
1025 			if (droq->ops.poll_mode) {
1026 				droq->ops.napi_fn(droq);
1027 				oct_priv->napi_mask |= (1 << oq_no);
1028 			} else {
1029 				tasklet_schedule(&oct_priv->droq_tasklet);
1030 			}
1031 		}
1032 	}
1033 }
1034 
1035 /**
1036  * \brief Interrupt handler for octeon
1037  * @param irq unused
1038  * @param dev octeon device
1039  */
1040 static
1041 irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
1042 {
1043 	struct octeon_device *oct = (struct octeon_device *)dev;
1044 	irqreturn_t ret;
1045 
1046 	/* Disable our interrupts for the duration of ISR */
1047 	oct->fn_list.disable_interrupt(oct->chip);
1048 
1049 	ret = oct->fn_list.process_interrupt_regs(oct);
1050 
1051 	if (ret == IRQ_HANDLED)
1052 		liquidio_schedule_droq_pkt_handlers(oct);
1053 
1054 	/* Re-enable our interrupts  */
1055 	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
1056 		oct->fn_list.enable_interrupt(oct->chip);
1057 
1058 	return ret;
1059 }
1060 
1061 /**
1062  * \brief Setup interrupt for octeon device
1063  * @param oct octeon device
1064  *
1065  *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
1066  */
1067 static int octeon_setup_interrupt(struct octeon_device *oct)
1068 {
1069 	int irqret, err;
1070 
1071 	err = pci_enable_msi(oct->pci_dev);
1072 	if (err)
1073 		dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
1074 			 err);
1075 	else
1076 		oct->flags |= LIO_FLAG_MSI_ENABLED;
1077 
1078 	irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
1079 			     IRQF_SHARED, "octeon", oct);
1080 	if (irqret) {
1081 		if (oct->flags & LIO_FLAG_MSI_ENABLED)
1082 			pci_disable_msi(oct->pci_dev);
1083 		dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
1084 			irqret);
1085 		return 1;
1086 	}
1087 
1088 	return 0;
1089 }
1090 
1091 /**
1092  * \brief PCI probe handler
1093  * @param pdev PCI device structure
1094  * @param ent unused
1095  */
1096 static int
1097 liquidio_probe(struct pci_dev *pdev,
1098 	       const struct pci_device_id *ent __attribute__((unused)))
1099 {
1100 	struct octeon_device *oct_dev = NULL;
1101 	struct handshake *hs;
1102 
1103 	oct_dev = octeon_allocate_device(pdev->device,
1104 					 sizeof(struct octeon_device_priv));
1105 	if (!oct_dev) {
1106 		dev_err(&pdev->dev, "Unable to allocate device\n");
1107 		return -ENOMEM;
1108 	}
1109 
1110 	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
1111 		 (u32)pdev->vendor, (u32)pdev->device);
1112 
1113 	/* Assign octeon_device for this device to the private data area. */
1114 	pci_set_drvdata(pdev, oct_dev);
1115 
1116 	/* set linux specific device pointer */
1117 	oct_dev->pci_dev = (void *)pdev;
1118 
1119 	hs = &handshake[oct_dev->octeon_id];
1120 	init_completion(&hs->init);
1121 	init_completion(&hs->started);
1122 	hs->pci_dev = pdev;
1123 
1124 	if (oct_dev->octeon_id == 0)
1125 		/* first LiquidIO NIC is detected */
1126 		complete(&first_stage);
1127 
1128 	if (octeon_device_init(oct_dev)) {
1129 		liquidio_remove(pdev);
1130 		return -ENOMEM;
1131 	}
1132 
1133 	oct_dev->rx_pause = 1;
1134 	oct_dev->tx_pause = 1;
1135 
1136 	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
1137 
1138 	return 0;
1139 }
1140 
1141 /**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
1145  */
1146 static void octeon_destroy_resources(struct octeon_device *oct)
1147 {
1148 	int i;
1149 	struct octeon_device_priv *oct_priv =
1150 		(struct octeon_device_priv *)oct->priv;
1151 
1152 	struct handshake *hs;
1153 
1154 	switch (atomic_read(&oct->status)) {
1155 	case OCT_DEV_RUNNING:
1156 	case OCT_DEV_CORE_OK:
1157 
1158 		/* No more instructions will be forwarded. */
1159 		atomic_set(&oct->status, OCT_DEV_IN_RESET);
1160 
1161 		oct->app_mode = CVM_DRV_INVALID_APP;
1162 		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
1163 			lio_get_state_string(&oct->status));
1164 
1165 		schedule_timeout_uninterruptible(HZ / 10);
1166 
1167 		/* fallthrough */
1168 	case OCT_DEV_HOST_OK:
1169 
1170 		/* fallthrough */
1171 	case OCT_DEV_CONSOLE_INIT_DONE:
1172 		/* Remove any consoles */
1173 		octeon_remove_consoles(oct);
1174 
1175 		/* fallthrough */
1176 	case OCT_DEV_IO_QUEUES_DONE:
1177 		if (wait_for_pending_requests(oct))
1178 			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1179 
1180 		if (lio_wait_for_instr_fetch(oct))
1181 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1182 
1183 		/* Disable the input and output queues now. No more packets will
1184 		 * arrive from Octeon, but we should wait for all packet
1185 		 * processing to finish.
1186 		 */
1187 		oct->fn_list.disable_io_queues(oct);
1188 
1189 		if (lio_wait_for_oq_pkts(oct))
1190 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
1191 
1192 		/* Disable interrupts  */
1193 		oct->fn_list.disable_interrupt(oct->chip);
1194 
1195 		/* Release the interrupt line */
1196 		free_irq(oct->pci_dev->irq, oct);
1197 
1198 		if (oct->flags & LIO_FLAG_MSI_ENABLED)
1199 			pci_disable_msi(oct->pci_dev);
1200 
1201 		/* fallthrough */
1202 	case OCT_DEV_IN_RESET:
1203 	case OCT_DEV_DROQ_INIT_DONE:
1204 		/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
1205 		mdelay(100);
1206 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1207 			if (!(oct->io_qmask.oq & (1ULL << i)))
1208 				continue;
1209 			octeon_delete_droq(oct, i);
1210 		}
1211 
1212 		/* Force any pending handshakes to complete */
1213 		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1214 			hs = &handshake[i];
1215 
1216 			if (hs->pci_dev) {
1217 				handshake[oct->octeon_id].init_ok = 0;
1218 				complete(&handshake[oct->octeon_id].init);
1219 				handshake[oct->octeon_id].started_ok = 0;
1220 				complete(&handshake[oct->octeon_id].started);
1221 			}
1222 		}
1223 
1224 		/* fallthrough */
1225 	case OCT_DEV_RESP_LIST_INIT_DONE:
1226 		octeon_delete_response_list(oct);
1227 
1228 		/* fallthrough */
1229 	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1230 		octeon_free_sc_buffer_pool(oct);
1231 
1232 		/* fallthrough */
1233 	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1234 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1235 			if (!(oct->io_qmask.iq & (1ULL << i)))
1236 				continue;
1237 			octeon_delete_instr_queue(oct, i);
1238 		}
1239 
1240 		/* fallthrough */
1241 	case OCT_DEV_DISPATCH_INIT_DONE:
1242 		octeon_delete_dispatch_list(oct);
1243 		cancel_delayed_work_sync(&oct->nic_poll_work.work);
1244 
1245 		/* fallthrough */
1246 	case OCT_DEV_PCI_MAP_DONE:
1247 
1248 		/* Soft reset the octeon device before exiting */
1249 		oct->fn_list.soft_reset(oct);
1250 
1251 		octeon_unmap_pci_barx(oct, 0);
1252 		octeon_unmap_pci_barx(oct, 1);
1253 
1254 		/* fallthrough */
1255 	case OCT_DEV_BEGIN_STATE:
1256 		/* Disable the device, releasing the PCI INT */
1257 		pci_disable_device(oct->pci_dev);
1258 
1259 		/* Nothing to be done here either */
1260 		break;
1261 	}                       /* end switch (oct->status) */
1262 
1263 	tasklet_kill(&oct_priv->droq_tasklet);
1264 }
1265 
1266 /**
1267  * \brief Send Rx control command
1268  * @param lio per-network private data
1269  * @param start_stop whether to start or stop
1270  */
1271 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1272 {
1273 	struct octnic_ctrl_pkt nctrl;
1274 
1275 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1276 
1277 	nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
1278 	nctrl.ncmd.s.param1 = start_stop;
1279 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1280 	nctrl.netpndev = (u64)lio->netdev;
1281 
1282 	if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
1283 		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1284 }
1285 
1286 /**
1287  * \brief Destroy NIC device interface
1288  * @param oct octeon device
1289  * @param ifidx which interface to destroy
1290  *
 * Cleanup associated with each interface for an Octeon device when the NIC
1292  * module is being unloaded or if initialization fails during load.
1293  */
1294 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1295 {
1296 	struct net_device *netdev = oct->props[ifidx].netdev;
1297 	struct lio *lio;
1298 	struct napi_struct *napi, *n;
1299 
1300 	if (!netdev) {
1301 		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1302 			__func__, ifidx);
1303 		return;
1304 	}
1305 
1306 	lio = GET_LIO(netdev);
1307 
1308 	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1309 
1310 	send_rx_ctrl_cmd(lio, 0);
1311 
1312 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1313 		txqs_stop(netdev);
1314 
1315 	if (oct->props[lio->ifidx].napi_enabled == 1) {
1316 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1317 			napi_disable(napi);
1318 
1319 		oct->props[lio->ifidx].napi_enabled = 0;
1320 	}
1321 
1322 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1323 		unregister_netdev(netdev);
1324 
1325 	delete_glists(lio);
1326 
1327 	free_netdev(netdev);
1328 
1329 	oct->props[ifidx].gmxport = -1;
1330 
1331 	oct->props[ifidx].netdev = NULL;
1332 }
1333 
1334 /**
1335  * \brief Stop complete NIC functionality
1336  * @param oct octeon device
1337  */
1338 static int liquidio_stop_nic_module(struct octeon_device *oct)
1339 {
1340 	int i, j;
1341 	struct lio *lio;
1342 
1343 	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1344 	if (!oct->ifcount) {
1345 		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1346 		return 1;
1347 	}
1348 
1349 	spin_lock_bh(&oct->cmd_resp_wqlock);
1350 	oct->cmd_resp_state = OCT_DRV_OFFLINE;
1351 	spin_unlock_bh(&oct->cmd_resp_wqlock);
1352 
1353 	for (i = 0; i < oct->ifcount; i++) {
1354 		lio = GET_LIO(oct->props[i].netdev);
1355 		for (j = 0; j < lio->linfo.num_rxpciq; j++)
1356 			octeon_unregister_droq_ops(oct,
1357 						   lio->linfo.rxpciq[j].s.q_no);
1358 	}
1359 
1360 	for (i = 0; i < oct->ifcount; i++)
1361 		liquidio_destroy_nic_device(oct, i);
1362 
1363 	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1364 	return 0;
1365 }
1366 
1367 /**
1368  * \brief Cleans up resources at unload time
1369  * @param pdev PCI device structure
1370  */
1371 static void liquidio_remove(struct pci_dev *pdev)
1372 {
1373 	struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1374 
1375 	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1376 
1377 	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1378 		liquidio_stop_nic_module(oct_dev);
1379 
1380 	/* Reset the octeon device and cleanup all memory allocated for
1381 	 * the octeon device by driver.
1382 	 */
1383 	octeon_destroy_resources(oct_dev);
1384 
1385 	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1386 
1387 	/* This octeon device has been removed. Update the global
1388 	 * data structure to reflect this. Free the device structure.
1389 	 */
1390 	octeon_free_device_mem(oct_dev);
1391 }
1392 
1393 /**
 * \brief Identify the Octeon device and map the BAR address space
1395  * @param oct octeon device
1396  */
1397 static int octeon_chip_specific_setup(struct octeon_device *oct)
1398 {
1399 	u32 dev_id, rev_id;
1400 	int ret = 1;
1401 	char *s;
1402 
1403 	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1404 	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1405 	oct->rev_id = rev_id & 0xff;
1406 
1407 	switch (dev_id) {
1408 	case OCTEON_CN68XX_PCIID:
1409 		oct->chip_id = OCTEON_CN68XX;
1410 		ret = lio_setup_cn68xx_octeon_device(oct);
1411 		s = "CN68XX";
1412 		break;
1413 
1414 	case OCTEON_CN66XX_PCIID:
1415 		oct->chip_id = OCTEON_CN66XX;
1416 		ret = lio_setup_cn66xx_octeon_device(oct);
1417 		s = "CN66XX";
1418 		break;
1419 
1420 	default:
1421 		s = "?";
1422 		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1423 			dev_id);
1424 	}
1425 
1426 	if (!ret)
1427 		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1428 			 OCTEON_MAJOR_REV(oct),
1429 			 OCTEON_MINOR_REV(oct),
1430 			 octeon_get_conf(oct)->card_name,
1431 			 LIQUIDIO_VERSION);
1432 
1433 	return ret;
1434 }
1435 
1436 /**
1437  * \brief PCI initialization for each Octeon device.
1438  * @param oct octeon device
1439  */
1440 static int octeon_pci_os_setup(struct octeon_device *oct)
1441 {
1442 	/* setup PCI stuff first */
1443 	if (pci_enable_device(oct->pci_dev)) {
1444 		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1445 		return 1;
1446 	}
1447 
1448 	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1449 		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1450 		return 1;
1451 	}
1452 
1453 	/* Enable PCI DMA Master. */
1454 	pci_set_master(oct->pci_dev);
1455 
1456 	return 0;
1457 }
1458 
1459 static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
1460 {
1461 	int q = 0;
1462 
1463 	if (netif_is_multiqueue(lio->netdev))
1464 		q = skb->queue_mapping % lio->linfo.num_txpciq;
1465 
1466 	return q;
1467 }
1468 
1469 /**
1470  * \brief Check Tx queue state for a given network buffer
1471  * @param lio per-network private data
1472  * @param skb network buffer
1473  */
1474 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
1475 {
1476 	int q = 0, iq = 0;
1477 
1478 	if (netif_is_multiqueue(lio->netdev)) {
1479 		q = skb->queue_mapping;
1480 		iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
1481 	} else {
1482 		iq = lio->txq;
1483 		q = iq;
1484 	}
1485 
1486 	if (octnet_iq_is_full(lio->oct_dev, iq))
1487 		return 0;
1488 
1489 	if (__netif_subqueue_stopped(lio->netdev, q)) {
1490 		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
1491 		wake_q(lio->netdev, q);
1492 	}
1493 	return 1;
1494 }
1495 
1496 /**
1497  * \brief Unmap and free network buffer
1498  * @param buf buffer
1499  */
1500 static void free_netbuf(void *buf)
1501 {
1502 	struct sk_buff *skb;
1503 	struct octnet_buf_free_info *finfo;
1504 	struct lio *lio;
1505 
1506 	finfo = (struct octnet_buf_free_info *)buf;
1507 	skb = finfo->skb;
1508 	lio = finfo->lio;
1509 
1510 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1511 			 DMA_TO_DEVICE);
1512 
1513 	check_txq_state(lio, skb);
1514 
1515 	tx_buffer_free(skb);
1516 }
1517 
1518 /**
1519  * \brief Unmap and free gather buffer
1520  * @param buf buffer
1521  */
1522 static void free_netsgbuf(void *buf)
1523 {
1524 	struct octnet_buf_free_info *finfo;
1525 	struct sk_buff *skb;
1526 	struct lio *lio;
1527 	struct octnic_gather *g;
1528 	int i, frags, iq;
1529 
1530 	finfo = (struct octnet_buf_free_info *)buf;
1531 	skb = finfo->skb;
1532 	lio = finfo->lio;
1533 	g = finfo->g;
1534 	frags = skb_shinfo(skb)->nr_frags;
1535 
1536 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1537 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1538 			 DMA_TO_DEVICE);
1539 
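	/* Each octeon_sg_entry holds four pointers; fragment i was mapped at
	 * sg[i >> 2].ptr[i & 3], with slot 0 of entry 0 holding the linear
	 * skb data unmapped above.
	 */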
1540 	i = 1;
1541 	while (frags--) {
1542 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1543 
1544 		pci_unmap_page((lio->oct_dev)->pci_dev,
1545 			       g->sg[(i >> 2)].ptr[(i & 3)],
1546 			       frag->size, DMA_TO_DEVICE);
1547 		i++;
1548 	}
1549 
1550 	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1551 				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1552 
1553 	iq = skb_iq(lio, skb);
1554 	spin_lock(&lio->glist_lock[iq]);
1555 	list_add_tail(&g->list, &lio->glist[iq]);
1556 	spin_unlock(&lio->glist_lock[iq]);
1557 
1558 	check_txq_state(lio, skb);     /* mq support: sub-queue state check */
1559 
1560 	tx_buffer_free(skb);
1561 }
1562 
1563 /**
1564  * \brief Unmap and free gather buffer with response
1565  * @param buf buffer
1566  */
1567 static void free_netsgbuf_with_resp(void *buf)
1568 {
1569 	struct octeon_soft_command *sc;
1570 	struct octnet_buf_free_info *finfo;
1571 	struct sk_buff *skb;
1572 	struct lio *lio;
1573 	struct octnic_gather *g;
1574 	int i, frags, iq;
1575 
1576 	sc = (struct octeon_soft_command *)buf;
1577 	skb = (struct sk_buff *)sc->callback_arg;
1578 	finfo = (struct octnet_buf_free_info *)&skb->cb;
1579 
1580 	lio = finfo->lio;
1581 	g = finfo->g;
1582 	frags = skb_shinfo(skb)->nr_frags;
1583 
1584 	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1585 			 g->sg[0].ptr[0], (skb->len - skb->data_len),
1586 			 DMA_TO_DEVICE);
1587 
1588 	i = 1;
1589 	while (frags--) {
1590 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1591 
1592 		pci_unmap_page((lio->oct_dev)->pci_dev,
1593 			       g->sg[(i >> 2)].ptr[(i & 3)],
1594 			       frag->size, DMA_TO_DEVICE);
1595 		i++;
1596 	}
1597 
1598 	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1599 				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1600 
1601 	iq = skb_iq(lio, skb);
1602 
1603 	spin_lock(&lio->glist_lock[iq]);
1604 	list_add_tail(&g->list, &lio->glist[iq]);
1605 	spin_unlock(&lio->glist_lock[iq]);
1606 
1607 	/* Don't free the skb yet */
1608 
1609 	check_txq_state(lio, skb);
1610 }
1611 
1612 /**
1613  * \brief Adjust ptp frequency
1614  * @param ptp PTP clock info
1615  * @param ppb how much to adjust by, in parts-per-billion
1616  */
1617 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1618 {
1619 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1620 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1621 	u64 comp, delta;
1622 	unsigned long flags;
1623 	bool neg_adj = false;
1624 
1625 	if (ppb < 0) {
1626 		neg_adj = true;
1627 		ppb = -ppb;
1628 	}
1629 
1630 	/* The hardware adds the clock compensation value to the
1631 	 * PTP clock on every coprocessor clock cycle, so we
1632 	 * compute the delta in terms of coprocessor clocks.
1633 	 */
1634 	delta = (u64)ppb << 32;
1635 	do_div(delta, oct->coproc_clock_rate);
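	/* CN6XXX_MIO_PTP_CLOCK_COMP appears to be a 32.32 fixed-point
	 * nanoseconds-per-cycle value, hence the shift by 32 before dividing
	 * by the coprocessor clock rate.
	 */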
1636 
1637 	spin_lock_irqsave(&lio->ptp_lock, flags);
1638 	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1639 	if (neg_adj)
1640 		comp -= delta;
1641 	else
1642 		comp += delta;
1643 	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1644 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1645 
1646 	return 0;
1647 }
1648 
1649 /**
1650  * \brief Adjust ptp time
1651  * @param ptp PTP clock info
1652  * @param delta how much to adjust by, in nanosecs
1653  */
1654 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1655 {
1656 	unsigned long flags;
1657 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1658 
1659 	spin_lock_irqsave(&lio->ptp_lock, flags);
1660 	lio->ptp_adjust += delta;
1661 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1662 
1663 	return 0;
1664 }
1665 
1666 /**
1667  * \brief Get hardware clock time, including any adjustment
1668  * @param ptp PTP clock info
1669  * @param ts timespec
1670  */
1671 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1672 				struct timespec64 *ts)
1673 {
1674 	u64 ns;
1675 	unsigned long flags;
1676 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1677 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1678 
1679 	spin_lock_irqsave(&lio->ptp_lock, flags);
1680 	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1681 	ns += lio->ptp_adjust;
1682 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1683 
1684 	*ts = ns_to_timespec64(ns);
1685 
1686 	return 0;
1687 }
1688 
1689 /**
1690  * \brief Set hardware clock time. Reset adjustment
1691  * @param ptp PTP clock info
1692  * @param ts timespec
1693  */
1694 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1695 				const struct timespec64 *ts)
1696 {
1697 	u64 ns;
1698 	unsigned long flags;
1699 	struct lio *lio = container_of(ptp, struct lio, ptp_info);
1700 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1701 
	ns = timespec64_to_ns(ts);
1703 
1704 	spin_lock_irqsave(&lio->ptp_lock, flags);
1705 	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1706 	lio->ptp_adjust = 0;
1707 	spin_unlock_irqrestore(&lio->ptp_lock, flags);
1708 
1709 	return 0;
1710 }
1711 
1712 /**
 * \brief PTP clock enable request callback (not supported; returns -EOPNOTSUPP)
1714  * @param ptp PTP clock info
1715  * @param rq request
1716  * @param on is it on
1717  */
1718 static int
1719 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1720 		    struct ptp_clock_request *rq __attribute__((unused)),
1721 		    int on __attribute__((unused)))
1722 {
1723 	return -EOPNOTSUPP;
1724 }
1725 
1726 /**
1727  * \brief Open PTP clock source
1728  * @param netdev network device
1729  */
1730 static void oct_ptp_open(struct net_device *netdev)
1731 {
1732 	struct lio *lio = GET_LIO(netdev);
1733 	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1734 
1735 	spin_lock_init(&lio->ptp_lock);
1736 
1737 	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1738 	lio->ptp_info.owner = THIS_MODULE;
1739 	lio->ptp_info.max_adj = 250000000;
1740 	lio->ptp_info.n_alarm = 0;
1741 	lio->ptp_info.n_ext_ts = 0;
1742 	lio->ptp_info.n_per_out = 0;
1743 	lio->ptp_info.pps = 0;
1744 	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1745 	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1746 	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1747 	lio->ptp_info.settime64 = liquidio_ptp_settime;
1748 	lio->ptp_info.enable = liquidio_ptp_enable;
1749 
1750 	lio->ptp_adjust = 0;
1751 
1752 	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1753 					     &oct->pci_dev->dev);
1754 
1755 	if (IS_ERR(lio->ptp_clock))
1756 		lio->ptp_clock = NULL;
1757 }
1758 
1759 /**
1760  * \brief Init PTP clock
1761  * @param oct octeon device
1762  */
1763 static void liquidio_ptp_init(struct octeon_device *oct)
1764 {
1765 	u64 clock_comp, cfg;
1766 
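	/* Program the compensation register with the number of nanoseconds
	 * that elapse per coprocessor clock cycle, assuming the same 32.32
	 * fixed-point format used in liquidio_ptp_adjfreq() above.
	 */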
1767 	clock_comp = (u64)NSEC_PER_SEC << 32;
1768 	do_div(clock_comp, oct->coproc_clock_rate);
1769 	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1770 
1771 	/* Enable */
1772 	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1773 	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1774 }
1775 
1776 /**
1777  * \brief Load firmware to device
1778  * @param oct octeon device
1779  *
1780  * Maps device to firmware filename, requests firmware, and downloads it
1781  */
1782 static int load_firmware(struct octeon_device *oct)
1783 {
1784 	int ret = 0;
1785 	const struct firmware *fw;
1786 	char fw_name[LIO_MAX_FW_FILENAME_LEN];
1787 	char *tmp_fw_type;
1788 
1789 	if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
1790 		    sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
1791 		dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
1792 		return ret;
1793 	}
1794 
1795 	if (fw_type[0] == '\0')
1796 		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1797 	else
1798 		tmp_fw_type = fw_type;
1799 
1800 	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1801 		octeon_get_conf(oct)->card_name, tmp_fw_type,
1802 		LIO_FW_NAME_SUFFIX);
1803 
1804 	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1805 	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s\n",
1807 			fw_name);
1808 		release_firmware(fw);
1809 		return ret;
1810 	}
1811 
1812 	ret = octeon_download_firmware(oct, fw->data, fw->size);
1813 
1814 	release_firmware(fw);
1815 
1816 	return ret;
1817 }
1818 
1819 /**
1820  * \brief Setup output queue
1821  * @param oct octeon device
1822  * @param q_no which queue
1823  * @param num_descs how many descriptors
1824  * @param desc_size size of each descriptor
1825  * @param app_ctx application context
1826  */
1827 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1828 			     int desc_size, void *app_ctx)
1829 {
1830 	int ret_val = 0;
1831 
1832 	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1833 	/* droq creation and local register settings. */
1834 	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
1835 	if (ret_val < 0)
1836 		return ret_val;
1837 
1838 	if (ret_val == 1) {
1839 		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
1840 		return 0;
1841 	}
1842 	/* tasklet creation for the droq */
1843 
1844 	/* Enable the droq queues */
1845 	octeon_set_droq_pkt_op(oct, q_no, 1);
1846 
1847 	/* Send Credit for Octeon Output queues. Credits are always
1848 	 * sent after the output queue is enabled.
1849 	 */
1850 	writel(oct->droq[q_no]->max_count,
1851 	       oct->droq[q_no]->pkts_credit_reg);
1852 
1853 	return ret_val;
1854 }
1855 
1856 /**
1857  * \brief Callback for getting interface configuration
 * @param oct octeon device
 * @param status status of request
1859  * @param buf pointer to resp structure
1860  */
1861 static void if_cfg_callback(struct octeon_device *oct,
1862 			    u32 status __attribute__((unused)),
1863 			    void *buf)
1864 {
1865 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1866 	struct liquidio_if_cfg_resp *resp;
1867 	struct liquidio_if_cfg_context *ctx;
1868 
1869 	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1870 	ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
1871 
1872 	oct = lio_get_device(ctx->octeon_id);
1873 	if (resp->status)
1874 		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
1875 			CVM_CAST64(resp->status));
1876 	WRITE_ONCE(ctx->cond, 1);
1877 
1878 	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1879 		 resp->cfg_info.liquidio_firmware_version);
1880 
1881 	/* This barrier is required to be sure that the response has been
1882 	 * written fully before waking up the handler
1883 	 */
1884 	wmb();
1885 
1886 	wake_up_interruptible(&ctx->wc);
1887 }
1888 
1889 /**
1890  * \brief Select queue based on hash
1891  * @param dev Net device
1892  * @param skb sk_buff structure
1893  * @returns selected queue number
1894  */
1895 static u16 select_q(struct net_device *dev, struct sk_buff *skb,
1896 		    void *accel_priv __attribute__((unused)),
1897 		    select_queue_fallback_t fallback __attribute__((unused)))
1898 {
1899 	u32 qindex = 0;
1900 	struct lio *lio;
1901 
1902 	lio = GET_LIO(dev);
1903 	qindex = skb_tx_hash(dev, skb);
1904 
1905 	return (u16)(qindex % (lio->linfo.num_txpciq));
1906 }
1907 
/** Routine to push packets arriving on the Octeon interface up to the
 * network layer.
1909  * @param oct_id   - octeon device id.
1910  * @param skbuff   - skbuff struct to be passed to network layer.
1911  * @param len      - size of total data received.
1912  * @param rh       - Control header associated with the packet
1913  * @param param    - additional control data with the packet
1914  * @param arg	   - farg registered in droq_ops
1915  */
1916 static void
1917 liquidio_push_packet(u32 octeon_id __attribute__((unused)),
1918 		     void *skbuff,
1919 		     u32 len,
1920 		     union octeon_rh *rh,
1921 		     void *param,
1922 		     void *arg)
1923 {
1924 	struct napi_struct *napi = param;
1925 	struct sk_buff *skb = (struct sk_buff *)skbuff;
1926 	struct skb_shared_hwtstamps *shhwtstamps;
1927 	u64 ns;
1928 	u16 vtag = 0;
1929 	struct net_device *netdev = (struct net_device *)arg;
1930 	struct octeon_droq *droq = container_of(param, struct octeon_droq,
1931 						napi);
1932 	if (netdev) {
1933 		int packet_was_received;
1934 		struct lio *lio = GET_LIO(netdev);
1935 		struct octeon_device *oct = lio->oct_dev;
1936 
1937 		/* Do not proceed if the interface is not in RUNNING state. */
1938 		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
1939 			recv_buffer_free(skb);
1940 			droq->stats.rx_dropped++;
1941 			return;
1942 		}
1943 
1944 		skb->dev = netdev;
1945 
1946 		skb_record_rx_queue(skb, droq->q_no);
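		/* Large packets keep only MIN_SKB_SIZE bytes in the linear
		 * area and attach the rest of the receive page as a frag;
		 * small packets are copied entirely into the linear area.
		 */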
1947 		if (likely(len > MIN_SKB_SIZE)) {
1948 			struct octeon_skb_page_info *pg_info;
1949 			unsigned char *va;
1950 
1951 			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
1952 			if (pg_info->page) {
1953 				/* For Paged allocation use the frags */
1954 				va = page_address(pg_info->page) +
1955 					pg_info->page_offset;
1956 				memcpy(skb->data, va, MIN_SKB_SIZE);
1957 				skb_put(skb, MIN_SKB_SIZE);
1958 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1959 						pg_info->page,
1960 						pg_info->page_offset +
1961 						MIN_SKB_SIZE,
1962 						len - MIN_SKB_SIZE,
1963 						LIO_RXBUFFER_SZ);
1964 			}
1965 		} else {
1966 			struct octeon_skb_page_info *pg_info =
1967 				((struct octeon_skb_page_info *)(skb->cb));
1968 			skb_copy_to_linear_data(skb, page_address(pg_info->page)
1969 						+ pg_info->page_offset, len);
1970 			skb_put(skb, len);
1971 			put_page(pg_info->page);
1972 		}
1973 
1974 		if (((oct->chip_id == OCTEON_CN66XX) ||
1975 		     (oct->chip_id == OCTEON_CN68XX)) &&
1976 		    ptp_enable) {
1977 			if (rh->r_dh.has_hwtstamp) {
				/* The hardware prepends the timestamp to the
				 * beginning of the packet.
				 */
1981 				if (ifstate_check
1982 				    (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
1983 					/* Nanoseconds are in the first 64-bits
1984 					 * of the packet.
1985 					 */
1986 					memcpy(&ns, (skb->data), sizeof(ns));
1987 					shhwtstamps = skb_hwtstamps(skb);
1988 					shhwtstamps->hwtstamp =
1989 						ns_to_ktime(ns +
1990 							    lio->ptp_adjust);
1991 				}
1992 				skb_pull(skb, sizeof(ns));
1993 			}
1994 		}
1995 
1996 		skb->protocol = eth_type_trans(skb, skb->dev);
1997 		if ((netdev->features & NETIF_F_RXCSUM) &&
1998 		    (((rh->r_dh.encap_on) &&
1999 		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
2000 		     (!(rh->r_dh.encap_on) &&
2001 		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
2002 			/* checksum has already been verified */
2003 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2004 		else
2005 			skb->ip_summed = CHECKSUM_NONE;
2006 
		/* Set the encapsulation fields based on the status reported
		 * by the firmware.
		 */
2010 		if (rh->r_dh.encap_on) {
2011 			skb->encapsulation = 1;
2012 			skb->csum_level = 1;
2013 			droq->stats.rx_vxlan++;
2014 		}
2015 
2016 		/* inbound VLAN tag */
2017 		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2018 		    (rh->r_dh.vlan != 0)) {
2019 			u16 vid = rh->r_dh.vlan;
2020 			u16 priority = rh->r_dh.priority;
2021 
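			/* Rebuild the 802.1Q TCI: PCP in bits 15:13, VID in
			 * bits 11:0, as expected by __vlan_hwaccel_put_tag().
			 */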
2022 			vtag = priority << 13 | vid;
2023 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
2024 		}
2025 
2026 		packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
2027 
2028 		if (packet_was_received) {
2029 			droq->stats.rx_bytes_received += len;
2030 			droq->stats.rx_pkts_received++;
2031 			netdev->last_rx = jiffies;
2032 		} else {
2033 			droq->stats.rx_dropped++;
2034 			netif_info(lio, rx_err, lio->netdev,
2035 				   "droq:%d  error rx_dropped:%llu\n",
2036 				   droq->q_no, droq->stats.rx_dropped);
2037 		}
2038 
2039 	} else {
2040 		recv_buffer_free(skb);
2041 	}
2042 }
2043 
2044 /**
2045  * \brief wrapper for calling napi_schedule
2046  * @param param parameters to pass to napi_schedule
2047  *
2048  * Used when scheduling on different CPUs
2049  */
2050 static void napi_schedule_wrapper(void *param)
2051 {
2052 	struct napi_struct *napi = param;
2053 
2054 	napi_schedule(napi);
2055 }
2056 
2057 /**
2058  * \brief callback when receive interrupt occurs and we are in NAPI mode
2059  * @param arg pointer to octeon output queue
2060  */
2061 static void liquidio_napi_drv_callback(void *arg)
2062 {
2063 	struct octeon_droq *droq = arg;
2064 	int this_cpu = smp_processor_id();
2065 
2066 	if (droq->cpu_id == this_cpu) {
2067 		napi_schedule(&droq->napi);
2068 	} else {
2069 		struct call_single_data *csd = &droq->csd;
2070 
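		/* Each DROQ is pinned to a CPU at setup time.  If this
		 * interrupt landed on a different CPU, bounce the
		 * napi_schedule() to the DROQ's CPU with an async IPI so
		 * polling stays on its designated core.
		 */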
2071 		csd->func = napi_schedule_wrapper;
2072 		csd->info = &droq->napi;
2073 		csd->flags = 0;
2074 
2075 		smp_call_function_single_async(droq->cpu_id, csd);
2076 	}
2077 }
2078 
2079 /**
2080  * \brief Entry point for NAPI polling
2081  * @param napi NAPI structure
2082  * @param budget maximum number of items to process
2083  */
2084 static int liquidio_napi_poll(struct napi_struct *napi, int budget)
2085 {
2086 	struct octeon_droq *droq;
2087 	int work_done;
2088 	int tx_done = 0, iq_no;
2089 	struct octeon_instr_queue *iq;
2090 	struct octeon_device *oct;
2091 
2092 	droq = container_of(napi, struct octeon_droq, napi);
2093 	oct = droq->oct_dev;
2094 	iq_no = droq->q_no;
2095 	/* Handle Droq descriptors */
2096 	work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
2097 						 POLL_EVENT_PROCESS_PKTS,
2098 						 budget);
2099 
2100 	/* Flush the instruction queue */
2101 	iq = oct->instr_queue[iq_no];
2102 	if (iq) {
		/* Process iq buffers within the budget limit */
		tx_done = octeon_flush_iq(oct, iq, 1, budget);
		/* Update the iq read index rather than waiting for the next
		 * interrupt. Do not complete NAPI if tx_done is false.
		 */
2108 		update_txq_status(oct, iq_no);
2109 		/*tx_done = (iq->flush_index == iq->octeon_read_index);*/
2110 	} else {
2111 		dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
2112 			__func__, iq_no);
2113 	}
2114 
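	/* Only complete NAPI and re-enable interrupts when the RX work fit
	 * within the budget and the TX completions were fully flushed;
	 * otherwise return the full budget to stay in polled mode.
	 */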
2115 	if ((work_done < budget) && (tx_done)) {
2116 		napi_complete(napi);
2117 		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
2118 					     POLL_EVENT_ENABLE_INTR, 0);
2119 		return 0;
2120 	}
2121 
2122 	return (!tx_done) ? (budget) : (work_done);
2123 }
2124 
2125 /**
2126  * \brief Setup input and output queues
2127  * @param octeon_dev octeon device
2128  * @param ifidx  Interface Index
2129  *
2130  * Note: Queues are with respect to the octeon device. Thus
2131  * an input queue is for egress packets, and output queues
2132  * are for ingress packets.
2133  */
2134 static inline int setup_io_queues(struct octeon_device *octeon_dev,
2135 				  int ifidx)
2136 {
2137 	struct octeon_droq_ops droq_ops;
2138 	struct net_device *netdev;
2139 	static int cpu_id;
2140 	static int cpu_id_modulus;
2141 	struct octeon_droq *droq;
2142 	struct napi_struct *napi;
2143 	int q, q_no, retval = 0;
2144 	struct lio *lio;
2145 	int num_tx_descs;
2146 
2147 	netdev = octeon_dev->props[ifidx].netdev;
2148 
2149 	lio = GET_LIO(netdev);
2150 
2151 	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
2152 
2153 	droq_ops.fptr = liquidio_push_packet;
2154 	droq_ops.farg = (void *)netdev;
2155 
2156 	droq_ops.poll_mode = 1;
2157 	droq_ops.napi_fn = liquidio_napi_drv_callback;
2158 	cpu_id = 0;
2159 	cpu_id_modulus = num_present_cpus();
2160 
2161 	/* set up DROQs. */
2162 	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
2163 		q_no = lio->linfo.rxpciq[q].s.q_no;
2164 		dev_dbg(&octeon_dev->pci_dev->dev,
2165 			"setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
2166 			q, q_no);
2167 		retval = octeon_setup_droq(octeon_dev, q_no,
2168 					   CFG_GET_NUM_RX_DESCS_NIC_IF
2169 						   (octeon_get_conf(octeon_dev),
2170 						   lio->ifidx),
2171 					   CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
2172 						   (octeon_get_conf(octeon_dev),
2173 						   lio->ifidx), NULL);
2174 		if (retval) {
2175 			dev_err(&octeon_dev->pci_dev->dev,
2176 				" %s : Runtime DROQ(RxQ) creation failed.\n",
2177 				__func__);
2178 			return 1;
2179 		}
2180 
2181 		droq = octeon_dev->droq[q_no];
2182 		napi = &droq->napi;
2183 		dev_dbg(&octeon_dev->pci_dev->dev,
2184 			"netif_napi_add netdev:%llx oct:%llx\n",
2185 			(u64)netdev,
2186 			(u64)octeon_dev);
2187 		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
2188 
2189 		/* designate a CPU for this droq */
2190 		droq->cpu_id = cpu_id;
2191 		cpu_id++;
2192 		if (cpu_id >= cpu_id_modulus)
2193 			cpu_id = 0;
2194 
2195 		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
2196 	}
2197 
2198 	/* set up IQs. */
2199 	for (q = 0; q < lio->linfo.num_txpciq; q++) {
2200 		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
2201 							   (octeon_dev),
2202 							   lio->ifidx);
2203 		retval = octeon_setup_iq(octeon_dev, ifidx, q,
2204 					 lio->linfo.txpciq[q], num_tx_descs,
2205 					 netdev_get_tx_queue(netdev, q));
2206 		if (retval) {
2207 			dev_err(&octeon_dev->pci_dev->dev,
2208 				" %s : Runtime IQ(TxQ) creation failed.\n",
2209 				__func__);
2210 			return 1;
2211 		}
2212 	}
2213 
2214 	return 0;
2215 }
2216 
2217 /**
2218  * \brief Poll routine for checking transmit queue status
2219  * @param work work_struct data structure
2220  */
2221 static void octnet_poll_check_txq_status(struct work_struct *work)
2222 {
2223 	struct cavium_wk *wk = (struct cavium_wk *)work;
2224 	struct lio *lio = (struct lio *)wk->ctxptr;
2225 
2226 	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
2227 		return;
2228 
2229 	check_txq_status(lio);
2230 	queue_delayed_work(lio->txq_status_wq.wq,
2231 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2232 }
2233 
2234 /**
2235  * \brief Sets up the txq poll check
2236  * @param netdev network device
2237  */
2238 static inline void setup_tx_poll_fn(struct net_device *netdev)
2239 {
2240 	struct lio *lio = GET_LIO(netdev);
2241 	struct octeon_device *oct = lio->oct_dev;
2242 
2243 	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2244 						WQ_MEM_RECLAIM, 0);
2245 	if (!lio->txq_status_wq.wq) {
2246 		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
2247 		return;
2248 	}
2249 	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2250 			  octnet_poll_check_txq_status);
2251 	lio->txq_status_wq.wk.ctxptr = lio;
2252 	queue_delayed_work(lio->txq_status_wq.wq,
2253 			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2254 }
2255 
2256 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2257 {
2258 	struct lio *lio = GET_LIO(netdev);
2259 
2260 	cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2261 	destroy_workqueue(lio->txq_status_wq.wq);
2262 }
2263 
2264 /**
2265  * \brief Net device open for LiquidIO
2266  * @param netdev network device
2267  */
2268 static int liquidio_open(struct net_device *netdev)
2269 {
2270 	struct lio *lio = GET_LIO(netdev);
2271 	struct octeon_device *oct = lio->oct_dev;
2272 	struct napi_struct *napi, *n;
2273 
2274 	if (oct->props[lio->ifidx].napi_enabled == 0) {
2275 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2276 			napi_enable(napi);
2277 
2278 		oct->props[lio->ifidx].napi_enabled = 1;
2279 	}
2280 
2281 	oct_ptp_open(netdev);
2282 
2283 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
2284 
2285 	setup_tx_poll_fn(netdev);
2286 
2287 	start_txq(netdev);
2288 
2289 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2290 
2291 	/* tell Octeon to start forwarding packets to host */
2292 	send_rx_ctrl_cmd(lio, 1);
2293 
2294 	/* Ready for link status updates */
2295 	lio->intf_open = 1;
2296 
2297 	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2298 		 netdev->name);
2299 
2300 	return 0;
2301 }
2302 
2303 /**
2304  * \brief Net device stop for LiquidIO
2305  * @param netdev network device
2306  */
2307 static int liquidio_stop(struct net_device *netdev)
2308 {
2309 	struct lio *lio = GET_LIO(netdev);
2310 	struct octeon_device *oct = lio->oct_dev;
2311 
2312 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2313 
2314 	netif_tx_disable(netdev);
2315 
2316 	/* Inform that netif carrier is down */
2317 	netif_carrier_off(netdev);
2318 	lio->intf_open = 0;
2319 	lio->linfo.link.s.link_up = 0;
2320 	lio->link_changes++;
2321 
2322 	/* Pause for a moment and wait for Octeon to flush out (to the wire) any
2323 	 * egress packets that are in-flight.
2324 	 */
2325 	set_current_state(TASK_INTERRUPTIBLE);
2326 	schedule_timeout(msecs_to_jiffies(100));
2327 
2328 	/* Now it should be safe to tell Octeon that nic interface is down. */
2329 	send_rx_ctrl_cmd(lio, 0);
2330 
2331 	cleanup_tx_poll_fn(netdev);
2332 
2333 	if (lio->ptp_clock) {
2334 		ptp_clock_unregister(lio->ptp_clock);
2335 		lio->ptp_clock = NULL;
2336 	}
2337 
2338 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2339 
2340 	return 0;
2341 }
2342 
2343 void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
2344 {
2345 	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
2346 	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
2347 	struct lio *lio = GET_LIO(netdev);
2348 	struct octeon_device *oct = lio->oct_dev;
2349 	u8 *mac;
2350 
2351 	switch (nctrl->ncmd.s.cmd) {
2352 	case OCTNET_CMD_CHANGE_DEVFLAGS:
2353 	case OCTNET_CMD_SET_MULTI_LIST:
2354 		break;
2355 
2356 	case OCTNET_CMD_CHANGE_MACADDR:
2357 		mac = ((u8 *)&nctrl->udd[0]) + 2;
2358 		netif_info(lio, probe, lio->netdev,
2359 			   "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
2360 			   "MACAddr changed to", mac[0], mac[1],
2361 			   mac[2], mac[3], mac[4], mac[5]);
2362 		break;
2363 
2364 	case OCTNET_CMD_CHANGE_MTU:
2365 		/* If command is successful, change the MTU. */
2366 		netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
2367 			   netdev->mtu, nctrl->ncmd.s.param1);
2368 		dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
2369 			 netdev->name, netdev->mtu,
2370 			 nctrl->ncmd.s.param1);
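		/* The MTU is being updated from a control-command completion
		 * rather than from ndo_change_mtu, so take the rtnl lock and
		 * raise NETDEV_CHANGEMTU explicitly for any listeners.
		 */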
2371 		rtnl_lock();
2372 		netdev->mtu = nctrl->ncmd.s.param1;
2373 		call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
2374 		rtnl_unlock();
2375 		break;
2376 
2377 	case OCTNET_CMD_GPIO_ACCESS:
2378 		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
2379 
2380 		break;
2381 
2382 	case OCTNET_CMD_LRO_ENABLE:
2383 		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
2384 		break;
2385 
2386 	case OCTNET_CMD_LRO_DISABLE:
2387 		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
2388 			 netdev->name);
2389 		break;
2390 
2391 	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
2393 		break;
2394 
2395 	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
2398 		break;
2399 
2400 	case OCTNET_CMD_ENABLE_VLAN_FILTER:
2401 		dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
2402 			 netdev->name);
2403 		break;
2404 
2405 	case OCTNET_CMD_ADD_VLAN_FILTER:
2406 		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
2407 			 netdev->name, nctrl->ncmd.s.param1);
2408 		break;
2409 
2410 	case OCTNET_CMD_DEL_VLAN_FILTER:
2411 		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
2412 			 netdev->name, nctrl->ncmd.s.param1);
2413 		break;
2414 
2415 	case OCTNET_CMD_SET_SETTINGS:
2416 		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
2417 			 netdev->name);
2418 
2419 		break;
2420 		/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
2421 		 * Command passed by NIC driver
2422 		 */
2423 	case OCTNET_CMD_TNL_RX_CSUM_CTL:
2424 		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
2425 			netif_info(lio, probe, lio->netdev,
2426 				   "%s RX Checksum Offload Enabled\n",
2427 				   netdev->name);
2428 		} else if (nctrl->ncmd.s.param1 ==
2429 			   OCTNET_CMD_RXCSUM_DISABLE) {
2430 			netif_info(lio, probe, lio->netdev,
2431 				   "%s RX Checksum Offload Disabled\n",
2432 				   netdev->name);
2433 		}
2434 		break;
2435 
2436 		/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
2437 		 * Command passed by NIC driver
2438 		 */
2439 	case OCTNET_CMD_TNL_TX_CSUM_CTL:
2440 		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
2441 			netif_info(lio, probe, lio->netdev,
2442 				   "%s TX Checksum Offload Enabled\n",
2443 				   netdev->name);
2444 		} else if (nctrl->ncmd.s.param1 ==
2445 			   OCTNET_CMD_TXCSUM_DISABLE) {
2446 			netif_info(lio, probe, lio->netdev,
2447 				   "%s TX Checksum Offload Disabled\n",
2448 				   netdev->name);
2449 		}
2450 		break;
2451 
2452 		/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
2453 		 * Command passed by NIC driver
2454 		 */
2455 	case OCTNET_CMD_VXLAN_PORT_CONFIG:
2456 		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
2457 			netif_info(lio, probe, lio->netdev,
2458 				   "%s VxLAN Destination UDP PORT:%d ADDED\n",
2459 				   netdev->name,
2460 				   nctrl->ncmd.s.param1);
2461 		} else if (nctrl->ncmd.s.more ==
2462 			   OCTNET_CMD_VXLAN_PORT_DEL) {
2463 			netif_info(lio, probe, lio->netdev,
2464 				   "%s VxLAN Destination UDP PORT:%d DELETED\n",
2465 				   netdev->name,
2466 				   nctrl->ncmd.s.param1);
2467 		}
2468 		break;
2469 
2470 	case OCTNET_CMD_SET_FLOW_CTL:
2471 		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
2472 		break;
2473 
2474 	default:
2475 		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
2476 			nctrl->ncmd.s.cmd);
2477 	}
2478 }
2479 
2480 /**
2481  * \brief Converts a mask based on net device flags
2482  * @param netdev network device
2483  *
 * This routine generates an octnet_ifflags mask from the net device flags
2485  * received from the OS.
2486  */
2487 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2488 {
2489 	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2490 
2491 	if (netdev->flags & IFF_PROMISC)
2492 		f |= OCTNET_IFFLAG_PROMISC;
2493 
2494 	if (netdev->flags & IFF_ALLMULTI)
2495 		f |= OCTNET_IFFLAG_ALLMULTI;
2496 
2497 	if (netdev->flags & IFF_MULTICAST) {
2498 		f |= OCTNET_IFFLAG_MULTICAST;
2499 
2500 		/* Accept all multicast addresses if there are more than we
2501 		 * can handle
2502 		 */
2503 		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2504 			f |= OCTNET_IFFLAG_ALLMULTI;
2505 	}
2506 
2507 	if (netdev->flags & IFF_BROADCAST)
2508 		f |= OCTNET_IFFLAG_BROADCAST;
2509 
2510 	return f;
2511 }
2512 
2513 /**
2514  * \brief Net device set_multicast_list
2515  * @param netdev network device
2516  */
2517 static void liquidio_set_mcast_list(struct net_device *netdev)
2518 {
2519 	struct lio *lio = GET_LIO(netdev);
2520 	struct octeon_device *oct = lio->oct_dev;
2521 	struct octnic_ctrl_pkt nctrl;
2522 	struct netdev_hw_addr *ha;
2523 	u64 *mc;
2524 	int ret;
2525 	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2526 
2527 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2528 
2529 	/* Create a ctrl pkt command to be sent to core app. */
2530 	nctrl.ncmd.u64 = 0;
2531 	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
2532 	nctrl.ncmd.s.param1 = get_new_flags(netdev);
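	/* param2 carries the multicast address count; 'more' tells the
	 * firmware how many trailing 64-bit udd words hold those addresses
	 * (one address per word, copied in below).
	 */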
2533 	nctrl.ncmd.s.param2 = mc_count;
2534 	nctrl.ncmd.s.more = mc_count;
2535 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2536 	nctrl.netpndev = (u64)netdev;
2537 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2538 
2539 	/* copy all the addresses into the udd */
2540 	mc = &nctrl.udd[0];
2541 	netdev_for_each_mc_addr(ha, netdev) {
2542 		*mc = 0;
2543 		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2544 		/* no need to swap bytes */
2545 
2546 		if (++mc > &nctrl.udd[mc_count])
2547 			break;
2548 	}
2549 
	/* This callback can run in atomic context, so do not wait for the
	 * response.
	 */
2553 	nctrl.wait_time = 0;
2554 
2555 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2556 	if (ret < 0) {
2557 		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2558 			ret);
2559 	}
2560 }
2561 
2562 /**
2563  * \brief Net device set_mac_address
2564  * @param netdev network device
2565  */
2566 static int liquidio_set_mac(struct net_device *netdev, void *p)
2567 {
2568 	int ret = 0;
2569 	struct lio *lio = GET_LIO(netdev);
2570 	struct octeon_device *oct = lio->oct_dev;
2571 	struct sockaddr *addr = (struct sockaddr *)p;
2572 	struct octnic_ctrl_pkt nctrl;
2573 
2574 	if (!is_valid_ether_addr(addr->sa_data))
2575 		return -EADDRNOTAVAIL;
2576 
2577 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2578 
2579 	nctrl.ncmd.u64 = 0;
2580 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2581 	nctrl.ncmd.s.param1 = 0;
2582 	nctrl.ncmd.s.more = 1;
2583 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2584 	nctrl.netpndev = (u64)netdev;
2585 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2586 	nctrl.wait_time = 100;
2587 
2588 	nctrl.udd[0] = 0;
2589 	/* The MAC Address is presented in network byte order. */
2590 	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2591 
2592 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2593 	if (ret < 0) {
2594 		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2595 		return -ENOMEM;
2596 	}
2597 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2598 	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2599 
2600 	return 0;
2601 }
2602 
2603 /**
2604  * \brief Net device get_stats
2605  * @param netdev network device
2606  */
2607 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2608 {
2609 	struct lio *lio = GET_LIO(netdev);
2610 	struct net_device_stats *stats = &netdev->stats;
2611 	struct octeon_device *oct;
2612 	u64 pkts = 0, drop = 0, bytes = 0;
2613 	struct oct_droq_stats *oq_stats;
2614 	struct oct_iq_stats *iq_stats;
2615 	int i, iq_no, oq_no;
2616 
2617 	oct = lio->oct_dev;
2618 
2619 	for (i = 0; i < lio->linfo.num_txpciq; i++) {
2620 		iq_no = lio->linfo.txpciq[i].s.q_no;
2621 		iq_stats = &oct->instr_queue[iq_no]->stats;
2622 		pkts += iq_stats->tx_done;
2623 		drop += iq_stats->tx_dropped;
2624 		bytes += iq_stats->tx_tot_bytes;
2625 	}
2626 
2627 	stats->tx_packets = pkts;
2628 	stats->tx_bytes = bytes;
2629 	stats->tx_dropped = drop;
2630 
2631 	pkts = 0;
2632 	drop = 0;
2633 	bytes = 0;
2634 
2635 	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
2636 		oq_no = lio->linfo.rxpciq[i].s.q_no;
2637 		oq_stats = &oct->droq[oq_no]->stats;
2638 		pkts += oq_stats->rx_pkts_received;
2639 		drop += (oq_stats->rx_dropped +
2640 			 oq_stats->dropped_nodispatch +
2641 			 oq_stats->dropped_toomany +
2642 			 oq_stats->dropped_nomem);
2643 		bytes += oq_stats->rx_bytes_received;
2644 	}
2645 
2646 	stats->rx_bytes = bytes;
2647 	stats->rx_packets = pkts;
2648 	stats->rx_dropped = drop;
2649 
2650 	return stats;
2651 }
2652 
2653 /**
2654  * \brief Net device change_mtu
2655  * @param netdev network device
2656  */
2657 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2658 {
2659 	struct lio *lio = GET_LIO(netdev);
2660 	struct octeon_device *oct = lio->oct_dev;
2661 	struct octnic_ctrl_pkt nctrl;
2662 	int ret = 0;
2663 
	/* Limit the MTU so that Ethernet packets are between 68 and
	 * 16000 bytes.
	 */
2667 	if ((new_mtu < LIO_MIN_MTU_SIZE) ||
2668 	    (new_mtu > LIO_MAX_MTU_SIZE)) {
2669 		dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
2670 		dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
2671 			LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
2672 		return -EINVAL;
2673 	}
2674 
2675 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2676 
2677 	nctrl.ncmd.u64 = 0;
2678 	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
2679 	nctrl.ncmd.s.param1 = new_mtu;
2680 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2681 	nctrl.wait_time = 100;
2682 	nctrl.netpndev = (u64)netdev;
2683 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2684 
2685 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2686 	if (ret < 0) {
2687 		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
2688 		return -1;
2689 	}
2690 
2691 	lio->mtu = new_mtu;
2692 
2693 	return 0;
2694 }
2695 
2696 /**
/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
2702 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2703 {
2704 	struct hwtstamp_config conf;
2705 	struct lio *lio = GET_LIO(netdev);
2706 
2707 	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2708 		return -EFAULT;
2709 
2710 	if (conf.flags)
2711 		return -EINVAL;
2712 
2713 	switch (conf.tx_type) {
2714 	case HWTSTAMP_TX_ON:
2715 	case HWTSTAMP_TX_OFF:
2716 		break;
2717 	default:
2718 		return -ERANGE;
2719 	}
2720 
2721 	switch (conf.rx_filter) {
2722 	case HWTSTAMP_FILTER_NONE:
2723 		break;
2724 	case HWTSTAMP_FILTER_ALL:
2725 	case HWTSTAMP_FILTER_SOME:
2726 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2727 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2728 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2729 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2730 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2731 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2732 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2733 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2734 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2735 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2736 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2737 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2738 		conf.rx_filter = HWTSTAMP_FILTER_ALL;
2739 		break;
2740 	default:
2741 		return -ERANGE;
2742 	}
2743 
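	/* The driver timestamps either every received packet or none, so any
	 * accepted PTP filter is collapsed to HWTSTAMP_FILTER_ALL above.
	 */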
2744 	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2745 		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2746 
2747 	else
2748 		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2749 
2750 	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2751 }
2752 
2753 /**
2754  * \brief ioctl handler
2755  * @param netdev network device
2756  * @param ifr interface request
2757  * @param cmd command
2758  */
2759 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2760 {
2761 	switch (cmd) {
2762 	case SIOCSHWTSTAMP:
2763 		return hwtstamp_ioctl(netdev, ifr);
2764 	default:
2765 		return -EOPNOTSUPP;
2766 	}
2767 }
2768 
/**
 * \brief handle a Tx timestamp response
 * @param oct octeon device
 * @param status response status
 * @param buf pointer to skb
 */
2774 static void handle_timestamp(struct octeon_device *oct,
2775 			     u32 status,
2776 			     void *buf)
2777 {
2778 	struct octnet_buf_free_info *finfo;
2779 	struct octeon_soft_command *sc;
2780 	struct oct_timestamp_resp *resp;
2781 	struct lio *lio;
2782 	struct sk_buff *skb = (struct sk_buff *)buf;
2783 
2784 	finfo = (struct octnet_buf_free_info *)skb->cb;
2785 	lio = finfo->lio;
2786 	sc = finfo->sc;
2787 	oct = lio->oct_dev;
2788 	resp = (struct oct_timestamp_resp *)sc->virtrptr;
2789 
2790 	if (status != OCTEON_REQUEST_DONE) {
2791 		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2792 			CVM_CAST64(status));
2793 		resp->timestamp = 0;
2794 	}
2795 
2796 	octeon_swap_8B_data(&resp->timestamp, 1);
2797 
2798 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2799 		struct skb_shared_hwtstamps ts;
2800 		u64 ns = resp->timestamp;
2801 
2802 		netif_info(lio, tx_done, lio->netdev,
2803 			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2804 			   skb, (unsigned long long)ns);
2805 		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2806 		skb_tstamp_tx(skb, &ts);
2807 	}
2808 
2809 	octeon_free_soft_command(oct, sc);
2810 	tx_buffer_free(skb);
2811 }
2812 
/** \brief Send a data packet that will be timestamped
2814  * @param oct octeon device
2815  * @param ndata pointer to network data
2816  * @param finfo pointer to private network data
2817  */
2818 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2819 					 struct octnic_data_pkt *ndata,
2820 					 struct octnet_buf_free_info *finfo,
2821 					 int xmit_more)
2822 {
2823 	int retval;
2824 	struct octeon_soft_command *sc;
2825 	struct lio *lio;
2826 	int ring_doorbell;
2827 	u32 len;
2828 
2829 	lio = finfo->lio;
2830 
2831 	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2832 					    sizeof(struct oct_timestamp_resp));
2833 	finfo->sc = sc;
2834 
2835 	if (!sc) {
2836 		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2837 		return IQ_SEND_FAILED;
2838 	}
2839 
2840 	if (ndata->reqtype == REQTYPE_NORESP_NET)
2841 		ndata->reqtype = REQTYPE_RESP_NET;
2842 	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2843 		ndata->reqtype = REQTYPE_RESP_NET_SG;
2844 
2845 	sc->callback = handle_timestamp;
2846 	sc->callback_arg = finfo->skb;
2847 	sc->iq_no = ndata->q_no;
2848 
2849 	len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
2850 
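	/* Skip the doorbell write when the stack indicates more packets are
	 * coming (xmit_more); the last packet in the burst rings it once.
	 */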
2851 	ring_doorbell = !xmit_more;
2852 	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2853 				     sc, len, ndata->reqtype);
2854 
2855 	if (retval == IQ_SEND_FAILED) {
2856 		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2857 			retval);
2858 		octeon_free_soft_command(oct, sc);
2859 	} else {
2860 		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2861 	}
2862 
2863 	return retval;
2864 }
2865 
/** \brief Transmit network packets to the Octeon interface
2867  * @param skbuff   skbuff struct to be passed to network layer.
2868  * @param netdev    pointer to network device
2869  * @returns whether the packet was transmitted to the device okay or not
2870  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2871  */
2872 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2873 {
2874 	struct lio *lio;
2875 	struct octnet_buf_free_info *finfo;
2876 	union octnic_cmd_setup cmdsetup;
2877 	struct octnic_data_pkt ndata;
2878 	struct octeon_device *oct;
2879 	struct oct_iq_stats *stats;
2880 	struct octeon_instr_irh *irh;
2881 	union tx_info *tx_info;
2882 	int status = 0;
2883 	int q_idx = 0, iq_no = 0;
2884 	int xmit_more, j;
2885 	u64 dptr = 0;
2886 	u32 tag = 0;
2887 
2888 	lio = GET_LIO(netdev);
2889 	oct = lio->oct_dev;
2890 
2891 	if (netif_is_multiqueue(netdev)) {
2892 		q_idx = skb->queue_mapping;
2893 		q_idx = (q_idx % (lio->linfo.num_txpciq));
2894 		tag = q_idx;
2895 		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2896 	} else {
2897 		iq_no = lio->txq;
2898 	}
2899 
2900 	stats = &oct->instr_queue[iq_no]->stats;
2901 
2902 	/* Check for all conditions in which the current packet cannot be
2903 	 * transmitted.
2904 	 */
2905 	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2906 	    (!lio->linfo.link.s.link_up) ||
2907 	    (skb->len <= 0)) {
2908 		netif_info(lio, tx_err, lio->netdev,
2909 			   "Transmit failed link_status : %d\n",
2910 			   lio->linfo.link.s.link_up);
2911 		goto lio_xmit_failed;
2912 	}
2913 
2914 	/* Use space in skb->cb to store info used to unmap and
2915 	 * free the buffers.
2916 	 */
2917 	finfo = (struct octnet_buf_free_info *)skb->cb;
2918 	finfo->lio = lio;
2919 	finfo->skb = skb;
2920 	finfo->sc = NULL;
2921 
2922 	/* Prepare the attributes for the data to be passed to OSI. */
2923 	memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2924 
2925 	ndata.buf = (void *)finfo;
2926 
2927 	ndata.q_no = iq_no;
2928 
2929 	if (netif_is_multiqueue(netdev)) {
2930 		if (octnet_iq_is_full(oct, ndata.q_no)) {
2931 			/* defer sending if queue is full */
2932 			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2933 				   ndata.q_no);
2934 			stats->tx_iq_busy++;
2935 			return NETDEV_TX_BUSY;
2936 		}
2937 	} else {
2938 		if (octnet_iq_is_full(oct, lio->txq)) {
2939 			/* defer sending if queue is full */
2940 			stats->tx_iq_busy++;
2941 			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2942 				   lio->txq);
2943 			return NETDEV_TX_BUSY;
2944 		}
2945 	}
2946 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2947 	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2948 	 */
2949 
2950 	ndata.datasize = skb->len;
2951 
2952 	cmdsetup.u64 = 0;
2953 	cmdsetup.s.iq_no = iq_no;
2954 
2955 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2956 		if (skb->encapsulation) {
2957 			cmdsetup.s.tnl_csum = 1;
2958 			stats->tx_vxlan++;
2959 		} else {
2960 			cmdsetup.s.transport_csum = 1;
2961 		}
2962 	}
2963 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2964 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2965 		cmdsetup.s.timestamp = 1;
2966 	}
2967 
2968 	if (skb_shinfo(skb)->nr_frags == 0) {
2969 		cmdsetup.s.u.datasize = skb->len;
2970 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2971 
		/* Map the linear packet data for DMA to the device */
2973 		dptr = dma_map_single(&oct->pci_dev->dev,
2974 				      skb->data,
2975 				      skb->len,
2976 				      DMA_TO_DEVICE);
2977 		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2978 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2979 				__func__);
2980 			return NETDEV_TX_BUSY;
2981 		}
2982 
2983 		ndata.cmd.cmd2.dptr = dptr;
2984 		finfo->dptr = dptr;
2985 		ndata.reqtype = REQTYPE_NORESP_NET;
2986 
2987 	} else {
2988 		int i, frags;
2989 		struct skb_frag_struct *frag;
2990 		struct octnic_gather *g;
2991 
2992 		spin_lock(&lio->glist_lock[q_idx]);
2993 		g = (struct octnic_gather *)
2994 			list_delete_head(&lio->glist[q_idx]);
2995 		spin_unlock(&lio->glist_lock[q_idx]);
2996 
2997 		if (!g) {
2998 			netif_info(lio, tx_err, lio->netdev,
2999 				   "Transmit scatter gather: glist null!\n");
3000 			goto lio_xmit_failed;
3001 		}
3002 
3003 		cmdsetup.s.gather = 1;
3004 		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
3005 		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
3006 
3007 		memset(g->sg, 0, g->sg_size);
3008 
3009 		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
3010 						 skb->data,
3011 						 (skb->len - skb->data_len),
3012 						 DMA_TO_DEVICE);
3013 		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
3014 			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
3015 				__func__);
3016 			return NETDEV_TX_BUSY;
3017 		}
3018 		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
3019 
3020 		frags = skb_shinfo(skb)->nr_frags;
3021 		i = 1;
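		/* Each Octeon gather entry holds four pointers, so (i >> 2)
		 * selects the sg entry and (i & 3) selects the slot within
		 * it; slot 0 of entry 0 already maps the linear data above.
		 */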
3022 		while (frags--) {
3023 			frag = &skb_shinfo(skb)->frags[i - 1];
3024 
3025 			g->sg[(i >> 2)].ptr[(i & 3)] =
3026 				dma_map_page(&oct->pci_dev->dev,
3027 					     frag->page.p,
3028 					     frag->page_offset,
3029 					     frag->size,
3030 					     DMA_TO_DEVICE);
3031 
3032 			if (dma_mapping_error(&oct->pci_dev->dev,
3033 					      g->sg[i >> 2].ptr[i & 3])) {
3034 				dma_unmap_single(&oct->pci_dev->dev,
3035 						 g->sg[0].ptr[0],
3036 						 skb->len - skb->data_len,
3037 						 DMA_TO_DEVICE);
3038 				for (j = 1; j < i; j++) {
3039 					frag = &skb_shinfo(skb)->frags[j - 1];
3040 					dma_unmap_page(&oct->pci_dev->dev,
3041 						       g->sg[j >> 2].ptr[j & 3],
3042 						       frag->size,
3043 						       DMA_TO_DEVICE);
3044 				}
3045 				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
3046 					__func__);
3047 				return NETDEV_TX_BUSY;
3048 			}
3049 
3050 			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
3051 			i++;
3052 		}
3053 
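		/* The gather list memory was DMA-mapped when the glist was
		 * set up; sync it so the device sees the entries just written.
		 */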
3054 		dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
3055 					   g->sg_size, DMA_TO_DEVICE);
3056 		dptr = g->sg_dma_ptr;
3057 
3058 		ndata.cmd.cmd2.dptr = dptr;
3059 		finfo->dptr = dptr;
3060 		finfo->g = g;
3061 
3062 		ndata.reqtype = REQTYPE_NORESP_NET_SG;
3063 	}
3064 
3065 	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
3066 	tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
3067 
3068 	if (skb_shinfo(skb)->gso_size) {
3069 		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
3070 		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
3071 		stats->tx_gso++;
3072 	}
3073 
3074 	/* HW insert VLAN tag */
3075 	if (skb_vlan_tag_present(skb)) {
3076 		irh->priority = skb_vlan_tag_get(skb) >> 13;
3077 		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
3078 	}
3079 
3080 	xmit_more = skb->xmit_more;
3081 
3082 	if (unlikely(cmdsetup.s.timestamp))
3083 		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
3084 	else
3085 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
3086 	if (status == IQ_SEND_FAILED)
3087 		goto lio_xmit_failed;
3088 
3089 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
3090 
3091 	if (status == IQ_SEND_STOP)
3092 		stop_q(lio->netdev, q_idx);
3093 
3094 	netif_trans_update(netdev);
3095 
3096 	if (skb_shinfo(skb)->gso_size)
3097 		stats->tx_done += skb_shinfo(skb)->gso_segs;
3098 	else
3099 		stats->tx_done++;
3100 	stats->tx_tot_bytes += skb->len;
3101 
3102 	return NETDEV_TX_OK;
3103 
3104 lio_xmit_failed:
3105 	stats->tx_dropped++;
3106 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
3107 		   iq_no, stats->tx_dropped);
3108 	if (dptr)
3109 		dma_unmap_single(&oct->pci_dev->dev, dptr,
3110 				 ndata.datasize, DMA_TO_DEVICE);
3111 	tx_buffer_free(skb);
3112 	return NETDEV_TX_OK;
3113 }
3114 
3115 /** \brief Network device Tx timeout
3116  * @param netdev    pointer to network device
3117  */
3118 static void liquidio_tx_timeout(struct net_device *netdev)
3119 {
3120 	struct lio *lio;
3121 
3122 	lio = GET_LIO(netdev);
3123 
3124 	netif_info(lio, tx_err, lio->netdev,
3125 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
3126 		   netdev->stats.tx_dropped);
3127 	netif_trans_update(netdev);
3128 	txqs_wake(netdev);
3129 }
3130 
3131 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
3132 				    __be16 proto __attribute__((unused)),
3133 				    u16 vid)
3134 {
3135 	struct lio *lio = GET_LIO(netdev);
3136 	struct octeon_device *oct = lio->oct_dev;
3137 	struct octnic_ctrl_pkt nctrl;
3138 	int ret = 0;
3139 
3140 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3141 
3142 	nctrl.ncmd.u64 = 0;
3143 	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3144 	nctrl.ncmd.s.param1 = vid;
3145 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3146 	nctrl.wait_time = 100;
3147 	nctrl.netpndev = (u64)netdev;
3148 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3149 
3150 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3151 	if (ret < 0) {
3152 		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
3153 			ret);
3154 	}
3155 
3156 	return ret;
3157 }
3158 
3159 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
3160 				     __be16 proto __attribute__((unused)),
3161 				     u16 vid)
3162 {
3163 	struct lio *lio = GET_LIO(netdev);
3164 	struct octeon_device *oct = lio->oct_dev;
3165 	struct octnic_ctrl_pkt nctrl;
3166 	int ret = 0;
3167 
3168 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3169 
3170 	nctrl.ncmd.u64 = 0;
3171 	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3172 	nctrl.ncmd.s.param1 = vid;
3173 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3174 	nctrl.wait_time = 100;
3175 	nctrl.netpndev = (u64)netdev;
3176 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3177 
3178 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3179 	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
3181 			ret);
3182 	}
3183 	return ret;
3184 }
3185 
3186 /** Sending command to enable/disable RX checksum offload
3187  * @param netdev                pointer to network device
3188  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
 *                              OCTNET_CMD_RXCSUM_DISABLE
3191  * @returns                     SUCCESS or FAILURE
3192  */
3193 int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
3194 				u8 rx_cmd)
3195 {
3196 	struct lio *lio = GET_LIO(netdev);
3197 	struct octeon_device *oct = lio->oct_dev;
3198 	struct octnic_ctrl_pkt nctrl;
3199 	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
3202 	nctrl.ncmd.s.cmd = command;
3203 	nctrl.ncmd.s.param1 = rx_cmd;
3204 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3205 	nctrl.wait_time = 100;
3206 	nctrl.netpndev = (u64)netdev;
3207 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3208 
3209 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3210 	if (ret < 0) {
3211 		dev_err(&oct->pci_dev->dev,
3212 			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
3213 			ret);
3214 	}
3215 	return ret;
3216 }
3217 
3218 /** Sending command to add/delete VxLAN UDP port to firmware
3219  * @param netdev                pointer to network device
3220  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
3221  * @param vxlan_port            VxLAN port to be added or deleted
3222  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
3223  *                              OCTNET_CMD_VXLAN_PORT_DEL
3224  * @returns                     SUCCESS or FAILURE
3225  */
3226 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
3227 				       u16 vxlan_port, u8 vxlan_cmd_bit)
3228 {
3229 	struct lio *lio = GET_LIO(netdev);
3230 	struct octeon_device *oct = lio->oct_dev;
3231 	struct octnic_ctrl_pkt nctrl;
3232 	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
3235 	nctrl.ncmd.s.cmd = command;
3236 	nctrl.ncmd.s.more = vxlan_cmd_bit;
3237 	nctrl.ncmd.s.param1 = vxlan_port;
3238 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3239 	nctrl.wait_time = 100;
3240 	nctrl.netpndev = (u64)netdev;
3241 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3242 
3243 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3244 	if (ret < 0) {
3245 		dev_err(&oct->pci_dev->dev,
3246 			"VxLAN port add/delete failed in core (ret:0x%x)\n",
3247 			ret);
3248 	}
3249 	return ret;
3250 }
3251 
3252 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
3253 {
3254 	struct lio *lio = GET_LIO(netdev);
3255 	struct octeon_device *oct = lio->oct_dev;
3256 	struct octnic_ctrl_pkt nctrl;
3257 	int ret = 0;
3258 
3259 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3260 
3261 	nctrl.ncmd.u64 = 0;
3262 	nctrl.ncmd.s.cmd = cmd;
3263 	nctrl.ncmd.s.param1 = param1;
3264 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3265 	nctrl.wait_time = 100;
3266 	nctrl.netpndev = (u64)netdev;
3267 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3268 
3269 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3270 	if (ret < 0) {
3271 		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
3272 			ret);
3273 	}
3274 	return ret;
3275 }
3276 
3277 /** \brief Net device fix features
3278  * @param netdev  pointer to network device
3279  * @param request features requested
3280  * @returns updated features list
3281  */
3282 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
3283 					       netdev_features_t request)
3284 {
3285 	struct lio *lio = netdev_priv(netdev);
3286 
3287 	if ((request & NETIF_F_RXCSUM) &&
3288 	    !(lio->dev_capability & NETIF_F_RXCSUM))
3289 		request &= ~NETIF_F_RXCSUM;
3290 
3291 	if ((request & NETIF_F_HW_CSUM) &&
3292 	    !(lio->dev_capability & NETIF_F_HW_CSUM))
3293 		request &= ~NETIF_F_HW_CSUM;
3294 
3295 	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
3296 		request &= ~NETIF_F_TSO;
3297 
3298 	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
3299 		request &= ~NETIF_F_TSO6;
3300 
3301 	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
3302 		request &= ~NETIF_F_LRO;
3303 
	/* Disable LRO if RXCSUM is off */
3305 	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
3306 	    (lio->dev_capability & NETIF_F_LRO))
3307 		request &= ~NETIF_F_LRO;
3308 
3309 	return request;
3310 }
3311 
3312 /** \brief Net device set features
3313  * @param netdev  pointer to network device
3314  * @param features features to enable/disable
3315  */
3316 static int liquidio_set_features(struct net_device *netdev,
3317 				 netdev_features_t features)
3318 {
3319 	struct lio *lio = netdev_priv(netdev);
3320 
3321 	if (!((netdev->features ^ features) & NETIF_F_LRO))
3322 		return 0;
3323 
3324 	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
3325 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3326 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3327 	else if (!(features & NETIF_F_LRO) &&
3328 		 (lio->dev_capability & NETIF_F_LRO))
3329 		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
3330 				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3331 
	/* Send a command to the firmware to enable/disable RX checksum
	 * offload when the setting is changed via ethtool.
	 */
3335 	if (!(netdev->features & NETIF_F_RXCSUM) &&
3336 	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3337 	    (features & NETIF_F_RXCSUM))
3338 		liquidio_set_rxcsum_command(netdev,
3339 					    OCTNET_CMD_TNL_RX_CSUM_CTL,
3340 					    OCTNET_CMD_RXCSUM_ENABLE);
3341 	else if ((netdev->features & NETIF_F_RXCSUM) &&
3342 		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3343 		 !(features & NETIF_F_RXCSUM))
3344 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3345 					    OCTNET_CMD_RXCSUM_DISABLE);
3346 
3347 	return 0;
3348 }
3349 
3350 static void liquidio_add_vxlan_port(struct net_device *netdev,
3351 				    struct udp_tunnel_info *ti)
3352 {
3353 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3354 		return;
3355 
3356 	liquidio_vxlan_port_command(netdev,
3357 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
3358 				    htons(ti->port),
3359 				    OCTNET_CMD_VXLAN_PORT_ADD);
3360 }
3361 
3362 static void liquidio_del_vxlan_port(struct net_device *netdev,
3363 				    struct udp_tunnel_info *ti)
3364 {
3365 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3366 		return;
3367 
3368 	liquidio_vxlan_port_command(netdev,
3369 				    OCTNET_CMD_VXLAN_PORT_CONFIG,
3370 				    htons(ti->port),
3371 				    OCTNET_CMD_VXLAN_PORT_DEL);
3372 }
3373 
3374 static struct net_device_ops lionetdevops = {
3375 	.ndo_open		= liquidio_open,
3376 	.ndo_stop		= liquidio_stop,
3377 	.ndo_start_xmit		= liquidio_xmit,
3378 	.ndo_get_stats		= liquidio_get_stats,
3379 	.ndo_set_mac_address	= liquidio_set_mac,
3380 	.ndo_set_rx_mode	= liquidio_set_mcast_list,
3381 	.ndo_tx_timeout		= liquidio_tx_timeout,
3382 
3383 	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3384 	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3385 	.ndo_change_mtu		= liquidio_change_mtu,
3386 	.ndo_do_ioctl		= liquidio_ioctl,
3387 	.ndo_fix_features	= liquidio_fix_features,
3388 	.ndo_set_features	= liquidio_set_features,
3389 	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
3390 	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
3391 };
3392 
3393 /** \brief Entry point for the liquidio module
3394  */
3395 static int __init liquidio_init(void)
3396 {
3397 	int i;
3398 	struct handshake *hs;
3399 
3400 	init_completion(&first_stage);
3401 
3402 	octeon_init_device_list(conf_type);
3403 
3404 	if (liquidio_init_pci())
3405 		return -EINVAL;
3406 
3407 	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3408 
3409 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3410 		hs = &handshake[i];
3411 		if (hs->pci_dev) {
3412 			wait_for_completion(&hs->init);
3413 			if (!hs->init_ok) {
3414 				/* init handshake failed */
3415 				dev_err(&hs->pci_dev->dev,
3416 					"Failed to init device\n");
3417 				liquidio_deinit_pci();
3418 				return -EIO;
3419 			}
3420 		}
3421 	}
3422 
3423 	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3424 		hs = &handshake[i];
3425 		if (hs->pci_dev) {
3426 			wait_for_completion_timeout(&hs->started,
3427 						    msecs_to_jiffies(30000));
3428 			if (!hs->started_ok) {
3429 				/* starter handshake failed */
3430 				dev_err(&hs->pci_dev->dev,
3431 					"Firmware failed to start\n");
3432 				liquidio_deinit_pci();
3433 				return -EIO;
3434 			}
3435 		}
3436 	}
3437 
3438 	return 0;
3439 }
3440 
3441 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3442 {
3443 	struct octeon_device *oct = (struct octeon_device *)buf;
3444 	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3445 	int gmxport = 0;
3446 	union oct_link_status *ls;
3447 	int i;
3448 
3449 	if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
3450 		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3451 			recv_pkt->buffer_size[0],
3452 			recv_pkt->rh.r_nic_info.gmxport);
3453 		goto nic_info_err;
3454 	}
3455 
3456 	gmxport = recv_pkt->rh.r_nic_info.gmxport;
3457 	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
3458 
3459 	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3460 	for (i = 0; i < oct->ifcount; i++) {
3461 		if (oct->props[i].gmxport == gmxport) {
3462 			update_link_status(oct->props[i].netdev, ls);
3463 			break;
3464 		}
3465 	}
3466 
3467 nic_info_err:
3468 	for (i = 0; i < recv_pkt->buffer_count; i++)
3469 		recv_buffer_free(recv_pkt->buffer_ptr[i]);
3470 	octeon_free_recv_info(recv_info);
3471 	return 0;
3472 }
3473 
3474 /**
3475  * \brief Setup network interfaces
3476  * @param octeon_dev  octeon device
3477  *
3478  * Called during init time for each device. It assumes the NIC
3479  * is already up and running.  The link information for each
3480  * interface is passed in link_info.
3481  */
3482 static int setup_nic_devices(struct octeon_device *octeon_dev)
3483 {
3484 	struct lio *lio = NULL;
3485 	struct net_device *netdev;
3486 	u8 mac[6], i, j;
3487 	struct octeon_soft_command *sc;
3488 	struct liquidio_if_cfg_context *ctx;
3489 	struct liquidio_if_cfg_resp *resp;
3490 	struct octdev_props *props;
3491 	int retval, num_iqueues, num_oqueues;
3492 	union oct_nic_if_cfg if_cfg;
3493 	unsigned int base_queue;
3494 	unsigned int gmx_port_id;
3495 	u32 resp_size, ctx_size;
3496 	u32 ifidx_or_pfnum;
3497 
3498 	/* This is to handle link status changes */
3499 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3500 				    OPCODE_NIC_INFO,
3501 				    lio_nic_info, octeon_dev);
3502 
3503 	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3504 	 * They are handled directly.
3505 	 */
3506 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3507 					free_netbuf);
3508 
3509 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3510 					free_netsgbuf);
3511 
3512 	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3513 					free_netsgbuf_with_resp);
3514 
3515 	for (i = 0; i < octeon_dev->ifcount; i++) {
3516 		resp_size = sizeof(struct liquidio_if_cfg_resp);
3517 		ctx_size = sizeof(struct liquidio_if_cfg_context);
3518 		sc = (struct octeon_soft_command *)
3519 			octeon_alloc_soft_command(octeon_dev, 0,
3520 						  resp_size, ctx_size);
3521 		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3522 		ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
3523 
3524 		num_iqueues =
3525 			CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
3526 		num_oqueues =
3527 			CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
3528 		base_queue =
3529 			CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
3530 		gmx_port_id =
3531 			CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
3532 		ifidx_or_pfnum = i;
3533 
3534 		dev_dbg(&octeon_dev->pci_dev->dev,
3535 			"requesting config for interface %d, iqs %d, oqs %d\n",
3536 			ifidx_or_pfnum, num_iqueues, num_oqueues);
3537 		WRITE_ONCE(ctx->cond, 0);
3538 		ctx->octeon_id = lio_get_device_id(octeon_dev);
3539 		init_waitqueue_head(&ctx->wc);
3540 
3541 		if_cfg.u64 = 0;
3542 		if_cfg.s.num_iqueues = num_iqueues;
3543 		if_cfg.s.num_oqueues = num_oqueues;
3544 		if_cfg.s.base_queue = base_queue;
3545 		if_cfg.s.gmx_port_id = gmx_port_id;
3546 
3547 		sc->iq_no = 0;
3548 
3549 		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3550 					    OPCODE_NIC_IF_CFG, 0,
3551 					    if_cfg.u64, 0);
3552 
3553 		sc->callback = if_cfg_callback;
3554 		sc->callback_arg = sc;
3555 		sc->wait_time = 3000;
3556 
3557 		retval = octeon_send_soft_command(octeon_dev, sc);
3558 		if (retval == IQ_SEND_FAILED) {
3559 			dev_err(&octeon_dev->pci_dev->dev,
3560 				"iq/oq config failed status: %x\n",
3561 				retval);
3562 			/* Soft instr is freed by driver in case of failure. */
3563 			goto setup_nic_dev_fail;
3564 		}
3565 
3566 		/* Sleep on a wait queue till the cond flag indicates that the
3567 		 * response arrived or timed-out.
3568 		 */
3569 		sleep_cond(&ctx->wc, &ctx->cond);
3570 		retval = resp->status;
3571 		if (retval) {
3572 			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3573 			goto setup_nic_dev_fail;
3574 		}
3575 
3576 		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3577 				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3578 
3579 		num_iqueues = hweight64(resp->cfg_info.iqmask);
3580 		num_oqueues = hweight64(resp->cfg_info.oqmask);
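		/* The firmware reports the queues it actually granted as
		 * bitmasks; count the set bits to get the usable queue
		 * counts, which may be fewer than requested.
		 */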
3581 
3582 		if (!(num_iqueues) || !(num_oqueues)) {
3583 			dev_err(&octeon_dev->pci_dev->dev,
3584 				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3585 				resp->cfg_info.iqmask,
3586 				resp->cfg_info.oqmask);
3587 			goto setup_nic_dev_fail;
3588 		}
3589 		dev_dbg(&octeon_dev->pci_dev->dev,
3590 			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3591 			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3592 			num_iqueues, num_oqueues);
3593 		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
3594 
3595 		if (!netdev) {
3596 			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3597 			goto setup_nic_dev_fail;
3598 		}
3599 
3600 		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3601 
3602 		if (num_iqueues > 1)
3603 			lionetdevops.ndo_select_queue = select_q;
3604 
3605 		/* Associate the routines that will handle different
3606 		 * netdev tasks.
3607 		 */
3608 		netdev->netdev_ops = &lionetdevops;
3609 
3610 		lio = GET_LIO(netdev);
3611 
3612 		memset(lio, 0, sizeof(struct lio));
3613 
3614 		lio->ifidx = ifidx_or_pfnum;
3615 
3616 		props = &octeon_dev->props[i];
3617 		props->gmxport = resp->cfg_info.linfo.gmxport;
3618 		props->netdev = netdev;
3619 
3620 		lio->linfo.num_rxpciq = num_oqueues;
3621 		lio->linfo.num_txpciq = num_iqueues;
3622 		for (j = 0; j < num_oqueues; j++) {
3623 			lio->linfo.rxpciq[j].u64 =
3624 				resp->cfg_info.linfo.rxpciq[j].u64;
3625 		}
3626 		for (j = 0; j < num_iqueues; j++) {
3627 			lio->linfo.txpciq[j].u64 =
3628 				resp->cfg_info.linfo.txpciq[j].u64;
3629 		}
3630 		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3631 		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3632 		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3633 
3634 		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3635 
3636 		lio->dev_capability = NETIF_F_HIGHDMA
3637 				| NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
3638 				| NETIF_F_SG | NETIF_F_RXCSUM
3639 				| NETIF_F_GRO
3640 				| NETIF_F_TSO | NETIF_F_TSO6
3641 				| NETIF_F_LRO;
3642 		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3643 
3644 		/*  Copy of transmit encapsulation capabilities:
3645 		 *  TSO, TSO6, Checksums for this device
3646 		 */
3647 		lio->enc_dev_capability = NETIF_F_IP_CSUM
3648 					  | NETIF_F_IPV6_CSUM
3649 					  | NETIF_F_GSO_UDP_TUNNEL
3650 					  | NETIF_F_HW_CSUM | NETIF_F_SG
3651 					  | NETIF_F_RXCSUM
3652 					  | NETIF_F_TSO | NETIF_F_TSO6
3653 					  | NETIF_F_LRO;
3654 
3655 		netdev->hw_enc_features = (lio->enc_dev_capability &
3656 					   ~NETIF_F_LRO);
3657 
3658 		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3659 
3660 		netdev->vlan_features = lio->dev_capability;
3661 		/* Add any unchangeable hw features */
3662 		lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3663 					NETIF_F_HW_VLAN_CTAG_RX |
3664 					NETIF_F_HW_VLAN_CTAG_TX;
3665 
3666 		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3667 
3668 		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3670 		netdev->hw_features = netdev->hw_features &
3671 			~NETIF_F_HW_VLAN_CTAG_RX;
3672 
3673 		/* Point to the  properties for octeon device to which this
3674 		 * interface belongs.
3675 		 */
3676 		lio->oct_dev = octeon_dev;
3677 		lio->octprops = props;
3678 		lio->netdev = netdev;
3679 
3680 		dev_dbg(&octeon_dev->pci_dev->dev,
3681 			"if%d gmx: %d hw_addr: 0x%llx\n", i,
3682 			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3683 
3684 		/* 64-bit swap required on LE machines */
3685 		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3686 		for (j = 0; j < 6; j++)
3687 			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3688 
3689 		/* Copy MAC Address to OS network device structure */
3690 
3691 		ether_addr_copy(netdev->dev_addr, mac);
3692 
		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues.
		 */
3696 		lio->txq = lio->linfo.txpciq[0].s.q_no;
3697 		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3698 		if (setup_io_queues(octeon_dev, i)) {
3699 			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3700 			goto setup_nic_dev_fail;
3701 		}
3702 
3703 		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3704 
3705 		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3706 		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3707 
3708 		if (setup_glists(octeon_dev, lio, num_iqueues)) {
3709 			dev_err(&octeon_dev->pci_dev->dev,
3710 				"Gather list allocation failed\n");
3711 			goto setup_nic_dev_fail;
3712 		}
3713 
3714 		/* Register ethtool support */
3715 		liquidio_set_ethtool_ops(netdev);
3716 		octeon_dev->priv_flags = 0x0;
3717 
3718 		if (netdev->features & NETIF_F_LRO)
3719 			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3720 					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3721 
3722 		liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
3723 
3724 		if ((debug != -1) && (debug & NETIF_MSG_HW))
3725 			liquidio_set_feature(netdev,
3726 					     OCTNET_CMD_VERBOSE_ENABLE, 0);
3727 
3728 		/* Register the network device with the OS */
3729 		if (register_netdev(netdev)) {
3730 			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3731 			goto setup_nic_dev_fail;
3732 		}
3733 
3734 		dev_dbg(&octeon_dev->pci_dev->dev,
3735 			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3736 			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3737 		netif_carrier_off(netdev);
3738 		lio->link_changes++;
3739 
3740 		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3741 
3742 		/* Send commands to the firmware to enable Rx and Tx checksum
3743 		 * offload by default at the time this device is set up by the
3744 		 * LiquidIO driver.
3745 		 */
3746 		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3747 					    OCTNET_CMD_RXCSUM_ENABLE);
3748 		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3749 				     OCTNET_CMD_TXCSUM_ENABLE);
3750 
3751 		dev_dbg(&octeon_dev->pci_dev->dev,
3752 			"NIC ifidx:%d Setup successful\n", i);
3753 
3754 		octeon_free_soft_command(octeon_dev, sc);
3755 	}
3756 
3757 	return 0;
3758 
3759 setup_nic_dev_fail:
3760 
3761 	octeon_free_soft_command(octeon_dev, sc);
3762 
3763 	while (i--) {
3764 		dev_err(&octeon_dev->pci_dev->dev,
3765 			"NIC ifidx:%d Setup failed\n", i);
3766 		liquidio_destroy_nic_device(octeon_dev, i);
3767 	}
3768 	return -ENODEV;
3769 }
3770 
3771 /**
3772  * \brief initialize the NIC
3773  * @param oct octeon device
3774  *
3775  * This initialization routine is called once the Octeon device application is
3776  * up and running
3777  */
3778 static int liquidio_init_nic_module(struct octeon_device *oct)
3779 {
3780 	struct oct_intrmod_cfg *intrmod_cfg;
3781 	int i, retval = 0;
3782 	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3783 
3784 	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3785 
3786 	/* Only the default IQ and OQ were initialized; initialize the rest
3787 	 * as well by running the port_config command for each port.
3788 	 */
3790 	oct->ifcount = num_nic_ports;
3791 
3792 	memset(oct->props, 0,
3793 	       sizeof(struct octdev_props) * num_nic_ports);
3794 
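	/* A gmxport of -1 marks the slot as not yet bound to a physical
	 * port.
	 */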
3795 	for (i = 0; i < MAX_OCTEON_LINKS; i++)
3796 		oct->props[i].gmxport = -1;
3797 
3798 	retval = setup_nic_devices(oct);
3799 	if (retval) {
3800 		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3801 		goto octnet_init_failure;
3802 	}
3803 
3804 	liquidio_ptp_init(oct);
3805 
3806 	/* Initialize interrupt moderation params */
3807 	intrmod_cfg = &oct->intrmod;
3808 	intrmod_cfg->rx_enable = 1;
3809 	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
3810 	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
3811 	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
3812 	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
3813 	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
3814 	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
3815 	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
3816 	intrmod_cfg->tx_enable = 1;
3817 	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
3818 	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
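	/* rx_frames/rx_usecs start out at the static OQ interrupt coalescing
	 * settings (packet count and time) from the device configuration.
	 */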
3819 	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
3820 	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
3821 	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3822 
3823 	return retval;
3824 
3825 octnet_init_failure:
3826 
3827 	oct->ifcount = 0;
3828 
3829 	return retval;
3830 }
3831 
3832 /**
3833  * \brief starter callback that invokes the remaining initialization work after
3834  * the NIC is up and running.
3835  * @param work  work struct
3836  */
3837 static void nic_starter(struct work_struct *work)
3838 {
3839 	struct octeon_device *oct;
3840 	struct cavium_wk *wk = (struct cavium_wk *)work;
3841 
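	/* The work item is wrapped in a struct cavium_wk whose ctxptr was
	 * pointed at the octeon device when the work was initialized in
	 * octeon_device_init().
	 */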
3842 	oct = (struct octeon_device *)wk->ctxptr;
3843 
3844 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3845 		return;
3846 
3847 	/* If the status of the device is CORE_OK, the core
3848 	 * application has reported its application type. Call
3849 	 * any registered handlers now and move to the RUNNING
3850 	 * state.
3851 	 */
3852 	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3853 		schedule_delayed_work(&oct->nic_poll_work.work,
3854 				      msecs_to_jiffies(LIQUIDIO_STARTER_POLL_INTERVAL_MS));
3855 		return;
3856 	}
3857 
3858 	atomic_set(&oct->status, OCT_DEV_RUNNING);
3859 
3860 	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3861 		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3862 
3863 		if (liquidio_init_nic_module(oct))
3864 			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3865 		else
3866 			handshake[oct->octeon_id].started_ok = 1;
3867 	} else {
3868 		dev_err(&oct->pci_dev->dev,
3869 			"Unexpected application running on NIC (%d). Check firmware.\n",
3870 			oct->app_mode);
3871 	}
3872 
3873 	complete(&handshake[oct->octeon_id].started);
3874 }
3875 
3876 /**
3877  * \brief Device initialization for each Octeon device that is probed
3878  * @param octeon_dev  octeon device
3879  */
3880 static int octeon_device_init(struct octeon_device *octeon_dev)
3881 {
3882 	int j, ret;
3883 	char bootcmd[] = "\n";
3884 	struct octeon_device_priv *oct_priv =
3885 		(struct octeon_device_priv *)octeon_dev->priv;
3886 	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
3887 
3888 	/* Enable access to the octeon device and make its DMA capability
3889 	 * known to the OS.
3890 	 */
3891 	if (octeon_pci_os_setup(octeon_dev))
3892 		return 1;
3893 
3894 	/* Identify the Octeon type and map the BAR address space. */
3895 	if (octeon_chip_specific_setup(octeon_dev)) {
3896 		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
3897 		return 1;
3898 	}
3899 
3900 	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
3901 
3902 	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
3903 
3904 	/* Do a soft reset of the Octeon device. */
3905 	if (octeon_dev->fn_list.soft_reset(octeon_dev))
3906 		return 1;
3907 
3908 	/* Initialize the dispatch mechanism used to push packets arriving on
3909 	 * Octeon Output queues.
3910 	 */
3911 	if (octeon_init_dispatch_list(octeon_dev))
3912 		return 1;
3913 
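	/* Have octeon_core_drv_init() handle the CORE_DRV_ACTIVE message the
	 * firmware sends once its core driver is up; that is presumably what
	 * moves the device to the CORE_OK state polled in nic_starter().
	 */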
3914 	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3915 				    OPCODE_NIC_CORE_DRV_ACTIVE,
3916 				    octeon_core_drv_init,
3917 				    octeon_dev);
3918 
3919 	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
3920 	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
3921 	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
3922 			      msecs_to_jiffies(LIQUIDIO_STARTER_POLL_INTERVAL_MS));
3923 
3924 	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
3925 
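	/* Keep the I/O queues disabled until their rings and registers are
	 * fully configured; they are re-enabled near the end of this
	 * function.
	 */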
3926 	octeon_set_io_queues_off(octeon_dev);
3927 
3928 	/* Set up the data structures that manage this Octeon's input queues. */
3929 	if (octeon_setup_instr_queues(octeon_dev)) {
3930 		dev_err(&octeon_dev->pci_dev->dev,
3931 			"instruction queue initialization failed\n");
3932 		/* On error, release any previously allocated queues */
3933 		for (j = 0; j < octeon_dev->num_iqs; j++)
3934 			octeon_delete_instr_queue(octeon_dev, j);
3935 		return 1;
3936 	}
3937 	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
3938 
3939 	/* Initialize the soft command buffer pool. */
3941 	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
3942 		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
3943 		return 1;
3944 	}
3945 	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
3946 
3947 	/* Initialize lists to manage the requests of different types that
3948 	 * arrive from user & kernel applications for this octeon device.
3949 	 */
3950 	if (octeon_setup_response_list(octeon_dev)) {
3951 		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
3952 		return 1;
3953 	}
3954 	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
3955 
3956 	if (octeon_setup_output_queues(octeon_dev)) {
3957 		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
3958 		/* Release any previously allocated queues */
3959 		for (j = 0; j < octeon_dev->num_oqs; j++)
3960 			octeon_delete_droq(octeon_dev, j);
3961 		return 1;
3962 	}
3963 
3964 	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
3965 
3966 	/* The input and output queue registers were set up earlier (the queues
3967 	 * were not enabled). Any additional registers that need to be
3968 	 * programmed should be done now.
3969 	 */
3970 	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
3971 	if (ret) {
3972 		dev_err(&octeon_dev->pci_dev->dev,
3973 			"Failed to configure device registers\n");
3974 		return ret;
3975 	}
3976 
3977 	/* Initialize the tasklet that handles output queue packet processing. */
3978 	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
3979 	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
3980 		     (unsigned long)octeon_dev);
3981 
3982 	/* Set up the interrupt handler and record the INT SUM register address
3983 	 */
3984 	if (octeon_setup_interrupt(octeon_dev))
3985 		return 1;
3986 
3987 	/* Enable Octeon device interrupts */
3988 	octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
3989 
3990 	/* Enable the input and output queues for this Octeon device */
3991 	octeon_dev->fn_list.enable_io_queues(octeon_dev);
3992 
3993 	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
3994 
3995 	dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
3996 
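	/* When ddr_timeout is 0, the loop below parks until the module
	 * parameter is set non-zero from userspace, e.g. (path assumes the
	 * module is named liquidio):
	 *   echo 10000 > /sys/module/liquidio/parameters/ddr_timeout
	 */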
3997 	if (ddr_timeout == 0)
3998 		dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
3999 
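	/* Give the card a fixed LIO_RESET_SECS to recover from the soft reset
	 * before the DDR checks below.
	 */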
4000 	schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4001 
4002 	/* Wait for the Octeon to initialize DDR after the soft reset. */
4003 	while (ddr_timeout == 0) {
4004 		set_current_state(TASK_INTERRUPTIBLE);
4005 		if (schedule_timeout(HZ / 10)) {
4006 			/* user probably pressed Control-C */
4007 			return 1;
4008 		}
4009 	}
4010 	ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4011 	if (ret) {
4012 		dev_err(&octeon_dev->pci_dev->dev,
4013 			"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4014 			ret);
4015 		return 1;
4016 	}
4017 
4018 	if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
4019 		dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4020 		return 1;
4021 	}
4022 
4023 	/* Divert U-Boot to take commands from the host instead. */
4024 	ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4025 
4026 	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4027 	ret = octeon_init_consoles(octeon_dev);
4028 	if (ret) {
4029 		dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4030 		return 1;
4031 	}
4032 	ret = octeon_add_console(octeon_dev, 0);
4033 	if (ret) {
4034 		dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4035 		return 1;
4036 	}
4037 
4038 	atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4039 
4040 	dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4041 	ret = load_firmware(octeon_dev);
4042 	if (ret) {
4043 		dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4044 		return 1;
4045 	}
4046 
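	/* Device-level init is complete for this Octeon; record it and wake
	 * anyone waiting on the init handshake.
	 */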
4047 	handshake[octeon_dev->octeon_id].init_ok = 1;
4048 	complete(&handshake[octeon_dev->octeon_id].init);
4049 
4050 	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4051 
4052 	/* Send Credit for Octeon Output queues. Credits are always sent after
4053 	 * the output queue is enabled.
4054 	 */
4055 	for (j = 0; j < octeon_dev->num_oqs; j++)
4056 		writel(octeon_dev->droq[j]->max_count,
4057 		       octeon_dev->droq[j]->pkts_credit_reg);
4058 
4059 	/* Packets can start arriving on the output queues from this point. */
4060 
4061 	return 0;
4062 }
4063 
4064 /**
4065  * \brief Exits the module
4066  */
4067 static void __exit liquidio_exit(void)
4068 {
4069 	liquidio_deinit_pci();
4070 
4071 	pr_info("LiquidIO network module is now unloaded\n");
4072 }
4073 
4074 module_init(liquidio_init);
4075 module_exit(liquidio_exit);
4076