xref: /openbmc/linux/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c (revision c51d39010a1bccc9c1294e2d7c00005aefeb2b5c)
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn23xx_vf_device.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);

struct octeon_device_priv {
	/* Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
	unsigned long napi_mask;
};
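
/* This structure lives in the per-device private area whose size is passed
 * to octeon_allocate_device() in the probe routine below; code throughout
 * this file recovers it by casting the opaque pointer back:
 *
 *	struct octeon_device_priv *oct_priv =
 *	    (struct octeon_device_priv *)oct->priv;
 */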

static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
static int octeon_device_init(struct octeon_device *oct);

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;
	int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT;
	int pkt_cnt = 0, pending_pkts;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	/* pkt_cnt is zeroed at the end of every pass, so report the count
	 * from the final pass instead; nonzero means the OQs never drained.
	 */
	return pending_pkts;
}
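
/* Typical use, as in octeon_destroy_resources() below: stop the hardware
 * from posting new packets, then poll until the DROQs drain:
 *
 *	oct->fn_list.disable_io_queues(oct);
 *	if (lio_wait_for_oq_pkts(oct))
 *		dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
 */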

/**
 * \brief Wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) {
		pcount = atomic_read(
		    &oct->response_list[OCTEON_ORDERED_SC_LIST]
			 .pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}
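
/* Each pass above sleeps for HZ / 10 jiffies (100 ms), so the worst case is
 * MAX_VF_IP_OP_PENDING_PKT_COUNT passes before the caller is told that
 * ordered soft-command requests are still outstanding.
 */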

static const struct pci_device_id liquidio_vf_pci_tbl[] = {
	{
		PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
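
/* The table matches any CN23xx VF by vendor/device ID, with subsystem IDs
 * wildcarded; MODULE_DEVICE_TABLE() exports it so userspace can autoload
 * this module when such a VF appears on the bus.
 */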

static struct pci_driver liquidio_vf_pci_driver = {
	.name		= "LiquidIO_VF",
	.id_table	= liquidio_vf_pci_tbl,
	.probe		= liquidio_vf_probe,
	.remove		= liquidio_vf_remove,
};

static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			dev_err(&oct->pci_dev->dev,
				"unexpected RX interrupt; poll mode is 0 for this VF\n");
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* This will be flushed periodically by the IQ doorbell check */
		if (ret & MSIX_PI_INT)
			return 0;
	}
	return 0;
}
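
/* When the NIC module registers a DROQ in poll mode it supplies
 * droq->ops.napi_fn, which typically does nothing more than schedule NAPI.
 * A minimal sketch of such a callback (hypothetical; name and the use of
 * droq->napi are this editor's assumptions, not code from this file):
 *
 *	static void example_napi_fn(struct octeon_droq *droq)
 *	{
 *		napi_schedule_irqoff(&droq->napi);
 *	}
 */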

static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
	u64 ret;

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}
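
/* The hard-IRQ handler stays thin: fn_list.msix_interrupt_handler() reads
 * and acknowledges the per-ring interrupt status, and any actual packet
 * work is deferred to NAPI or the droq tasklet via the helper above.
 */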

/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 *  Allocate one MSI-X vector per VF ring and register an interrupt
 *  handler for each of them.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
	struct msix_entry *msix_entries;
	int num_alloc_ioq_vectors;
	int num_ioq_vectors;
	int irqret;
	int i;

	if (oct->msix_on) {
		oct->num_msix_irqs = oct->sriov_info.rings_per_vf;

		oct->msix_entries = kcalloc(
		    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
		if (!oct->msix_entries)
			return 1;

		msix_entries = (struct msix_entry *)oct->msix_entries;

		for (i = 0; i < oct->num_msix_irqs; i++)
			msix_entries[i].entry = i;
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "Unable to allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			return 1;
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;

		for (i = 0; i < num_ioq_vectors; i++) {
			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     "octeon", &oct->ioq_vector[i]);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"OCTEON: request_irq failed for MSI-X interrupt, error: %d\n",
					irqret);

				while (i) {
					i--;
					irq_set_affinity_hint(
					    msix_entries[i].vector, NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				oct->msix_entries = NULL;
				return 1;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(
			    msix_entries[i].vector,
			    (&oct->ioq_vector[i].affinity_mask));
		}
		dev_dbg(&oct->pci_dev->dev,
			"OCTEON[%d]: MSI-X enabled\n", oct->octeon_id);
	}
	return 0;
}
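
/* Editor's note: newer kernels can express the same allocation with the
 * pci_alloc_irq_vectors() helper (v4.8+), which hides the msix_entry
 * bookkeeping.  A minimal sketch, reusing the names above for illustration
 * only, not this driver's code path:
 *
 *	int nvec = pci_alloc_irq_vectors(oct->pci_dev, oct->num_msix_irqs,
 *					 oct->num_msix_irqs, PCI_IRQ_MSIX);
 *	if (nvec < 0)
 *		return 1;
 *	for (i = 0; i < nvec; i++)
 *		if (request_irq(pci_irq_vector(oct->pci_dev, i),
 *				liquidio_msix_intr_handler, 0, "octeon",
 *				&oct->ioq_vector[i]))
 *			goto unwind;	// free IRQs 0..i-1, then
 *					// pci_free_irq_vectors()
 */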

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_vf_probe(struct pci_dev *pdev,
		  const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));

	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}
	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = pdev;

	if (octeon_device_init(oct_dev)) {
		liquidio_vf_remove(pdev);
		return -ENOMEM;
	}

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}
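
/* pci_set_drvdata() in the probe above pairs with the pci_get_drvdata()
 * call in liquidio_vf_remove(), which is how the remove path recovers the
 * octeon_device that was allocated here.
 */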

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	u16 status;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	/* Wait for the Transaction Pending bit to clear */
	msleep(100);
	pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status);
	if (status & PCI_EXP_DEVSTA_TRPND) {
		dev_info(&oct->pci_dev->dev, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
		ssleep(5);
		pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA,
					  &status);
		if (status & PCI_EXP_DEVSTA_TRPND)
			dev_info(&oct->pci_dev->dev, "Function reset still incomplete after 5s, resetting anyway\n");
	}
	pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL,
				 PCI_EXP_DEVCTL_BCR_FLR);
	mdelay(100);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}
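
/* Editor's note: current kernels provide a generic pcie_flr() helper that
 * issues the same PCI_EXP_DEVCTL_BCR_FLR write after waiting for pending
 * transactions, so an open-coded sequence like the above is only needed
 * where that helper is unavailable.
 */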

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	struct msix_entry *msix_entries;
	int i;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:
		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs; i++) {
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
			}
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		}
		/* Soft reset the octeon device before exiting */
		if (oct->pci_dev->reset_fn)
			octeon_pci_flr(oct);
		else
			cn23xx_vf_ask_pf_to_do_flr(oct);

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}
}
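
/* Teardown runs the init state machine in reverse: execution enters the
 * switch at the device's current state and falls through every case below
 * it, so each case only has to undo the single step that advanced the
 * device into that state during octeon_device_init().
 */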

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_vf_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
#ifdef CONFIG_PCI_IOV
	/* setup PCI stuff first */
	if (!oct->pci_dev->physfn)
		octeon_pci_flr(oct);
#endif

	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}
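
/* This function fails hard when 64-bit DMA is unavailable.  Many PCI
 * drivers instead fall back to a 32-bit mask; a sketch of that common
 * idiom (not used here):
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return 1;
 */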

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param oct octeon device
 */
static int octeon_device_init(struct octeon_device *oct)
{
	u32 rev_id;
	int j;

	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(oct))
		return 1;
	atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);

	oct->chip_id = OCTEON_CN23XX_VF_VID;
	pci_read_config_dword(oct->pci_dev, PCI_CLASS_REVISION, &rev_id);
	oct->rev_id = rev_id & 0xff;

	if (cn23xx_setup_octeon_vf_device(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(oct))
		return 1;

	atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
		return 1;
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(oct)) {
		dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(oct)) {
		dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);

	if (oct->fn_list.setup_mbox(oct)) {
		dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);

	if (octeon_allocate_ioq_vector(oct)) {
		dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
		 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);

	/* Setup the interrupt handler and record the INT SUM register address */
	if (octeon_setup_interrupt(oct))
		return 1;

	if (cn23xx_octeon_pfvf_handshake(oct))
		return 1;

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);

	/* Enable the input and output queues for this Octeon device */
	if (oct->fn_list.enable_io_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
		return 1;
	}

	atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);

	atomic_set(&oct->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < oct->num_oqs; j++)
		writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	return 0;
}
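
/* Note on the credit writes above: writing a DROQ's max_count to its
 * pkts_credit_reg in effect advertises that many free receive buffers to
 * the hardware, which is what finally lets packets flow on that output
 * queue.
 */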

static int __init liquidio_vf_init(void)
{
	octeon_init_device_list(0);
	return pci_register_driver(&liquidio_vf_pci_driver);
}

static void __exit liquidio_vf_exit(void)
{
	pci_unregister_driver(&liquidio_vf_pci_driver);

	pr_info("LiquidIO_VF network module is now unloaded\n");
}

module_init(liquidio_vf_init);
module_exit(liquidio_vf_exit);