xref: /openbmc/linux/drivers/dma/idxd/submit.c (revision 1f4d4af4)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <uapi/linux/idxd.h>
#include "idxd.h"
#include "registers.h"

static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
	struct idxd_desc *desc;
	struct idxd_device *idxd = wq->idxd;

	desc = wq->descs[idx];
	memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
	memset(desc->completion, 0, idxd->compl_size);
	desc->cpu = cpu;

	if (device_pasid_enabled(idxd))
		desc->hw->pasid = idxd->pasid;

	/*
	 * Descriptor completion vectors are 1-8 for MSIX. Round-robin
	 * through the 8 vectors.
	 */
	wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
	desc->hw->int_handle = wq->vec_ptr;
	return desc;
}
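
/*
 * Worked example of the round-robin above (annotation, not in the original
 * source): with idxd->num_wq_irqs == 8 and wq->vec_ptr starting at 0,
 * successive allocations set int_handle to 1, 2, ..., 8, 1, 2, ...
 * Vector 0 is never handed out here; idxd reserves it for the device's
 * misc (non-completion) interrupt.
 */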

struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
	int cpu, idx;
	struct idxd_device *idxd = wq->idxd;
	DEFINE_SBQ_WAIT(wait);
	struct sbq_wait_state *ws;
	struct sbitmap_queue *sbq;

	if (idxd->state != IDXD_DEV_ENABLED)
		return ERR_PTR(-EIO);

	sbq = &wq->sbq;
	idx = sbitmap_queue_get(sbq, &cpu);
	if (idx >= 0)
		return __get_desc(wq, idx, cpu);
	if (optype == IDXD_OP_NONBLOCK)
		return ERR_PTR(-EAGAIN);

	/* Block until a descriptor frees up or a signal arrives. */
	ws = &sbq->ws[0];
	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending_state(TASK_INTERRUPTIBLE, current))
			break;
		idx = sbitmap_queue_get(sbq, &cpu);
		if (idx >= 0)	/* idx 0 is a valid descriptor index */
			break;
		schedule();
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	if (idx < 0)
		return ERR_PTR(-EAGAIN);

	return __get_desc(wq, idx, cpu);
}
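
/*
 * Hedged usage note, not in the original file: idxd_alloc_desc() reports
 * failure via ERR_PTR() rather than NULL, and IDXD_OP_NONBLOCK turns a
 * full wq into -EAGAIN instead of sleeping. A sketch of a caller that
 * cannot sleep (example_alloc_atomic is a hypothetical name):
 */
#if 0	/* illustration only, not compiled */
static struct idxd_desc *example_alloc_atomic(struct idxd_wq *wq)
{
	struct idxd_desc *desc;

	/*
	 * IDXD_OP_NONBLOCK: a full wq yields ERR_PTR(-EAGAIN) immediately
	 * instead of sleeping on the sbitmap waitqueue.
	 */
	desc = idxd_alloc_desc(wq, IDXD_OP_NONBLOCK);
	if (IS_ERR(desc) && PTR_ERR(desc) == -EAGAIN)
		pr_debug("idxd example: wq full, retry later\n");
	return desc;	/* caller checks IS_ERR() */
}
#endif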

void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	int cpu = desc->cpu;

	desc->cpu = -1;
	sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}
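
/*
 * Annotation, not in the original source: the cpu recorded at allocation
 * time is passed back to sbitmap_queue_clear() so the freed bit can update
 * that cpu's allocation hint, keeping descriptor reuse roughly cpu-local.
 */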

int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	struct idxd_device *idxd = wq->idxd;
	int vec = desc->hw->int_handle;
	void __iomem *portal;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -EIO;

	portal = wq->portal;

	/*
	 * The wmb() flushes writes to coherent DMA data before
	 * possibly triggering a DMA read. The wmb() is necessary
	 * even on UP because the recipient is a device.
	 */
	wmb();
	if (wq_dedicated(wq)) {
		iosubmit_cmds512(portal, desc->hw, 1);
	} else {
		/*
		 * A queue-full rejection is unlikely, since descriptor
		 * allocation is gated at the wq size. If enqcmds() does
		 * return -EAGAIN, something has gone wrong, such as the
		 * device no longer accepting descriptors at all.
		 */
		rc = enqcmds(portal, desc->hw);
		if (rc < 0)
			return rc;
	}

	/*
	 * Queue the descriptor on the lockless list of the irq_entry
	 * (MSIX vector) that the descriptor was assigned to, so the
	 * interrupt handler can find and complete it.
	 */
	if (desc->hw->flags & IDXD_OP_FLAG_RCI)
		llist_add(&desc->llnode,
			  &idxd->irq_entries[vec].pending_llist);

	return 0;
}
121