/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	struct dmaengine_result tx_result;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;
	struct list_head desc_terminated;

	struct virt_dma_desc *cyclic;
};
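
/*
 * Illustrative sketch (not part of this header): drivers are expected to
 * embed these structures inside their own channel and descriptor types.
 * The "foo" names below are hypothetical:
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		// driver-specific state, e.g. the hardware descriptor
 *	};
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *		struct foo_desc *cur;	// descriptor active on the hardware
 *	};
 *
 *	static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
 *	{
 *		return container_of(c, struct foo_chan, vc.chan);
 *	}
 */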

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
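
/*
 * Illustrative setup sketch, assuming the hypothetical foo_chan/foo_desc
 * types above: the driver supplies desc_free and registers each channel
 * from its probe routine.
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	// in probe, for each channel:
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, dma_dev);	// dma_dev: the driver's struct dma_device
 */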

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	vd->tx_result.result = DMA_TRANS_NOERROR;
	vd->tx_result.residue = 0;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}
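
/*
 * Illustrative use from a driver's prepare callback (hypothetical foo
 * names; a sketch, not a complete prep implementation):
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		// fill in the hardware descriptor from dst/src/len here
 *		return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *	}
 */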

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}
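
/*
 * Illustrative device_issue_pending callback (hypothetical foo names;
 * foo_start_next() stands in for whatever programs the hardware):
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		if (vchan_issue_pending(&fc->vc) && !fc->cur)
 *			foo_start_next(fc);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *	}
 */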

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}
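
/*
 * Illustrative completion path from a hardware interrupt (hypothetical
 * foo names; assumes the active descriptor was already removed from the
 * issued list when it was started, see vchan_next_desc() below):
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_chan *fc = data;
 *
 *		spin_lock(&fc->vc.lock);
 *		if (fc->cur) {
 *			vchan_cookie_complete(&fc->cur->vd);
 *			fc->cur = NULL;
 *			foo_start_next(fc);	// start the next issued descriptor, if any
 *		}
 *		spin_unlock(&fc->vc.lock);
 *		return IRQ_HANDLED;
 *	}
 */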

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx)) {
		unsigned long flags;

		spin_lock_irqsave(&vc->lock, flags);
		list_add(&vd->node, &vc->desc_allocated);
		spin_unlock_irqrestore(&vc->lock, flags);
	} else {
		vc->desc_free(vd);
	}
}
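
/*
 * Note: in the companion virt-dma.c, this is what the completion tasklet
 * and vchan_dma_desc_free_list() use once a descriptor's callback has run:
 * a descriptor marked for reuse goes back onto desc_allocated, anything
 * else is handed to the driver's desc_free().
 */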

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
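
/*
 * Illustrative per-period interrupt handling for a cyclic transfer
 * (hypothetical foo names, foo_is_cyclic() being a driver-side check):
 * the client callback itself runs later from the channel's tasklet,
 * not from the IRQ handler.
 *
 *	spin_lock(&fc->vc.lock);
 *	if (fc->cur && foo_is_cyclic(fc->cur))
 *		vchan_cyclic_callback(&fc->cur->vd);
 *	spin_unlock(&fc->vc.lock);
 */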

/**
 * vchan_terminate_vdesc - move a descriptor to the terminated list and disable any pending cyclic callback for it
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	list_add_tail(&vd->node, &vc->desc_terminated);

	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}
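
/*
 * Typically called for the in-flight descriptor from a driver's
 * device_terminate_all callback, e.g. (hypothetical foo names; see the
 * fuller terminate_all sketch after vchan_get_all_descriptors() below):
 *
 *	if (fc->cur) {
 *		vchan_terminate_vdesc(&fc->cur->vd);
 *		fc->cur = NULL;
 *	}
 */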

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}
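
/*
 * Illustrative "start next transfer" helper (hypothetical foo names):
 * the descriptor is removed from the issued list once the hardware owns
 * it, so the completion path only needs vchan_cookie_complete().
 *
 *	static void foo_start_next(struct foo_chan *fc)
 *	{
 *		struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);
 *
 *		if (!vd)
 *			return;
 *		list_del(&vd->node);
 *		fc->cur = container_of(vd, struct foo_desc, vd);
 *		// program the hardware from fc->cur here
 *	}
 */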

/**
 * vchan_get_all_descriptors - obtain all descriptors held by the channel
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all descriptors from the allocated, submitted, issued, completed
 * and terminated internal lists, and collects them on @head for the caller
 * to free (typically via vchan_dma_desc_free_list() after dropping the lock).
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
	struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
	list_splice_tail_init(&vc->desc_terminated, head);
}
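
/*
 * Illustrative device_terminate_all callback tying the above together
 * (hypothetical foo names; foo_stop_hardware() stands in for halting
 * the channel):
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		foo_stop_hardware(fc);
 *		if (fc->cur) {
 *			vchan_terminate_vdesc(&fc->cur->vd);
 *			fc->cur = NULL;
 *		}
 *		vchan_get_all_descriptors(&fc->vc, &head);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		vchan_dma_desc_free_list(&fc->vc, &head);
 *		return 0;
 *	}
 */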

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
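
/*
 * A driver's device_free_chan_resources callback can usually be a thin
 * wrapper around this (hypothetical foo name):
 *
 *	static void foo_free_chan_resources(struct dma_chan *chan)
 *	{
 *		vchan_free_chan_resources(to_virt_chan(chan));
 *	}
 */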

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are
 * scheduled once this function has started running.
 * Also frees any descriptors left on the terminated list, preventing a
 * memory leak.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	LIST_HEAD(head);
	unsigned long flags;

	tasklet_kill(&vc->task);

	spin_lock_irqsave(&vc->lock, flags);

	list_splice_tail_init(&vc->desc_terminated, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
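
/*
 * Illustrative device_synchronize callback (hypothetical foo name);
 * drivers commonly also call this at the end of their
 * free_chan_resources path to wait out in-flight callbacks:
 *
 *	static void foo_synchronize(struct dma_chan *chan)
 *	{
 *		vchan_synchronize(to_virt_chan(chan));
 *	}
 */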

#endif