/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;

	/* set from the completion path by vchan_cyclic_callback() */
	struct virt_dma_desc *cyclic;
	/* terminated descriptor whose freeing is deferred to vchan_synchronize() */
	struct virt_dma_desc *vd_terminated;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx))
		list_add(&vd->node, &vc->desc_allocated);
	else
		vc->desc_free(vd);
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * The descriptor is not freed here; it is parked on vd_terminated and
 * released later, either by the next termination or by vchan_synchronize().
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	/* free up stuck descriptor */
	if (vc->vd_terminated)
		vchan_vdesc_fini(vc->vd_terminated);

	vc->vd_terminated = vd;
	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}

/**
 * vchan_get_all_descriptors - obtain all allocated, submitted, issued and
 * completed descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all allocated, submitted, issued and completed descriptors from the
 * internal lists, and provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
					     struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
}

/**
 * vchan_free_chan_resources - release all descriptors held by a channel
 * @vc: virtual channel to clean up
 *
 * Clears the reuse flag on every descriptor so that all of them, including
 * reusable ones, are actually freed by vchan_dma_desc_free_list().
 */
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are scheduled
 * after the invocation of this function started.
 * Also frees any descriptor deferred by vchan_terminate_vdesc(), preventing a
 * memory leak.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	unsigned long flags;

	tasklet_kill(&vc->task);

	spin_lock_irqsave(&vc->lock, flags);
	if (vc->vd_terminated) {
		vchan_vdesc_fini(vc->vd_terminated);
		vc->vd_terminated = NULL;
	}
	spin_unlock_irqrestore(&vc->lock, flags);
}
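
/*
 * Example usage -- an illustrative sketch only.  The foo_* names below are
 * hypothetical and not part of this header, and the sketch assumes
 * <linux/slab.h> for kzalloc()/kfree().  A driver embeds struct virt_dma_desc
 * as the first member of its own descriptor and struct virt_dma_chan inside
 * its channel, sets vc->desc_free and calls vchan_init(), then builds its
 * dmaengine callbacks on the helpers above:
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		dma_addr_t src, dst;
 *		size_t len;
 *	};
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	static struct dma_async_tx_descriptor *foo_prep_dma_memcpy(
 *		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 *		size_t len, unsigned long flags)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		d->dst = dst;
 *		d->src = src;
 *		d->len = len;
 *		return vchan_tx_prep(vc, &d->vd, flags);
 *	}
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		if (vchan_issue_pending(vc) && !foo_hw_busy(vc))
 *			foo_hw_start(vc);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *	}
 *
 * foo_hw_start() would pop the head of desc_issued with vchan_next_desc(),
 * list_del() it and program the hardware.  The completion interrupt takes
 * vc.lock and calls vchan_cookie_complete() on the finished descriptor (or
 * vchan_cyclic_callback() at the end of each cyclic period); the channel
 * tasklet then invokes the client callbacks and releases completed
 * descriptors through vchan_vdesc_fini().
 */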
#endif