/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	/* driver callback used to free a completed or terminated descriptor */
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;

	/* cyclic descriptor with a pending period completion, if any */
	struct virt_dma_desc *cyclic;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
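
/*
 * A minimal sketch of how a driver's interrupt handler would typically use
 * vchan_cookie_complete() and vchan_cyclic_callback() above; foo_dma_chan,
 * foo_dma_desc and the cur/cyclic fields are hypothetical names invented
 * for illustration, not part of this API:
 *
 *	static irqreturn_t foo_dma_irq(int irq, void *data)
 *	{
 *		struct foo_dma_chan *fc = data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		if (fc->cur && fc->cur->cyclic) {
 *			// report one period; the descriptor stays active
 *			vchan_cyclic_callback(&fc->cur->vd);
 *		} else if (fc->cur) {
 *			// moves the descriptor onto desc_completed and
 *			// schedules the channel tasklet (vc.lock is held)
 *			vchan_cookie_complete(&fc->cur->vd);
 *			fc->cur = NULL;
 *		}
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		return IRQ_HANDLED;
 *	}
 */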
/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}

/**
 * vchan_get_all_descriptors - obtain all descriptors held by the channel
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all descriptors from the internal lists (allocated, submitted,
 * issued and completed), and provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
					     struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
}

/**
 * vchan_free_chan_resources - release all descriptors held by a channel
 * @vc: virtual channel to clean up
 *
 * Collects every descriptor off the channel's lists, clears any descriptor
 * reuse flag, and frees the descriptors outside vc.lock via the channel's
 * desc_free callback.
 */
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running.
 * For proper operation the caller has to ensure that no new callbacks are
 * scheduled after this function has been called.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	tasklet_kill(&vc->task);
}

#endif
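
/*
 * A rough, hypothetical sketch of how a driver wires this header into the
 * usual dmaengine callbacks; every foo_* identifier is invented for
 * illustration, and error handling is omitted:
 *
 *	struct foo_dma_chan {
 *		struct virt_dma_chan vc;	// must embed the virt channel
 *		struct foo_dma_desc *cur;
 *	};
 *
 *	// probe: set the descriptor destructor, then register the channel
 *	fc->vc.desc_free = foo_dma_desc_free;
 *	vchan_init(&fc->vc, &fdev->ddev);
 *
 *	// device_prep_* callback: allocate a descriptor, hand it to the core
 *	return vchan_tx_prep(&fc->vc, &fd->vd, flags);
 *
 *	// device_issue_pending callback: kick the hardware if it is idle
 *	spin_lock_irqsave(&fc->vc.lock, flags);
 *	if (vchan_issue_pending(&fc->vc) && !fc->cur)
 *		foo_dma_start(fc);
 *	spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *	// device_terminate_all callback: collect the descriptors under the
 *	// lock, then free them outside it
 *	spin_lock_irqsave(&fc->vc.lock, flags);
 *	foo_dma_stop(fc);
 *	vchan_get_all_descriptors(&fc->vc, &head);
 *	spin_unlock_irqrestore(&fc->vc.lock, flags);
 *	vchan_dma_desc_free_list(&fc->vc, &head);
 */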