/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;

	struct virt_dma_desc *cyclic;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);

/**
 * vchan_tx_prep - prepare a descriptor
 * vc: virtual channel allocating this descriptor
 * vd: virtual descriptor to prepare
 * tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;

	return &vd->tx;
}

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}

/**
 * vchan_cookie_complete - report completion of a descriptor
 * vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	if (list_empty(&vc->desc_issued))
		return NULL;

	return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
}
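
/*
 * Illustrative sketch (not part of this API): how a driver built on the
 * helpers above might wire them together.  All "foo_*" names are
 * hypothetical.  In ->issue_pending(), submitted descriptors are moved to
 * the issued list and, if the hardware is idle, the head of the issued
 * list is removed and started; the IRQ handler later reports the finished
 * descriptor with vchan_cookie_complete(), or vchan_cyclic_callback() for
 * each period of a cyclic transfer.  Everything runs under vc->lock.
 *
 *	static void foo_dma_issue_pending(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		if (vchan_issue_pending(vc) && !foo_hw_busy(vc)) {
 *			struct virt_dma_desc *vd = vchan_next_desc(vc);
 *
 *			list_del(&vd->node);	(hardware now owns vd)
 *			foo_hw_start(vc, vd);
 *		}
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *	}
 */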

/**
 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
 * vc: virtual channel to get descriptors from
 * head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all submitted, issued and completed descriptors from internal
 * lists, and provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
	struct list_head *head)
{
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
}

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}

#endif
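
/*
 * Illustrative sketch (not part of this header): a driver's
 * DMA_TERMINATE_ALL handler typically mirrors vchan_free_chan_resources()
 * above - stop the hardware and drop any in-flight descriptor under
 * vc->lock, collect everything else with vchan_get_all_descriptors(),
 * then free the list outside the lock.  The "foo_*" names are hypothetical.
 *
 *	static int foo_dma_terminate_all(struct virt_dma_chan *vc)
 *	{
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		foo_hw_stop(vc);		(also clear vc->cyclic if set)
 *		vchan_get_all_descriptors(vc, &head);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		vchan_dma_desc_free_list(vc, &head);
 *		return 0;
 *	}
 */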