/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	struct dmaengine_result tx_result;
	/* protected by vc.lock */
	struct list_head node;
};
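
/*
 * Illustrative sketch (not part of this header; struct foo_desc and its
 * fields are hypothetical): drivers typically embed struct virt_dma_desc
 * in their own descriptor and recover the containing structure with
 * container_of():
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		dma_addr_t src, dst;
 *		size_t len;
 *	};
 *
 *	static inline struct foo_desc *to_foo_desc(struct virt_dma_desc *vd)
 *	{
 *		return container_of(vd, struct foo_desc, vd);
 *	}
 */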

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;
	struct list_head desc_terminated;

	struct virt_dma_desc *cyclic;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	vd->tx_result.result = DMA_TRANS_NOERROR;
	vd->tx_result.residue = 0;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}
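
/*
 * Illustrative sketch, reusing the hypothetical foo_desc above: a
 * device_prep_dma_memcpy() implementation allocates its descriptor with
 * GFP_NOWAIT (prep callbacks may run in atomic context) and returns the
 * embedded tx descriptor via vchan_tx_prep():
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *
 *		d->src = src;
 *		d->dst = dst;
 *		d->len = len;
 *
 *		return vchan_tx_prep(vc, &d->vd, flags);
 *	}
 */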

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}
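
/*
 * Illustrative sketch: a driver's device_issue_pending() callback takes
 * vc.lock, promotes submitted descriptors, and kicks the hardware if it is
 * idle (foo_hw_busy() and foo_start() are hypothetical):
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		if (vchan_issue_pending(vc) && !foo_hw_busy(vc))
 *			foo_start(vc);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *	}
 */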

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}
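
/*
 * Illustrative sketch: typically called from the driver's interrupt handler
 * once the hardware reports that descriptor d (hypothetical) has finished;
 * the client callback itself then runs later from the vchan tasklet:
 *
 *	spin_lock(&vc->lock);
 *	vchan_cookie_complete(&d->vd);
 *	spin_unlock(&vc->lock);
 */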

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx)) {
		unsigned long flags;

		spin_lock_irqsave(&vc->lock, flags);
		list_add(&vd->node, &vc->desc_allocated);
		spin_unlock_irqrestore(&vc->lock, flags);
	} else {
		vc->desc_free(vd);
	}
}
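
/*
 * Illustrative sketch: the desc_free callback that vchan_vdesc_fini() falls
 * back to is supplied by the driver (foo_desc_free() is hypothetical),
 * usually installed before vchan_init() is called:
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(to_foo_desc(vd));
 *	}
 *
 *	vc->desc_free = foo_desc_free;
 *	vchan_init(vc, dmadev);
 */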

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
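
/*
 * Illustrative sketch: for cyclic (e.g. audio) transfers the interrupt
 * handler reports each completed period instead of completing the cookie,
 * since a cyclic descriptor never finishes on its own (foo_desc_is_cyclic()
 * is hypothetical):
 *
 *	spin_lock(&vc->lock);
 *	if (foo_desc_is_cyclic(d))
 *		vchan_cyclic_callback(&d->vd);
 *	else
 *		vchan_cookie_complete(&d->vd);
 *	spin_unlock(&vc->lock);
 */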

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 *
 * The descriptor is moved to the desc_terminated list; it is freed later,
 * from vchan_synchronize().
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	list_add_tail(&vd->node, &vc->desc_terminated);

	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}
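
/*
 * Illustrative sketch: after one descriptor completes, the interrupt path
 * commonly peeks at the next issued descriptor, removes it from the issued
 * list and programs the hardware with it (foo_start_desc() is hypothetical):
 *
 *	struct virt_dma_desc *vd = vchan_next_desc(vc);
 *
 *	if (vd) {
 *		list_del(&vd->node);
 *		foo_start_desc(to_foo_desc(vd));
 *	}
 */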

/**
 * vchan_get_all_descriptors - obtain all descriptors held by the channel
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all descriptors from the channel's internal lists (allocated,
 * submitted, issued, completed and terminated) and provides them on @head.
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
	struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
	list_splice_tail_init(&vc->desc_terminated, head);
}
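
/*
 * Illustrative sketch: a device_terminate_all() implementation stops the
 * hardware, defers freeing of the in-flight descriptor via
 * vchan_terminate_vdesc(), and collects everything else so it can be freed
 * outside the lock (struct foo_chan, to_foo_chan(), foo_stop() and the
 * fc->active tracking are all hypothetical):
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		foo_stop(fc);
 *		if (fc->active) {
 *			vchan_terminate_vdesc(&fc->active->vd);
 *			fc->active = NULL;
 *		}
 *		vchan_get_all_descriptors(&fc->vc, &head);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		vchan_dma_desc_free_list(&fc->vc, &head);
 *		return 0;
 *	}
 */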

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
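
/*
 * Illustrative sketch: this is often the entire body of a driver's
 * device_free_chan_resources() callback:
 *
 *	static void foo_free_chan_resources(struct dma_chan *chan)
 *	{
 *		vchan_free_chan_resources(to_virt_chan(chan));
 *	}
 */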

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are
 * scheduled after this function has been invoked.
 *
 * Also frees any descriptors that were queued on the desc_terminated list by
 * vchan_terminate_vdesc(), so they are not leaked.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	LIST_HEAD(head);
	unsigned long flags;

	tasklet_kill(&vc->task);

	spin_lock_irqsave(&vc->lock, flags);

	list_splice_tail_init(&vc->desc_terminated, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
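
/*
 * Illustrative sketch: drivers typically wire this up directly as their
 * device_synchronize callback:
 *
 *	static void foo_synchronize(struct dma_chan *chan)
 *	{
 *		vchan_synchronize(to_virt_chan(chan));
 *	}
 */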

#endif