1b0b4a6b1SSanjay R Mehta // SPDX-License-Identifier: GPL-2.0-only
2b0b4a6b1SSanjay R Mehta /*
3b0b4a6b1SSanjay R Mehta  * AMD Passthrough DMA device driver
4b0b4a6b1SSanjay R Mehta  * -- Based on the CCP driver
5b0b4a6b1SSanjay R Mehta  *
6b0b4a6b1SSanjay R Mehta  * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
7b0b4a6b1SSanjay R Mehta  *
8b0b4a6b1SSanjay R Mehta  * Author: Sanjay R Mehta <sanju.mehta@amd.com>
9b0b4a6b1SSanjay R Mehta  * Author: Gary R Hook <gary.hook@amd.com>
10b0b4a6b1SSanjay R Mehta  */
11b0b4a6b1SSanjay R Mehta 
12b0b4a6b1SSanjay R Mehta #include "ptdma.h"
13b0b4a6b1SSanjay R Mehta #include "../dmaengine.h"
14b0b4a6b1SSanjay R Mehta #include "../virt-dma.h"
15b0b4a6b1SSanjay R Mehta 
to_pt_chan(struct dma_chan * dma_chan)16b0b4a6b1SSanjay R Mehta static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
17b0b4a6b1SSanjay R Mehta {
18b0b4a6b1SSanjay R Mehta 	return container_of(dma_chan, struct pt_dma_chan, vc.chan);
19b0b4a6b1SSanjay R Mehta }
20b0b4a6b1SSanjay R Mehta 
to_pt_desc(struct virt_dma_desc * vd)21b0b4a6b1SSanjay R Mehta static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
22b0b4a6b1SSanjay R Mehta {
23b0b4a6b1SSanjay R Mehta 	return container_of(vd, struct pt_dma_desc, vd);
24b0b4a6b1SSanjay R Mehta }
25b0b4a6b1SSanjay R Mehta 
pt_free_chan_resources(struct dma_chan * dma_chan)26b0b4a6b1SSanjay R Mehta static void pt_free_chan_resources(struct dma_chan *dma_chan)
27b0b4a6b1SSanjay R Mehta {
28b0b4a6b1SSanjay R Mehta 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
29b0b4a6b1SSanjay R Mehta 
30b0b4a6b1SSanjay R Mehta 	vchan_free_chan_resources(&chan->vc);
31b0b4a6b1SSanjay R Mehta }
32b0b4a6b1SSanjay R Mehta 
pt_synchronize(struct dma_chan * dma_chan)33b0b4a6b1SSanjay R Mehta static void pt_synchronize(struct dma_chan *dma_chan)
34b0b4a6b1SSanjay R Mehta {
35b0b4a6b1SSanjay R Mehta 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
36b0b4a6b1SSanjay R Mehta 
37b0b4a6b1SSanjay R Mehta 	vchan_synchronize(&chan->vc);
38b0b4a6b1SSanjay R Mehta }
39b0b4a6b1SSanjay R Mehta 
pt_do_cleanup(struct virt_dma_desc * vd)40b0b4a6b1SSanjay R Mehta static void pt_do_cleanup(struct virt_dma_desc *vd)
41b0b4a6b1SSanjay R Mehta {
42b0b4a6b1SSanjay R Mehta 	struct pt_dma_desc *desc = to_pt_desc(vd);
43b0b4a6b1SSanjay R Mehta 	struct pt_device *pt = desc->pt;
44b0b4a6b1SSanjay R Mehta 
45b0b4a6b1SSanjay R Mehta 	kmem_cache_free(pt->dma_desc_cache, desc);
46b0b4a6b1SSanjay R Mehta }
47b0b4a6b1SSanjay R Mehta 
pt_dma_start_desc(struct pt_dma_desc * desc)48b0b4a6b1SSanjay R Mehta static int pt_dma_start_desc(struct pt_dma_desc *desc)
49b0b4a6b1SSanjay R Mehta {
50b0b4a6b1SSanjay R Mehta 	struct pt_passthru_engine *pt_engine;
51b0b4a6b1SSanjay R Mehta 	struct pt_device *pt;
52b0b4a6b1SSanjay R Mehta 	struct pt_cmd *pt_cmd;
53b0b4a6b1SSanjay R Mehta 	struct pt_cmd_queue *cmd_q;
54b0b4a6b1SSanjay R Mehta 
55b0b4a6b1SSanjay R Mehta 	desc->issued_to_hw = 1;
56b0b4a6b1SSanjay R Mehta 
57b0b4a6b1SSanjay R Mehta 	pt_cmd = &desc->pt_cmd;
58b0b4a6b1SSanjay R Mehta 	pt = pt_cmd->pt;
59b0b4a6b1SSanjay R Mehta 	cmd_q = &pt->cmd_q;
60b0b4a6b1SSanjay R Mehta 	pt_engine = &pt_cmd->passthru;
61b0b4a6b1SSanjay R Mehta 
62b0b4a6b1SSanjay R Mehta 	pt->tdata.cmd = pt_cmd;
63b0b4a6b1SSanjay R Mehta 
64b0b4a6b1SSanjay R Mehta 	/* Execute the command */
65b0b4a6b1SSanjay R Mehta 	pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
66b0b4a6b1SSanjay R Mehta 
67b0b4a6b1SSanjay R Mehta 	return 0;
68b0b4a6b1SSanjay R Mehta }
69b0b4a6b1SSanjay R Mehta 
pt_next_dma_desc(struct pt_dma_chan * chan)70b0b4a6b1SSanjay R Mehta static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
71b0b4a6b1SSanjay R Mehta {
72b0b4a6b1SSanjay R Mehta 	/* Get the next DMA descriptor on the active list */
73b0b4a6b1SSanjay R Mehta 	struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
74b0b4a6b1SSanjay R Mehta 
75b0b4a6b1SSanjay R Mehta 	return vd ? to_pt_desc(vd) : NULL;
76b0b4a6b1SSanjay R Mehta }
77b0b4a6b1SSanjay R Mehta 
/*
 * Walk the channel's active list, completing finished descriptors and
 * returning the next one that still needs to be submitted.
 *
 * A descriptor that has not yet been issued to hardware and is not in
 * error is returned immediately for submission.  Otherwise its cookie
 * is completed, it is unlinked under vc.lock, and the client callback /
 * dependency chain is invoked with the lock dropped.  Returns NULL once
 * the active list is drained.
 */
static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
						 struct pt_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			if (!desc->issued_to_hw) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;
			}

			tx_desc = &desc->vd.tx;
			vd = &desc->vd;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->vc.lock, flags);

		if (desc) {
			if (desc->status != DMA_COMPLETE) {
				/* Preserve an error status; otherwise mark done */
				if (desc->status != DMA_ERROR)
					desc->status = DMA_COMPLETE;

				dma_cookie_complete(tx_desc);
				dma_descriptor_unmap(tx_desc);
				list_del(&desc->vd.node);
			} else {
				/* Don't handle it twice */
				tx_desc = NULL;
			}
		}

		desc = pt_next_dma_desc(chan);

		spin_unlock_irqrestore(&chan->vc.lock, flags);

		/* Client callbacks must run without vc.lock held */
		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
			dma_run_dependencies(tx_desc);
			vchan_vdesc_fini(vd);
		}
	} while (desc);

	return NULL;
}
129b0b4a6b1SSanjay R Mehta 
/*
 * Command-completion callback (also invoked with err == 0 from
 * pt_issue_pending()/pt_resume() to kick off processing).
 *
 * -EINPROGRESS only reports submission progress and is ignored.  Any
 * other non-zero err marks the descriptor failed.  The loop then
 * completes finished descriptors and submits the next pending one; if
 * submission fails, the descriptor is flagged DMA_ERROR and the loop
 * retries so it gets completed-with-error on the next pass.
 */
static void pt_cmd_callback(void *data, int err)
{
	struct pt_dma_desc *desc = data;
	struct dma_chan *dma_chan;
	struct pt_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	dma_chan = desc->vd.tx.chan;
	chan = to_pt_chan(dma_chan);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = pt_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc)
			break;

		ret = pt_dma_start_desc(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}
}
161b0b4a6b1SSanjay R Mehta 
pt_alloc_dma_desc(struct pt_dma_chan * chan,unsigned long flags)162b0b4a6b1SSanjay R Mehta static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
163b0b4a6b1SSanjay R Mehta 					     unsigned long flags)
164b0b4a6b1SSanjay R Mehta {
165b0b4a6b1SSanjay R Mehta 	struct pt_dma_desc *desc;
166b0b4a6b1SSanjay R Mehta 
167b0b4a6b1SSanjay R Mehta 	desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
168b0b4a6b1SSanjay R Mehta 	if (!desc)
169b0b4a6b1SSanjay R Mehta 		return NULL;
170b0b4a6b1SSanjay R Mehta 
171b0b4a6b1SSanjay R Mehta 	vchan_tx_prep(&chan->vc, &desc->vd, flags);
172b0b4a6b1SSanjay R Mehta 
173b0b4a6b1SSanjay R Mehta 	desc->pt = chan->pt;
174d9650682SIlya Novikov 	desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
175b0b4a6b1SSanjay R Mehta 	desc->issued_to_hw = 0;
176b0b4a6b1SSanjay R Mehta 	desc->status = DMA_IN_PROGRESS;
177b0b4a6b1SSanjay R Mehta 
178b0b4a6b1SSanjay R Mehta 	return desc;
179b0b4a6b1SSanjay R Mehta }
180b0b4a6b1SSanjay R Mehta 
/*
 * Build a memcpy descriptor: allocate a pt_dma_desc and fill in the
 * passthru engine command (src, dst, len) plus the completion callback.
 * Returns NULL on allocation failure.
 */
static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
					  dma_addr_t dst,
					  dma_addr_t src,
					  unsigned int len,
					  unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;
	struct pt_cmd *cmd;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	desc->len = len;

	cmd = &desc->pt_cmd;
	cmd->pt = chan->pt;
	cmd->engine = PT_ENGINE_PASSTHRU;
	cmd->passthru.src_dma = src;
	cmd->passthru.dst_dma = dst;
	cmd->passthru.src_len = len;
	cmd->pt_cmd_callback = pt_cmd_callback;
	cmd->data = desc;

	return desc;
}
210b0b4a6b1SSanjay R Mehta 
211b0b4a6b1SSanjay R Mehta static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan * dma_chan,dma_addr_t dst,dma_addr_t src,size_t len,unsigned long flags)212b0b4a6b1SSanjay R Mehta pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
213b0b4a6b1SSanjay R Mehta 		   dma_addr_t src, size_t len, unsigned long flags)
214b0b4a6b1SSanjay R Mehta {
215b0b4a6b1SSanjay R Mehta 	struct pt_dma_desc *desc;
216b0b4a6b1SSanjay R Mehta 
217b0b4a6b1SSanjay R Mehta 	desc = pt_create_desc(dma_chan, dst, src, len, flags);
218b0b4a6b1SSanjay R Mehta 	if (!desc)
219b0b4a6b1SSanjay R Mehta 		return NULL;
220b0b4a6b1SSanjay R Mehta 
221b0b4a6b1SSanjay R Mehta 	return &desc->vd.tx;
222b0b4a6b1SSanjay R Mehta }
223b0b4a6b1SSanjay R Mehta 
224b0b4a6b1SSanjay R Mehta static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan * dma_chan,unsigned long flags)225b0b4a6b1SSanjay R Mehta pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
226b0b4a6b1SSanjay R Mehta {
227b0b4a6b1SSanjay R Mehta 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
228b0b4a6b1SSanjay R Mehta 	struct pt_dma_desc *desc;
229b0b4a6b1SSanjay R Mehta 
230b0b4a6b1SSanjay R Mehta 	desc = pt_alloc_dma_desc(chan, flags);
231b0b4a6b1SSanjay R Mehta 	if (!desc)
232b0b4a6b1SSanjay R Mehta 		return NULL;
233b0b4a6b1SSanjay R Mehta 
234b0b4a6b1SSanjay R Mehta 	return &desc->vd.tx;
235b0b4a6b1SSanjay R Mehta }
236b0b4a6b1SSanjay R Mehta 
pt_issue_pending(struct dma_chan * dma_chan)237b0b4a6b1SSanjay R Mehta static void pt_issue_pending(struct dma_chan *dma_chan)
238b0b4a6b1SSanjay R Mehta {
239b0b4a6b1SSanjay R Mehta 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
240b0b4a6b1SSanjay R Mehta 	struct pt_dma_desc *desc;
241b0b4a6b1SSanjay R Mehta 	unsigned long flags;
2426fa7e0e8SSanjay R Mehta 	bool engine_is_idle = true;
243b0b4a6b1SSanjay R Mehta 
244b0b4a6b1SSanjay R Mehta 	spin_lock_irqsave(&chan->vc.lock, flags);
245b0b4a6b1SSanjay R Mehta 
2466fa7e0e8SSanjay R Mehta 	desc = pt_next_dma_desc(chan);
2476fa7e0e8SSanjay R Mehta 	if (desc)
2486fa7e0e8SSanjay R Mehta 		engine_is_idle = false;
2496fa7e0e8SSanjay R Mehta 
250b0b4a6b1SSanjay R Mehta 	vchan_issue_pending(&chan->vc);
251b0b4a6b1SSanjay R Mehta 
252b0b4a6b1SSanjay R Mehta 	desc = pt_next_dma_desc(chan);
253b0b4a6b1SSanjay R Mehta 
254b0b4a6b1SSanjay R Mehta 	spin_unlock_irqrestore(&chan->vc.lock, flags);
255b0b4a6b1SSanjay R Mehta 
256b0b4a6b1SSanjay R Mehta 	/* If there was nothing active, start processing */
257*92846998SEric Pilmore 	if (engine_is_idle && desc)
258b0b4a6b1SSanjay R Mehta 		pt_cmd_callback(desc, 0);
259b0b4a6b1SSanjay R Mehta }
260b0b4a6b1SSanjay R Mehta 
261e235fe3bSVinod Koul static enum dma_status
pt_tx_status(struct dma_chan * c,dma_cookie_t cookie,struct dma_tx_state * txstate)262d9650682SIlya Novikov pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
263d9650682SIlya Novikov 		struct dma_tx_state *txstate)
264d9650682SIlya Novikov {
265d9650682SIlya Novikov 	struct pt_device *pt = to_pt_chan(c)->pt;
266d9650682SIlya Novikov 	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
267d9650682SIlya Novikov 
268d9650682SIlya Novikov 	pt_check_status_trans(pt, cmd_q);
269d9650682SIlya Novikov 	return dma_cookie_status(c, cookie, txstate);
270d9650682SIlya Novikov }
271d9650682SIlya Novikov 
pt_pause(struct dma_chan * dma_chan)272b0b4a6b1SSanjay R Mehta static int pt_pause(struct dma_chan *dma_chan)
273b0b4a6b1SSanjay R Mehta {
274b0b4a6b1SSanjay R Mehta 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
275b0b4a6b1SSanjay R Mehta 	unsigned long flags;
276b0b4a6b1SSanjay R Mehta 
277b0b4a6b1SSanjay R Mehta 	spin_lock_irqsave(&chan->vc.lock, flags);
278b0b4a6b1SSanjay R Mehta 	pt_stop_queue(&chan->pt->cmd_q);
279b0b4a6b1SSanjay R Mehta 	spin_unlock_irqrestore(&chan->vc.lock, flags);
280b0b4a6b1SSanjay R Mehta 
281b0b4a6b1SSanjay R Mehta 	return 0;
282b0b4a6b1SSanjay R Mehta }
283b0b4a6b1SSanjay R Mehta 
pt_resume(struct dma_chan * dma_chan)284b0b4a6b1SSanjay R Mehta static int pt_resume(struct dma_chan *dma_chan)
285b0b4a6b1SSanjay R Mehta {
286b0b4a6b1SSanjay R Mehta 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
287b0b4a6b1SSanjay R Mehta 	struct pt_dma_desc *desc = NULL;
288b0b4a6b1SSanjay R Mehta 	unsigned long flags;
289b0b4a6b1SSanjay R Mehta 
290b0b4a6b1SSanjay R Mehta 	spin_lock_irqsave(&chan->vc.lock, flags);
291b0b4a6b1SSanjay R Mehta 	pt_start_queue(&chan->pt->cmd_q);
292b0b4a6b1SSanjay R Mehta 	desc = pt_next_dma_desc(chan);
293b0b4a6b1SSanjay R Mehta 	spin_unlock_irqrestore(&chan->vc.lock, flags);
294b0b4a6b1SSanjay R Mehta 
295b0b4a6b1SSanjay R Mehta 	/* If there was something active, re-start */
296b0b4a6b1SSanjay R Mehta 	if (desc)
297b0b4a6b1SSanjay R Mehta 		pt_cmd_callback(desc, 0);
298b0b4a6b1SSanjay R Mehta 
299b0b4a6b1SSanjay R Mehta 	return 0;
300b0b4a6b1SSanjay R Mehta }
301b0b4a6b1SSanjay R Mehta 
pt_terminate_all(struct dma_chan * dma_chan)302b0b4a6b1SSanjay R Mehta static int pt_terminate_all(struct dma_chan *dma_chan)
303b0b4a6b1SSanjay R Mehta {
304b0b4a6b1SSanjay R Mehta 	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
305b0b4a6b1SSanjay R Mehta 	unsigned long flags;
306d9650682SIlya Novikov 	struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
307b0b4a6b1SSanjay R Mehta 	LIST_HEAD(head);
308b0b4a6b1SSanjay R Mehta 
309d9650682SIlya Novikov 	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
310b0b4a6b1SSanjay R Mehta 	spin_lock_irqsave(&chan->vc.lock, flags);
311b0b4a6b1SSanjay R Mehta 	vchan_get_all_descriptors(&chan->vc, &head);
312b0b4a6b1SSanjay R Mehta 	spin_unlock_irqrestore(&chan->vc.lock, flags);
313b0b4a6b1SSanjay R Mehta 
314b0b4a6b1SSanjay R Mehta 	vchan_dma_desc_free_list(&chan->vc, &head);
315b0b4a6b1SSanjay R Mehta 	vchan_free_chan_resources(&chan->vc);
316b0b4a6b1SSanjay R Mehta 
317b0b4a6b1SSanjay R Mehta 	return 0;
318b0b4a6b1SSanjay R Mehta }
319b0b4a6b1SSanjay R Mehta 
/*
 * Register the PTDMA engine with the dmaengine core: allocates the single
 * channel, creates the descriptor slab cache, fills in the dma_device ops,
 * and registers the device.  Returns 0 on success or a negative errno.
 */
int pt_dmaengine_register(struct pt_device *pt)
{
	struct pt_dma_chan *chan;
	struct dma_device *dma_dev = &pt->dma_dev;
	char *cmd_cache_name;
	char *desc_cache_name;
	int ret;

	pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
				       GFP_KERNEL);
	if (!pt->pt_dma_chan)
		return -ENOMEM;

	/*
	 * NOTE(review): cmd_cache_name is allocated and checked but never
	 * used below — no kmem_cache_create() for pt->dma_cmd_cache appears
	 * in this function, yet the error paths and pt_dmaengine_unregister()
	 * both destroy pt->dma_cmd_cache (kmem_cache_destroy(NULL) is a
	 * no-op, so this is harmless but looks like leftover/dead code —
	 * verify against the rest of the driver).
	 */
	cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					"%s-dmaengine-cmd-cache",
					dev_name(pt->dev));
	if (!cmd_cache_name)
		return -ENOMEM;

	desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					 "%s-dmaengine-desc-cache",
					 dev_name(pt->dev));
	if (!desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
					       sizeof(struct pt_dma_desc), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!pt->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = pt->dev;
	dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/*
	 * PTDMA is intended to be used with the AMD NTB devices, hence
	 * marking it as DMA_PRIVATE.
	 */
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);

	chan = pt->pt_dma_chan;
	chan->pt = pt;

	/* Set base and prep routines */
	dma_dev->device_free_chan_resources = pt_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
	dma_dev->device_issue_pending = pt_issue_pending;
	dma_dev->device_tx_status = pt_tx_status;
	dma_dev->device_pause = pt_pause;
	dma_dev->device_resume = pt_resume;
	dma_dev->device_terminate_all = pt_terminate_all;
	dma_dev->device_synchronize = pt_synchronize;

	chan->vc.desc_free = pt_do_cleanup;
	vchan_init(&chan->vc, dma_dev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(pt->dma_desc_cache);

err_cache:
	/* See NOTE(review) above: dma_cmd_cache is not created here */
	kmem_cache_destroy(pt->dma_cmd_cache);

	return ret;
}
402b0b4a6b1SSanjay R Mehta 
pt_dmaengine_unregister(struct pt_device * pt)403b0b4a6b1SSanjay R Mehta void pt_dmaengine_unregister(struct pt_device *pt)
404b0b4a6b1SSanjay R Mehta {
405b0b4a6b1SSanjay R Mehta 	struct dma_device *dma_dev = &pt->dma_dev;
406b0b4a6b1SSanjay R Mehta 
407b0b4a6b1SSanjay R Mehta 	dma_async_device_unregister(dma_dev);
408b0b4a6b1SSanjay R Mehta 
409b0b4a6b1SSanjay R Mehta 	kmem_cache_destroy(pt->dma_desc_cache);
410b0b4a6b1SSanjay R Mehta 	kmem_cache_destroy(pt->dma_cmd_cache);
411b0b4a6b1SSanjay R Mehta }
412