/*
 * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd
 * Author: Sugar <shuge@allwinnertech.com>
 *
 * Copyright (C) 2014 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"

/*
 * There are 16 physical channels that can work in parallel.
 *
 * However we have 30 different endpoints for our requests.
 *
 * Since the channels are only able to handle one unidirectional
 * transfer at a time, we need to allocate more virtual channels so
 * that everyone can grab one.
 *
 * Some devices can't work in both directions (mostly because it
 * wouldn't make sense), so we have a bit fewer virtual channels than
 * two channels per endpoint.
 */

#define NR_MAX_CHANNELS		16
#define NR_MAX_REQUESTS		30
#define NR_MAX_VCHANS		53

/*
 * Common registers
 */
#define DMA_IRQ_EN(x)		((x) * 0x04)
#define DMA_IRQ_HALF			BIT(0)
#define DMA_IRQ_PKG			BIT(1)
#define DMA_IRQ_QUEUE			BIT(2)

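/*
 * Each IRQ enable/status register packs eight channels, one 4-bit
 * field per channel holding the HALF/PKG/QUEUE bits above.
 */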
#define DMA_IRQ_CHAN_NR			8
#define DMA_IRQ_CHAN_WIDTH		4

#define DMA_IRQ_STAT(x)		((x) * 0x04 + 0x10)

#define DMA_STAT		0x30

/*
 * Channel-specific registers
 */
#define DMA_CHAN_ENABLE		0x00
#define DMA_CHAN_ENABLE_START		BIT(0)
#define DMA_CHAN_ENABLE_STOP		0

#define DMA_CHAN_PAUSE		0x04
#define DMA_CHAN_PAUSE_PAUSE		BIT(1)
#define DMA_CHAN_PAUSE_RESUME		0

#define DMA_CHAN_LLI_ADDR	0x08

#define DMA_CHAN_CUR_CFG	0x0c
#define DMA_CHAN_CFG_SRC_DRQ(x)		((x) & 0x1f)
#define DMA_CHAN_CFG_SRC_IO_MODE	BIT(5)
#define DMA_CHAN_CFG_SRC_LINEAR_MODE	(0 << 5)
#define DMA_CHAN_CFG_SRC_BURST(x)	(((x) & 0x3) << 7)
#define DMA_CHAN_CFG_SRC_WIDTH(x)	(((x) & 0x3) << 9)

#define DMA_CHAN_CFG_DST_DRQ(x)		(DMA_CHAN_CFG_SRC_DRQ(x) << 16)
#define DMA_CHAN_CFG_DST_IO_MODE	(DMA_CHAN_CFG_SRC_IO_MODE << 16)
#define DMA_CHAN_CFG_DST_LINEAR_MODE	(DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
#define DMA_CHAN_CFG_DST_BURST(x)	(DMA_CHAN_CFG_SRC_BURST(x) << 16)
#define DMA_CHAN_CFG_DST_WIDTH(x)	(DMA_CHAN_CFG_SRC_WIDTH(x) << 16)

#define DMA_CHAN_CUR_SRC	0x10

#define DMA_CHAN_CUR_DST	0x14

#define DMA_CHAN_CUR_CNT	0x18

#define DMA_CHAN_CUR_PARA	0x1c

/*
 * Various hardware-related defines
 */
#define LLI_LAST_ITEM	0xfffff800
#define NORMAL_WAIT	8
#define DRQ_SDRAM	1
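
/*
 * DRQ_SDRAM is the DRQ port number used for the memory side of a
 * transfer; NORMAL_WAIT is the default wait-cycle count programmed
 * into the "para" word of each LLI.
 */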

/*
 * Hardware representation of the LLI
 *
 * The hardware will be fed the physical address of this structure,
 * and read its content in order to start the transfer.
 */
struct sun6i_dma_lli {
	u32			cfg;
	u32			src;
	u32			dst;
	u32			len;
	u32			para;
	u32			p_lli_next;

	/*
	 * This field is not used by the DMA controller, but will be
	 * used by the CPU to go through the list (mostly for dumping
	 * or freeing it).
	 */
	struct sun6i_dma_lli	*v_lli_next;
};

struct sun6i_desc {
	struct virt_dma_desc	vd;
	dma_addr_t		p_lli;
	struct sun6i_dma_lli	*v_lli;
};

struct sun6i_pchan {
	u32			idx;
	void __iomem		*base;
	struct sun6i_vchan	*vchan;
	struct sun6i_desc	*desc;
	struct sun6i_desc	*done;
};

struct sun6i_vchan {
	struct virt_dma_chan	vc;
	struct list_head	node;
	struct dma_slave_config	cfg;
	struct sun6i_pchan	*phy;
	u8			port;
};

struct sun6i_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct clk		*clk;
	int			irq;
	spinlock_t		lock;
	struct reset_control	*rstc;
	struct tasklet_struct	task;
	atomic_t		tasklet_shutdown;
	struct list_head	pending;
	struct dma_pool		*pool;
	struct sun6i_pchan	*pchans;
	struct sun6i_vchan	*vchans;
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d)
{
	return container_of(d, struct sun6i_dma_dev, slave);
}

static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun6i_vchan, vc.chan);
}

static inline struct sun6i_desc *
to_sun6i_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sun6i_desc, vd.tx);
}

static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
{
	dev_dbg(sdev->slave.dev, "Common register:\n"
		"\tmask0(%04x): 0x%08x\n"
		"\tmask1(%04x): 0x%08x\n"
		"\tpend0(%04x): 0x%08x\n"
		"\tpend1(%04x): 0x%08x\n"
		"\tstats(%04x): 0x%08x\n",
		DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
		DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
		DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)),
		DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)),
		DMA_STAT, readl(sdev->base + DMA_STAT));
}

static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
					    struct sun6i_pchan *pchan)
{
	phys_addr_t reg = virt_to_phys(pchan->base);

	dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
		"\t___en(%04x): \t0x%08x\n"
		"\tpause(%04x): \t0x%08x\n"
		"\tstart(%04x): \t0x%08x\n"
		"\t__cfg(%04x): \t0x%08x\n"
		"\t__src(%04x): \t0x%08x\n"
		"\t__dst(%04x): \t0x%08x\n"
		"\tcount(%04x): \t0x%08x\n"
		"\t_para(%04x): \t0x%08x\n\n",
		pchan->idx, &reg,
		DMA_CHAN_ENABLE,
		readl(pchan->base + DMA_CHAN_ENABLE),
		DMA_CHAN_PAUSE,
		readl(pchan->base + DMA_CHAN_PAUSE),
		DMA_CHAN_LLI_ADDR,
		readl(pchan->base + DMA_CHAN_LLI_ADDR),
		DMA_CHAN_CUR_CFG,
		readl(pchan->base + DMA_CHAN_CUR_CFG),
		DMA_CHAN_CUR_SRC,
		readl(pchan->base + DMA_CHAN_CUR_SRC),
		DMA_CHAN_CUR_DST,
		readl(pchan->base + DMA_CHAN_CUR_DST),
		DMA_CHAN_CUR_CNT,
		readl(pchan->base + DMA_CHAN_CUR_CNT),
		DMA_CHAN_CUR_PARA,
		readl(pchan->base + DMA_CHAN_CUR_PARA));
}

static inline int convert_burst(u32 maxburst, u8 *burst)
{
	switch (maxburst) {
	case 1:
		*burst = 0;
		break;
	case 8:
		*burst = 2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width)
{
	if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) ||
	    (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

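	/* Map 1, 2 and 4 bytes to the 0, 1 and 2 register encodings. */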
	*width = addr_width >> 1;
	return 0;
}

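/*
 * Link @next (at dma address @next_phy) into the chain: as the head of
 * @txd when @prev is NULL, or after @prev otherwise. The new item is
 * terminated with LLI_LAST_ITEM so the controller stops there.
 */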
static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
			       struct sun6i_dma_lli *next,
			       dma_addr_t next_phy,
			       struct sun6i_desc *txd)
{
	if ((!prev && !txd) || !next)
		return NULL;

	if (!prev) {
		txd->p_lli = next_phy;
		txd->v_lli = next;
	} else {
		prev->p_lli_next = next_phy;
		prev->v_lli_next = next;
	}

	next->p_lli_next = LLI_LAST_ITEM;
	next->v_lli_next = NULL;

	return next;
}

static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
				    dma_addr_t src,
				    dma_addr_t dst, u32 len,
				    struct dma_slave_config *config)
{
	u8 src_width, dst_width, src_burst, dst_burst;
	int ret;

	if (!config)
		return -EINVAL;

	ret = convert_burst(config->src_maxburst, &src_burst);
	if (ret)
		return ret;

	ret = convert_burst(config->dst_maxburst, &dst_burst);
	if (ret)
		return ret;

	ret = convert_buswidth(config->src_addr_width, &src_width);
	if (ret)
		return ret;

	ret = convert_buswidth(config->dst_addr_width, &dst_width);
	if (ret)
		return ret;

	lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
		DMA_CHAN_CFG_SRC_WIDTH(src_width) |
		DMA_CHAN_CFG_DST_BURST(dst_burst) |
		DMA_CHAN_CFG_DST_WIDTH(dst_width);

	lli->src = src;
	lli->dst = dst;
	lli->len = len;
	lli->para = NORMAL_WAIT;

	return 0;
}

static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
				      struct sun6i_dma_lli *lli)
{
	phys_addr_t p_lli = virt_to_phys(lli);

	dev_dbg(chan2dev(&vchan->vc.chan),
		"\n\tdesc:   p - %pa v - 0x%p\n"
		"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
		"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
		&p_lli, lli,
		lli->cfg, lli->src, lli->dst,
		lli->len, lli->para, lli->p_lli_next);
}

static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sun6i_desc *txd = to_sun6i_desc(&vd->tx);
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device);
	struct sun6i_dma_lli *v_lli, *v_next;
	dma_addr_t p_lli, p_next;

	if (unlikely(!txd))
		return;

	p_lli = txd->p_lli;
	v_lli = txd->v_lli;

	while (v_lli) {
		v_next = v_lli->v_lli_next;
		p_next = v_lli->p_lli_next;

		dma_pool_free(sdev->pool, v_lli, p_lli);

		v_lli = v_next;
		p_lli = p_next;
	}

	kfree(txd);
}

static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock(&sdev->lock);
	list_del_init(&vchan->node);
	spin_unlock(&sdev->lock);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vchan_get_all_descriptors(&vchan->vc, &head);

	if (pchan) {
		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);

		vchan->phy = NULL;
		pchan->vchan = NULL;
		pchan->desc = NULL;
		pchan->done = NULL;
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);
	struct sun6i_pchan *pchan = vchan->phy;
	u32 irq_val, irq_reg, irq_offset;

	if (!pchan)
		return -EAGAIN;

	if (!desc) {
		pchan->desc = NULL;
		pchan->done = NULL;
		return -EAGAIN;
	}

	list_del(&desc->node);

	pchan->desc = to_sun6i_desc(&desc->tx);
	pchan->done = NULL;

	sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);

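	/*
	 * Enable the end-of-queue interrupt for this channel: irq_reg
	 * selects one of the two DMA_IRQ_EN registers, irq_offset the
	 * 4-bit field for the channel inside it.
	 */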
	irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
	irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;

	irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
	irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH);
	writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));

	writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
	writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);

	sun6i_dma_dump_com_regs(sdev);
	sun6i_dma_dump_chan_regs(sdev, pchan);

	return 0;
}

static void sun6i_dma_tasklet(unsigned long data)
{
	struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	unsigned int pchan_alloc = 0;
	unsigned int pchan_idx;

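	/* First pass: restart channels whose current descriptor is done. */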
	list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&vchan->vc.lock);

		pchan = vchan->phy;

		if (pchan && pchan->done) {
			if (sun6i_dma_start_desc(vchan)) {
				/*
				 * No current txd associated with this channel
				 */
				dev_dbg(sdev->slave.dev, "pchan %u: free\n",
					pchan->idx);

				/* Mark this channel free */
				vchan->phy = NULL;
				pchan->vchan = NULL;
			}
		}
		spin_unlock_irq(&vchan->vc.lock);
	}

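	/* Second pass: pair free physical channels with pending vchans. */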
	spin_lock_irq(&sdev->lock);
	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
		pchan = &sdev->pchans[pchan_idx];

		if (pchan->vchan || list_empty(&sdev->pending))
			continue;

		vchan = list_first_entry(&sdev->pending,
					 struct sun6i_vchan, node);

		/* Remove from pending channels */
		list_del_init(&vchan->node);
		pchan_alloc |= BIT(pchan_idx);

		/* Mark this channel allocated */
		pchan->vchan = vchan;
		vchan->phy = pchan;
		dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n",
			pchan->idx, &vchan->vc);
	}
	spin_unlock_irq(&sdev->lock);

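	/* Third pass: start the first descriptor on each new pairing. */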
	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
		if (!(pchan_alloc & BIT(pchan_idx)))
			continue;

		pchan = sdev->pchans + pchan_idx;
		vchan = pchan->vchan;
		if (vchan) {
			spin_lock_irq(&vchan->vc.lock);
			sun6i_dma_start_desc(vchan);
			spin_unlock_irq(&vchan->vc.lock);
		}
	}
}

static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
{
	struct sun6i_dma_dev *sdev = dev_id;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	int i, j, ret = IRQ_NONE;
	u32 status;

	for (i = 0; i < 2; i++) {
		status = readl(sdev->base + DMA_IRQ_STAT(i));
		if (!status)
			continue;

		dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
			i ? "high" : "low", status);

		writel(status, sdev->base + DMA_IRQ_STAT(i));

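		/*
		 * Each status register packs eight channels, 4 bits per
		 * channel; channels 8-15 live in the second register, so
		 * offset the pchan index by the register number.
		 */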
		for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
			if (status & DMA_IRQ_QUEUE) {
				pchan = sdev->pchans + j + (i * DMA_IRQ_CHAN_NR);
				vchan = pchan->vchan;

				if (vchan) {
					spin_lock(&vchan->vc.lock);
					vchan_cookie_complete(&pchan->desc->vd);
					pchan->done = pchan->desc;
					spin_unlock(&vchan->vc.lock);
				}
			}

			status = status >> DMA_IRQ_CHAN_WIDTH;
		}

		if (!atomic_read(&sdev->tasklet_shutdown))
			tasklet_schedule(&sdev->task);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli;
	struct sun6i_desc *txd;
	dma_addr_t p_lli;
	int ret;

	dev_dbg(chan2dev(chan),
		"%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
		__func__, vchan->vc.chan.chan_id, &dest, &src, len, flags);

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
	if (!v_lli) {
		dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
		goto err_txd_free;
	}

	ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig);
	if (ret)
		goto err_dma_free;

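	/* Memory-to-memory: both endpoints are SDRAM, addressed linearly. */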
	v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_LINEAR_MODE |
		DMA_CHAN_CFG_SRC_LINEAR_MODE;

	sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);

	sun6i_dma_dump_lli(vchan, v_lli);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_dma_free:
	dma_pool_free(sdev->pool, v_lli, p_lli);
err_txd_free:
	kfree(txd);
	return NULL;
}

static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags, void *context)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli, *prev = NULL;
	struct sun6i_desc *txd;
	struct scatterlist *sg;
	dma_addr_t p_lli;
	int i, ret;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

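	/* Build one LLI per scatterlist entry and chain them together. */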
	for_each_sg(sgl, sg, sg_len, i) {
		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
		if (!v_lli)
			goto err_lli_free;

		if (dir == DMA_MEM_TO_DEV) {
			ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg),
						sconfig->dst_addr, sg_dma_len(sg),
						sconfig);
			if (ret)
				goto err_cur_lli_free;

			v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE |
				DMA_CHAN_CFG_SRC_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_DST_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sconfig->dst_addr, &sg_dma_address(sg),
				sg_dma_len(sg), flags);

		} else {
			ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr,
						sg_dma_address(sg), sg_dma_len(sg),
						sconfig);
			if (ret)
				goto err_cur_lli_free;

			v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_IO_MODE |
				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_SRC_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sg_dma_address(sg), &sconfig->src_addr,
				sg_dma_len(sg), flags);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
	}

	dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		sun6i_dma_dump_lli(vchan, prev);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_cur_lli_free:
	dma_pool_free(sdev->pool, v_lli, p_lli);
err_lli_free:
	/*
	 * Free the chain using the dma addresses recorded when the LLIs
	 * were linked; virt_to_phys() is not valid for dma_pool memory.
	 */
	p_lli = txd->p_lli;
	for (prev = txd->v_lli; prev; prev = v_lli) {
		dma_addr_t p_next = prev->p_lli_next;

		v_lli = prev->v_lli_next;
		dma_pool_free(sdev->pool, prev, p_lli);
		p_lli = p_next;
	}
	kfree(txd);
	return NULL;
}

static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	int ret = 0;

	switch (cmd) {
	case DMA_RESUME:
		dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

		spin_lock_irqsave(&vchan->vc.lock, flags);

		if (pchan) {
			writel(DMA_CHAN_PAUSE_RESUME,
			       pchan->base + DMA_CHAN_PAUSE);
		} else if (!list_empty(&vchan->vc.desc_issued)) {
			spin_lock(&sdev->lock);
			list_add_tail(&vchan->node, &sdev->pending);
			spin_unlock(&sdev->lock);
		}

		spin_unlock_irqrestore(&vchan->vc.lock, flags);
		break;

	case DMA_PAUSE:
		dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);

		if (pchan) {
			writel(DMA_CHAN_PAUSE_PAUSE,
			       pchan->base + DMA_CHAN_PAUSE);
		} else {
			spin_lock(&sdev->lock);
			list_del_init(&vchan->node);
			spin_unlock(&sdev->lock);
		}
		break;

	case DMA_TERMINATE_ALL:
		ret = sun6i_dma_terminate_all(vchan);
		break;
	case DMA_SLAVE_CONFIG:
		memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config));
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	struct sun6i_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct sun6i_desc *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

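	/*
	 * A still-queued descriptor contributes its full length as the
	 * residue; an in-flight one reports the hardware byte counter of
	 * the LLI currently being transferred.
	 */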
	vd = vchan_find_desc(&vchan->vc, cookie);
	if (vd) {
		txd = to_sun6i_desc(&vd->tx);
		for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next)
			bytes += lli->len;
	} else if (!pchan || !pchan->desc) {
		bytes = 0;
	} else {
		bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}

static void sun6i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan_issue_pending(&vchan->vc)) {
		spin_lock(&sdev->lock);

		if (!vchan->phy && list_empty(&vchan->node)) {
			list_add_tail(&vchan->node, &sdev->pending);
			tasklet_schedule(&sdev->task);
			dev_dbg(chan2dev(chan), "vchan %p: issued\n",
				&vchan->vc);
		}

		spin_unlock(&sdev->lock);
	} else {
		dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n",
			&vchan->vc);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdev->lock, flags);
	list_del_init(&vchan->node);
	spin_unlock_irqrestore(&sdev->lock, flags);

	vchan_free_chan_resources(&vchan->vc);
}
static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun6i_dma_dev *sdev = ofdma->of_dma_data;
	struct sun6i_vchan *vchan;
	struct dma_chan *chan;
	u8 port = dma_spec->args[0];

	if (port > NR_MAX_REQUESTS)
		return NULL;

	chan = dma_get_any_slave_channel(&sdev->slave);
	if (!chan)
		return NULL;

	vchan = to_sun6i_vchan(chan);
	vchan->port = port;

	return chan;
}

static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev)
{
	/* Disable all interrupts from DMA */
	writel(0, sdev->base + DMA_IRQ_EN(0));
	writel(0, sdev->base + DMA_IRQ_EN(1));

	/* Prevent spurious interrupts from scheduling the tasklet */
	atomic_inc(&sdev->tasklet_shutdown);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(sdev->slave.dev, sdev->irq, sdev);

	/* Actually prevent the tasklet from being scheduled */
	tasklet_kill(&sdev->task);
}

static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
{
	int i;

	for (i = 0; i < NR_MAX_VCHANS; i++) {
		struct sun6i_vchan *vchan = &sdev->vchans[i];

		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}

static int sun6i_dma_probe(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc;
	struct resource *res;
	struct clk *mux, *pll6;
	int ret, i;

	sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
	if (!sdc)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdc->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdc->base))
		return PTR_ERR(sdc->base);

	sdc->irq = platform_get_irq(pdev, 0);
	if (sdc->irq < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		return sdc->irq;
	}

	sdc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(sdc->clk);
	}

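	/*
	 * The A31 DMA controller needs AHB1 clocked from PLL6, so
	 * reparent the AHB1 mux before bringing the device up.
	 */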
	mux = clk_get(NULL, "ahb1_mux");
	if (IS_ERR(mux)) {
		dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n");
		return PTR_ERR(mux);
	}

	pll6 = clk_get(NULL, "pll6");
	if (IS_ERR(pll6)) {
		dev_err(&pdev->dev, "Couldn't get PLL6\n");
		clk_put(mux);
		return PTR_ERR(pll6);
	}

	ret = clk_set_parent(mux, pll6);
	clk_put(pll6);
	clk_put(mux);

	if (ret) {
		dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n");
		return ret;
	}

	sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->rstc)) {
		dev_err(&pdev->dev, "No reset controller specified\n");
		return PTR_ERR(sdc->rstc);
	}

	sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				     sizeof(struct sun6i_dma_lli), 4, 0);
	if (!sdc->pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, sdc);
	INIT_LIST_HEAD(&sdc->pending);
	spin_lock_init(&sdc->lock);

	dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);

	INIT_LIST_HEAD(&sdc->slave.channels);
	sdc->slave.device_alloc_chan_resources	= sun6i_dma_alloc_chan_resources;
	sdc->slave.device_free_chan_resources	= sun6i_dma_free_chan_resources;
	sdc->slave.device_tx_status		= sun6i_dma_tx_status;
	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
	sdc->slave.device_control		= sun6i_dma_control;
	sdc->slave.chancnt			= NR_MAX_VCHANS;

	sdc->slave.dev = &pdev->dev;

	sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS,
				   sizeof(struct sun6i_pchan), GFP_KERNEL);
	if (!sdc->pchans)
		return -ENOMEM;

	sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS,
				   sizeof(struct sun6i_vchan), GFP_KERNEL);
	if (!sdc->vchans)
		return -ENOMEM;

	tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);

	for (i = 0; i < NR_MAX_CHANNELS; i++) {
		struct sun6i_pchan *pchan = &sdc->pchans[i];

		pchan->idx = i;
		pchan->base = sdc->base + 0x100 + i * 0x40;
	}

	for (i = 0; i < NR_MAX_VCHANS; i++) {
		struct sun6i_vchan *vchan = &sdc->vchans[i];

		INIT_LIST_HEAD(&vchan->node);
		vchan->vc.desc_free = sun6i_dma_free_desc;
		vchan_init(&vchan->vc, &sdc->slave);
	}

	ret = reset_control_deassert(sdc->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't deassert the device from reset\n");
		goto err_chan_free;
	}

	ret = clk_prepare_enable(sdc->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		goto err_reset_assert;
	}

	ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
			       dev_name(&pdev->dev), sdc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&sdc->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_irq_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate,
					 sdc);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&sdc->slave);
err_irq_disable:
	sun6i_kill_tasklet(sdc);
err_clk_disable:
	clk_disable_unprepare(sdc->clk);
err_reset_assert:
	reset_control_assert(sdc->rstc);
err_chan_free:
	sun6i_dma_free(sdc);
	return ret;
}

static int sun6i_dma_remove(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdc->slave);

	sun6i_kill_tasklet(sdc);

	clk_disable_unprepare(sdc->clk);
	reset_control_assert(sdc->rstc);

	sun6i_dma_free(sdc);

	return 0;
}

static const struct of_device_id sun6i_dma_match[] = {
	{ .compatible = "allwinner,sun6i-a31-dma" },
	{ /* sentinel */ }
};

static struct platform_driver sun6i_dma_driver = {
	.probe		= sun6i_dma_probe,
	.remove		= sun6i_dma_remove,
	.driver = {
		.name		= "sun6i-dma",
		.of_match_table	= sun6i_dma_match,
	},
};
module_platform_driver(sun6i_dma_driver);

MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver");
MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_LICENSE("GPL");