xref: /openbmc/linux/drivers/dma/k3dma.c (revision 1b6216a6)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 HiSilicon Limited.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc
#define DMA_CYCLIC_MAX_PERIOD	0x1000
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_TC2			0x08
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_TC2_MASK		0x1c
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_TC2_RAW		0x608
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
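/*
 * The CX_* registers below are per-channel: CX_CUR_CNT is indexed from
 * the controller base with a 0x10 stride, while CX_LLI..CX_CFG are
 * accessed through each channel's own register window (phy->base,
 * set up in k3_dma_probe() as base + idx * 0x40).
 */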
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT1			0x80c
#define CX_CNT0			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_NODEIRQ		BIT(1)
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

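/*
 * Hardware link-list item (LLI), fetched by the controller from the
 * dma_pool. @lli holds the bus address of the next item with
 * CX_LLI_CHAIN_EN ORed into its low bits (see k3_dma_fill_desc()); the
 * 32-byte alignment keeps those flag bits clear of the address.
 */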
struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct k3_desc_hw	*desc_hw;
};

struct k3_dma_phy;

struct k3_dma_chan {
	u32			ccfg;
	struct virt_dma_chan	vc;
	struct k3_dma_phy	*phy;
	struct list_head	node;
	dma_addr_t		dev_addr;
	enum dma_status		status;
	bool			cyclic;
	struct dma_slave_config	slave_config;
};

struct k3_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct k3_dma_chan	*vchan;
	struct k3_dma_desc_sw	*ds_run;
	struct k3_dma_desc_sw	*ds_done;
};

struct k3_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct tasklet_struct	task;
	spinlock_t		lock;
	struct list_head	chan_pending;
	struct k3_dma_phy	*phy;
	struct k3_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	u32			dma_channel_mask;
	unsigned int		irq;
};


#define K3_FLAG_NOCLK	BIT(1)

struct k3dma_soc_data {
	unsigned long flags;
};


#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg);

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}

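/* Enable or disable a physical channel by toggling CX_CFG_EN. */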
static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_TC2_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT0);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}

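/*
 * TC1 fires when a descriptor chain completes; TC2 fires on nodes
 * flagged with CX_CFG_NODEIRQ and drives the cyclic period callback.
 * Errors are only reported and acknowledged.
 */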
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1  = readl_relaxed(d->base + INT_TC1);
	u32 tc2  = readl_relaxed(d->base + INT_TC2);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= ~BIT(i);
		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {

			p = &d->phy[i];
			c = p->vchan;
			if (c && (tc1 & BIT(i))) {
				spin_lock(&c->vc.lock);
				if (p->ds_run != NULL) {
					vchan_cookie_complete(&p->ds_run->vd);
					p->ds_done = p->ds_run;
					p->ds_run = NULL;
				}
				spin_unlock(&c->vc.lock);
			}
			if (c && (tc2 & BIT(i))) {
				spin_lock(&c->vc.lock);
				if (p->ds_run != NULL)
					vchan_cyclic_callback(&p->ds_run->vd);
				spin_unlock(&c->vc.lock);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
		tasklet_schedule(&d->task);

	if (irq_chan || err1 || err2)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

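/*
 * Push the next issued descriptor onto c's physical channel. Returns
 * -EAGAIN if the channel has no pchan, the pchan is still busy, or
 * there is nothing left to issue.
 */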
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	/* Avoid losing track of ds_run if a transaction is in flight */
	if (c->phy->ds_run)
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);

		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	c->phy->ds_run = NULL;
	c->phy->ds_done = NULL;
	return -EAGAIN;
}

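/*
 * Scheduler, run from the tasklet: first restart or release physical
 * channels whose descriptor has completed, then hand free pchans
 * (honouring dma_channel_mask) to pending virtual channels and kick
 * off the newly paired ones.
 */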
static void k3_dma_tasklet(struct tasklet_struct *t)
{
	struct k3_dma_dev *d = from_tasklet(d, t, task);
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd && !c->cyclic) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
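		/*
		 * Transfer in flight: the residue is the hardware's
		 * remaining count for the current LLI node plus the
		 * sizes of all nodes that follow it in the chain.
		 */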
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = ((clli - ds->desc_hw_lli) /
				sizeof(struct k3_desc_hw)) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
			dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if (num != ds->desc_num - 1)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);

	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}

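/*
 * Allocate a software descriptor plus its LLI array from the dma_pool.
 * Every pool block is LLI_BLOCK_SIZE bytes, so a single transfer may
 * use at most LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw) nodes.
 */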
static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
							struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan,	dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	ds->size = len;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	c->cyclic = 0;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

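/*
 * Build a cyclic transfer as a chain of nodes no larger than
 * DMA_CYCLIC_MAX_PERIOD each. Nodes landing on a period boundary get
 * CX_CFG_NODEIRQ so TC2 fires once per period, and the last node links
 * back to the first instead of terminating the chain.
 */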
static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		       size_t buf_len, size_t period_len,
		       enum dma_transfer_direction dir,
		       unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	dma_addr_t addr, src = 0, dst = 0;
	int num = 1, since = 0;
	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
	       __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
	       buf_len, period_len, (int)dir);

	avail = buf_len;
	if (avail > modulo)
		num += DIV_ROUND_UP(avail, modulo) - 1;

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 1;
	addr = buf_addr;
	avail = buf_len;
	total = avail;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = min_t(size_t, avail, modulo);

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = addr;
		}
		since += len;
		if (since >= period_len) {
			/* descriptor asks for TC2 interrupt on completion */
			en_tc2 = CX_CFG_NODEIRQ;
			since -= period_len;
		} else
			en_tc2 = 0;

		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

		addr += len;
		avail -= len;
	} while (avail);

	/* "Cyclic" == end of link points back to start of link */
	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

	ds->size = total;

	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	memcpy(&c->slave_config, cfg, sizeof(*cfg));

	return 0;
}

static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
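	/*
	 * CX_CFG layout: bus width as log2(bytes) in bits 12..15 and
	 * 16..19, burst length minus one in bits 20..23 and 24..27.
	 */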
	c->ccfg |= (val << 12) | (val << 16);

	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);
	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);
	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

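/*
 * Tear down a channel: take it off the pending list so the tasklet
 * cannot schedule it, stop and release its pchan if one is assigned,
 * then hand all outstanding descriptors back to the virt-dma core.
 */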
static int k3_dma_terminate_all(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		k3_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		if (p->ds_run) {
			vchan_terminate_vdesc(&p->ds_run->vd);
			p->ds_run = NULL;
		}
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void k3_dma_synchronize(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	vchan_synchronize(&c->vc);
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;
		if (p) {
			k3_dma_pause_dma(p, false);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}

	return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;
		if (p) {
			k3_dma_pause_dma(p, true);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static const struct k3dma_soc_data k3_v1_dma_data = {
	.flags = 0,
};

static const struct k3dma_soc_data asp_v1_dma_data = {
	.flags = K3_FLAG_NOCLK,
};

static const struct of_device_id k3_pdma_dt_ids[] = {
	{ .compatible = "hisilicon,k3-dma-1.0",
	  .data = &k3_v1_dma_data
	},
	{ .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
	  .data = &asp_v1_dma_data
	},
	{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct k3_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= d->dma_requests)
		return NULL;

	return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

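/*
 * Typical dmaengine client usage against this driver, as a minimal
 * sketch (standard dmaengine API, not part of this file; "rx" and the
 * FIFO address are made-up example values):
 *
 *	struct dma_chan *ch = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 16,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(ch, &cfg);	- lands in k3_dma_config()
 *	desc = dmaengine_prep_slave_single(ch, dma_buf, len,
 *			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(ch);		- k3_dma_issue_pending()
 */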
static int k3_dma_probe(struct platform_device *op)
{
	const struct k3dma_soc_data *soc_data;
	struct k3_dma_dev *d;
	const struct of_device_id *of_id;
	int i, ret, irq = 0;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	soc_data = device_get_match_data(&op->dev);
	if (!soc_data)
		return -EINVAL;

	d->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32((&op->dev)->of_node,
				"dma-channels", &d->dma_channels);
		of_property_read_u32((&op->dev)->of_node,
				"dma-requests", &d->dma_requests);
		ret = of_property_read_u32((&op->dev)->of_node,
				"dma-channel-mask", &d->dma_channel_mask);
		if (ret) {
			dev_warn(&op->dev,
				 "dma-channel-mask doesn't exist, considering all as available.\n");
			d->dma_channel_mask = (u32)~0UL;
		}
	}

	if (!(soc_data->flags & K3_FLAG_NOCLK)) {
		d->clk = devm_clk_get(&op->dev, NULL);
		if (IS_ERR(d->clk)) {
			dev_err(&op->dev, "no dma clk\n");
			return PTR_ERR(d->clk);
		}
	}

	irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, irq,
			k3_dma_int_handler, 0, DRIVER_NAME, d);
	if (ret)
		return ret;

	d->irq = irq;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
					LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kcalloc(&op->dev,
		d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
	if (d->phy == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct k3_dma_phy *p;

		if (!(d->dma_channel_mask & BIT(i)))
			continue;

		p = &d->phy[i];
		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
	d->slave.device_tx_status = k3_dma_tx_status;
	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = k3_dma_issue_pending;
	d->slave.device_config = k3_dma_config;
	d->slave.device_pause = k3_dma_transfer_pause;
	d->slave.device_resume = k3_dma_transfer_resume;
	d->slave.device_terminate_all = k3_dma_terminate_all;
	d->slave.device_synchronize = k3_dma_synchronize;
	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

	/* init virtual channel */
	d->chans = devm_kcalloc(&op->dev,
		d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
	if (d->chans == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct k3_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = k3_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	k3_dma_enable_dma(d, true);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto dma_async_register_fail;

	ret = of_dma_controller_register((&op->dev)->of_node,
					k3_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	tasklet_setup(&d->task, k3_dma_tasklet);
	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");

	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
dma_async_register_fail:
	clk_disable_unprepare(d->clk);
	return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
	struct k3_dma_chan *c, *cn;
	struct k3_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
	tasklet_kill(&d->task);
	clk_disable_unprepare(d->clk);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = k3_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			"chan %d is running, fail to suspend\n", stat);
		return -EBUSY;
	}
	k3_dma_enable_dma(d, false);
	clk_disable_unprepare(d->clk);
	return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	k3_dma_enable_dma(d, true);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &k3_dma_pmops,
		.of_match_table = k3_pdma_dt_ids,
	},
	.probe		= k3_dma_probe,
	.remove		= k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");