xref: /openbmc/linux/drivers/dma/mxs-dma.c (revision 976fa9a3)
/*
 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * Refer to drivers/dma/imx-sdma.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>

#include <asm/irq.h>

#include "dmaengine.h"

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation means the
 * PIO mode of mxs apbh-dma and apbx-dma.  In this mode, the DMA engine
 * itself can program the controller registers of peripheral devices.
 */
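
/*
 * Illustrative only (register values hypothetical): a client such as
 * the GPMI NAND driver issues a PIO transfer by handing the PIO words
 * to dmaengine_prep_slave_sg() in place of a real scatterlist, with
 * direction DMA_TRANS_NONE; see mxs_dma_prep_slave_sg() below:
 *
 *	u32 pio[2] = { ctrl0_val, compare_val };
 *
 *	desc = dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio,
 *				       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
 */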

#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
/*
 * The offset of the NXTCMDAR register differs with both DMA type and
 * version, while the per-channel stride is always 0x70.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
#define HW_APBHX_CHn_BAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70)
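
/*
 * For example, HW_APBHX_CHn_NXTCMDAR(d, 1) is 0x050 + 0x70 = 0x0c0 on
 * the old (i.MX23) APBH, and 0x110 + 0x70 = 0x180 everywhere else.
 */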

/*
 * ccw bits definitions
 *
 * COMMAND:		0..1	(2)
 * CHAIN:		2	(1)
 * IRQ:			3	(1)
 * NAND_LOCK:		4	(1) - not implemented
 * NAND_WAIT4READY:	5	(1) - not implemented
 * DEC_SEM:		6	(1)
 * WAIT4END:		7	(1)
 * HALT_ON_TERMINATE:	8	(1)
 * TERMINATE_FLUSH:	9	(1)
 * RESERVED:		10..11	(2)
 * PIO_NUM:		12..15	(4)
 */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)
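
/*
 * For example, BF_CCW(3, PIO_NUM) expands to
 * ((3 << 12) & (0xf << 12)) == 0x3000, i.e. three PIO words.
 */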

#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */

struct mxs_dma_ccw {
	u32		next;
	u16		bits;
	u16		xfer_bytes;
#define MAX_XFER_BYTES	0xff00
	u32		bufaddr;
#define MXS_PIO_WORDS	16
	u32		pio_words[MXS_PIO_WORDS];
};

#define CCW_BLOCK_SIZE	(4 * PAGE_SIZE)
#define NUM_CCW	(int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
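
/*
 * With 4 KiB pages, CCW_BLOCK_SIZE is 16384 bytes and
 * sizeof(struct mxs_dma_ccw) is 76 bytes, so NUM_CCW is 215.
 */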

struct mxs_dma_chan {
	struct mxs_dma_engine		*mxs_dma;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;
	unsigned int			chan_irq;
	struct mxs_dma_ccw		*ccw;
	dma_addr_t			ccw_phys;
	int				desc_count;
	enum dma_status			status;
	unsigned int			flags;
	bool				reset;
#define MXS_DMA_SG_LOOP			(1 << 0)
#define MXS_DMA_USE_SEMAPHORE		(1 << 1)
};

#define MXS_DMA_CHANNELS		16
#define MXS_DMA_CHANNELS_MASK		0xffff

enum mxs_dma_devtype {
	MXS_DMA_APBH,
	MXS_DMA_APBX,
};

enum mxs_dma_id {
	IMX23_DMA,
	IMX28_DMA,
};

struct mxs_dma_engine {
	enum mxs_dma_id			dev_id;
	enum mxs_dma_devtype		type;
	void __iomem			*base;
	struct clk			*clk;
	struct dma_device		dma_device;
	struct device_dma_parameters	dma_parms;
	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
	struct platform_device		*pdev;
	unsigned int			nr_channels;
};

struct mxs_dma_type {
	enum mxs_dma_id id;
	enum mxs_dma_devtype type;
};

static struct mxs_dma_type mxs_dma_types[] = {
	{
		.id = IMX23_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX23_DMA,
		.type = MXS_DMA_APBX,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBX,
	}
};

static struct platform_device_id mxs_dma_ids[] = {
	{
		.name = "imx23-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
	}, {
		.name = "imx23-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
	}, {
		.name = "imx28-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
	}, {
		.name = "imx28-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
	}, {
		/* end of list */
	}
};

static const struct of_device_id mxs_dma_dt_ids[] = {
	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
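
/*
 * A minimal sketch of the device tree usage (node names, unit address
 * and the client node are hypothetical); the single cell after the
 * phandle selects the channel, see mxs_dma_xlate() below:
 *
 *	dma_apbh: dma-controller@80004000 {
 *		compatible = "fsl,imx28-dma-apbh";
 *		#dma-cells = <1>;
 *		dma-channels = <16>;
 *	};
 *
 *	client {
 *		dmas = <&dma_apbh 4>;
 *		dma-names = "rx-tx";
 *	};
 */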

static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}

static void mxs_dma_reset_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/*
	 * mxs dma channel resets can cause a channel stall. To recover from a
	 * channel stall, we have to reset the whole DMA engine. To avoid this,
	 * we use cyclic DMA with semaphores, which are incremented in
	 * mxs_dma_int_handler. To reset the channel, we can simply stop
	 * writing into the semaphore counter.
	 */
	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
			mxs_chan->flags & MXS_DMA_SG_LOOP) {
		mxs_chan->reset = true;
	} else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	} else {
		unsigned long elapsed = 0;
		const unsigned long max_wait = 50000; /* 50ms */
		void __iomem *reg_dbg1 = mxs_dma->base +
				HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);

		/*
		 * On i.MX28 APBX, the DMA channel can stop working if we reset
		 * the channel while it is in READ_FLUSH (0x08) state.
		 * We wait here until it leaves that state, then trigger the
		 * reset. The wait is bounded to 50ms, so it cannot stall the
		 * kernel.
		 */
		while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
			udelay(100);
			elapsed += 100;
		}

		if (elapsed >= max_wait)
			dev_err(&mxs_chan->mxs_dma->pdev->dev,
					"Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
					chan_id);

		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
	}

	mxs_chan->status = DMA_COMPLETE;
}

static void mxs_dma_enable_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/* write 1 to SEMA to kick off the channel */
	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
			mxs_chan->flags & MXS_DMA_SG_LOOP) {
		/*
		 * A cyclic DMA consists of at least 2 segments, so initialize
		 * the semaphore with 2 so we have enough time to add 1 to the
		 * semaphore if we need to.
		 */
		writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	} else {
		writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	}
	mxs_chan->reset = false;
}

static void mxs_dma_disable_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	mxs_chan->status = DMA_COMPLETE;
}

static int mxs_dma_pause_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
	return 0;
}

static int mxs_dma_resume_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

	mxs_chan->status = DMA_IN_PROGRESS;
	return 0;
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}

static void mxs_dma_tasklet(unsigned long data)
{
	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;

	if (mxs_chan->desc.callback)
		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}

static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
{
	int i;

	for (i = 0; i != mxs_dma->nr_channels; ++i)
		if (mxs_dma->mxs_chans[i].chan_irq == irq)
			return i;

	return -EINVAL;
}

static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	struct mxs_dma_chan *mxs_chan;
	u32 completed;
	u32 err;
	int chan = mxs_dma_irq_to_chan(mxs_dma, irq);

	if (chan < 0)
		return IRQ_NONE;

	/* completion status */
	completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
	completed = (completed >> chan) & 0x1;

	/* Clear interrupt */
	writel((1 << chan),
			mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

	/* error status */
	err = readl(mxs_dma->base + HW_APBHX_CTRL2);
	err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);

	/*
	 * error status bit is in the upper 16 bits, error irq bit in the lower
	 * 16 bits. We transform it into a simpler error code:
	 * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
	 */
	err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);

	/* Clear error irq */
	writel((1 << chan),
			mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

	/*
	 * When the completion bit and the termination error bit are set at
	 * the same time, we do not treat it as an error. In other words, we
	 * only need to handle an error here if it is a bus error, or a
	 * termination error without completion. Since the termination error
	 * code is 0x01, subtracting (err & completed) leaves the real error
	 * cases.
	 */
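	/*
	 * Worked example: a terminated transfer that still completed yields
	 * err = 0x01 and completed = 0x01, so err - (err & completed) = 0
	 * and no error is reported.
	 */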
	err -= err & completed;

	mxs_chan = &mxs_dma->mxs_chans[chan];

	if (err) {
		dev_dbg(mxs_dma->dma_device.dev,
			"%s: error in channel %d\n", __func__,
			chan);
		mxs_chan->status = DMA_ERROR;
		mxs_dma_reset_chan(&mxs_chan->chan);
	} else if (mxs_chan->status != DMA_COMPLETE) {
		if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
			mxs_chan->status = DMA_IN_PROGRESS;
			if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
				writel(1, mxs_dma->base +
					HW_APBHX_CHn_SEMA(mxs_dma, chan));
		} else {
			mxs_chan->status = DMA_COMPLETE;
		}
	}

	if (mxs_chan->status == DMA_COMPLETE) {
		if (mxs_chan->reset)
			return IRQ_HANDLED;
		dma_cookie_complete(&mxs_chan->desc);
	}

	/* schedule tasklet on this channel */
	tasklet_schedule(&mxs_chan->tasklet);

	return IRQ_HANDLED;
}

static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
					    CCW_BLOCK_SIZE,
					    &mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	if (mxs_chan->chan_irq != NO_IRQ) {
		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
					0, "mxs-dma", mxs_dma);
		if (ret)
			goto err_irq;
	}

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable_unprepare(mxs_dma->clk);
}

/*
 * How to use the flags for ->device_prep_slave_sg() :
 *    [1] If there is only one DMA command in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 *    [2] If there are two DMA commands in the DMA chain, the code should be
 *            ......
 *            ->device_prep_slave_sg(0);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *            ......
 *    [3] If there are more than two DMA commands in the DMA chain, the code
 *        should be:
 *            ......
 *            ->device_prep_slave_sg(0);                                // First
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
 *            ......
 */
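
/*
 * A minimal sketch of case [2] above (channel and scatterlists are
 * hypothetical): a command descriptor followed by a data descriptor,
 * where only the last one raises the IRQ and decrements the semaphore:
 *
 *	dmaengine_prep_slave_sg(chan, cmd_sgl, 1, DMA_MEM_TO_DEV, 0);
 *	desc = dmaengine_prep_slave_sg(chan, data_sgl, data_ents,
 *				       DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */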
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	u32 i, j;
	u32 *pio;
	bool append = flags & DMA_PREP_INTERRUPT;
	int idx = append ? mxs_chan->desc_count : 0;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg is prepared with append flag set, the sg
	 * will be appended to the last prepared sg.
	 */
	if (append) {
		BUG_ON(idx < 1);
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
	} else {
		idx = 0;
	}

	if (direction == DMA_TRANS_NONE) {
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len;)
			ccw->pio_words[j++] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		if (flags & DMA_CTRL_ACK)
			ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
						sg_dma_len(sg), MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg_dma_len(sg);

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				if (flags & DMA_CTRL_ACK)
					ccw->bits |= CCW_WAIT4END;
			}
		}
	}
	mxs_chan->desc_count = idx;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 num_periods = buf_len / period_len;
	u32 i = 0, buf = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		return NULL;

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags |= MXS_DMA_SG_LOOP;
	mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;

	if (num_periods > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				num_periods, NUM_CCW);
		goto err_out;
	}

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum period size exceeded: %zu > %d\n",
				period_len, MAX_XFER_BYTES);
		goto err_out;
	}

	while (buf < buf_len) {
		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

		if (i + 1 == num_periods)
			ccw->next = mxs_chan->ccw_phys;
		else
			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

		ccw->bufaddr = dma_addr;
		ccw->xfer_bytes = period_len;

		ccw->bits = 0;
		ccw->bits |= CCW_CHAIN;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= CCW_DEC_SEM;
		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

		dma_addr += period_len;
		buf += period_len;

		i++;
	}
	mxs_chan->desc_count = i;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static int mxs_dma_terminate_all(struct dma_chan *chan)
{
	mxs_dma_reset_chan(chan);
	mxs_dma_disable_chan(chan);

	return 0;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 residue = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS &&
			mxs_chan->flags & MXS_DMA_SG_LOOP) {
		struct mxs_dma_ccw *last_ccw;
		u32 bar;

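		/*
		 * For a cyclic transfer the residue is the distance from the
		 * current bus address (BAR) to the end of the descriptor
		 * ring, i.e. the end address of the last ccw minus BAR.
		 */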
		last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
		residue = last_ccw->xfer_bytes + last_ccw->bufaddr;

		bar = readl(mxs_dma->base +
				HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
		residue -= bar;
	}

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			residue);

	return mxs_chan->status;
}

static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* enable apbh burst */
	if (dma_is_apbh(mxs_dma)) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	}

	/* enable irq for all the channels */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

err_out:
	clk_disable_unprepare(mxs_dma->clk);
	return ret;
}

struct mxs_dma_filter_param {
	struct device_node *of_node;
	unsigned int chan_id;
};

static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mxs_dma_filter_param *param = fn_param;
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_irq;

	if (mxs_dma->dma_device.dev->of_node != param->of_node)
		return false;

	if (chan->chan_id != param->chan_id)
		return false;

	chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
	if (chan_irq < 0)
		return false;

	mxs_chan->chan_irq = chan_irq;

	return true;
}

static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
			       struct of_dma *ofdma)
{
	struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
	dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
	struct mxs_dma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.of_node = ofdma->of_node;
	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= mxs_dma->nr_channels)
		return NULL;

	return dma_request_channel(mask, mxs_dma_filter_fn, &param);
}

static int __init mxs_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct platform_device_id *id_entry;
	const struct of_device_id *of_id;
	const struct mxs_dma_type *dma_type;
	struct mxs_dma_engine *mxs_dma;
	struct resource *iores;
	int ret, i;

	mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
	if (!mxs_dma)
		return -ENOMEM;

	ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "failed to read dma-channels\n");
		return ret;
	}

	of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
	if (of_id)
		id_entry = of_id->data;
	else
		id_entry = platform_get_device_id(pdev);

	dma_type = (struct mxs_dma_type *)id_entry->driver_data;
	mxs_dma->type = dma_type->type;
	mxs_dma->dev_id = dma_type->id;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(mxs_dma->base))
		return PTR_ERR(mxs_dma->base);

	mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mxs_dma->clk))
		return PTR_ERR(mxs_dma->clk);

	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

		mxs_chan->mxs_dma = mxs_dma;
		mxs_chan->chan.device = &mxs_dma->dma_device;
		dma_cookie_init(&mxs_chan->chan);

		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
			     (unsigned long) mxs_chan);

		/* Add the channel to mxs_chan list */
		list_add_tail(&mxs_chan->chan.device_node,
			&mxs_dma->dma_device.channels);
	}

	ret = mxs_dma_init(mxs_dma);
	if (ret)
		return ret;

	mxs_dma->pdev = pdev;
	mxs_dma->dma_device.dev = &pdev->dev;

	/* mxs_dma supports a maximum sg segment size of 0xff00 (65280) bytes */
	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
	mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
	mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
	mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
	mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;

	ret = dma_async_device_register(&mxs_dma->dma_device);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
		return ret;
	}

	ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev,
			"failed to register controller\n");
		dma_async_device_unregister(&mxs_dma->dma_device);
	}

	dev_info(mxs_dma->dma_device.dev, "initialized\n");

	return 0;
}

static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
		.of_match_table = mxs_dma_dt_ids,
	},
	.id_table	= mxs_dma_ids,
};

static int __init mxs_dma_module_init(void)
{
	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);