/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <linux/list.h>

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/mach-imx/dma.h>
#include <asm/mach-imx/regs-apbh.h>

static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];

/*
 * Test if the DMA channel is a valid channel
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Return the address of the command within a descriptor.
 */
static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
{
	return desc->address + offsetof(struct mxs_dma_desc, cmd);
}

/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware will process, but
 * has not yet finished. This is a volatile value read directly from hardware,
 * so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_desc_append() for details on how DMA command blocks must be
 * configured to maintain the expected behavior of the semaphore's value.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}
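
/*
 * Illustrative sketch (not part of the original driver): because the
 * semaphore counts commands the hardware has accepted but not yet retired,
 * software could drain a channel by polling the semaphore back to zero.
 * The driver itself waits on the per-channel IRQ bit instead (see
 * mxs_dma_wait_complete() below); this sketch only makes the semantics
 * described above concrete.
 */
static inline int example_drain_channel(int channel)
{
	int sem;

	do {
		sem = mxs_dma_read_semaphore(channel);
		if (sem < 0)	/* Propagate -EINVAL from validation. */
			return sem;
	} while (sem != 0);	/* Zero: all accepted commands retired. */

	return 0;
}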

#ifndef	CONFIG_SYS_DCACHE_OFF
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uint32_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif
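
/*
 * Illustrative sketch (an assumption, not part of the original driver):
 * mxs_dma_flush_desc() only covers the descriptor itself. Any data buffer
 * handed to the hardware needs the same cache maintenance, with the range
 * rounded out to whole cache lines, e.g.:
 */
static inline void example_flush_dma_buffer(void *buf, size_t len)
{
	uint32_t addr = (uint32_t)buf;

	/* Round down/up to MXS_DMA_ALIGNMENT (the cache line size). */
	flush_dcache_range(addr & ~(MXS_DMA_ALIGNMENT - 1),
			   roundup(addr + len, MXS_DMA_ALIGNMENT));
}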

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
				&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		writel(pchan->pending_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
			&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
			&apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an undefined
 * state. It is unwise to call this function if there is ANY chance the hardware
 * is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		&apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}

/*
 * Resets the DMA channel hardware.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif (defined(CONFIG_MX28) || defined(CONFIG_MX6) || defined(CONFIG_MX7))
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	uint32_t offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), setreg);

	return 0;
}

/*
 * Enable or disable DMA interrupt.
 *
 * This function enables or disables the given DMA channel's ability to
 * interrupt the CPU.
 */
static int mxs_dma_enable_irq(int channel, int enable)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	if (enable)
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
			&apbh_regs->hw_apbh_ctrl1_set);
	else
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
			&apbh_regs->hw_apbh_ctrl1_clr);

	return 0;
}

/*
 * Clear DMA interrupt.
 *
 * The software that is using the DMA channel must register to receive its
 * interrupts and, when they arrive, must call this function to clear them.
 */
static int mxs_dma_ack_irq(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);

	return 0;
}

/*
 * Request to reserve a DMA channel
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}

/*
 * Release a DMA channel.
 *
 * This function releases a DMA channel from its current owner.
 *
 * The channel will NOT be released if it's marked "busy" (see
 * mxs_dma_enable()).
 */
int mxs_dma_release(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY)
		return -EBUSY;

	pchan->dev = 0;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;

	return 0;
}

/*
 * Allocate DMA descriptor
 */
struct mxs_dma_desc *mxs_dma_desc_alloc(void)
{
	struct mxs_dma_desc *pdesc;
	uint32_t size;

	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
	pdesc = memalign(MXS_DMA_ALIGNMENT, size);

	if (pdesc == NULL)
		return NULL;

	memset(pdesc, 0, sizeof(*pdesc));
	pdesc->address = (dma_addr_t)pdesc;

	return pdesc;
}

/*
 * Free DMA descriptor
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	if (pdesc == NULL)
		return;

	free(pdesc);
}
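
/*
 * Illustrative sketch (an assumption, not part of the original driver):
 * a caller typically allocates a descriptor, fills in the command word and
 * buffer address, and frees it once the transfer has completed. The
 * MXS_DMA_DESC_* command-word bits come from <asm/mach-imx/dma.h>.
 */
static inline struct mxs_dma_desc *example_build_read_desc(void *buf,
							   size_t len)
{
	struct mxs_dma_desc *d = mxs_dma_desc_alloc();

	if (!d)
		return NULL;

	/*
	 * DMA_WRITE transfers from the device into memory (a "read" from
	 * the caller's point of view). len must fit the 16-bit transfer
	 * count field.
	 */
	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE |
		      MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
		      (len << MXS_DMA_DESC_BYTES_OFFSET);
	d->cmd.address = (dma_addr_t)buf;

	return d;
}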

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore if
 *     the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be
 *     set, unless it suits the purposes of the software. For example, one could
 *     construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 *     bit set only in the last one. Then, setting the DMA channel's hardware
 *     semaphore to one would cause the entire series of five commands to be
 *     processed. However, this example would violate the invariant given above.
 *
 * Rule:
 *    ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the DMA
 *    channel's hardware semaphore will be decremented EVERY time a command is
 *    processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
					node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
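
/*
 * Illustrative sketch (an assumption, not part of the original driver):
 * honoring the Rule above, every command in a multi-descriptor chain
 * carries MXS_DMA_DESC_DEC_SEM, so the hardware semaphore steps down once
 * per retired command. example_build_read_desc() is the hypothetical
 * helper sketched after mxs_dma_desc_free() above.
 */
static inline int example_append_chain(int channel, void *buf1, void *buf2,
				       size_t len)
{
	struct mxs_dma_desc *d1 = example_build_read_desc(buf1, len);
	struct mxs_dma_desc *d2 = example_build_read_desc(buf2, len);
	int ret;

	if (!d1 || !d2) {
		mxs_dma_desc_free(d1);	/* Safe: free ignores NULL. */
		mxs_dma_desc_free(d2);
		return -ENOMEM;
	}

	/* Both command words already include MXS_DMA_DESC_DEC_SEM. */
	ret = mxs_dma_desc_append(channel, d1);
	if (!ret)
		ret = mxs_dma_desc_append(channel, d2);

	return ret;
}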

/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		if (pchan->active_num <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Wait for DMA channel to complete
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
				1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}

/*
 * Execute the DMA channel
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
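
/*
 * Illustrative sketch (an assumption, not part of the original driver):
 * a typical single-shot transfer ties the pieces above together -- build
 * and append a descriptor, then let mxs_dma_go() run the channel to
 * completion. example_build_read_desc() is the hypothetical helper
 * sketched earlier.
 */
static inline int example_run_transfer(int channel, void *buf, size_t len)
{
	struct mxs_dma_desc *d = example_build_read_desc(buf, len);
	int ret;

	if (!d)
		return -ENOMEM;

	ret = mxs_dma_desc_append(channel, d);
	if (!ret)
		ret = mxs_dma_go(channel);	/* Blocks until IRQ or timeout. */

	mxs_dma_desc_free(d);
	return ret;
}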

/*
 * Execute a continuously running circular DMA descriptor.
 * NOTE: This is not intended for general use, but rather
 *	 for the LCD driver in Smart-LCD mode. It allows
 *	 continuous triggering of the RUN bit there.
 */
void mxs_dma_circ_start(int chan, struct mxs_dma_desc *pdesc)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_dma_flush_desc(pdesc);

	mxs_dma_enable_irq(chan, 1);

	writel(mxs_dma_cmd_address(pdesc),
		&apbh_regs->ch[chan].hw_apbh_ch_nxtcmdar);
	writel(1, &apbh_regs->ch[chan].hw_apbh_ch_sema);
	writel(1 << (chan + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		&apbh_regs->hw_apbh_ctrl0_clr);
}

/*
 * Initialize the DMA hardware
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif
}

int mxs_dma_init_channel(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	pchan = mxs_dma_channels + channel;
	pchan->flags = MXS_DMA_FLAGS_VALID;

	ret = mxs_dma_request(channel);

	if (ret) {
		printf("MXS DMA: Can't acquire DMA channel %i\n",
			channel);
		return ret;
	}

	mxs_dma_reset(channel);
	mxs_dma_ack_irq(channel);

	return 0;
}
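
/*
 * Illustrative sketch (an assumption, not part of the original driver):
 * callers such as the GPMI NAND driver bring the block up once and then
 * claim the channels they need, e.g.:
 *
 *	int i, ret;
 *
 *	mxs_dma_init();
 *	for (i = 0; i < 4; i++) {
 *		ret = mxs_dma_init_channel(MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + i);
 *		if (ret)
 *			return ret;
 *	}
 *
 * MXS_DMA_CHANNEL_AHB_APBH_GPMI0 comes from <asm/mach-imx/dma.h>.
 */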