/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/list.h>

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch/dma.h>

static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];

/*
 * Test if the DMA channel is a valid channel
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Return the address of the command within a descriptor.
 */
static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
{
	return desc->address + offsetof(struct mxs_dma_desc, cmd);
}

/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware will process,
 * but has not yet finished. This is a volatile value read directly from
 * hardware, so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_desc_append() for details on how DMA command blocks must be
 * configured to maintain the expected behavior of the semaphore's value.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mx28_apbh_regs *apbh_regs =
		(struct mx28_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}
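
/*
 * Illustrative sketch (not part of the driver): a caller that wants to block
 * until the hardware has drained every command it was given could poll the
 * semaphore until it reaches zero:
 *
 *	int sem;
 *
 *	do {
 *		sem = mxs_dma_read_semaphore(channel);
 *	} while (sem > 0);
 *
 * mxs_dma_finish() below uses the same reading to decide how many descriptors
 * it may retire.
 */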

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mx28_apbh_regs *apbh_regs =
		(struct mx28_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

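	/*
	 * The channel is already running: mxs_dma_desc_append() has chained
	 * the new commands onto the tail of the running list, so only the
	 * hardware semaphore has to be raised (and, if the hardware is about
	 * to run dry, NXTCMDAR pointed at the next command).
	 */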
	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
				&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		writel(pchan->pending_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
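		/*
		 * The channel is idle: point the hardware at the first
		 * descriptor, raise the semaphore by the number of commands
		 * to run and ungate the channel's clock.
		 */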
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
			&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
			&apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an undefined
 * state. It is unwise to call this function if there is ANY chance the hardware
 * is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mx28_apbh_regs *apbh_regs =
		(struct mx28_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

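	/* Gate the channel's clock; this stops the channel immediately. */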
	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		&apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}

/*
 * Resets the DMA channel hardware.
 */
static int mxs_dma_reset(int channel)
{
	struct mx28_apbh_regs *apbh_regs =
		(struct mx28_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET),
		&apbh_regs->hw_apbh_channel_ctrl_set);

	return 0;
}

/*
 * Enable or disable DMA interrupt.
 *
 * This function enables or disables the given DMA channel's ability to
 * interrupt the CPU.
 */
static int mxs_dma_enable_irq(int channel, int enable)
{
	struct mx28_apbh_regs *apbh_regs =
		(struct mx28_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	if (enable)
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
			&apbh_regs->hw_apbh_ctrl1_set);
	else
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
			&apbh_regs->hw_apbh_ctrl1_clr);

	return 0;
}

/*
 * Clear DMA interrupt.
 *
 * The software that is using the DMA channel must register to receive its
 * interrupts and, when they arrive, must call this function to clear them.
 */
static int mxs_dma_ack_irq(int channel)
{
	struct mx28_apbh_regs *apbh_regs =
		(struct mx28_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

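	/*
	 * CTRL1 holds the per-channel command-complete IRQ status bits,
	 * CTRL2 the per-channel error IRQ status bits; clear both for this
	 * channel.
	 */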
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);

	return 0;
}

/*
 * Request to reserve a DMA channel
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}

/*
 * Release a DMA channel.
 *
 * This function releases a DMA channel from its current owner.
 *
 * The channel will NOT be released if it's marked "busy" (see
 * mxs_dma_enable()).
 */
static int mxs_dma_release(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY)
		return -EBUSY;

	pchan->dev = 0;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;

	return 0;
}

/*
 * Allocate DMA descriptor
 */
struct mxs_dma_desc *mxs_dma_desc_alloc(void)
{
	struct mxs_dma_desc *pdesc;

	pdesc = memalign(MXS_DMA_ALIGNMENT, sizeof(struct mxs_dma_desc));

	if (pdesc == NULL)
		return NULL;

	memset(pdesc, 0, sizeof(*pdesc));
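	/*
	 * U-Boot runs with a 1:1 memory mapping here, so the bus address
	 * handed to the DMA engine can simply be the descriptor's own
	 * address.
	 */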
	pdesc->address = (dma_addr_t)pdesc;

	return pdesc;
}

/*
 * Free DMA descriptor
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	if (pdesc == NULL)
		return;

	free(pdesc);
}

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore if
 *     the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be
 *     set, unless it suits the purposes of the software. For example, one could
 *     construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 *     bit set only in the last one. Then, setting the DMA channel's hardware
 *     semaphore to one would cause the entire series of five commands to be
 *     processed. However, this example would violate the invariant given above.
 *
 * Rule:
 *    ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the DMA
 *    channel's hardware semaphore will be decremented EVERY time a command is
 *    processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
					node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	return ret;
}
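
/*
 * Illustrative sketch (not part of the driver): a command that follows the
 * rule above sets MXS_DMA_DESC_DEC_SEM (the DECREMENT_SEMAPHORE bit) in its
 * command word. For example, a device-to-memory transfer of "length" bytes
 * into "buffer" could be set up roughly like this ("buffer", "length" and
 * "channel" are placeholders; the flag names are those from the platform's
 * asm/arch/dma.h):
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE |
 *			MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM |
 *			MXS_DMA_DESC_WAIT4END |
 *			(length << MXS_DMA_DESC_BYTES_OFFSET);
 *	d->cmd.address = (dma_addr_t)buffer;
 *	mxs_dma_desc_append(channel, d);
 */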

/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

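	/*
	 * Walk the active list from its head and retire descriptors until
	 * the count of outstanding commands matches the hardware semaphore.
	 */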
	list_for_each_safe(p, q, &pchan->active) {
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Wait for DMA channel to complete
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mx28_apbh_regs *apbh_regs =
		(struct mx28_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

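	/*
	 * Poll CTRL1 for this channel's command-complete IRQ bit; if it does
	 * not appear within the timeout, reset the channel.
	 */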
	if (mx28_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
				1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}

/*
 * Execute the DMA channel
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
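
/*
 * Illustrative usage sketch (not part of the driver): mxs_dma_init() below
 * reserves every channel at boot, so a typical caller only allocates a
 * descriptor, fills in its command, appends it and kicks the channel. The
 * channel number and command flags here are placeholders taken from the
 * platform's asm/arch/dma.h:
 *
 *	struct mxs_dma_desc *d;
 *	int chan = MXS_DMA_CHANNEL_AHB_APBH_SSP0;
 *	int ret;
 *
 *	d = mxs_dma_desc_alloc();
 *	if (!d)
 *		return -ENOMEM;
 *
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
 *			MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END;
 *
 *	mxs_dma_desc_append(chan, d);
 *	ret = mxs_dma_go(chan);
 *	mxs_dma_desc_free(d);
 */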

/*
 * Initialize the DMA hardware
 */
int mxs_dma_init(void)
{
	struct mx28_apbh_regs *apbh_regs =
		(struct mx28_apbh_regs *)MXS_APBH_BASE;
	struct mxs_dma_chan *pchan;
	int ret, channel;

	mx28_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif

	for (channel = 0; channel < MXS_MAX_DMA_CHANNELS; channel++) {
		pchan = mxs_dma_channels + channel;
		pchan->flags = MXS_DMA_FLAGS_VALID;

		ret = mxs_dma_request(channel);

		if (ret) {
			printf("MXS DMA: Can't acquire DMA channel %i\n",
				channel);

			goto err;
		}

		mxs_dma_reset(channel);
		mxs_dma_ack_irq(channel);
	}

	return 0;

err:
	while (--channel >= 0)
		mxs_dma_release(channel);
	return ret;
}
583