xref: /openbmc/linux/drivers/dma/dw/core.c (revision 7587eb18)
1 /*
2  * Core driver for the Synopsys DesignWare DMA Controller
3  *
4  * Copyright (C) 2007-2008 Atmel Corporation
5  * Copyright (C) 2010-2011 ST Microelectronics
6  * Copyright (C) 2013 Intel Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/bitops.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/err.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/pm_runtime.h>
26 
27 #include "../dmaengine.h"
28 #include "internal.h"
29 
30 /*
31  * This supports the Synopsys "DesignWare AHB Central DMA Controller"
32  * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
33  * of which use ARM any more).  See the "Databook" from Synopsys for
34  * information beyond what licensees probably provide.
35  *
36  * The driver has been tested with the Atmel AT32AP7000, which does not
37  * support descriptor writeback.
38  */
39 
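/*
 * Build the common CTL_LO value for a channel: the burst sizes for both
 * ends, the LLP update enables, and the destination/source master
 * interfaces, based on the channel's configuration and current direction.
 */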
40 #define DWC_DEFAULT_CTLLO(_chan) ({				\
41 		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
42 		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
43 		bool _is_slave = is_slave_direction(_dwc->direction);	\
44 		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
45 			DW_DMA_MSIZE_16;			\
46 		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
47 			DW_DMA_MSIZE_16;			\
48 		u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?		\
49 			_dwc->p_master : _dwc->m_master;		\
50 		u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?		\
51 			_dwc->p_master : _dwc->m_master;		\
52 								\
53 		(DWC_CTLL_DST_MSIZE(_dmsize)			\
54 		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
55 		 | DWC_CTLL_LLP_D_EN				\
56 		 | DWC_CTLL_LLP_S_EN				\
57 		 | DWC_CTLL_DMS(_dms)				\
58 		 | DWC_CTLL_SMS(_sms));				\
59 	})
60 
61 /* The set of bus widths supported by the DMA controller */
62 #define DW_DMA_BUSWIDTHS			  \
63 	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
64 	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
65 	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
66 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
67 
68 /*----------------------------------------------------------------------*/
69 
70 static struct device *chan2dev(struct dma_chan *chan)
71 {
72 	return &chan->dev->device;
73 }
74 
75 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
76 {
77 	return to_dw_desc(dwc->active_list.next);
78 }
79 
80 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
81 {
82 	struct dw_desc		*desc = txd_to_dw_desc(tx);
83 	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
84 	dma_cookie_t		cookie;
85 	unsigned long		flags;
86 
87 	spin_lock_irqsave(&dwc->lock, flags);
88 	cookie = dma_cookie_assign(tx);
89 
90 	/*
91 	 * REVISIT: We should attempt to chain as many descriptors as
92 	 * possible, perhaps even appending to those already submitted
93 	 * for DMA. But this is hard to do in a race-free manner.
94 	 */
95 
96 	list_add_tail(&desc->desc_node, &dwc->queue);
97 	spin_unlock_irqrestore(&dwc->lock, flags);
98 	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
99 		 __func__, desc->txd.cookie);
100 
101 	return cookie;
102 }
103 
104 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
105 {
106 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
107 	struct dw_desc *desc;
108 	dma_addr_t phys;
109 
110 	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
111 	if (!desc)
112 		return NULL;
113 
114 	dwc->descs_allocated++;
115 	INIT_LIST_HEAD(&desc->tx_list);
116 	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
117 	desc->txd.tx_submit = dwc_tx_submit;
118 	desc->txd.flags = DMA_CTRL_ACK;
119 	desc->txd.phys = phys;
120 	return desc;
121 }
122 
123 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124 {
125 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
126 	struct dw_desc *child, *_next;
127 
128 	if (unlikely(!desc))
129 		return;
130 
131 	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
132 		list_del(&child->desc_node);
133 		dma_pool_free(dw->desc_pool, child, child->txd.phys);
134 		dwc->descs_allocated--;
135 	}
136 
137 	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
138 	dwc->descs_allocated--;
139 }
140 
141 static void dwc_initialize(struct dw_dma_chan *dwc)
142 {
143 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
144 	u32 cfghi = DWC_CFGH_FIFO_MODE;
145 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
146 
147 	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
148 		return;
149 
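	/* Route the hardware handshake interfaces for this channel */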
150 	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
151 	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
152 
153 	channel_writel(dwc, CFG_LO, cfglo);
154 	channel_writel(dwc, CFG_HI, cfghi);
155 
156 	/* Enable interrupts */
157 	channel_set_bit(dw, MASK.XFER, dwc->mask);
158 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
159 
160 	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
161 }
162 
163 /*----------------------------------------------------------------------*/
164 
165 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
166 {
167 	dev_err(chan2dev(&dwc->chan),
168 		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
169 		channel_readl(dwc, SAR),
170 		channel_readl(dwc, DAR),
171 		channel_readl(dwc, LLP),
172 		channel_readl(dwc, CTL_HI),
173 		channel_readl(dwc, CTL_LO));
174 }
175 
176 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
177 {
178 	channel_clear_bit(dw, CH_EN, dwc->mask);
179 	while (dma_readl(dw, CH_EN) & dwc->mask)
180 		cpu_relax();
181 }
182 
183 /*----------------------------------------------------------------------*/
184 
185 /* Perform single block transfer */
186 static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
187 				       struct dw_desc *desc)
188 {
189 	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
190 	u32		ctllo;
191 
192 	/*
193 	 * Software emulation of LLP mode relies on interrupts to continue
194 	 * the multi-block transfer.
195 	 */
196 	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
197 
198 	channel_writel(dwc, SAR, lli_read(desc, sar));
199 	channel_writel(dwc, DAR, lli_read(desc, dar));
200 	channel_writel(dwc, CTL_LO, ctllo);
201 	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
202 	channel_set_bit(dw, CH_EN, dwc->mask);
203 
204 	/* Move pointer to next descriptor */
205 	dwc->tx_node_active = dwc->tx_node_active->next;
206 }
207 
208 /* Called with dwc->lock held and bh disabled */
209 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
210 {
211 	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
212 	u8		lms = DWC_LLP_LMS(dwc->m_master);
213 	unsigned long	was_soft_llp;
214 
215 	/* ASSERT:  channel is idle */
216 	if (dma_readl(dw, CH_EN) & dwc->mask) {
217 		dev_err(chan2dev(&dwc->chan),
218 			"%s: BUG: Attempted to start non-idle channel\n",
219 			__func__);
220 		dwc_dump_chan_regs(dwc);
221 
222 		/* The tasklet will hopefully advance the queue... */
223 		return;
224 	}
225 
226 	if (dwc->nollp) {
227 		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
228 						&dwc->flags);
229 		if (was_soft_llp) {
230 			dev_err(chan2dev(&dwc->chan),
231 				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
232 			return;
233 		}
234 
235 		dwc_initialize(dwc);
236 
237 		first->residue = first->total_len;
238 		dwc->tx_node_active = &first->tx_list;
239 
240 		/* Submit first block */
241 		dwc_do_single_block(dwc, first);
242 
243 		return;
244 	}
245 
246 	dwc_initialize(dwc);
247 
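	/*
	 * Hardware LLP mode: point the channel at the first LLI and let the
	 * controller walk the list on its own.
	 */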
248 	channel_writel(dwc, LLP, first->txd.phys | lms);
249 	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
250 	channel_writel(dwc, CTL_HI, 0);
251 	channel_set_bit(dw, CH_EN, dwc->mask);
252 }
253 
254 static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
255 {
256 	struct dw_desc *desc;
257 
258 	if (list_empty(&dwc->queue))
259 		return;
260 
261 	list_move(dwc->queue.next, &dwc->active_list);
262 	desc = dwc_first_active(dwc);
263 	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
264 	dwc_dostart(dwc, desc);
265 }
266 
267 /*----------------------------------------------------------------------*/
268 
269 static void
270 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
271 		bool callback_required)
272 {
273 	dma_async_tx_callback		callback = NULL;
274 	void				*param = NULL;
275 	struct dma_async_tx_descriptor	*txd = &desc->txd;
276 	struct dw_desc			*child;
277 	unsigned long			flags;
278 
279 	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
280 
281 	spin_lock_irqsave(&dwc->lock, flags);
282 	dma_cookie_complete(txd);
283 	if (callback_required) {
284 		callback = txd->callback;
285 		param = txd->callback_param;
286 	}
287 
288 	/* Acknowledge the descriptor and its children, then return them to the pool */
289 	list_for_each_entry(child, &desc->tx_list, desc_node)
290 		async_tx_ack(&child->txd);
291 	async_tx_ack(&desc->txd);
292 	dwc_desc_put(dwc, desc);
293 	spin_unlock_irqrestore(&dwc->lock, flags);
294 
295 	if (callback)
296 		callback(param);
297 }
298 
299 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
300 {
301 	struct dw_desc *desc, *_desc;
302 	LIST_HEAD(list);
303 	unsigned long flags;
304 
305 	spin_lock_irqsave(&dwc->lock, flags);
306 	if (dma_readl(dw, CH_EN) & dwc->mask) {
307 		dev_err(chan2dev(&dwc->chan),
308 			"BUG: XFER bit set, but channel not idle!\n");
309 
310 		/* Try to continue after resetting the channel... */
311 		dwc_chan_disable(dw, dwc);
312 	}
313 
314 	/*
315 	 * Submit queued descriptors ASAP, i.e. before we go through
316 	 * the completed ones.
317 	 */
318 	list_splice_init(&dwc->active_list, &list);
319 	dwc_dostart_first_queued(dwc);
320 
321 	spin_unlock_irqrestore(&dwc->lock, flags);
322 
323 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
324 		dwc_descriptor_complete(dwc, desc, true);
325 }
326 
327 /* Returns how many bytes were already sent from the source */
328 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
329 {
330 	u32 ctlhi = channel_readl(dwc, CTL_HI);
331 	u32 ctllo = channel_readl(dwc, CTL_LO);
332 
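	/*
	 * BLOCK_TS in CTL_HI counts completed transfers of SRC_TR_WIDTH
	 * (CTL_LO bits 6:4), so scale it up to bytes.
	 */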
333 	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
334 }
335 
336 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
337 {
338 	dma_addr_t llp;
339 	struct dw_desc *desc, *_desc;
340 	struct dw_desc *child;
341 	u32 status_xfer;
342 	unsigned long flags;
343 
344 	spin_lock_irqsave(&dwc->lock, flags);
345 	llp = channel_readl(dwc, LLP);
346 	status_xfer = dma_readl(dw, RAW.XFER);
347 
348 	if (status_xfer & dwc->mask) {
349 		/* Everything we've submitted is done */
350 		dma_writel(dw, CLEAR.XFER, dwc->mask);
351 
352 		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
353 			struct list_head *head, *active = dwc->tx_node_active;
354 
355 			/*
356 			 * We must be inside the first active descriptor;
357 			 * otherwise something is really wrong.
358 			 */
359 			desc = dwc_first_active(dwc);
360 
361 			head = &desc->tx_list;
362 			if (active != head) {
363 				/* Update residue to reflect last sent descriptor */
364 				if (active == head->next)
365 					desc->residue -= desc->len;
366 				else
367 					desc->residue -= to_dw_desc(active->prev)->len;
368 
369 				child = to_dw_desc(active);
370 
371 				/* Submit next block */
372 				dwc_do_single_block(dwc, child);
373 
374 				spin_unlock_irqrestore(&dwc->lock, flags);
375 				return;
376 			}
377 
378 			/* We are done here */
379 			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
380 		}
381 
382 		spin_unlock_irqrestore(&dwc->lock, flags);
383 
384 		dwc_complete_all(dw, dwc);
385 		return;
386 	}
387 
388 	if (list_empty(&dwc->active_list)) {
389 		spin_unlock_irqrestore(&dwc->lock, flags);
390 		return;
391 	}
392 
393 	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
394 		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
395 		spin_unlock_irqrestore(&dwc->lock, flags);
396 		return;
397 	}
398 
399 	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
400 
401 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
402 		/* Initial residue value */
403 		desc->residue = desc->total_len;
404 
405 		/* Check this descriptor's address */
406 		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
407 			spin_unlock_irqrestore(&dwc->lock, flags);
408 			return;
409 		}
410 
411 		/* Check this descriptor's llp */
412 		if (lli_read(desc, llp) == llp) {
413 			/* This one is currently in progress */
414 			desc->residue -= dwc_get_sent(dwc);
415 			spin_unlock_irqrestore(&dwc->lock, flags);
416 			return;
417 		}
418 
419 		desc->residue -= desc->len;
420 		list_for_each_entry(child, &desc->tx_list, desc_node) {
421 			if (lli_read(child, llp) == llp) {
422 				/* Currently in progress */
423 				desc->residue -= dwc_get_sent(dwc);
424 				spin_unlock_irqrestore(&dwc->lock, flags);
425 				return;
426 			}
427 			desc->residue -= child->len;
428 		}
429 
430 		/*
431 		 * No descriptors so far seem to be in progress, i.e.
432 		 * this one must be done.
433 		 */
434 		spin_unlock_irqrestore(&dwc->lock, flags);
435 		dwc_descriptor_complete(dwc, desc, true);
436 		spin_lock_irqsave(&dwc->lock, flags);
437 	}
438 
439 	dev_err(chan2dev(&dwc->chan),
440 		"BUG: All descriptors done, but channel not idle!\n");
441 
442 	/* Try to continue after resetting the channel... */
443 	dwc_chan_disable(dw, dwc);
444 
445 	dwc_dostart_first_queued(dwc);
446 	spin_unlock_irqrestore(&dwc->lock, flags);
447 }
448 
449 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
450 {
451 	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
452 		 lli_read(desc, sar),
453 		 lli_read(desc, dar),
454 		 lli_read(desc, llp),
455 		 lli_read(desc, ctlhi),
456 		 lli_read(desc, ctllo));
457 }
458 
459 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
460 {
461 	struct dw_desc *bad_desc;
462 	struct dw_desc *child;
463 	unsigned long flags;
464 
465 	dwc_scan_descriptors(dw, dwc);
466 
467 	spin_lock_irqsave(&dwc->lock, flags);
468 
469 	/*
470 	 * The descriptor currently at the head of the active list is
471 	 * borked. Since we don't have any way to report errors, we'll
472 	 * just have to scream loudly and try to carry on.
473 	 */
474 	bad_desc = dwc_first_active(dwc);
475 	list_del_init(&bad_desc->desc_node);
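	/* Pull the next queued descriptor onto the tail of the active list */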
476 	list_move(dwc->queue.next, dwc->active_list.prev);
477 
478 	/* Clear the error flag and try to restart the controller */
479 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
480 	if (!list_empty(&dwc->active_list))
481 		dwc_dostart(dwc, dwc_first_active(dwc));
482 
483 	/*
484 	 * WARN may seem harsh, but since this only happens
485 	 * when someone submits a bad physical address in a
486 	 * descriptor, we should consider ourselves lucky that the
487 	 * controller flagged an error instead of scribbling over
488 	 * random memory locations.
489 	 */
490 	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
491 				       "  cookie: %d\n", bad_desc->txd.cookie);
492 	dwc_dump_lli(dwc, bad_desc);
493 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
494 		dwc_dump_lli(dwc, child);
495 
496 	spin_unlock_irqrestore(&dwc->lock, flags);
497 
498 	/* Pretend the descriptor completed successfully */
499 	dwc_descriptor_complete(dwc, bad_desc, true);
500 }
501 
502 /* --------------------- Cyclic DMA API extensions -------------------- */
503 
504 dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
505 {
506 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
507 	return channel_readl(dwc, SAR);
508 }
509 EXPORT_SYMBOL(dw_dma_get_src_addr);
510 
511 dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
512 {
513 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
514 	return channel_readl(dwc, DAR);
515 }
516 EXPORT_SYMBOL(dw_dma_get_dst_addr);
517 
518 /* Called with dwc->lock held and all DMAC interrupts disabled */
519 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
520 		u32 status_block, u32 status_err, u32 status_xfer)
521 {
522 	unsigned long flags;
523 
524 	if (status_block & dwc->mask) {
525 		void (*callback)(void *param);
526 		void *callback_param;
527 
528 		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
529 				channel_readl(dwc, LLP));
530 		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
531 
532 		callback = dwc->cdesc->period_callback;
533 		callback_param = dwc->cdesc->period_callback_param;
534 
535 		if (callback)
536 			callback(callback_param);
537 	}
538 
539 	/*
540 	 * Error and transfer-complete interrupts are highly unlikely here and
541 	 * are most likely due to a configuration error by the user.
542 	 */
543 	if (unlikely(status_err & dwc->mask) ||
544 			unlikely(status_xfer & dwc->mask)) {
545 		unsigned int i;
546 
547 		dev_err(chan2dev(&dwc->chan),
548 			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
549 			status_xfer ? "xfer" : "error");
550 
551 		spin_lock_irqsave(&dwc->lock, flags);
552 
553 		dwc_dump_chan_regs(dwc);
554 
555 		dwc_chan_disable(dw, dwc);
556 
557 		/* Make sure DMA does not restart by loading a new list */
558 		channel_writel(dwc, LLP, 0);
559 		channel_writel(dwc, CTL_LO, 0);
560 		channel_writel(dwc, CTL_HI, 0);
561 
562 		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
563 		dma_writel(dw, CLEAR.ERROR, dwc->mask);
564 		dma_writel(dw, CLEAR.XFER, dwc->mask);
565 
566 		for (i = 0; i < dwc->cdesc->periods; i++)
567 			dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
568 
569 		spin_unlock_irqrestore(&dwc->lock, flags);
570 	}
571 
572 	/* Re-enable interrupts */
573 	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
574 }
575 
576 /* ------------------------------------------------------------------------- */
577 
578 static void dw_dma_tasklet(unsigned long data)
579 {
580 	struct dw_dma *dw = (struct dw_dma *)data;
581 	struct dw_dma_chan *dwc;
582 	u32 status_block;
583 	u32 status_xfer;
584 	u32 status_err;
585 	unsigned int i;
586 
587 	status_block = dma_readl(dw, RAW.BLOCK);
588 	status_xfer = dma_readl(dw, RAW.XFER);
589 	status_err = dma_readl(dw, RAW.ERROR);
590 
591 	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
592 
593 	for (i = 0; i < dw->dma.chancnt; i++) {
594 		dwc = &dw->chan[i];
595 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
596 			dwc_handle_cyclic(dw, dwc, status_block, status_err,
597 					status_xfer);
598 		else if (status_err & (1 << i))
599 			dwc_handle_error(dw, dwc);
600 		else if (status_xfer & (1 << i))
601 			dwc_scan_descriptors(dw, dwc);
602 	}
603 
604 	/* Re-enable interrupts */
605 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
606 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
607 }
608 
609 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
610 {
611 	struct dw_dma *dw = dev_id;
612 	u32 status;
613 
614 	/* The IRQ may be shared; ignore interrupts while the DMAC is not in use */
615 	if (!dw->in_use)
616 		return IRQ_NONE;
617 
618 	status = dma_readl(dw, STATUS_INT);
619 	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
620 
621 	/* Check if we have any interrupt from the DMAC */
622 	if (!status)
623 		return IRQ_NONE;
624 
625 	/*
626 	 * Just disable the interrupts. We'll turn them back on in the
627 	 * softirq handler.
628 	 */
629 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
630 	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
631 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
632 
633 	status = dma_readl(dw, STATUS_INT);
634 	if (status) {
635 		dev_err(dw->dma.dev,
636 			"BUG: Unexpected interrupts pending: 0x%x\n",
637 			status);
638 
639 		/* Try to recover by masking all eight possible channels */
640 		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
641 		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
642 		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
643 		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
644 		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
645 	}
646 
647 	tasklet_schedule(&dw->tasklet);
648 
649 	return IRQ_HANDLED;
650 }
651 
652 /*----------------------------------------------------------------------*/
653 
654 static struct dma_async_tx_descriptor *
655 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
656 		size_t len, unsigned long flags)
657 {
658 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
659 	struct dw_dma		*dw = to_dw_dma(chan->device);
660 	struct dw_desc		*desc;
661 	struct dw_desc		*first;
662 	struct dw_desc		*prev;
663 	size_t			xfer_count;
664 	size_t			offset;
665 	u8			m_master = dwc->m_master;
666 	unsigned int		src_width;
667 	unsigned int		dst_width;
668 	unsigned int		data_width = dw->pdata->data_width[m_master];
669 	u32			ctllo;
670 	u8			lms = DWC_LLP_LMS(m_master);
671 
672 	dev_vdbg(chan2dev(chan),
673 			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
674 			&dest, &src, len, flags);
675 
676 	if (unlikely(!len)) {
677 		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
678 		return NULL;
679 	}
680 
681 	dwc->direction = DMA_MEM_TO_MEM;
682 
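	/*
	 * Use the widest transfer the master data width and the alignment of
	 * src, dest and len allow (the width is the log2 of the size in bytes).
	 */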
683 	src_width = dst_width = __ffs(data_width | src | dest | len);
684 
685 	ctllo = DWC_DEFAULT_CTLLO(chan)
686 			| DWC_CTLL_DST_WIDTH(dst_width)
687 			| DWC_CTLL_SRC_WIDTH(src_width)
688 			| DWC_CTLL_DST_INC
689 			| DWC_CTLL_SRC_INC
690 			| DWC_CTLL_FC_M2M;
691 	prev = first = NULL;
692 
693 	for (offset = 0; offset < len; offset += xfer_count << src_width) {
694 		xfer_count = min_t(size_t, (len - offset) >> src_width,
695 					   dwc->block_size);
696 
697 		desc = dwc_desc_get(dwc);
698 		if (!desc)
699 			goto err_desc_get;
700 
701 		lli_write(desc, sar, src + offset);
702 		lli_write(desc, dar, dest + offset);
703 		lli_write(desc, ctllo, ctllo);
704 		lli_write(desc, ctlhi, xfer_count);
705 		desc->len = xfer_count << src_width;
706 
707 		if (!first) {
708 			first = desc;
709 		} else {
710 			lli_write(prev, llp, desc->txd.phys | lms);
711 			list_add_tail(&desc->desc_node, &first->tx_list);
712 		}
713 		prev = desc;
714 	}
715 
716 	if (flags & DMA_PREP_INTERRUPT)
717 		/* Trigger interrupt after last block */
718 		lli_set(prev, ctllo, DWC_CTLL_INT_EN);
719 
720 	lli_write(prev, llp, 0);
721 	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
722 	first->txd.flags = flags;
723 	first->total_len = len;
724 
725 	return &first->txd;
726 
727 err_desc_get:
728 	dwc_desc_put(dwc, first);
729 	return NULL;
730 }
731 
732 static struct dma_async_tx_descriptor *
733 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
734 		unsigned int sg_len, enum dma_transfer_direction direction,
735 		unsigned long flags, void *context)
736 {
737 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
738 	struct dw_dma		*dw = to_dw_dma(chan->device);
739 	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
740 	struct dw_desc		*prev;
741 	struct dw_desc		*first;
742 	u32			ctllo;
743 	u8			m_master = dwc->m_master;
744 	u8			lms = DWC_LLP_LMS(m_master);
745 	dma_addr_t		reg;
746 	unsigned int		reg_width;
747 	unsigned int		mem_width;
748 	unsigned int		data_width = dw->pdata->data_width[m_master];
749 	unsigned int		i;
750 	struct scatterlist	*sg;
751 	size_t			total_len = 0;
752 
753 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
754 
755 	if (unlikely(!is_slave_direction(direction) || !sg_len))
756 		return NULL;
757 
758 	dwc->direction = direction;
759 
760 	prev = first = NULL;
761 
762 	switch (direction) {
763 	case DMA_MEM_TO_DEV:
764 		reg_width = __ffs(sconfig->dst_addr_width);
765 		reg = sconfig->dst_addr;
766 		ctllo = (DWC_DEFAULT_CTLLO(chan)
767 				| DWC_CTLL_DST_WIDTH(reg_width)
768 				| DWC_CTLL_DST_FIX
769 				| DWC_CTLL_SRC_INC);
770 
771 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
772 			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
773 
774 		for_each_sg(sgl, sg, sg_len, i) {
775 			struct dw_desc	*desc;
776 			u32		len, dlen, mem;
777 
778 			mem = sg_dma_address(sg);
779 			len = sg_dma_len(sg);
780 
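			/*
			 * Memory-side width is limited by the master data
			 * width and the alignment of mem and len.
			 */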
781 			mem_width = __ffs(data_width | mem | len);
782 
783 slave_sg_todev_fill_desc:
784 			desc = dwc_desc_get(dwc);
785 			if (!desc)
786 				goto err_desc_get;
787 
788 			lli_write(desc, sar, mem);
789 			lli_write(desc, dar, reg);
790 			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
791 			if ((len >> mem_width) > dwc->block_size) {
792 				dlen = dwc->block_size << mem_width;
793 				mem += dlen;
794 				len -= dlen;
795 			} else {
796 				dlen = len;
797 				len = 0;
798 			}
799 
800 			lli_write(desc, ctlhi, dlen >> mem_width);
801 			desc->len = dlen;
802 
803 			if (!first) {
804 				first = desc;
805 			} else {
806 				lli_write(prev, llp, desc->txd.phys | lms);
807 				list_add_tail(&desc->desc_node, &first->tx_list);
808 			}
809 			prev = desc;
810 			total_len += dlen;
811 
812 			if (len)
813 				goto slave_sg_todev_fill_desc;
814 		}
815 		break;
816 	case DMA_DEV_TO_MEM:
817 		reg_width = __ffs(sconfig->src_addr_width);
818 		reg = sconfig->src_addr;
819 		ctllo = (DWC_DEFAULT_CTLLO(chan)
820 				| DWC_CTLL_SRC_WIDTH(reg_width)
821 				| DWC_CTLL_DST_INC
822 				| DWC_CTLL_SRC_FIX);
823 
824 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
825 			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
826 
827 		for_each_sg(sgl, sg, sg_len, i) {
828 			struct dw_desc	*desc;
829 			u32		len, dlen, mem;
830 
831 			mem = sg_dma_address(sg);
832 			len = sg_dma_len(sg);
833 
834 			mem_width = __ffs(data_width | mem | len);
835 
836 slave_sg_fromdev_fill_desc:
837 			desc = dwc_desc_get(dwc);
838 			if (!desc)
839 				goto err_desc_get;
840 
841 			lli_write(desc, sar, reg);
842 			lli_write(desc, dar, mem);
843 			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
844 			if ((len >> reg_width) > dwc->block_size) {
845 				dlen = dwc->block_size << reg_width;
846 				mem += dlen;
847 				len -= dlen;
848 			} else {
849 				dlen = len;
850 				len = 0;
851 			}
852 			lli_write(desc, ctlhi, dlen >> reg_width);
853 			desc->len = dlen;
854 
855 			if (!first) {
856 				first = desc;
857 			} else {
858 				lli_write(prev, llp, desc->txd.phys | lms);
859 				list_add_tail(&desc->desc_node, &first->tx_list);
860 			}
861 			prev = desc;
862 			total_len += dlen;
863 
864 			if (len)
865 				goto slave_sg_fromdev_fill_desc;
866 		}
867 		break;
868 	default:
869 		return NULL;
870 	}
871 
872 	if (flags & DMA_PREP_INTERRUPT)
873 		/* Trigger interrupt after last block */
874 		lli_set(prev, ctllo, DWC_CTLL_INT_EN);
875 
876 	lli_write(prev, llp, 0);
877 	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
878 	first->total_len = total_len;
879 
880 	return &first->txd;
881 
882 err_desc_get:
883 	dev_err(chan2dev(chan),
884 		"not enough descriptors available. Direction %d\n", direction);
885 	dwc_desc_put(dwc, first);
886 	return NULL;
887 }
888 
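/*
 * A minimal request sketch (hedged: the names and IDs below are placeholders,
 * and dws.dma_dev must be the DMA controller's own struct device for the
 * filter to match):
 *
 *	struct dw_dma_slave dws = {
 *		.dma_dev  = dmac_dev,
 *		.src_id   = 0,
 *		.dst_id   = 1,
 *		.m_master = 0,
 *		.p_master = 1,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &dws);
 */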
889 bool dw_dma_filter(struct dma_chan *chan, void *param)
890 {
891 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
892 	struct dw_dma_slave *dws = param;
893 
894 	if (dws->dma_dev != chan->device->dev)
895 		return false;
896 
897 	/* We have to copy the data since dws may point to temporary storage */
898 
899 	dwc->src_id = dws->src_id;
900 	dwc->dst_id = dws->dst_id;
901 
902 	dwc->m_master = dws->m_master;
903 	dwc->p_master = dws->p_master;
904 
905 	return true;
906 }
907 EXPORT_SYMBOL_GPL(dw_dma_filter);
908 
909 /*
910  * Fix sconfig's burst size according to dw_dmac. We need to convert it as:
911  * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
912  *
913  * NOTE: burst size 2 is not supported by the controller.
914  *
915  * This is done by taking the most significant set bit: fls(n) - 2.
916  */
917 static inline void convert_burst(u32 *maxburst)
918 {
919 	if (*maxburst > 1)
920 		*maxburst = fls(*maxburst) - 2;
921 	else
922 		*maxburst = 0;
923 }
924 
925 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
926 {
927 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
928 
929 	/* Check if chan will be configured for slave transfers */
930 	if (!is_slave_direction(sconfig->direction))
931 		return -EINVAL;
932 
933 	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
934 	dwc->direction = sconfig->direction;
935 
936 	convert_burst(&dwc->dma_sconfig.src_maxburst);
937 	convert_burst(&dwc->dma_sconfig.dst_maxburst);
938 
939 	return 0;
940 }
941 
942 static int dwc_pause(struct dma_chan *chan)
943 {
944 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
945 	unsigned long		flags;
946 	unsigned int		count = 20;	/* timeout iterations */
947 	u32			cfglo;
948 
949 	spin_lock_irqsave(&dwc->lock, flags);
950 
951 	cfglo = channel_readl(dwc, CFG_LO);
952 	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
953 	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
954 		udelay(2);
955 
956 	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
957 
958 	spin_unlock_irqrestore(&dwc->lock, flags);
959 
960 	return 0;
961 }
962 
963 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
964 {
965 	u32 cfglo = channel_readl(dwc, CFG_LO);
966 
967 	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
968 
969 	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
970 }
971 
972 static int dwc_resume(struct dma_chan *chan)
973 {
974 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
975 	unsigned long		flags;
976 
977 	spin_lock_irqsave(&dwc->lock, flags);
978 
979 	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
980 		dwc_chan_resume(dwc);
981 
982 	spin_unlock_irqrestore(&dwc->lock, flags);
983 
984 	return 0;
985 }
986 
987 static int dwc_terminate_all(struct dma_chan *chan)
988 {
989 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
990 	struct dw_dma		*dw = to_dw_dma(chan->device);
991 	struct dw_desc		*desc, *_desc;
992 	unsigned long		flags;
993 	LIST_HEAD(list);
994 
995 	spin_lock_irqsave(&dwc->lock, flags);
996 
997 	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
998 
999 	dwc_chan_disable(dw, dwc);
1000 
1001 	dwc_chan_resume(dwc);
1002 
1003 	/* active_list entries will end up before queued entries */
1004 	list_splice_init(&dwc->queue, &list);
1005 	list_splice_init(&dwc->active_list, &list);
1006 
1007 	spin_unlock_irqrestore(&dwc->lock, flags);
1008 
1009 	/* Flush all pending and queued descriptors */
1010 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
1011 		dwc_descriptor_complete(dwc, desc, false);
1012 
1013 	return 0;
1014 }
1015 
1016 static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
1017 {
1018 	struct dw_desc *desc;
1019 
1020 	list_for_each_entry(desc, &dwc->active_list, desc_node)
1021 		if (desc->txd.cookie == c)
1022 			return desc;
1023 
1024 	return NULL;
1025 }
1026 
1027 static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
1028 {
1029 	struct dw_desc *desc;
1030 	unsigned long flags;
1031 	u32 residue;
1032 
1033 	spin_lock_irqsave(&dwc->lock, flags);
1034 
1035 	desc = dwc_find_desc(dwc, cookie);
1036 	if (desc) {
1037 		if (desc == dwc_first_active(dwc)) {
1038 			residue = desc->residue;
1039 			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
1040 				residue -= dwc_get_sent(dwc);
1041 		} else {
1042 			residue = desc->total_len;
1043 		}
1044 	} else {
1045 		residue = 0;
1046 	}
1047 
1048 	spin_unlock_irqrestore(&dwc->lock, flags);
1049 	return residue;
1050 }
1051 
1052 static enum dma_status
1053 dwc_tx_status(struct dma_chan *chan,
1054 	      dma_cookie_t cookie,
1055 	      struct dma_tx_state *txstate)
1056 {
1057 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
1058 	enum dma_status		ret;
1059 
1060 	ret = dma_cookie_status(chan, cookie, txstate);
1061 	if (ret == DMA_COMPLETE)
1062 		return ret;
1063 
1064 	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1065 
1066 	ret = dma_cookie_status(chan, cookie, txstate);
1067 	if (ret == DMA_COMPLETE)
1068 		return ret;
1069 
1070 	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
1071 
1072 	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
1073 		return DMA_PAUSED;
1074 
1075 	return ret;
1076 }
1077 
1078 static void dwc_issue_pending(struct dma_chan *chan)
1079 {
1080 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
1081 	unsigned long		flags;
1082 
1083 	spin_lock_irqsave(&dwc->lock, flags);
1084 	if (list_empty(&dwc->active_list))
1085 		dwc_dostart_first_queued(dwc);
1086 	spin_unlock_irqrestore(&dwc->lock, flags);
1087 }
1088 
1089 /*----------------------------------------------------------------------*/
1090 
1091 static void dw_dma_off(struct dw_dma *dw)
1092 {
1093 	unsigned int i;
1094 
1095 	dma_writel(dw, CFG, 0);
1096 
1097 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1098 	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1099 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1100 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1101 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1102 
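	/* Wait for the global DMA enable bit to actually clear */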
1103 	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1104 		cpu_relax();
1105 
1106 	for (i = 0; i < dw->dma.chancnt; i++)
1107 		clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
1108 }
1109 
1110 static void dw_dma_on(struct dw_dma *dw)
1111 {
1112 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
1113 }
1114 
1115 static int dwc_alloc_chan_resources(struct dma_chan *chan)
1116 {
1117 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
1118 	struct dw_dma		*dw = to_dw_dma(chan->device);
1119 
1120 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1121 
1122 	/* ASSERT:  channel is idle */
1123 	if (dma_readl(dw, CH_EN) & dwc->mask) {
1124 		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1125 		return -EIO;
1126 	}
1127 
1128 	dma_cookie_init(chan);
1129 
1130 	/*
1131 	 * NOTE: some controllers may have additional features that we
1132 	 * need to initialize here, like "scatter-gather" (which
1133 	 * doesn't mean what you think it means), and status writeback.
1134 	 */
1135 
1136 	/*
1137 	 * We need controller-specific data to set up slave transfers.
1138 	 */
1139 	if (chan->private && !dw_dma_filter(chan, chan->private)) {
1140 		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1141 		return -EINVAL;
1142 	}
1143 
1144 	/* Enable controller here if needed */
1145 	if (!dw->in_use)
1146 		dw_dma_on(dw);
1147 	dw->in_use |= dwc->mask;
1148 
1149 	return 0;
1150 }
1151 
1152 static void dwc_free_chan_resources(struct dma_chan *chan)
1153 {
1154 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
1155 	struct dw_dma		*dw = to_dw_dma(chan->device);
1156 	unsigned long		flags;
1157 	LIST_HEAD(list);
1158 
1159 	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1160 			dwc->descs_allocated);
1161 
1162 	/* ASSERT:  channel is idle */
1163 	BUG_ON(!list_empty(&dwc->active_list));
1164 	BUG_ON(!list_empty(&dwc->queue));
1165 	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1166 
1167 	spin_lock_irqsave(&dwc->lock, flags);
1168 
1169 	/* Clear custom channel configuration */
1170 	dwc->src_id = 0;
1171 	dwc->dst_id = 0;
1172 
1173 	dwc->m_master = 0;
1174 	dwc->p_master = 0;
1175 
1176 	clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
1177 
1178 	/* Disable interrupts */
1179 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
1180 	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1181 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1182 
1183 	spin_unlock_irqrestore(&dwc->lock, flags);
1184 
1185 	/* Disable the controller if this was its last user */
1186 	dw->in_use &= ~dwc->mask;
1187 	if (!dw->in_use)
1188 		dw_dma_off(dw);
1189 
1190 	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1191 }
1192 
1193 /* --------------------- Cyclic DMA API extensions -------------------- */
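
/*
 * A minimal usage sketch of this legacy cyclic API (hedged: error handling
 * is trimmed, my_period_cb/my_ctx are placeholders, the channel is assumed
 * to be already configured for slave transfers, and start/stop must be
 * called with soft interrupts disabled):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_cb;
 *	cdesc->period_callback_param = my_ctx;
 *
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */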
1194 
1195 /**
1196  * dw_dma_cyclic_start - start the cyclic DMA transfer
1197  * @chan: the DMA channel to start
1198  *
1199  * Must be called with soft interrupts disabled. Returns zero on success or
1200  * -errno on failure.
1201  */
1202 int dw_dma_cyclic_start(struct dma_chan *chan)
1203 {
1204 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
1205 	struct dw_dma		*dw = to_dw_dma(chan->device);
1206 	unsigned long		flags;
1207 
1208 	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1209 		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1210 		return -ENODEV;
1211 	}
1212 
1213 	spin_lock_irqsave(&dwc->lock, flags);
1214 
1215 	/* Enable interrupts to perform cyclic transfer */
1216 	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1217 
1218 	dwc_dostart(dwc, dwc->cdesc->desc[0]);
1219 
1220 	spin_unlock_irqrestore(&dwc->lock, flags);
1221 
1222 	return 0;
1223 }
1224 EXPORT_SYMBOL(dw_dma_cyclic_start);
1225 
1226 /**
1227  * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1228  * @chan: the DMA channel to stop
1229  *
1230  * Must be called with soft interrupts disabled.
1231  */
1232 void dw_dma_cyclic_stop(struct dma_chan *chan)
1233 {
1234 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
1235 	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
1236 	unsigned long		flags;
1237 
1238 	spin_lock_irqsave(&dwc->lock, flags);
1239 
1240 	dwc_chan_disable(dw, dwc);
1241 
1242 	spin_unlock_irqrestore(&dwc->lock, flags);
1243 }
1244 EXPORT_SYMBOL(dw_dma_cyclic_stop);
1245 
1246 /**
1247  * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1248  * @chan: the DMA channel to prepare
1249  * @buf_addr: physical DMA address where the buffer starts
1250  * @buf_len: total number of bytes for the entire buffer
1251  * @period_len: number of bytes for each period
1252  * @direction: transfer direction, to or from device
1253  *
1254  * Must be called before trying to start the transfer. Returns a valid struct
1255  * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1256  */
1257 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1258 		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1259 		enum dma_transfer_direction direction)
1260 {
1261 	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
1262 	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
1263 	struct dw_cyclic_desc		*cdesc;
1264 	struct dw_cyclic_desc		*retval = NULL;
1265 	struct dw_desc			*desc;
1266 	struct dw_desc			*last = NULL;
1267 	u8				lms = DWC_LLP_LMS(dwc->m_master);
1268 	unsigned long			was_cyclic;
1269 	unsigned int			reg_width;
1270 	unsigned int			periods;
1271 	unsigned int			i;
1272 	unsigned long			flags;
1273 
1274 	spin_lock_irqsave(&dwc->lock, flags);
1275 	if (dwc->nollp) {
1276 		spin_unlock_irqrestore(&dwc->lock, flags);
1277 		dev_dbg(chan2dev(&dwc->chan),
1278 				"channel doesn't support LLP transfers\n");
1279 		return ERR_PTR(-EINVAL);
1280 	}
1281 
1282 	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1283 		spin_unlock_irqrestore(&dwc->lock, flags);
1284 		dev_dbg(chan2dev(&dwc->chan),
1285 				"queue and/or active list are not empty\n");
1286 		return ERR_PTR(-EBUSY);
1287 	}
1288 
1289 	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1290 	spin_unlock_irqrestore(&dwc->lock, flags);
1291 	if (was_cyclic) {
1292 		dev_dbg(chan2dev(&dwc->chan),
1293 				"channel already prepared for cyclic DMA\n");
1294 		return ERR_PTR(-EBUSY);
1295 	}
1296 
1297 	retval = ERR_PTR(-EINVAL);
1298 
1299 	if (unlikely(!is_slave_direction(direction)))
1300 		goto out_err;
1301 
1302 	dwc->direction = direction;
1303 
1304 	if (direction == DMA_MEM_TO_DEV)
1305 		reg_width = __ffs(sconfig->dst_addr_width);
1306 	else
1307 		reg_width = __ffs(sconfig->src_addr_width);
1308 
1309 	periods = buf_len / period_len;
1310 
1311 	/* Check for too big/unaligned periods and unaligned DMA buffer. */
1312 	if (period_len > (dwc->block_size << reg_width))
1313 		goto out_err;
1314 	if (unlikely(period_len & ((1 << reg_width) - 1)))
1315 		goto out_err;
1316 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1317 		goto out_err;
1318 
1319 	retval = ERR_PTR(-ENOMEM);
1320 
1321 	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1322 	if (!cdesc)
1323 		goto out_err;
1324 
1325 	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1326 	if (!cdesc->desc)
1327 		goto out_err_alloc;
1328 
1329 	for (i = 0; i < periods; i++) {
1330 		desc = dwc_desc_get(dwc);
1331 		if (!desc)
1332 			goto out_err_desc_get;
1333 
1334 		switch (direction) {
1335 		case DMA_MEM_TO_DEV:
1336 			lli_write(desc, dar, sconfig->dst_addr);
1337 			lli_write(desc, sar, buf_addr + period_len * i);
1338 			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
1339 				| DWC_CTLL_DST_WIDTH(reg_width)
1340 				| DWC_CTLL_SRC_WIDTH(reg_width)
1341 				| DWC_CTLL_DST_FIX
1342 				| DWC_CTLL_SRC_INC
1343 				| DWC_CTLL_INT_EN));
1344 
1345 			lli_set(desc, ctllo, sconfig->device_fc ?
1346 					DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1347 					DWC_CTLL_FC(DW_DMA_FC_D_M2P));
1348 
1349 			break;
1350 		case DMA_DEV_TO_MEM:
1351 			lli_write(desc, dar, buf_addr + period_len * i);
1352 			lli_write(desc, sar, sconfig->src_addr);
1353 			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
1354 				| DWC_CTLL_SRC_WIDTH(reg_width)
1355 				| DWC_CTLL_DST_WIDTH(reg_width)
1356 				| DWC_CTLL_DST_INC
1357 				| DWC_CTLL_SRC_FIX
1358 				| DWC_CTLL_INT_EN));
1359 
1360 			lli_set(desc, ctllo, sconfig->device_fc ?
1361 					DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1362 					DWC_CTLL_FC(DW_DMA_FC_D_P2M));
1363 
1364 			break;
1365 		default:
1366 			break;
1367 		}
1368 
1369 		lli_write(desc, ctlhi, period_len >> reg_width);
1370 		cdesc->desc[i] = desc;
1371 
1372 		if (last)
1373 			lli_write(last, llp, desc->txd.phys | lms);
1374 
1375 		last = desc;
1376 	}
1377 
1378 	/* Let's make a cyclic list */
1379 	lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
1380 
1381 	dev_dbg(chan2dev(&dwc->chan),
1382 			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
1383 			&buf_addr, buf_len, period_len, periods);
1384 
1385 	cdesc->periods = periods;
1386 	dwc->cdesc = cdesc;
1387 
1388 	return cdesc;
1389 
1390 out_err_desc_get:
1391 	while (i--)
1392 		dwc_desc_put(dwc, cdesc->desc[i]);
1393 out_err_alloc:
1394 	kfree(cdesc);
1395 out_err:
1396 	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1397 	return (struct dw_cyclic_desc *)retval;
1398 }
1399 EXPORT_SYMBOL(dw_dma_cyclic_prep);
1400 
1401 /**
1402  * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1403  * @chan: the DMA channel to free
1404  */
1405 void dw_dma_cyclic_free(struct dma_chan *chan)
1406 {
1407 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
1408 	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
1409 	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
1410 	unsigned int		i;
1411 	unsigned long		flags;
1412 
1413 	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1414 
1415 	if (!cdesc)
1416 		return;
1417 
1418 	spin_lock_irqsave(&dwc->lock, flags);
1419 
1420 	dwc_chan_disable(dw, dwc);
1421 
1422 	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1423 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
1424 	dma_writel(dw, CLEAR.XFER, dwc->mask);
1425 
1426 	spin_unlock_irqrestore(&dwc->lock, flags);
1427 
1428 	for (i = 0; i < cdesc->periods; i++)
1429 		dwc_desc_put(dwc, cdesc->desc[i]);
1430 
1431 	kfree(cdesc->desc);
1432 	kfree(cdesc);
1433 
1434 	dwc->cdesc = NULL;
1435 
1436 	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1437 }
1438 EXPORT_SYMBOL(dw_dma_cyclic_free);
1439 
1440 /*----------------------------------------------------------------------*/
1441 
1442 int dw_dma_probe(struct dw_dma_chip *chip)
1443 {
1444 	struct dw_dma_platform_data *pdata;
1445 	struct dw_dma		*dw;
1446 	bool			autocfg = false;
1447 	unsigned int		dw_params;
1448 	unsigned int		i;
1449 	int			err;
1450 
1451 	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
1452 	if (!dw)
1453 		return -ENOMEM;
1454 
1455 	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
1456 	if (!dw->pdata)
1457 		return -ENOMEM;
1458 
1459 	dw->regs = chip->regs;
1460 	chip->dw = dw;
1461 
1462 	pm_runtime_get_sync(chip->dev);
1463 
1464 	if (!chip->pdata) {
1465 		dw_params = dma_readl(dw, DW_PARAMS);
1466 		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1467 
1468 		autocfg = dw_params >> DW_PARAMS_EN & 1;
1469 		if (!autocfg) {
1470 			err = -EINVAL;
1471 			goto err_pdata;
1472 		}
1473 
1474 		/* Reassign the platform data pointer */
1475 		pdata = dw->pdata;
1476 
1477 		/* Get hardware configuration parameters */
1478 		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1479 		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1480 		for (i = 0; i < pdata->nr_masters; i++) {
1481 			pdata->data_width[i] =
1482 				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
1483 		}
1484 		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
1485 
1486 		/* Fill platform data with the default values */
1487 		pdata->is_private = true;
1488 		pdata->is_memcpy = true;
1489 		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1490 		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1491 	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
1492 		err = -EINVAL;
1493 		goto err_pdata;
1494 	} else {
1495 		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
1496 
1497 		/* Reassign the platform data pointer */
1498 		pdata = dw->pdata;
1499 	}
1500 
1501 	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
1502 				GFP_KERNEL);
1503 	if (!dw->chan) {
1504 		err = -ENOMEM;
1505 		goto err_pdata;
1506 	}
1507 
1508 	/* Calculate the mask covering all channels before DMA setup */
1509 	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1510 
1511 	/* Force dma off, just in case */
1512 	dw_dma_off(dw);
1513 
1514 	/* Create a pool of consistent memory blocks for hardware descriptors */
1515 	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
1516 					 sizeof(struct dw_desc), 4, 0);
1517 	if (!dw->desc_pool) {
1518 		dev_err(chip->dev, "No memory for descriptors dma pool\n");
1519 		err = -ENOMEM;
1520 		goto err_pdata;
1521 	}
1522 
1523 	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1524 
1525 	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1526 			  "dw_dmac", dw);
1527 	if (err)
1528 		goto err_pdata;
1529 
1530 	INIT_LIST_HEAD(&dw->dma.channels);
1531 	for (i = 0; i < pdata->nr_channels; i++) {
1532 		struct dw_dma_chan	*dwc = &dw->chan[i];
1533 
1534 		dwc->chan.device = &dw->dma;
1535 		dma_cookie_init(&dwc->chan);
1536 		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1537 			list_add_tail(&dwc->chan.device_node,
1538 					&dw->dma.channels);
1539 		else
1540 			list_add(&dwc->chan.device_node, &dw->dma.channels);
1541 
1542 		/* 7 is highest priority & 0 is lowest. */
1543 		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1544 			dwc->priority = pdata->nr_channels - i - 1;
1545 		else
1546 			dwc->priority = i;
1547 
1548 		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1549 		spin_lock_init(&dwc->lock);
1550 		dwc->mask = 1 << i;
1551 
1552 		INIT_LIST_HEAD(&dwc->active_list);
1553 		INIT_LIST_HEAD(&dwc->queue);
1554 
1555 		channel_clear_bit(dw, CH_EN, dwc->mask);
1556 
1557 		dwc->direction = DMA_TRANS_NONE;
1558 
1559 		/* Hardware configuration */
1560 		if (autocfg) {
1561 			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
1562 			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
1563 			unsigned int dwc_params = dma_readl_native(addr);
1564 
1565 			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1566 					   dwc_params);
1567 
1568 			/*
1569 			 * Decode maximum block size for given channel. The
1570 			 * stored 4 bit value represents blocks from 0x00 for 3
1571 			 * up to 0x0a for 4095.
1572 			 */
1573 			dwc->block_size =
1574 				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
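			/* Multi-block (LLP) capability is reported via DWC_PARAMS */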
1575 			dwc->nollp =
1576 				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1577 		} else {
1578 			dwc->block_size = pdata->block_size;
1579 
1580 			/* Check if the channel supports multi-block transfers */
1581 			channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff));
1582 			dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0;
1583 			channel_writel(dwc, LLP, 0);
1584 		}
1585 	}
1586 
1587 	/* Clear all interrupts on all channels. */
1588 	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1589 	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1590 	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1591 	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1592 	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1593 
1594 	/* Set capabilities */
1595 	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1596 	if (pdata->is_private)
1597 		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1598 	if (pdata->is_memcpy)
1599 		dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1600 
1601 	dw->dma.dev = chip->dev;
1602 	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1603 	dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1604 
1605 	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1606 	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1607 
1608 	dw->dma.device_config = dwc_config;
1609 	dw->dma.device_pause = dwc_pause;
1610 	dw->dma.device_resume = dwc_resume;
1611 	dw->dma.device_terminate_all = dwc_terminate_all;
1612 
1613 	dw->dma.device_tx_status = dwc_tx_status;
1614 	dw->dma.device_issue_pending = dwc_issue_pending;
1615 
1616 	/* DMA capabilities */
1617 	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1618 	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1619 	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1620 			     BIT(DMA_MEM_TO_MEM);
1621 	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1622 
1623 	err = dma_async_device_register(&dw->dma);
1624 	if (err)
1625 		goto err_dma_register;
1626 
1627 	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
1628 		 pdata->nr_channels);
1629 
1630 	pm_runtime_put_sync_suspend(chip->dev);
1631 
1632 	return 0;
1633 
1634 err_dma_register:
1635 	free_irq(chip->irq, dw);
1636 err_pdata:
1637 	pm_runtime_put_sync_suspend(chip->dev);
1638 	return err;
1639 }
1640 EXPORT_SYMBOL_GPL(dw_dma_probe);
1641 
1642 int dw_dma_remove(struct dw_dma_chip *chip)
1643 {
1644 	struct dw_dma		*dw = chip->dw;
1645 	struct dw_dma_chan	*dwc, *_dwc;
1646 
1647 	pm_runtime_get_sync(chip->dev);
1648 
1649 	dw_dma_off(dw);
1650 	dma_async_device_unregister(&dw->dma);
1651 
1652 	free_irq(chip->irq, dw);
1653 	tasklet_kill(&dw->tasklet);
1654 
1655 	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1656 			chan.device_node) {
1657 		list_del(&dwc->chan.device_node);
1658 		channel_clear_bit(dw, CH_EN, dwc->mask);
1659 	}
1660 
1661 	pm_runtime_put_sync_suspend(chip->dev);
1662 	return 0;
1663 }
1664 EXPORT_SYMBOL_GPL(dw_dma_remove);
1665 
1666 int dw_dma_disable(struct dw_dma_chip *chip)
1667 {
1668 	struct dw_dma *dw = chip->dw;
1669 
1670 	dw_dma_off(dw);
1671 	return 0;
1672 }
1673 EXPORT_SYMBOL_GPL(dw_dma_disable);
1674 
1675 int dw_dma_enable(struct dw_dma_chip *chip)
1676 {
1677 	struct dw_dma *dw = chip->dw;
1678 
1679 	dw_dma_on(dw);
1680 	return 0;
1681 }
1682 EXPORT_SYMBOL_GPL(dw_dma_enable);
1683 
1684 MODULE_LICENSE("GPL v2");
1685 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1686 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1687 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
1688