// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include "dmaengine.h"

#define CREATE_TRACE_POINTS
#include <trace/events/tegra_apb_dma.h>

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If any burst is in flight when the DMA is paused, this is the time
 * (in microseconds) needed for the in-flight burst to complete and for
 * the DMA status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

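/* REQ_SEL is a 5-bit field, so REQ_SEL_MASK + 1 (32) is never a valid ID */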
#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip-specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by the controller.
 * @support_channel_pause: Support channel-wise pause of DMA.
 * @support_separate_wcount_reg: Support a separate word count register.
 */
struct tegra_dma_chip_data {
	unsigned int nr_channels;
	unsigned int channel_reg_size;
	unsigned int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	u32 csr;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 ahb_seq;
	u32 apb_seq;
	u32 wcount;
};

/*
 * tegra_dma_sg_req: DMA request details used to configure the hardware.
 * This contains the details of one sub-transfer to program into the DMA
 * hardware. A client's data-transfer request may be broken into multiple
 * sub-transfers, depending on the requester details and hardware support.
 * Each sub-transfer is added to the channel's transfer list and points to
 * the Tegra DMA descriptor that manages the overall transfer.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	unsigned int			req_len;
	bool				configured;
	bool				last_sg;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
	unsigned int			words_xferred;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor that manages a client request.
 * It keeps track of the transfer status, callbacks, request counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	unsigned int			bytes_requested;
	unsigned int			bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	unsigned int			cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel-specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[12];
	bool			config_init;
	unsigned int		id;
	void __iomem		*chan_addr;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for the bottom half of ISR handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;

	struct wait_queue_head wq;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	struct reset_control		*rst;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32				global_pause_count;

	/* Last member of the structure */
	struct tegra_dma_channel channels[];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *
txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);

/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse a desc that is still waiting for its ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;

	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
			       struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *
tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
					  node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
	    sconfig->device_fc) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;

	return 0;
}

static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
				   bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

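/*
 * Pause one channel where the hardware supports it; otherwise fall back
 * to the refcounted global pause of the whole controller.
 */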
static void tegra_dma_pause(struct tegra_dma_channel *tdc,
			    bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	else
		tegra_dma_global_resume(tdc);
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr, status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is set */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
			    struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
					 struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no pending EOC (IEC) status, the last burst has not
	 * completed yet. The last burst may be in flight and complete
	 * while the DMA is paused; in that case it neither raises an
	 * interrupt nor reloads the new configuration.
	 * If the EOC status is already set, the interrupt handler has to
	 * load the new configuration instead.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending, do nothing; the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
			  nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;
	nsg_req->words_xferred = 0;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	sg_req->words_xferred = 0;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq, *hnsgreq;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
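		/* Fetch the entry that follows the head of the pending list */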
		hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
					   node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

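/*
 * The count read from the hardware holds the remaining transfer size in
 * the same encoding as the word count programmed by
 * tegra_dma_prep_wcount(), i.e. (remaining words - 1) << 2, which is
 * remaining bytes - 4. The bytes already transferred therefore work out
 * to req_len - count - 4.
 */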
static inline unsigned int
get_current_xferred_count(struct tegra_dma_channel *tdc,
			  struct tegra_dma_sg_req *sg_req,
			  unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
					   bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq;

	/*
	 * The head request on the list should already be in flight.
	 * If it is not in flight, abort the transfer, since the
	 * transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		pm_runtime_put(tdc->tdma->dev);
		dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);

	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
				 bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start the DMA if it is going to be terminated */
	if (to_terminate)
		return;

	if (list_empty(&tdc->pending_sg_req)) {
		pm_runtime_put(tdc->tdma->dev);
		return;
	}

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
					    bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	/* If we DMA for long enough, the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* The callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	sgreq->words_xferred = 0;

	/* If this is not the last req, move it to the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

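/*
 * Bottom half: drain the cb_desc list and invoke each descriptor's
 * completion callback once per recorded completion. cb_count batches
 * completions that occurred while the tasklet was still pending, so
 * cyclic transfers do not lose period callbacks.
 */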
static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned int cb_count;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
					    cb.callback);
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	u32 status;

	spin_lock(&tdc->lock);

	trace_tegra_dma_isr(&tdc->dma_chan, irq);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		wake_up_all(&tdc->wq);
		spin_unlock(&tdc->lock);
		return IRQ_HANDLED;
	}

	spin_unlock(&tdc->lock);
	dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
		 status);

	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		err = pm_runtime_get_sync(tdc->tdma->dev);
		if (err < 0) {
			dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
			goto end;
		}

		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure the next req */
		if (tdc->cyclic) {
			/*
			 * Wait one burst time before configuring the DMA
			 * for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	unsigned long flags;
	u32 status, wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

	pm_runtime_put(tdc->tdma->dev);
	wake_up_all(&tdc->wq);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	return 0;
}

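/*
 * Used by tegra_dma_synchronize() to wait until a pending EOC interrupt
 * has been served by the ISR.
 */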
static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
{
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&tdc->lock, flags);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return !(status & TEGRA_APBDMA_STATUS_ISE_EOC);
}

static void tegra_dma_synchronize(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	int err;

	err = pm_runtime_get_sync(tdc->tdma->dev);
	if (err < 0) {
		dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
		return;
	}

	/*
	 * The CPU that handles the interrupt could be busy in an
	 * uninterruptible state; in that case a sibling CPU has to
	 * wait here until the interrupt has been handled.
	 */
	wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));

	tasklet_kill(&tdc->tasklet);

	pm_runtime_put(tdc->tdma->dev);
}

static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
					       struct tegra_dma_sg_req *sg_req)
{
	u32 status, wcount = 0;

	if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
		return 0;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	if (!tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = status;

	if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
		return sg_req->req_len;

	wcount = get_current_xferred_count(tdc, sg_req, wcount);

	if (!wcount) {
		/*
		 * If wcount wasn't ever polled for this SG before, then
		 * simply assume that the transfer hasn't started yet.
		 *
		 * Otherwise it's the end of the transfer.
		 *
		 * The alternative would be to poll the status register
		 * until the EOC bit is set or wcount goes UP. That's
		 * because the EOC bit is set only after the last burst
		 * completes, and the counter is less than the actual
		 * transfer size by 4 bytes. The counter value wraps
		 * around in cyclic mode before EOC is set(!), so we
		 * can't easily distinguish the start of a transfer from
		 * its end.
		 */
		if (sg_req->words_xferred)
			wcount = sg_req->req_len - 4;

	} else if (wcount < sg_req->words_xferred) {
		/*
		 * This case will never happen for a non-cyclic transfer.
		 *
		 * For a cyclic transfer, although it is possible for the
		 * next transfer to have already started (resetting the word
		 * count), this case should still not happen because we should
		 * have detected that the EOC bit is set and hence the transfer
		 * was completed.
		 */
		WARN_ON_ONCE(1);

		wcount = sg_req->req_len - 4;
	} else {
		sg_req->words_xferred = wcount;
	}

	return wcount;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;
	unsigned int bytes = 0;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
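	/*
	 * The modulo handles cyclic transfers, where bytes_transferred
	 * wraps around bytes_requested (see
	 * handle_cont_sngl_cycle_dma_done()).
	 */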
	if (dma_desc && txstate) {
		residual = dma_desc->bytes_requested -
			   ((dma_desc->bytes_transferred + bytes) %
			    dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return ret;
}

static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
					 enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			 "slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
					  u32 burst_size,
					  enum dma_slave_buswidth slave_bw,
					  u32 len)
{
	unsigned int burst_byte, burst_ahb_width;

	/*
	 * burst_size from the client is in units of the bus width;
	 * convert it to the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
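
/*
 * Example: a client maxburst of 8 at a 16-bit bus width is 16 bytes,
 * i.e. 4 AHB words, so get_burst_size() selects BURST_4.
 */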

static int get_transfer_param(struct tegra_dma_channel *tdc,
			      enum dma_transfer_direction direction,
			      u32 *apb_addr,
			      u32 *apb_seq,
			      u32 *csr,
			      unsigned int *burst_size,
			      enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		break;
	}

	return -EINVAL;
}

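/*
 * Program the transfer size as (len / 4 - 1) 32-bit words, encoded in
 * bits [15:2], either into the separate word count register or into the
 * CSR word count field, depending on the chip.
 */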
static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
				  struct tegra_dma_channel_regs *ch_regs,
				  u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc,
			struct scatterlist *sgl,
			unsigned int sg_len,
			enum dma_transfer_direction direction,
			unsigned long flags,
			void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 csr, ahb_seq, apb_ptr, apb_seq;
	enum dma_slave_buswidth slave_bw;
	struct tegra_dma_desc *dma_desc;
	struct list_head req_list;
	struct scatterlist *sg;
	unsigned int burst_size;
	unsigned int i;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %u\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
		    len > tdc->tdma->chip_data->max_dma_count) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
			  size_t buf_len,
			  size_t period_len,
			  enum dma_transfer_direction direction,
			  unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 csr, ahb_seq, apb_ptr, apb_seq;
	enum dma_slave_buswidth slave_bw;
	struct tegra_dma_desc *dma_desc;
	dma_addr_t mem = buf_addr;
	unsigned int burst_size;
	size_t len, remain_len;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We allow queueing more requests as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA is
	 * started, new requests can be queued only after terminating the
	 * DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
	    len > tdc->tdma->chip_data->max_dma_count) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split the transfer into period-sized chunks */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	dma_cookie_init(&tdc->dma_chan);

	return 0;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %u\n", tdc->id);

	tegra_dma_terminate_all(dc);
	tasklet_kill(&tdc->tasklet);

	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
					    node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}

	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct tegra_dma_channel *tdc;
	struct dma_chan *chan;

	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x40,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = true,
};

static int tegra_dma_init_hw(struct tegra_dma *tdma)
{
	int err;

	err = reset_control_assert(tdma->rst);
	if (err) {
		dev_err(tdma->dev, "failed to assert reset: %d\n", err);
		return err;
	}

	err = clk_enable(tdma->dma_clk);
	if (err) {
		dev_err(tdma->dev, "failed to enable clk: %d\n", err);
		return err;
	}

	/* hold the DMA controller in reset briefly, then release it */
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);

	clk_disable(tdma->dma_clk);

	return 0;
}

static int tegra_dma_probe(struct platform_device *pdev)
{
	const struct tegra_dma_chip_data *cdata;
	struct tegra_dma *tdma;
	unsigned int i;
	size_t size;
	int ret;

	cdata = of_device_get_match_data(&pdev->dev);
	size = struct_size(tdma, channels, cdata->nr_channels);

	tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	ret = clk_prepare(tdma->dma_clk);
	if (ret)
		return ret;

	ret = tegra_dma_init_hw(tdma);
	if (ret)
		goto err_clk_unprepare;

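	/*
	 * Runtime PM is marked IRQ-safe because pm_runtime_get/put are
	 * called from atomic context, e.g. under the channel lock in
	 * tegra_dma_issue_pending().
	 */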
	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		int irq;

		tdc->chan_addr = tdma->base_addr +
				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
				 (i * cdata->channel_reg_size);

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_pm_disable;
		}

		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
				       tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_pm_disable;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
			      &tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
			     (unsigned long)tdc);
		spin_lock_init(&tdc->lock);
		init_waitqueue_head(&tdc->wq);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->global_pause_count = 0;
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_pm_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n",
		 cdata->nr_channels);

	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);

err_pm_disable:
	pm_runtime_disable(&pdev->dev);

err_clk_unprepare:
	clk_unprepare(tdma->dma_clk);

	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);
	pm_runtime_disable(&pdev->dev);
	clk_unprepare(tdma->dma_clk);

	return 0;
}

static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);

	clk_disable(tdma->dma_clk);

	return 0;
}

static int __maybe_unused tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);

	return clk_enable(tdma->dma_clk);
}

static int __maybe_unused tegra_dma_dev_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;
	bool busy;

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tasklet_kill(&tdc->tasklet);

		spin_lock_irqsave(&tdc->lock, flags);
		busy = tdc->busy;
		spin_unlock_irqrestore(&tdc->lock, flags);

		if (busy) {
			dev_err(tdma->dev, "channel %u busy\n", i);
			return -EBUSY;
		}
	}

	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused tegra_dma_dev_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int err;

	err = tegra_dma_init_hw(tdma);
	if (err)
		return err;

	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume)
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");