/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		(1 << 31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If a burst is in flight when the DMA is paused, this is the time (in
 * microseconds) needed for the in-flight burst to complete and for the
 * DMA status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000
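
/*
 * Requester (slave) IDs occupy 5 bits in the CSR, so any value above
 * TEGRA_APBDMA_CSR_REQ_SEL_MASK is out of range; one past the mask is
 * used to mark a channel that has no slave ID assigned yet.
 */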
#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)

struct tegra_dma;

/*
 * struct tegra_dma_chip_data - Tegra chip-specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @support_channel_pause: Support channel-wise pause of DMA.
 * @support_separate_wcount_reg: Support separate word count register.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int channel_reg_size;
	int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
	unsigned long	wcount;
};

/*
 * tegra_dma_sg_req: DMA request details used to configure the hardware.
 * This contains the details of one sub-transfer as programmed into the
 * DMA hardware. A client's transfer request may be split into multiple
 * sub-transfers according to the requester details and hardware support.
 * Each sub-transfer is added to the channel's list of transfers and
 * points to the Tegra DMA descriptor that manages the overall transfer.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	int				req_len;
	bool				configured;
	bool				last_sg;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor that manages a client request.
 * It keeps track of the transfer status, callbacks, request counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	int				bytes_requested;
	int				bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[30];
	bool			config_init;
	int			id;
	int			irq;
	void __iomem		*chan_addr;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of ISR handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs	channel_reg;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	struct reset_control		*rst;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32				global_pause_count;

	/* Some registers need to be cached before suspend */
	u32				reg_gen;

	/*
	 * Last member of the structure: the trailing array is sized at
	 * probe time based on chip_data->nr_channels.
	 */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse a descriptor that is still waiting for its ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
	    sconfig->device_fc) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;
	return 0;
}
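
/*
 * Client usage sketch (illustrative only, not part of this driver): a
 * slave device driver configures the channel before preparing any
 * transfer. The FIFO address and widths below are assumptions chosen
 * purely for illustration.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 4,
 *		.direction = DMA_DEV_TO_MEM,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */
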
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
	bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
	bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
				TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	} else {
		tegra_dma_global_resume(tdc);
	}
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If the EOC interrupt status is not set, the last burst has not
	 * completed yet. The last burst may still be in flight: it can then
	 * complete, but because the DMA is paused it will neither raise an
	 * interrupt nor reload the new configuration.
	 * If the EOC interrupt status is already set, the interrupt handler
	 * needs to load the new configuration instead.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending, do nothing; the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
						nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}
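
/*
 * The count field in STATUS appears to follow the same "(bytes - 4)
 * with the low two bits clear" encoding as the programmed word count,
 * so subtracting it and the trailing 4 bytes from req_len recovers the
 * number of bytes already transferred.
 */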
static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Dma is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head request on the list is in flight.
	 * If it is not in flight then abort the transfer, as the
	 * transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start the DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If this is not the last req, put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}
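
/*
 * Tasklet: bottom half of interrupt handling. Completion callbacks
 * queued on cb_desc are invoked with the channel lock dropped, so a
 * callback may itself queue or terminate transfers on this channel.
 */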
static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure the next req */
		if (tdc->cyclic) {
			/*
			 * Wait one burst time so that the DMA can be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	unsigned long wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return 0;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
	return 0;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
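	/*
	 * For cyclic transfers bytes_transferred grows without bound, so
	 * fold it back with a modulo before computing the residue.
	 */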
	if (dma_desc && txstate) {
		residual = dma_desc->bytes_requested -
			   (dma_desc->bytes_transferred %
			    dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * The burst_size from the client is in units of the bus width.
	 * Convert it into units of the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
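
/*
 * Worked example (illustrative): a client maxburst of 8 words on a
 * 4-byte bus gives burst_byte = 32 and burst_ahb_width = 8, selecting
 * TEGRA_APBDMA_AHBSEQ_BURST_8. With maxburst 0 and a 24-byte length
 * (len & 0xF != 0), the fallback picks single-word bursts instead.
 */
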
static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
		return -EINVAL;
	}
}

static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
	struct tegra_dma_channel_regs *ch_regs, u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}
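
/*
 * Worked example (illustrative): a 4096-byte request is encoded as
 * (4096 - 4) & 0xFFFC = 4092, i.e. the transfer length is programmed
 * as "bytes minus one word" with the low two bits always clear.
 */
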
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
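
/*
 * Client usage sketch (illustrative only): after dmaengine_slave_config()
 * the client maps its buffer and prepares a scatter-gather transfer.
 * "nents" and client_done() are assumptions used purely for illustration.
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (desc) {
 *		desc->callback = client_done;
 *		cookie = dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */
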
static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * More requests can be queued as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA
	 * has started, new requests can be queued only after terminating
	 * the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split the transfer into period-sized sub-transfers */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
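
/*
 * Client usage sketch (illustrative only): cyclic transfers suit
 * audio-style ring buffers, with buf_len an exact multiple of
 * period_len as required above. The sizes and period_elapsed() are
 * assumptions used purely for illustration.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * PAGE_SIZE,
 *					 PAGE_SIZE, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = period_elapsed;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */
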
1189 
1190 static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
1191 {
1192 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1193 	struct tegra_dma *tdma = tdc->tdma;
1194 	int ret;
1195 
1196 	dma_cookie_init(&tdc->dma_chan);
1197 	tdc->config_init = false;
1198 
1199 	ret = pm_runtime_get_sync(tdma->dev);
1200 	if (ret < 0)
1201 		return ret;
1202 
1203 	return 0;
1204 }
1205 
1206 static void tegra_dma_free_chan_resources(struct dma_chan *dc)
1207 {
1208 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1209 	struct tegra_dma *tdma = tdc->tdma;
1210 	struct tegra_dma_desc *dma_desc;
1211 	struct tegra_dma_sg_req *sg_req;
1212 	struct list_head dma_desc_list;
1213 	struct list_head sg_req_list;
1214 	unsigned long flags;
1215 
1216 	INIT_LIST_HEAD(&dma_desc_list);
1217 	INIT_LIST_HEAD(&sg_req_list);
1218 
1219 	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
1220 
1221 	if (tdc->busy)
1222 		tegra_dma_terminate_all(dc);
1223 
1224 	spin_lock_irqsave(&tdc->lock, flags);
1225 	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
1226 	list_splice_init(&tdc->free_sg_req, &sg_req_list);
1227 	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
1228 	INIT_LIST_HEAD(&tdc->cb_desc);
1229 	tdc->config_init = false;
1230 	tdc->isr_handler = NULL;
1231 	spin_unlock_irqrestore(&tdc->lock, flags);
1232 
1233 	while (!list_empty(&dma_desc_list)) {
1234 		dma_desc = list_first_entry(&dma_desc_list,
1235 					typeof(*dma_desc), node);
1236 		list_del(&dma_desc->node);
1237 		kfree(dma_desc);
1238 	}
1239 
1240 	while (!list_empty(&sg_req_list)) {
1241 		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
1242 		list_del(&sg_req->node);
1243 		kfree(sg_req);
1244 	}
1245 	pm_runtime_put(tdma->dev);
1246 
1247 	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
1248 }
1249 
1250 static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
1251 					   struct of_dma *ofdma)
1252 {
1253 	struct tegra_dma *tdma = ofdma->of_dma_data;
1254 	struct dma_chan *chan;
1255 	struct tegra_dma_channel *tdc;
1256 
1257 	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
1258 		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
1259 		return NULL;
1260 	}
1261 
1262 	chan = dma_get_any_slave_channel(&tdma->dma_dev);
1263 	if (!chan)
1264 		return NULL;
1265 
1266 	tdc = to_tegra_dma_chan(chan);
1267 	tdc->slave_id = dma_spec->args[0];
1268 
1269 	return chan;
1270 }
1271 
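
/*
 * Device tree usage sketch (illustrative only): a client node requests
 * a channel with the requester (slave) ID as the single specifier cell,
 * which tegra_dma_of_xlate() stores in tdc->slave_id. The node and ID
 * below are assumptions chosen purely for illustration.
 *
 *	serial@70006000 {
 *		...
 *		dmas = <&apbdma 8>, <&apbdma 8>;
 *		dma-names = "rx", "tx";
 *	};
 */
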
/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x40,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = true,
};

static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "Error: No device match data found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		ret = tegra_dma_runtime_resume(&pdev->dev);
	else
		ret = pm_runtime_get_sync(&pdev->dev);

	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Reset DMA controller */
	reset_control_assert(tdma->rst);
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	pm_runtime_put(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_addr = tdma->base_addr +
				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
				 (i * cdata->channel_reg_size);

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->global_pause_count = 0;
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	/*
	 * XXX The hardware appears to support
	 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
	 * only used by this driver during tegra_dma_terminate_all()
	 */
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);
err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;

	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only save the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
		if (tdma->chip_data->support_separate_wcount_reg)
			ch_reg->wcount = tdc_read(tdc,
						  TEGRA_APBDMA_CHAN_WCOUNT);
	}

	clk_disable_unprepare(tdma->dma_clk);

	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i, ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only restore the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		if (tdma->chip_data->support_separate_wcount_reg)
			tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
				  ch_reg->wcount);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
	}

	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");