1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * DMA driver for NVIDIA's Tegra20 APB DMA controller.
4  *
5  * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
6  */
7 
8 #include <linux/bitops.h>
9 #include <linux/clk.h>
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/mm.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_device.h>
21 #include <linux/of_dma.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/reset.h>
26 #include <linux/slab.h>
27 
28 #include "dmaengine.h"
29 
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/tegra_apb_dma.h>
32 
33 #define TEGRA_APBDMA_GENERAL			0x0
34 #define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)
35 
36 #define TEGRA_APBDMA_CONTROL			0x010
37 #define TEGRA_APBDMA_IRQ_MASK			0x01c
38 #define TEGRA_APBDMA_IRQ_MASK_SET		0x020
39 
40 /* CSR register */
41 #define TEGRA_APBDMA_CHAN_CSR			0x00
42 #define TEGRA_APBDMA_CSR_ENB			BIT(31)
43 #define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
44 #define TEGRA_APBDMA_CSR_HOLD			BIT(29)
45 #define TEGRA_APBDMA_CSR_DIR			BIT(28)
46 #define TEGRA_APBDMA_CSR_ONCE			BIT(27)
47 #define TEGRA_APBDMA_CSR_FLOW			BIT(21)
48 #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
49 #define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
50 #define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC
51 
52 /* STATUS register */
53 #define TEGRA_APBDMA_CHAN_STATUS		0x004
54 #define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
55 #define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
56 #define TEGRA_APBDMA_STATUS_HALT		BIT(29)
57 #define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
58 #define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
59 #define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC
60 
61 #define TEGRA_APBDMA_CHAN_CSRE			0x00C
62 #define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)
63 
64 /* AHB memory address */
65 #define TEGRA_APBDMA_CHAN_AHBPTR		0x010
66 
67 /* AHB sequence register */
68 #define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
69 #define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
70 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
71 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
72 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
73 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
74 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
75 #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
76 #define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
77 #define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
78 #define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
79 #define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
80 #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
81 #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0
82 
83 /* APB address */
84 #define TEGRA_APBDMA_CHAN_APBPTR		0x018
85 
86 /* APB sequence register */
87 #define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
88 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
89 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
90 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
91 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
92 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
93 #define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
94 #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)
95 
96 /* Tegra148 specific registers */
97 #define TEGRA_APBDMA_CHAN_WCOUNT		0x20
98 
99 #define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24
100 
101 /*
102  * If any burst is in flight when the DMA is paused, this is the time in
103  * microseconds for it to complete and for the status register to update.
104  */
105 #define TEGRA_APBDMA_BURST_COMPLETE_TIME	20
106 
107 /* Channel base address offset from APBDMA base address */
108 #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000
109 
110 #define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)
111 
112 struct tegra_dma;
113 
114 /*
115  * tegra_dma_chip_data: Tegra chip-specific DMA data
116  * @nr_channels: Number of channels available in the controller.
117  * @channel_reg_size: Channel register size/stride.
118  * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
119  * @support_channel_pause: Support channel-wise pause of DMA.
120  * @support_separate_wcount_reg: Support separate word count register.
121  */
122 struct tegra_dma_chip_data {
123 	int nr_channels;
124 	int channel_reg_size;
125 	int max_dma_count;
126 	bool support_channel_pause;
127 	bool support_separate_wcount_reg;
128 };
129 
130 /* DMA channel registers */
131 struct tegra_dma_channel_regs {
132 	unsigned long	csr;
133 	unsigned long	ahb_ptr;
134 	unsigned long	apb_ptr;
135 	unsigned long	ahb_seq;
136 	unsigned long	apb_seq;
137 	unsigned long	wcount;
138 };
139 
140 /*
141  * tegra_dma_sg_req: DMA request details used to configure the hardware.
142  * This holds the details of one sub-transfer as programmed into the DMA
143  * hw. A client's data transfer request may be broken into multiple
144  * sub-transfers, depending on the requester's details and hw support.
145  * Each sub-transfer is added to the channel's transfer list and points
146  * to the Tegra DMA descriptor that manages the overall transfer.
147  */
148 struct tegra_dma_sg_req {
149 	struct tegra_dma_channel_regs	ch_regs;
150 	unsigned int			req_len;
151 	bool				configured;
152 	bool				last_sg;
153 	struct list_head		node;
154 	struct tegra_dma_desc		*dma_desc;
155 	unsigned int			words_xferred;
156 };
157 
158 /*
159  * tegra_dma_desc: Tegra DMA descriptor which manages a client request.
160  * This descriptor keeps track of the transfer status, callbacks, request
161  * counts, etc.
162  */
163 struct tegra_dma_desc {
164 	struct dma_async_tx_descriptor	txd;
165 	unsigned int			bytes_requested;
166 	unsigned int			bytes_transferred;
167 	enum dma_status			dma_status;
168 	struct list_head		node;
169 	struct list_head		tx_list;
170 	struct list_head		cb_node;
171 	int				cb_count;
172 };
173 
174 struct tegra_dma_channel;
175 
176 typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
177 				bool to_terminate);
178 
179 /* tegra_dma_channel: Channel specific information */
180 struct tegra_dma_channel {
181 	struct dma_chan		dma_chan;
182 	char			name[12];
183 	bool			config_init;
184 	int			id;
185 	int			irq;
186 	void __iomem		*chan_addr;
187 	spinlock_t		lock;
188 	bool			busy;
189 	struct tegra_dma	*tdma;
190 	bool			cyclic;
191 
192 	/* Different lists for managing the requests */
193 	struct list_head	free_sg_req;
194 	struct list_head	pending_sg_req;
195 	struct list_head	free_dma_desc;
196 	struct list_head	cb_desc;
197 
198 	/* ISR handler and tasklet for the bottom half of ISR handling */
199 	dma_isr_handler		isr_handler;
200 	struct tasklet_struct	tasklet;
201 
202 	/* Channel-slave specific configuration */
203 	unsigned int slave_id;
204 	struct dma_slave_config dma_sconfig;
205 	struct tegra_dma_channel_regs	channel_reg;
206 };
207 
208 /* tegra_dma: Tegra DMA specific information */
209 struct tegra_dma {
210 	struct dma_device		dma_dev;
211 	struct device			*dev;
212 	struct clk			*dma_clk;
213 	struct reset_control		*rst;
214 	spinlock_t			global_lock;
215 	void __iomem			*base_addr;
216 	const struct tegra_dma_chip_data *chip_data;
217 
218 	/*
219 	 * Counter for managing global pausing of the DMA controller.
220 	 * Only applicable for devices that don't support individual
221 	 * channel pausing.
222 	 */
223 	u32				global_pause_count;
224 
225 	/* Some registers need to be cached before suspend */
226 	u32				reg_gen;
227 
228 	/* Last member of the structure */
229 	struct tegra_dma_channel channels[];
230 };
231 
232 static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
233 {
234 	writel(val, tdma->base_addr + reg);
235 }
236 
237 static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
238 {
239 	return readl(tdma->base_addr + reg);
240 }
241 
242 static inline void tdc_write(struct tegra_dma_channel *tdc,
243 		u32 reg, u32 val)
244 {
245 	writel(val, tdc->chan_addr + reg);
246 }
247 
248 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
249 {
250 	return readl(tdc->chan_addr + reg);
251 }
252 
253 static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
254 {
255 	return container_of(dc, struct tegra_dma_channel, dma_chan);
256 }
257 
258 static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
259 		struct dma_async_tx_descriptor *td)
260 {
261 	return container_of(td, struct tegra_dma_desc, txd);
262 }
263 
264 static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
265 {
266 	return &tdc->dma_chan.dev->device;
267 }
268 
269 static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
270 static int tegra_dma_runtime_suspend(struct device *dev);
271 static int tegra_dma_runtime_resume(struct device *dev);
272 
273 /* Get a DMA desc from the free list; if none is available, allocate one. */
274 static struct tegra_dma_desc *tegra_dma_desc_get(
275 		struct tegra_dma_channel *tdc)
276 {
277 	struct tegra_dma_desc *dma_desc;
278 	unsigned long flags;
279 
280 	spin_lock_irqsave(&tdc->lock, flags);
281 
282 	/* Do not reuse descriptors that are still waiting for their ack */
283 	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
284 		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
285 			list_del(&dma_desc->node);
286 			spin_unlock_irqrestore(&tdc->lock, flags);
287 			dma_desc->txd.flags = 0;
288 			return dma_desc;
289 		}
290 	}
291 
292 	spin_unlock_irqrestore(&tdc->lock, flags);
293 
294 	/* Allocate DMA desc */
295 	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
296 	if (!dma_desc)
297 		return NULL;
298 
299 	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
300 	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
301 	dma_desc->txd.flags = 0;
302 	return dma_desc;
303 }
304 
305 static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
306 		struct tegra_dma_desc *dma_desc)
307 {
308 	unsigned long flags;
309 
310 	spin_lock_irqsave(&tdc->lock, flags);
311 	if (!list_empty(&dma_desc->tx_list))
312 		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
313 	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
314 	spin_unlock_irqrestore(&tdc->lock, flags);
315 }
316 
317 static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
318 		struct tegra_dma_channel *tdc)
319 {
320 	struct tegra_dma_sg_req *sg_req = NULL;
321 	unsigned long flags;
322 
323 	spin_lock_irqsave(&tdc->lock, flags);
324 	if (!list_empty(&tdc->free_sg_req)) {
325 		sg_req = list_first_entry(&tdc->free_sg_req,
326 					typeof(*sg_req), node);
327 		list_del(&sg_req->node);
328 		spin_unlock_irqrestore(&tdc->lock, flags);
329 		return sg_req;
330 	}
331 	spin_unlock_irqrestore(&tdc->lock, flags);
332 
333 	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);
334 
335 	return sg_req;
336 }
337 
338 static int tegra_dma_slave_config(struct dma_chan *dc,
339 		struct dma_slave_config *sconfig)
340 {
341 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
342 
343 	if (!list_empty(&tdc->pending_sg_req)) {
344 		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
345 		return -EBUSY;
346 	}
347 
348 	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
349 	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
350 	    sconfig->device_fc) {
351 		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
352 			return -EINVAL;
353 		tdc->slave_id = sconfig->slave_id;
354 	}
355 	tdc->config_init = true;
356 	return 0;
357 }
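
/*
 * Example (illustrative sketch, not part of this driver): clients reach the
 * callback above through the generic dmaengine API. The FIFO address and
 * the "chan" handle below are hypothetical.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= 0x70002000,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *		.device_fc	= false,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */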
358 
359 static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
360 	bool wait_for_burst_complete)
361 {
362 	struct tegra_dma *tdma = tdc->tdma;
363 
364 	spin_lock(&tdma->global_lock);
365 
366 	if (tdma->global_pause_count == 0) {
367 		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
368 		if (wait_for_burst_complete)
369 			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
370 	}
371 
372 	tdma->global_pause_count++;
373 
374 	spin_unlock(&tdma->global_lock);
375 }
376 
377 static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
378 {
379 	struct tegra_dma *tdma = tdc->tdma;
380 
381 	spin_lock(&tdma->global_lock);
382 
383 	if (WARN_ON(tdma->global_pause_count == 0))
384 		goto out;
385 
386 	if (--tdma->global_pause_count == 0)
387 		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
388 			   TEGRA_APBDMA_GENERAL_ENABLE);
389 
390 out:
391 	spin_unlock(&tdma->global_lock);
392 }
393 
394 static void tegra_dma_pause(struct tegra_dma_channel *tdc,
395 	bool wait_for_burst_complete)
396 {
397 	struct tegra_dma *tdma = tdc->tdma;
398 
399 	if (tdma->chip_data->support_channel_pause) {
400 		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
401 				TEGRA_APBDMA_CHAN_CSRE_PAUSE);
402 		if (wait_for_burst_complete)
403 			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
404 	} else {
405 		tegra_dma_global_pause(tdc, wait_for_burst_complete);
406 	}
407 }
408 
409 static void tegra_dma_resume(struct tegra_dma_channel *tdc)
410 {
411 	struct tegra_dma *tdma = tdc->tdma;
412 
413 	if (tdma->chip_data->support_channel_pause) {
414 		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
415 	} else {
416 		tegra_dma_global_resume(tdc);
417 	}
418 }
419 
420 static void tegra_dma_stop(struct tegra_dma_channel *tdc)
421 {
422 	u32 csr;
423 	u32 status;
424 
425 	/* Disable interrupts */
426 	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
427 	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
428 	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
429 
430 	/* Disable DMA */
431 	csr &= ~TEGRA_APBDMA_CSR_ENB;
432 	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
433 
434 	/* Clear the interrupt status if it is set */
435 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
436 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
437 		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
438 		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
439 	}
440 	tdc->busy = false;
441 }
442 
443 static void tegra_dma_start(struct tegra_dma_channel *tdc,
444 		struct tegra_dma_sg_req *sg_req)
445 {
446 	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
447 
448 	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
449 	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
450 	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
451 	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
452 	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
453 	if (tdc->tdma->chip_data->support_separate_wcount_reg)
454 		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
455 
456 	/* Start DMA */
457 	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
458 				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
459 }
460 
461 static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
462 		struct tegra_dma_sg_req *nsg_req)
463 {
464 	unsigned long status;
465 
466 	/*
467 	 * The DMA controller reloads the new configuration for the next
468 	 * transfer after the last burst of the current transfer completes.
469 	 * If there is no pending EOC interrupt status then the last burst
470 	 * has not completed yet. There may be a case where the last burst
471 	 * is in flight and thus can complete, but because the DMA is
472 	 * paused it will neither generate an interrupt nor reload the
473 	 * new configuration.
474 	 * If the EOC interrupt status is already set then the interrupt
475 	 * handler needs to load the new configuration instead.
476 	 */
477 	tegra_dma_pause(tdc, false);
478 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
479 
480 	/*
481 	 * If an interrupt is pending then do nothing, as the ISR will
482 	 * handle the programming of the new request.
483 	 */
484 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
485 		dev_err(tdc2dev(tdc),
486 			"Skipping new configuration as interrupt is pending\n");
487 		tegra_dma_resume(tdc);
488 		return;
489 	}
490 
491 	/* Safe to program new configuration */
492 	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
493 	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
494 	if (tdc->tdma->chip_data->support_separate_wcount_reg)
495 		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
496 						nsg_req->ch_regs.wcount);
497 	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
498 				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
499 	nsg_req->configured = true;
500 	nsg_req->words_xferred = 0;
501 
502 	tegra_dma_resume(tdc);
503 }
504 
505 static void tdc_start_head_req(struct tegra_dma_channel *tdc)
506 {
507 	struct tegra_dma_sg_req *sg_req;
508 
509 	if (list_empty(&tdc->pending_sg_req))
510 		return;
511 
512 	sg_req = list_first_entry(&tdc->pending_sg_req,
513 					typeof(*sg_req), node);
514 	tegra_dma_start(tdc, sg_req);
515 	sg_req->configured = true;
516 	sg_req->words_xferred = 0;
517 	tdc->busy = true;
518 }
519 
520 static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
521 {
522 	struct tegra_dma_sg_req *hsgreq;
523 	struct tegra_dma_sg_req *hnsgreq;
524 
525 	if (list_empty(&tdc->pending_sg_req))
526 		return;
527 
528 	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
529 	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
530 		hnsgreq = list_first_entry(&hsgreq->node,
531 					typeof(*hnsgreq), node);
532 		tegra_dma_configure_for_next(tdc, hnsgreq);
533 	}
534 }
535 
536 static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
537 	struct tegra_dma_sg_req *sg_req, unsigned long status)
538 {
539 	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
540 }
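
/*
 * Worked example (illustrative), following the formula above: the word
 * count field is programmed with req_len - 4 (see tegra_dma_prep_wcount()),
 * so for req_len = 64 a masked STATUS count of 60 means 64 - 60 - 4 = 0
 * bytes transferred, and a count of 28 means 64 - 28 - 4 = 32 bytes.
 */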
541 
542 static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
543 {
544 	struct tegra_dma_sg_req *sgreq;
545 	struct tegra_dma_desc *dma_desc;
546 
547 	while (!list_empty(&tdc->pending_sg_req)) {
548 		sgreq = list_first_entry(&tdc->pending_sg_req,
549 						typeof(*sgreq), node);
550 		list_move_tail(&sgreq->node, &tdc->free_sg_req);
551 		if (sgreq->last_sg) {
552 			dma_desc = sgreq->dma_desc;
553 			dma_desc->dma_status = DMA_ERROR;
554 			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
555 
556 			/* Add in cb list if it is not there. */
557 			if (!dma_desc->cb_count)
558 				list_add_tail(&dma_desc->cb_node,
559 							&tdc->cb_desc);
560 			dma_desc->cb_count++;
561 		}
562 	}
563 	tdc->isr_handler = NULL;
564 }
565 
566 static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
567 		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
568 {
569 	struct tegra_dma_sg_req *hsgreq = NULL;
570 
571 	if (list_empty(&tdc->pending_sg_req)) {
572 		dev_err(tdc2dev(tdc), "DMA is running without req\n");
573 		tegra_dma_stop(tdc);
574 		return false;
575 	}
576 
577 	/*
578 	 * Check whether the head request on the list is in flight.
579 	 * If it is not in flight then abort the transfer, as the
580 	 * transfer loop cannot continue.
581 	 */
582 	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
583 	if (!hsgreq->configured) {
584 		tegra_dma_stop(tdc);
585 		dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
586 		tegra_dma_abort_all(tdc);
587 		return false;
588 	}
589 
590 	/* Configure next request */
591 	if (!to_terminate)
592 		tdc_configure_next_head_desc(tdc);
593 	return true;
594 }
595 
596 static void handle_once_dma_done(struct tegra_dma_channel *tdc,
597 	bool to_terminate)
598 {
599 	struct tegra_dma_sg_req *sgreq;
600 	struct tegra_dma_desc *dma_desc;
601 
602 	tdc->busy = false;
603 	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
604 	dma_desc = sgreq->dma_desc;
605 	dma_desc->bytes_transferred += sgreq->req_len;
606 
607 	list_del(&sgreq->node);
608 	if (sgreq->last_sg) {
609 		dma_desc->dma_status = DMA_COMPLETE;
610 		dma_cookie_complete(&dma_desc->txd);
611 		if (!dma_desc->cb_count)
612 			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
613 		dma_desc->cb_count++;
614 		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
615 	}
616 	list_add_tail(&sgreq->node, &tdc->free_sg_req);
617 
618 	/* Do not start the DMA if it is going to be terminated */
619 	if (to_terminate || list_empty(&tdc->pending_sg_req))
620 		return;
621 
622 	tdc_start_head_req(tdc);
623 }
624 
625 static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
626 		bool to_terminate)
627 {
628 	struct tegra_dma_sg_req *sgreq;
629 	struct tegra_dma_desc *dma_desc;
630 	bool st;
631 
632 	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
633 	dma_desc = sgreq->dma_desc;
634 	/* If we DMA for long enough, the transfer count will wrap */
635 	dma_desc->bytes_transferred =
636 		(dma_desc->bytes_transferred + sgreq->req_len) %
637 		dma_desc->bytes_requested;
638 
639 	/* The callback needs to be called */
640 	if (!dma_desc->cb_count)
641 		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
642 	dma_desc->cb_count++;
643 
644 	sgreq->words_xferred = 0;
645 
646 	/* If not last req then put at end of pending list */
647 	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
648 		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
649 		sgreq->configured = false;
650 		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
651 		if (!st)
652 			dma_desc->dma_status = DMA_ERROR;
653 	}
654 }
655 
656 static void tegra_dma_tasklet(unsigned long data)
657 {
658 	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
659 	struct dmaengine_desc_callback cb;
660 	struct tegra_dma_desc *dma_desc;
661 	unsigned long flags;
662 	int cb_count;
663 
664 	spin_lock_irqsave(&tdc->lock, flags);
665 	while (!list_empty(&tdc->cb_desc)) {
666 		dma_desc  = list_first_entry(&tdc->cb_desc,
667 					typeof(*dma_desc), cb_node);
668 		list_del(&dma_desc->cb_node);
669 		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
670 		cb_count = dma_desc->cb_count;
671 		dma_desc->cb_count = 0;
672 		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
673 					    cb.callback);
674 		spin_unlock_irqrestore(&tdc->lock, flags);
675 		while (cb_count--)
676 			dmaengine_desc_callback_invoke(&cb, NULL);
677 		spin_lock_irqsave(&tdc->lock, flags);
678 	}
679 	spin_unlock_irqrestore(&tdc->lock, flags);
680 }
681 
682 static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
683 {
684 	struct tegra_dma_channel *tdc = dev_id;
685 	unsigned long status;
686 	unsigned long flags;
687 
688 	spin_lock_irqsave(&tdc->lock, flags);
689 
690 	trace_tegra_dma_isr(&tdc->dma_chan, irq);
691 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
692 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
693 		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
694 		tdc->isr_handler(tdc, false);
695 		tasklet_schedule(&tdc->tasklet);
696 		spin_unlock_irqrestore(&tdc->lock, flags);
697 		return IRQ_HANDLED;
698 	}
699 
700 	spin_unlock_irqrestore(&tdc->lock, flags);
701 	dev_info(tdc2dev(tdc),
702 		"Interrupt already served status 0x%08lx\n", status);
703 	return IRQ_NONE;
704 }
705 
706 static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
707 {
708 	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
709 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
710 	unsigned long flags;
711 	dma_cookie_t cookie;
712 
713 	spin_lock_irqsave(&tdc->lock, flags);
714 	dma_desc->dma_status = DMA_IN_PROGRESS;
715 	cookie = dma_cookie_assign(&dma_desc->txd);
716 	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
717 	spin_unlock_irqrestore(&tdc->lock, flags);
718 	return cookie;
719 }
720 
721 static void tegra_dma_issue_pending(struct dma_chan *dc)
722 {
723 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
724 	unsigned long flags;
725 
726 	spin_lock_irqsave(&tdc->lock, flags);
727 	if (list_empty(&tdc->pending_sg_req)) {
728 		dev_err(tdc2dev(tdc), "No DMA request\n");
729 		goto end;
730 	}
731 	if (!tdc->busy) {
732 		tdc_start_head_req(tdc);
733 
734 		/* Continuous single mode: Configure next req */
735 		if (tdc->cyclic) {
736 			/*
737 			 * Wait for one burst time so the DMA can be
738 			 * configured for the next transfer.
739 			 */
740 			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
741 			tdc_configure_next_head_desc(tdc);
742 		}
743 	}
744 end:
745 	spin_unlock_irqrestore(&tdc->lock, flags);
746 }
747 
748 static int tegra_dma_terminate_all(struct dma_chan *dc)
749 {
750 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
751 	struct tegra_dma_sg_req *sgreq;
752 	struct tegra_dma_desc *dma_desc;
753 	unsigned long flags;
754 	unsigned long status;
755 	unsigned long wcount;
756 	bool was_busy;
757 
758 	spin_lock_irqsave(&tdc->lock, flags);
759 
760 	if (!tdc->busy)
761 		goto skip_dma_stop;
762 
763 	/* Pause DMA before checking the queue status */
764 	tegra_dma_pause(tdc, true);
765 
766 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
767 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
768 		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
769 		tdc->isr_handler(tdc, true);
770 		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
771 	}
772 	if (tdc->tdma->chip_data->support_separate_wcount_reg)
773 		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
774 	else
775 		wcount = status;
776 
777 	was_busy = tdc->busy;
778 	tegra_dma_stop(tdc);
779 
780 	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
781 		sgreq = list_first_entry(&tdc->pending_sg_req,
782 					typeof(*sgreq), node);
783 		sgreq->dma_desc->bytes_transferred +=
784 				get_current_xferred_count(tdc, sgreq, wcount);
785 	}
786 	tegra_dma_resume(tdc);
787 
788 skip_dma_stop:
789 	tegra_dma_abort_all(tdc);
790 
791 	while (!list_empty(&tdc->cb_desc)) {
792 		dma_desc  = list_first_entry(&tdc->cb_desc,
793 					typeof(*dma_desc), cb_node);
794 		list_del(&dma_desc->cb_node);
795 		dma_desc->cb_count = 0;
796 	}
797 	spin_unlock_irqrestore(&tdc->lock, flags);
798 	return 0;
799 }
800 
801 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
802 					       struct tegra_dma_sg_req *sg_req)
803 {
804 	unsigned long status, wcount = 0;
805 
806 	if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
807 		return 0;
808 
809 	if (tdc->tdma->chip_data->support_separate_wcount_reg)
810 		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
811 
812 	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
813 
814 	if (!tdc->tdma->chip_data->support_separate_wcount_reg)
815 		wcount = status;
816 
817 	if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
818 		return sg_req->req_len;
819 
820 	wcount = get_current_xferred_count(tdc, sg_req, wcount);
821 
822 	if (!wcount) {
823 		/*
824 		 * If wcount wasn't ever polled for this SG before, then
825 		 * simply assume that transfer hasn't started yet.
826 		 *
827 		 * Otherwise it's the end of the transfer.
828 		 *
829 		 * The alternative would be to poll the status register
830 		 * until the EOC bit is set or wcount goes up. That is
831 		 * because the EOC bit is set only after the last burst
832 		 * completes, and the counter is 4 bytes less than the
833 		 * actual transfer size. The counter value wraps around
834 		 * in a cyclic mode before EOC is set(!), so we can't easily
835 		 * distinguish start of transfer from its end.
836 		 */
837 		if (sg_req->words_xferred)
838 			wcount = sg_req->req_len - 4;
839 
840 	} else if (wcount < sg_req->words_xferred) {
841 		/*
842 		 * This case will never happen for a non-cyclic transfer.
843 		 *
844 		 * For a cyclic transfer, although it is possible for the
845 		 * next transfer to have already started (resetting the word
846 		 * count), this case should still not happen because we should
847 		 * have detected that the EOC bit is set and hence the transfer
848 		 * was completed.
849 		 */
850 		WARN_ON_ONCE(1);
851 
852 		wcount = sg_req->req_len - 4;
853 	} else {
854 		sg_req->words_xferred = wcount;
855 	}
856 
857 	return wcount;
858 }
859 
860 static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
861 	dma_cookie_t cookie, struct dma_tx_state *txstate)
862 {
863 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
864 	struct tegra_dma_desc *dma_desc;
865 	struct tegra_dma_sg_req *sg_req;
866 	enum dma_status ret;
867 	unsigned long flags;
868 	unsigned int residual;
869 	unsigned int bytes = 0;
870 
871 	ret = dma_cookie_status(dc, cookie, txstate);
872 	if (ret == DMA_COMPLETE)
873 		return ret;
874 
875 	spin_lock_irqsave(&tdc->lock, flags);
876 
877 	/* Check on wait_ack desc status */
878 	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
879 		if (dma_desc->txd.cookie == cookie) {
880 			ret = dma_desc->dma_status;
881 			goto found;
882 		}
883 	}
884 
885 	/* Check in pending list */
886 	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
887 		dma_desc = sg_req->dma_desc;
888 		if (dma_desc->txd.cookie == cookie) {
889 			bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
890 			ret = dma_desc->dma_status;
891 			goto found;
892 		}
893 	}
894 
895 	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
896 	dma_desc = NULL;
897 
898 found:
899 	if (dma_desc && txstate) {
900 		residual = dma_desc->bytes_requested -
901 			   ((dma_desc->bytes_transferred + bytes) %
902 			    dma_desc->bytes_requested);
903 		dma_set_residue(txstate, residual);
904 	}
905 
906 	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
907 	spin_unlock_irqrestore(&tdc->lock, flags);
908 	return ret;
909 }
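
/*
 * Worked example (illustrative) of the residue math above: with
 * bytes_requested = 4096, bytes_transferred = 1024 and bytes = 512 for the
 * in-flight sg entry, the reported residue is 4096 - (1536 % 4096) = 2560.
 */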
910 
911 static inline int get_bus_width(struct tegra_dma_channel *tdc,
912 		enum dma_slave_buswidth slave_bw)
913 {
914 	switch (slave_bw) {
915 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
916 		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
917 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
918 		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
919 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
920 		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
921 	case DMA_SLAVE_BUSWIDTH_8_BYTES:
922 		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
923 	default:
924 		dev_warn(tdc2dev(tdc),
925 			"slave bw is not supported, using 32bits\n");
926 		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
927 	}
928 }
929 
930 static inline int get_burst_size(struct tegra_dma_channel *tdc,
931 	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
932 {
933 	int burst_byte;
934 	int burst_ahb_width;
935 
936 	/*
937 	 * The burst_size from the client is in units of the bus_width.
938 	 * Convert it into the AHB memory width, which is 4 bytes.
939 	 */
940 	burst_byte = burst_size * slave_bw;
941 	burst_ahb_width = burst_byte / 4;
942 
943 	/* If burst size is 0 then calculate the burst size based on length */
944 	if (!burst_ahb_width) {
945 		if (len & 0xF)
946 			return TEGRA_APBDMA_AHBSEQ_BURST_1;
947 		else if ((len >> 4) & 0x1)
948 			return TEGRA_APBDMA_AHBSEQ_BURST_4;
949 		else
950 			return TEGRA_APBDMA_AHBSEQ_BURST_8;
951 	}
952 	if (burst_ahb_width < 4)
953 		return TEGRA_APBDMA_AHBSEQ_BURST_1;
954 	else if (burst_ahb_width < 8)
955 		return TEGRA_APBDMA_AHBSEQ_BURST_4;
956 	else
957 		return TEGRA_APBDMA_AHBSEQ_BURST_8;
958 }
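
/*
 * Worked example (illustrative) of the selection above: maxburst = 8 at a
 * 16-bit bus width gives burst_byte = 16, i.e. 4 AHB words, which maps to
 * TEGRA_APBDMA_AHBSEQ_BURST_4. With maxburst = 0 and len = 64 (0x40),
 * bit 4 of len is clear and len is 16-byte aligned, so
 * TEGRA_APBDMA_AHBSEQ_BURST_8 is chosen from the length heuristic.
 */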
959 
960 static int get_transfer_param(struct tegra_dma_channel *tdc,
961 	enum dma_transfer_direction direction, unsigned long *apb_addr,
962 	unsigned long *apb_seq,	unsigned long *csr, unsigned int *burst_size,
963 	enum dma_slave_buswidth *slave_bw)
964 {
965 	switch (direction) {
966 	case DMA_MEM_TO_DEV:
967 		*apb_addr = tdc->dma_sconfig.dst_addr;
968 		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
969 		*burst_size = tdc->dma_sconfig.dst_maxburst;
970 		*slave_bw = tdc->dma_sconfig.dst_addr_width;
971 		*csr = TEGRA_APBDMA_CSR_DIR;
972 		return 0;
973 
974 	case DMA_DEV_TO_MEM:
975 		*apb_addr = tdc->dma_sconfig.src_addr;
976 		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
977 		*burst_size = tdc->dma_sconfig.src_maxburst;
978 		*slave_bw = tdc->dma_sconfig.src_addr_width;
979 		*csr = 0;
980 		return 0;
981 
982 	default:
983 		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
984 		break;
985 	}
986 	return -EINVAL;
987 }
988 
989 static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
990 	struct tegra_dma_channel_regs *ch_regs, u32 len)
991 {
992 	u32 len_field = (len - 4) & 0xFFFC;
993 
994 	if (tdc->tdma->chip_data->support_separate_wcount_reg)
995 		ch_regs->wcount = len_field;
996 	else
997 		ch_regs->csr |= len_field;
998 }
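
/*
 * Worked example (illustrative): len = 64 gives len_field = (64 - 4) &
 * 0xFFFC = 60 (0x3C). Depending on the chip, this lands in the separate
 * WCOUNT register or in the word-count bits of the CSR.
 */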
999 
1000 static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
1001 	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
1002 	enum dma_transfer_direction direction, unsigned long flags,
1003 	void *context)
1004 {
1005 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1006 	struct tegra_dma_desc *dma_desc;
1007 	unsigned int i;
1008 	struct scatterlist *sg;
1009 	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
1010 	struct list_head req_list;
1011 	struct tegra_dma_sg_req  *sg_req = NULL;
1012 	u32 burst_size;
1013 	enum dma_slave_buswidth slave_bw;
1014 
1015 	if (!tdc->config_init) {
1016 		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
1017 		return NULL;
1018 	}
1019 	if (sg_len < 1) {
1020 		dev_err(tdc2dev(tdc), "Invalid segment length %u\n", sg_len);
1021 		return NULL;
1022 	}
1023 
1024 	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1025 				&burst_size, &slave_bw) < 0)
1026 		return NULL;
1027 
1028 	INIT_LIST_HEAD(&req_list);
1029 
1030 	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
1031 	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
1032 					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
1033 	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
1034 
1035 	csr |= TEGRA_APBDMA_CSR_ONCE;
1036 
1037 	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
1038 		csr |= TEGRA_APBDMA_CSR_FLOW;
1039 		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1040 	}
1041 
1042 	if (flags & DMA_PREP_INTERRUPT) {
1043 		csr |= TEGRA_APBDMA_CSR_IE_EOC;
1044 	} else {
1045 		WARN_ON_ONCE(1);
1046 		return NULL;
1047 	}
1048 
1049 	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
1050 
1051 	dma_desc = tegra_dma_desc_get(tdc);
1052 	if (!dma_desc) {
1053 		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
1054 		return NULL;
1055 	}
1056 	INIT_LIST_HEAD(&dma_desc->tx_list);
1057 	INIT_LIST_HEAD(&dma_desc->cb_node);
1058 	dma_desc->cb_count = 0;
1059 	dma_desc->bytes_requested = 0;
1060 	dma_desc->bytes_transferred = 0;
1061 	dma_desc->dma_status = DMA_IN_PROGRESS;
1062 
1063 	/* Make transfer requests */
1064 	for_each_sg(sgl, sg, sg_len, i) {
1065 		u32 len, mem;
1066 
1067 		mem = sg_dma_address(sg);
1068 		len = sg_dma_len(sg);
1069 
1070 		if ((len & 3) || (mem & 3) ||
1071 				(len > tdc->tdma->chip_data->max_dma_count)) {
1072 			dev_err(tdc2dev(tdc),
1073 				"DMA length/memory address is not supported\n");
1074 			tegra_dma_desc_put(tdc, dma_desc);
1075 			return NULL;
1076 		}
1077 
1078 		sg_req = tegra_dma_sg_req_get(tdc);
1079 		if (!sg_req) {
1080 			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
1081 			tegra_dma_desc_put(tdc, dma_desc);
1082 			return NULL;
1083 		}
1084 
1085 		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1086 		dma_desc->bytes_requested += len;
1087 
1088 		sg_req->ch_regs.apb_ptr = apb_ptr;
1089 		sg_req->ch_regs.ahb_ptr = mem;
1090 		sg_req->ch_regs.csr = csr;
1091 		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
1092 		sg_req->ch_regs.apb_seq = apb_seq;
1093 		sg_req->ch_regs.ahb_seq = ahb_seq;
1094 		sg_req->configured = false;
1095 		sg_req->last_sg = false;
1096 		sg_req->dma_desc = dma_desc;
1097 		sg_req->req_len = len;
1098 
1099 		list_add_tail(&sg_req->node, &dma_desc->tx_list);
1100 	}
1101 	sg_req->last_sg = true;
1102 	if (flags & DMA_CTRL_ACK)
1103 		dma_desc->txd.flags = DMA_CTRL_ACK;
1104 
1105 	/*
1106 	 * Make sure that the requested mode does not conflict with the
1107 	 * currently configured mode.
1108 	 */
1109 	if (!tdc->isr_handler) {
1110 		tdc->isr_handler = handle_once_dma_done;
1111 		tdc->cyclic = false;
1112 	} else {
1113 		if (tdc->cyclic) {
1114 			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
1115 			tegra_dma_desc_put(tdc, dma_desc);
1116 			return NULL;
1117 		}
1118 	}
1119 
1120 	return &dma_desc->txd;
1121 }
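
/*
 * Example (illustrative sketch, not part of this driver): a slave client
 * typically prepares and launches a transfer through the generic dmaengine
 * wrappers. "chan", "sgt" and "xfer_done" are hypothetical.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = xfer_done;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */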
1122 
1123 static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
1124 	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
1125 	size_t period_len, enum dma_transfer_direction direction,
1126 	unsigned long flags)
1127 {
1128 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1129 	struct tegra_dma_desc *dma_desc = NULL;
1130 	struct tegra_dma_sg_req *sg_req = NULL;
1131 	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
1132 	int len;
1133 	size_t remain_len;
1134 	dma_addr_t mem = buf_addr;
1135 	u32 burst_size;
1136 	enum dma_slave_buswidth slave_bw;
1137 
1138 	if (!buf_len || !period_len) {
1139 		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
1140 		return NULL;
1141 	}
1142 
1143 	if (!tdc->config_init) {
1144 		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
1145 		return NULL;
1146 	}
1147 
1148 	/*
1149 	 * We can accept more requests as long as the DMA has not been
1150 	 * started. The driver will loop over all queued requests.
1151 	 * Once the DMA is started, new requests can be queued only after
1152 	 * terminating the DMA.
1153 	 */
1154 	if (tdc->busy) {
1155 		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
1156 		return NULL;
1157 	}
1158 
1159 	/*
1160 	 * We only support cyclic transfers when buf_len is a multiple of
1161 	 * period_len.
1162 	 */
1163 	if (buf_len % period_len) {
1164 		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
1165 		return NULL;
1166 	}
1167 
1168 	len = period_len;
1169 	if ((len & 3) || (buf_addr & 3) ||
1170 			(len > tdc->tdma->chip_data->max_dma_count)) {
1171 		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
1172 		return NULL;
1173 	}
1174 
1175 	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1176 				&burst_size, &slave_bw) < 0)
1177 		return NULL;
1178 
1179 	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
1180 	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
1181 					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
1182 	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
1183 
1184 	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
1185 		csr |= TEGRA_APBDMA_CSR_FLOW;
1186 		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1187 	}
1188 
1189 	if (flags & DMA_PREP_INTERRUPT) {
1190 		csr |= TEGRA_APBDMA_CSR_IE_EOC;
1191 	} else {
1192 		WARN_ON_ONCE(1);
1193 		return NULL;
1194 	}
1195 
1196 	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
1197 
1198 	dma_desc = tegra_dma_desc_get(tdc);
1199 	if (!dma_desc) {
1200 		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
1201 		return NULL;
1202 	}
1203 
1204 	INIT_LIST_HEAD(&dma_desc->tx_list);
1205 	INIT_LIST_HEAD(&dma_desc->cb_node);
1206 	dma_desc->cb_count = 0;
1207 
1208 	dma_desc->bytes_transferred = 0;
1209 	dma_desc->bytes_requested = buf_len;
1210 	remain_len = buf_len;
1211 
1212 	/* Split transfer equal to period size */
1213 	while (remain_len) {
1214 		sg_req = tegra_dma_sg_req_get(tdc);
1215 		if (!sg_req) {
1216 			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
1217 			tegra_dma_desc_put(tdc, dma_desc);
1218 			return NULL;
1219 		}
1220 
1221 		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1222 		sg_req->ch_regs.apb_ptr = apb_ptr;
1223 		sg_req->ch_regs.ahb_ptr = mem;
1224 		sg_req->ch_regs.csr = csr;
1225 		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
1226 		sg_req->ch_regs.apb_seq = apb_seq;
1227 		sg_req->ch_regs.ahb_seq = ahb_seq;
1228 		sg_req->configured = false;
1229 		sg_req->last_sg = false;
1230 		sg_req->dma_desc = dma_desc;
1231 		sg_req->req_len = len;
1232 
1233 		list_add_tail(&sg_req->node, &dma_desc->tx_list);
1234 		remain_len -= len;
1235 		mem += len;
1236 	}
1237 	sg_req->last_sg = true;
1238 	if (flags & DMA_CTRL_ACK)
1239 		dma_desc->txd.flags = DMA_CTRL_ACK;
1240 
1241 	/*
1242 	 * Make sure that the requested mode does not conflict with the
1243 	 * currently configured mode.
1244 	 */
1245 	if (!tdc->isr_handler) {
1246 		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
1247 		tdc->cyclic = true;
1248 	} else {
1249 		if (!tdc->cyclic) {
1250 			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
1251 			tegra_dma_desc_put(tdc, dma_desc);
1252 			return NULL;
1253 		}
1254 	}
1255 
1256 	return &dma_desc->txd;
1257 }
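
/*
 * Example (illustrative sketch): audio-style clients usually request a
 * cyclic transfer over a ring buffer with one interrupt per period. The
 * buffer and period sizes below are hypothetical.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * SZ_4K, SZ_4K,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 */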
1258 
1259 static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
1260 {
1261 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1262 	struct tegra_dma *tdma = tdc->tdma;
1263 	int ret;
1264 
1265 	dma_cookie_init(&tdc->dma_chan);
1266 	tdc->config_init = false;
1267 
1268 	ret = pm_runtime_get_sync(tdma->dev);
1269 	if (ret < 0)
1270 		return ret;
1271 
1272 	return 0;
1273 }
1274 
1275 static void tegra_dma_free_chan_resources(struct dma_chan *dc)
1276 {
1277 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1278 	struct tegra_dma *tdma = tdc->tdma;
1279 	struct tegra_dma_desc *dma_desc;
1280 	struct tegra_dma_sg_req *sg_req;
1281 	struct list_head dma_desc_list;
1282 	struct list_head sg_req_list;
1283 	unsigned long flags;
1284 
1285 	INIT_LIST_HEAD(&dma_desc_list);
1286 	INIT_LIST_HEAD(&sg_req_list);
1287 
1288 	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
1289 
1290 	if (tdc->busy)
1291 		tegra_dma_terminate_all(dc);
1292 
1293 	spin_lock_irqsave(&tdc->lock, flags);
1294 	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
1295 	list_splice_init(&tdc->free_sg_req, &sg_req_list);
1296 	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
1297 	INIT_LIST_HEAD(&tdc->cb_desc);
1298 	tdc->config_init = false;
1299 	tdc->isr_handler = NULL;
1300 	spin_unlock_irqrestore(&tdc->lock, flags);
1301 
1302 	while (!list_empty(&dma_desc_list)) {
1303 		dma_desc = list_first_entry(&dma_desc_list,
1304 					typeof(*dma_desc), node);
1305 		list_del(&dma_desc->node);
1306 		kfree(dma_desc);
1307 	}
1308 
1309 	while (!list_empty(&sg_req_list)) {
1310 		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
1311 		list_del(&sg_req->node);
1312 		kfree(sg_req);
1313 	}
1314 	pm_runtime_put(tdma->dev);
1315 
1316 	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
1317 }
1318 
1319 static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
1320 					   struct of_dma *ofdma)
1321 {
1322 	struct tegra_dma *tdma = ofdma->of_dma_data;
1323 	struct dma_chan *chan;
1324 	struct tegra_dma_channel *tdc;
1325 
1326 	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
1327 		dev_err(tdma->dev, "Invalid slave id: %u\n", dma_spec->args[0]);
1328 		return NULL;
1329 	}
1330 
1331 	chan = dma_get_any_slave_channel(&tdma->dma_dev);
1332 	if (!chan)
1333 		return NULL;
1334 
1335 	tdc = to_tegra_dma_chan(chan);
1336 	tdc->slave_id = dma_spec->args[0];
1337 
1338 	return chan;
1339 }
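
/*
 * Example (illustrative) device tree usage matching this xlate: the single
 * DMA cell is the APB DMA request selector of the slave. The client node
 * and selector below are hypothetical.
 *
 *	apbdma: dma@6000a000 {
 *		compatible = "nvidia,tegra20-apbdma";
 *		#dma-cells = <1>;
 *	};
 *
 *	serial@70006000 {
 *		dmas = <&apbdma 8>, <&apbdma 8>;
 *		dma-names = "rx", "tx";
 *	};
 */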
1340 
1341 /* Tegra20 specific DMA controller information */
1342 static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
1343 	.nr_channels		= 16,
1344 	.channel_reg_size	= 0x20,
1345 	.max_dma_count		= 1024UL * 64,
1346 	.support_channel_pause	= false,
1347 	.support_separate_wcount_reg = false,
1348 };
1349 
1350 /* Tegra30 specific DMA controller information */
1351 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
1352 	.nr_channels		= 32,
1353 	.channel_reg_size	= 0x20,
1354 	.max_dma_count		= 1024UL * 64,
1355 	.support_channel_pause	= false,
1356 	.support_separate_wcount_reg = false,
1357 };
1358 
1359 /* Tegra114 specific DMA controller information */
1360 static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
1361 	.nr_channels		= 32,
1362 	.channel_reg_size	= 0x20,
1363 	.max_dma_count		= 1024UL * 64,
1364 	.support_channel_pause	= true,
1365 	.support_separate_wcount_reg = false,
1366 };
1367 
1368 /* Tegra148 specific DMA controller information */
1369 static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
1370 	.nr_channels		= 32,
1371 	.channel_reg_size	= 0x40,
1372 	.max_dma_count		= 1024UL * 64,
1373 	.support_channel_pause	= true,
1374 	.support_separate_wcount_reg = true,
1375 };
1376 
1377 static int tegra_dma_probe(struct platform_device *pdev)
1378 {
1379 	struct resource *res;
1380 	struct tegra_dma *tdma;
1381 	int ret;
1382 	int i;
1383 	const struct tegra_dma_chip_data *cdata;
1384 
1385 	cdata = of_device_get_match_data(&pdev->dev);
1386 	if (!cdata) {
1387 		dev_err(&pdev->dev, "Error: No device match data found\n");
1388 		return -ENODEV;
1389 	}
1390 
1391 	tdma = devm_kzalloc(&pdev->dev,
1392 			    struct_size(tdma, channels, cdata->nr_channels),
1393 			    GFP_KERNEL);
1394 	if (!tdma)
1395 		return -ENOMEM;
1396 
1397 	tdma->dev = &pdev->dev;
1398 	tdma->chip_data = cdata;
1399 	platform_set_drvdata(pdev, tdma);
1400 
1401 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1402 	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
1403 	if (IS_ERR(tdma->base_addr))
1404 		return PTR_ERR(tdma->base_addr);
1405 
1406 	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
1407 	if (IS_ERR(tdma->dma_clk)) {
1408 		dev_err(&pdev->dev, "Error: Missing controller clock\n");
1409 		return PTR_ERR(tdma->dma_clk);
1410 	}
1411 
1412 	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
1413 	if (IS_ERR(tdma->rst)) {
1414 		dev_err(&pdev->dev, "Error: Missing reset\n");
1415 		return PTR_ERR(tdma->rst);
1416 	}
1417 
1418 	spin_lock_init(&tdma->global_lock);
1419 
1420 	pm_runtime_enable(&pdev->dev);
1421 	if (!pm_runtime_enabled(&pdev->dev))
1422 		ret = tegra_dma_runtime_resume(&pdev->dev);
1423 	else
1424 		ret = pm_runtime_get_sync(&pdev->dev);
1425 
1426 	if (ret < 0) {
1427 		pm_runtime_disable(&pdev->dev);
1428 		return ret;
1429 	}
1430 
1431 	/* Reset DMA controller */
1432 	reset_control_assert(tdma->rst);
1433 	udelay(2);
1434 	reset_control_deassert(tdma->rst);
1435 
1436 	/* Enable global DMA registers */
1437 	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
1438 	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1439 	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
1440 
1441 	pm_runtime_put(&pdev->dev);
1442 
1443 	INIT_LIST_HEAD(&tdma->dma_dev.channels);
1444 	for (i = 0; i < cdata->nr_channels; i++) {
1445 		struct tegra_dma_channel *tdc = &tdma->channels[i];
1446 
1447 		tdc->chan_addr = tdma->base_addr +
1448 				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
1449 				 (i * cdata->channel_reg_size);
1450 
1451 		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1452 		if (!res) {
1453 			ret = -EINVAL;
1454 			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
1455 			goto err_irq;
1456 		}
1457 		tdc->irq = res->start;
1458 		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
1459 		ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
1460 		if (ret) {
1461 			dev_err(&pdev->dev,
1462 				"request_irq failed with err %d channel %d\n",
1463 				ret, i);
1464 			goto err_irq;
1465 		}
1466 
1467 		tdc->dma_chan.device = &tdma->dma_dev;
1468 		dma_cookie_init(&tdc->dma_chan);
1469 		list_add_tail(&tdc->dma_chan.device_node,
1470 				&tdma->dma_dev.channels);
1471 		tdc->tdma = tdma;
1472 		tdc->id = i;
1473 		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
1474 
1475 		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
1476 				(unsigned long)tdc);
1477 		spin_lock_init(&tdc->lock);
1478 
1479 		INIT_LIST_HEAD(&tdc->pending_sg_req);
1480 		INIT_LIST_HEAD(&tdc->free_sg_req);
1481 		INIT_LIST_HEAD(&tdc->free_dma_desc);
1482 		INIT_LIST_HEAD(&tdc->cb_desc);
1483 	}
1484 
1485 	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
1486 	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
1487 	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
1488 
1489 	tdma->global_pause_count = 0;
1490 	tdma->dma_dev.dev = &pdev->dev;
1491 	tdma->dma_dev.device_alloc_chan_resources =
1492 					tegra_dma_alloc_chan_resources;
1493 	tdma->dma_dev.device_free_chan_resources =
1494 					tegra_dma_free_chan_resources;
1495 	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
1496 	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
1497 	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1498 		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1499 		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1500 		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1501 	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1502 		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1503 		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1504 		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1505 	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1506 	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1507 	tdma->dma_dev.device_config = tegra_dma_slave_config;
1508 	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
1509 	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
1510 	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
1511 
1512 	ret = dma_async_device_register(&tdma->dma_dev);
1513 	if (ret < 0) {
1514 		dev_err(&pdev->dev,
1515 			"Tegra20 APB DMA driver registration failed %d\n", ret);
1516 		goto err_irq;
1517 	}
1518 
1519 	ret = of_dma_controller_register(pdev->dev.of_node,
1520 					 tegra_dma_of_xlate, tdma);
1521 	if (ret < 0) {
1522 		dev_err(&pdev->dev,
1523 			"Tegra20 APB DMA OF registration failed %d\n", ret);
1524 		goto err_unregister_dma_dev;
1525 	}
1526 
1527 	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
1528 			cdata->nr_channels);
1529 	return 0;
1530 
1531 err_unregister_dma_dev:
1532 	dma_async_device_unregister(&tdma->dma_dev);
1533 err_irq:
1534 	while (--i >= 0) {
1535 		struct tegra_dma_channel *tdc = &tdma->channels[i];
1536 
1537 		free_irq(tdc->irq, tdc);
1538 		tasklet_kill(&tdc->tasklet);
1539 	}
1540 
1541 	pm_runtime_disable(&pdev->dev);
1542 	if (!pm_runtime_status_suspended(&pdev->dev))
1543 		tegra_dma_runtime_suspend(&pdev->dev);
1544 	return ret;
1545 }
1546 
1547 static int tegra_dma_remove(struct platform_device *pdev)
1548 {
1549 	struct tegra_dma *tdma = platform_get_drvdata(pdev);
1550 	int i;
1551 	struct tegra_dma_channel *tdc;
1552 
1553 	dma_async_device_unregister(&tdma->dma_dev);
1554 
1555 	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
1556 		tdc = &tdma->channels[i];
1557 		free_irq(tdc->irq, tdc);
1558 		tasklet_kill(&tdc->tasklet);
1559 	}
1560 
1561 	pm_runtime_disable(&pdev->dev);
1562 	if (!pm_runtime_status_suspended(&pdev->dev))
1563 		tegra_dma_runtime_suspend(&pdev->dev);
1564 
1565 	return 0;
1566 }
1567 
1568 static int tegra_dma_runtime_suspend(struct device *dev)
1569 {
1570 	struct tegra_dma *tdma = dev_get_drvdata(dev);
1571 	int i;
1572 
1573 	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
1574 	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
1575 		struct tegra_dma_channel *tdc = &tdma->channels[i];
1576 		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
1577 
1578 		/* Only save the state of DMA channels that are in use */
1579 		if (!tdc->config_init)
1580 			continue;
1581 
1582 		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
1583 		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
1584 		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
1585 		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
1586 		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
1587 		if (tdma->chip_data->support_separate_wcount_reg)
1588 			ch_reg->wcount = tdc_read(tdc,
1589 						  TEGRA_APBDMA_CHAN_WCOUNT);
1590 	}
1591 
1592 	clk_disable_unprepare(tdma->dma_clk);
1593 
1594 	return 0;
1595 }
1596 
1597 static int tegra_dma_runtime_resume(struct device *dev)
1598 {
1599 	struct tegra_dma *tdma = dev_get_drvdata(dev);
1600 	int i, ret;
1601 
1602 	ret = clk_prepare_enable(tdma->dma_clk);
1603 	if (ret < 0) {
1604 		dev_err(dev, "clk_enable failed: %d\n", ret);
1605 		return ret;
1606 	}
1607 
1608 	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
1609 	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1610 	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
1611 
1612 	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
1613 		struct tegra_dma_channel *tdc = &tdma->channels[i];
1614 		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
1615 
1616 		/* Only restore the state of DMA channels that are in use */
1617 		if (!tdc->config_init)
1618 			continue;
1619 
1620 		if (tdma->chip_data->support_separate_wcount_reg)
1621 			tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
1622 				  ch_reg->wcount);
1623 		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
1624 		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
1625 		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
1626 		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
1627 		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
1628 			(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
1629 	}
1630 
1631 	return 0;
1632 }
1633 
1634 static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
1635 	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
1636 			   NULL)
1637 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1638 				pm_runtime_force_resume)
1639 };
1640 
1641 static const struct of_device_id tegra_dma_of_match[] = {
1642 	{
1643 		.compatible = "nvidia,tegra148-apbdma",
1644 		.data = &tegra148_dma_chip_data,
1645 	}, {
1646 		.compatible = "nvidia,tegra114-apbdma",
1647 		.data = &tegra114_dma_chip_data,
1648 	}, {
1649 		.compatible = "nvidia,tegra30-apbdma",
1650 		.data = &tegra30_dma_chip_data,
1651 	}, {
1652 		.compatible = "nvidia,tegra20-apbdma",
1653 		.data = &tegra20_dma_chip_data,
1654 	}, {
1655 	},
1656 };
1657 MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
1658 
1659 static struct platform_driver tegra_dmac_driver = {
1660 	.driver = {
1661 		.name	= "tegra-apbdma",
1662 		.pm	= &tegra_dma_dev_pm_ops,
1663 		.of_match_table = tegra_dma_of_match,
1664 	},
1665 	.probe		= tegra_dma_probe,
1666 	.remove		= tegra_dma_remove,
1667 };
1668 
1669 module_platform_driver(tegra_dmac_driver);
1670 
1671 MODULE_ALIAS("platform:tegra20-apbdma");
1672 MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
1673 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1674 MODULE_LICENSE("GPL v2");
1675