xref: /openbmc/linux/drivers/dma/xilinx/xilinx_dma.c (revision 80483c3a)
1 /*
2  * DMA driver for Xilinx Video DMA Engine
3  *
4  * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
5  *
6  * Based on the Freescale DMA driver.
7  *
8  * Description:
9  * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
10  * core that provides high-bandwidth direct memory access between memory
11  * and AXI4-Stream type video target peripherals. The core provides efficient
12  * two dimensional DMA operations with independent asynchronous read (S2MM)
13  * and write (MM2S) channel operation. It can be configured to have either
14  * one channel or two channels. If configured as two channels, one is to
15  * transmit to the video device (MM2S) and another is to receive from the
16  * video device (S2MM). Initialization, status, interrupt and management
17  * registers are accessed through an AXI4-Lite slave interface.
18  *
19  * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
20  * provides high-bandwidth one dimensional direct memory access between memory
21  * and AXI4-Stream target peripherals. It supports one receive and one
22  * transmit channel, both of them optional at synthesis time.
23  *
24  * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
25  * Access (DMA) between a memory-mapped source address and a memory-mapped
26  * destination address.
27  *
28  * This program is free software: you can redistribute it and/or modify
29  * it under the terms of the GNU General Public License as published by
30  * the Free Software Foundation, either version 2 of the License, or
31  * (at your option) any later version.
32  */
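/*
 * Example client usage (a minimal sketch; error handling is omitted and the
 * channel name "vdma0" and configuration values are illustrative only). The
 * interleaved template xt is assumed to be filled in with numf,
 * frame_size = 1, sgl[0].size/icg and the src/dst start address:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "vdma0");
 *	struct xilinx_vdma_config cfg = { .frm_cnt_en = 1 };
 *	struct dma_async_tx_descriptor *txd;
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 *	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */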
33 
34 #include <linux/bitops.h>
35 #include <linux/dmapool.h>
36 #include <linux/dma/xilinx_dma.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/iopoll.h>
41 #include <linux/module.h>
42 #include <linux/of_address.h>
43 #include <linux/of_dma.h>
44 #include <linux/of_platform.h>
45 #include <linux/of_irq.h>
46 #include <linux/slab.h>
47 #include <linux/clk.h>
48 #include <linux/io-64-nonatomic-lo-hi.h>
49 
50 #include "../dmaengine.h"
51 
52 /* Register/Descriptor Offsets */
53 #define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
54 #define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
55 #define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
56 #define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0
57 
58 /* Control Registers */
59 #define XILINX_DMA_REG_DMACR			0x0000
60 #define XILINX_DMA_DMACR_DELAY_MAX		0xff
61 #define XILINX_DMA_DMACR_DELAY_SHIFT		24
62 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
63 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
64 #define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
65 #define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
66 #define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
67 #define XILINX_DMA_DMACR_MASTER_SHIFT		8
68 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT	5
69 #define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
70 #define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
71 #define XILINX_DMA_DMACR_RESET			BIT(2)
72 #define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
73 #define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
74 #define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
75 
76 #define XILINX_DMA_REG_DMASR			0x0004
77 #define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
78 #define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
79 #define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
80 #define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
81 #define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
82 #define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
83 #define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
84 #define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
85 #define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
86 #define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
87 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
88 #define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
89 #define XILINX_DMA_DMASR_IDLE			BIT(1)
90 #define XILINX_DMA_DMASR_HALTED		BIT(0)
91 #define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
92 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)
93 
94 #define XILINX_DMA_REG_CURDESC			0x0008
95 #define XILINX_DMA_REG_TAILDESC		0x0010
96 #define XILINX_DMA_REG_REG_INDEX		0x0014
97 #define XILINX_DMA_REG_FRMSTORE		0x0018
98 #define XILINX_DMA_REG_THRESHOLD		0x001c
99 #define XILINX_DMA_REG_FRMPTR_STS		0x0024
100 #define XILINX_DMA_REG_PARK_PTR		0x0028
101 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
102 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
103 #define XILINX_DMA_REG_VDMA_VERSION		0x002c
104 
105 /* Register Direct Mode Registers */
106 #define XILINX_DMA_REG_VSIZE			0x0000
107 #define XILINX_DMA_REG_HSIZE			0x0004
108 
109 #define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
110 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
111 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0
112 
113 #define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
114 #define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))
115 
116 /* HW specific definitions */
117 #define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20
118 
119 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
120 		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
121 		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
122 		 XILINX_DMA_DMASR_ERR_IRQ)
123 
124 #define XILINX_DMA_DMASR_ALL_ERR_MASK	\
125 		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
126 		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
127 		 XILINX_DMA_DMASR_SG_DEC_ERR | \
128 		 XILINX_DMA_DMASR_SG_SLV_ERR | \
129 		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
130 		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
131 		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
132 		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
133 		 XILINX_DMA_DMASR_DMA_INT_ERR)
134 
135 /*
136  * Recoverable errors are DMA Internal error, SOF Early, EOF Early
137  * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
138  * is enabled in the h/w system.
139  */
140 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
141 		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
142 		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
143 		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
144 		 XILINX_DMA_DMASR_DMA_INT_ERR)
145 
146 /* Axi VDMA Flush on Fsync bits */
147 #define XILINX_DMA_FLUSH_S2MM		3
148 #define XILINX_DMA_FLUSH_MM2S		2
149 #define XILINX_DMA_FLUSH_BOTH		1
150 
151 /* Delay loop counter to prevent hardware failure */
152 #define XILINX_DMA_LOOP_COUNT		1000000
153 
154 /* AXI DMA Specific Registers/Offsets */
155 #define XILINX_DMA_REG_SRCDSTADDR	0x18
156 #define XILINX_DMA_REG_BTT		0x28
157 
158 /* AXI DMA Specific Masks/Bit fields */
159 #define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
160 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
161 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
162 #define XILINX_DMA_CR_COALESCE_SHIFT	16
163 #define XILINX_DMA_BD_SOP		BIT(27)
164 #define XILINX_DMA_BD_EOP		BIT(26)
165 #define XILINX_DMA_COALESCE_MAX		255
166 #define XILINX_DMA_NUM_APP_WORDS	5
167 
168 /* Multi-Channel DMA Descriptor offsets */
169 #define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
170 #define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)
171 
172 /* Multi-Channel DMA Masks/Shifts */
173 #define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
174 #define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
175 #define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
176 #define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
177 #define XILINX_DMA_BD_STRIDE_SHIFT	0
178 #define XILINX_DMA_BD_VSIZE_SHIFT	19
179 
180 /* AXI CDMA Specific Registers/Offsets */
181 #define XILINX_CDMA_REG_SRCADDR		0x18
182 #define XILINX_CDMA_REG_DSTADDR		0x20
183 
184 /* AXI CDMA Specific Masks */
185 #define XILINX_CDMA_CR_SGMODE          BIT(3)
186 
187 /**
188  * struct xilinx_vdma_desc_hw - Hardware Descriptor
189  * @next_desc: Next Descriptor Pointer @0x00
190  * @pad1: Reserved @0x04
191  * @buf_addr: Buffer address @0x08
192  * @buf_addr_msb: MSB of Buffer address @0x0C
193  * @vsize: Vertical Size @0x10
194  * @hsize: Horizontal Size @0x14
195  * @stride: Number of bytes between the first
196  *	    pixels of each horizontal line @0x18
197  */
198 struct xilinx_vdma_desc_hw {
199 	u32 next_desc;
200 	u32 pad1;
201 	u32 buf_addr;
202 	u32 buf_addr_msb;
203 	u32 vsize;
204 	u32 hsize;
205 	u32 stride;
206 } __aligned(64);
207 
208 /**
209  * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
210  * @next_desc: Next Descriptor Pointer @0x00
211  * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
212  * @buf_addr: Buffer address @0x08
213  * @buf_addr_msb: MSB of Buffer address @0x0C
214  * @mcdma_control: Control field for mcdma @0x10
215  * @vsize_stride: Vsize and Stride field for mcdma @0x14
216  * @control: Control field @0x18
217  * @status: Status field @0x1C
218  * @app: APP Fields @0x20 - 0x30
219  */
220 struct xilinx_axidma_desc_hw {
221 	u32 next_desc;
222 	u32 next_desc_msb;
223 	u32 buf_addr;
224 	u32 buf_addr_msb;
225 	u32 mcdma_control;
226 	u32 vsize_stride;
227 	u32 control;
228 	u32 status;
229 	u32 app[XILINX_DMA_NUM_APP_WORDS];
230 } __aligned(64);
231 
232 /**
233  * struct xilinx_cdma_desc_hw - Hardware Descriptor
234  * @next_desc: Next Descriptor Pointer @0x00
235  * @next_desc_msb: Next Descriptor Pointer MSB @0x04
236  * @src_addr: Source address @0x08
237  * @src_addr_msb: Source address MSB @0x0C
238  * @dest_addr: Destination address @0x10
239  * @dest_addr_msb: Destination address MSB @0x14
240  * @control: Control field @0x18
241  * @status: Status field @0x1C
242  */
243 struct xilinx_cdma_desc_hw {
244 	u32 next_desc;
245 	u32 next_desc_msb;
246 	u32 src_addr;
247 	u32 src_addr_msb;
248 	u32 dest_addr;
249 	u32 dest_addr_msb;
250 	u32 control;
251 	u32 status;
252 } __aligned(64);
253 
254 /**
255  * struct xilinx_vdma_tx_segment - Descriptor segment
256  * @hw: Hardware descriptor
257  * @node: Node in the descriptor segments list
258  * @phys: Physical address of segment
259  */
260 struct xilinx_vdma_tx_segment {
261 	struct xilinx_vdma_desc_hw hw;
262 	struct list_head node;
263 	dma_addr_t phys;
264 } __aligned(64);
265 
266 /**
267  * struct xilinx_axidma_tx_segment - Descriptor segment
268  * @hw: Hardware descriptor
269  * @node: Node in the descriptor segments list
270  * @phys: Physical address of segment
271  */
272 struct xilinx_axidma_tx_segment {
273 	struct xilinx_axidma_desc_hw hw;
274 	struct list_head node;
275 	dma_addr_t phys;
276 } __aligned(64);
277 
278 /**
279  * struct xilinx_cdma_tx_segment - Descriptor segment
280  * @hw: Hardware descriptor
281  * @node: Node in the descriptor segments list
282  * @phys: Physical address of segment
283  */
284 struct xilinx_cdma_tx_segment {
285 	struct xilinx_cdma_desc_hw hw;
286 	struct list_head node;
287 	dma_addr_t phys;
288 } __aligned(64);
289 
290 /**
291  * struct xilinx_dma_tx_descriptor - Per Transaction structure
292  * @async_tx: Async transaction descriptor
293  * @segments: TX segments list
294  * @node: Node in the channel descriptors list
295  * @cyclic: True if the transfer is cyclic
296  */
297 struct xilinx_dma_tx_descriptor {
298 	struct dma_async_tx_descriptor async_tx;
299 	struct list_head segments;
300 	struct list_head node;
301 	bool cyclic;
302 };
303 
304 /**
305  * struct xilinx_dma_chan - Driver specific DMA channel structure
306  * @xdev: Driver specific device structure
307  * @ctrl_offset: Control registers offset
308  * @desc_offset: TX descriptor registers offset
309  * @lock: Descriptor operation lock
310  * @pending_list: Descriptors waiting to be submitted to hardware
311  * @active_list: Descriptors submitted to hardware and in progress
312  * @done_list: Complete descriptors
313  * @common: DMA common channel
314  * @desc_pool: Descriptors pool
315  * @dev: The dma device
316  * @irq: Channel IRQ
317  * @id: Channel ID
318  * @direction: Transfer direction
319  * @num_frms: Number of frames
320  * @has_sg: Support scatter transfers
321  * @cyclic: True if the channel is running a cyclic transfer
322  * @genlock: Support genlock mode
323  * @err: Channel has errors
324  * @tasklet: Cleanup work after irq
325  * @config: Device configuration info
326  * @flush_on_fsync: Flush on Frame sync
327  * @desc_pendingcount: Descriptor pending count
328  * @ext_addr: Indicates 64 bit addressing is supported by dma channel
329  * @desc_submitcount: Descriptor h/w submitted count
330  * @residue: Residue for AXI DMA
331  * @seg_v: Statically allocated segments base
332  * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
333  * @start_transfer: Start-transfer callback for the specific DMA IP type
 * @tdest: TDEST value used to select the channel in multi-channel mode
334  */
335 struct xilinx_dma_chan {
336 	struct xilinx_dma_device *xdev;
337 	u32 ctrl_offset;
338 	u32 desc_offset;
339 	spinlock_t lock;
340 	struct list_head pending_list;
341 	struct list_head active_list;
342 	struct list_head done_list;
343 	struct dma_chan common;
344 	struct dma_pool *desc_pool;
345 	struct device *dev;
346 	int irq;
347 	int id;
348 	enum dma_transfer_direction direction;
349 	int num_frms;
350 	bool has_sg;
351 	bool cyclic;
352 	bool genlock;
353 	bool err;
354 	struct tasklet_struct tasklet;
355 	struct xilinx_vdma_config config;
356 	bool flush_on_fsync;
357 	u32 desc_pendingcount;
358 	bool ext_addr;
359 	u32 desc_submitcount;
360 	u32 residue;
361 	struct xilinx_axidma_tx_segment *seg_v;
362 	struct xilinx_axidma_tx_segment *cyclic_seg_v;
363 	void (*start_transfer)(struct xilinx_dma_chan *chan);
364 	u16 tdest;
365 };
366 
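/**
 * struct xilinx_dma_config - DMA IP configuration
 * @dmatype: DMA IP type (AXI DMA, AXI CDMA or AXI VDMA)
 * @clk_init: DMA IP specific clock initialization routine
 */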
367 struct xilinx_dma_config {
368 	enum xdma_ip_type dmatype;
369 	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
370 			struct clk **tx_clk, struct clk **txs_clk,
371 			struct clk **rx_clk, struct clk **rxs_clk);
372 };
373 
374 /**
375  * struct xilinx_dma_device - DMA device structure
376  * @regs: I/O mapped base address
377  * @dev: Device Structure
378  * @common: DMA device structure
379  * @chan: Driver specific DMA channel
380  * @has_sg: Specifies whether Scatter-Gather is present or not
381  * @mcdma: Specifies whether Multi-Channel is present or not
382  * @flush_on_fsync: Flush on frame sync
383  * @ext_addr: Indicates 64 bit addressing is supported by dma device
384  * @pdev: Platform device structure pointer
385  * @dma_config: DMA config structure
386  * @axi_clk: DMA Axi4-lite interface clock
387  * @tx_clk: DMA mm2s clock
388  * @txs_clk: DMA mm2s stream clock
389  * @rx_clk: DMA s2mm clock
390  * @rxs_clk: DMA s2mm stream clock
391  * @nr_channels: Number of channels DMA device supports
392  * @chan_id: DMA channel identifier
393  */
394 struct xilinx_dma_device {
395 	void __iomem *regs;
396 	struct device *dev;
397 	struct dma_device common;
398 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
399 	bool has_sg;
400 	bool mcdma;
401 	u32 flush_on_fsync;
402 	bool ext_addr;
403 	struct platform_device  *pdev;
404 	const struct xilinx_dma_config *dma_config;
405 	struct clk *axi_clk;
406 	struct clk *tx_clk;
407 	struct clk *txs_clk;
408 	struct clk *rx_clk;
409 	struct clk *rxs_clk;
410 	u32 nr_channels;
411 	u32 chan_id;
412 };
413 
414 /* Macros */
415 #define to_xilinx_chan(chan) \
416 	container_of(chan, struct xilinx_dma_chan, common)
417 #define to_dma_tx_descriptor(tx) \
418 	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
419 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
420 	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
421 			   cond, delay_us, timeout_us)
422 
423 /* IO accessors */
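/*
 * Register access is layered: dma_read()/dma_write() take an offset from the
 * device base, dma_ctrl_read()/dma_ctrl_write() add the per-channel control
 * register offset, and vdma_desc_write() adds the per-channel descriptor
 * register offset.
 */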
424 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
425 {
426 	return ioread32(chan->xdev->regs + reg);
427 }
428 
429 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
430 {
431 	iowrite32(value, chan->xdev->regs + reg);
432 }
433 
434 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
435 				   u32 value)
436 {
437 	dma_write(chan, chan->desc_offset + reg, value);
438 }
439 
440 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
441 {
442 	return dma_read(chan, chan->ctrl_offset + reg);
443 }
444 
445 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
446 				   u32 value)
447 {
448 	dma_write(chan, chan->ctrl_offset + reg, value);
449 }
450 
451 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
452 				 u32 clr)
453 {
454 	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
455 }
456 
457 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
458 				 u32 set)
459 {
460 	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
461 }
462 
463 /**
464  * vdma_desc_write_64 - 64-bit descriptor write
465  * @chan: Driver specific VDMA channel
466  * @reg: Register to write
467  * @value_lsb: lower address of the descriptor.
468  * @value_msb: upper address of the descriptor.
469  *
470  * Since the VDMA driver writes to register offsets that are not 64-bit
471  * aligned (e.g. 0x5c), the address is written as two separate 32-bit
472  * writes instead of a single 64-bit register write.
473  */
474 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
475 				      u32 value_lsb, u32 value_msb)
476 {
477 	/* Write the lsb 32 bits */
478 	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
479 
480 	/* Write the msb 32 bits */
481 	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
482 }
483 
484 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
485 {
486 	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
487 }
488 
489 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
490 				dma_addr_t addr)
491 {
492 	if (chan->ext_addr)
493 		dma_writeq(chan, reg, addr);
494 	else
495 		dma_ctrl_write(chan, reg, addr);
496 }
497 
498 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
499 				     struct xilinx_axidma_desc_hw *hw,
500 				     dma_addr_t buf_addr, size_t sg_used,
501 				     size_t period_len)
502 {
503 	if (chan->ext_addr) {
504 		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
505 		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
506 						 period_len);
507 	} else {
508 		hw->buf_addr = buf_addr + sg_used + period_len;
509 	}
510 }
511 
512 /* -----------------------------------------------------------------------------
513  * Descriptors and segments alloc and free
514  */
515 
516 /**
517  * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
518  * @chan: Driver specific DMA channel
519  *
520  * Return: The allocated segment on success and NULL on failure.
521  */
522 static struct xilinx_vdma_tx_segment *
523 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
524 {
525 	struct xilinx_vdma_tx_segment *segment;
526 	dma_addr_t phys;
527 
528 	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
529 	if (!segment)
530 		return NULL;
531 
532 	segment->phys = phys;
533 
534 	return segment;
535 }
536 
537 /**
538  * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
539  * @chan: Driver specific DMA channel
540  *
541  * Return: The allocated segment on success and NULL on failure.
542  */
543 static struct xilinx_cdma_tx_segment *
544 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
545 {
546 	struct xilinx_cdma_tx_segment *segment;
547 	dma_addr_t phys;
548 
549 	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
550 	if (!segment)
551 		return NULL;
552 
553 	segment->phys = phys;
554 
555 	return segment;
556 }
557 
558 /**
559  * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
560  * @chan: Driver specific DMA channel
561  *
562  * Return: The allocated segment on success and NULL on failure.
563  */
564 static struct xilinx_axidma_tx_segment *
565 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
566 {
567 	struct xilinx_axidma_tx_segment *segment;
568 	dma_addr_t phys;
569 
570 	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
571 	if (!segment)
572 		return NULL;
573 
574 	segment->phys = phys;
575 
576 	return segment;
577 }
578 
579 /**
580  * xilinx_dma_free_tx_segment - Free transaction segment
581  * @chan: Driver specific DMA channel
582  * @segment: DMA transaction segment
583  */
584 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
585 				struct xilinx_axidma_tx_segment *segment)
586 {
587 	dma_pool_free(chan->desc_pool, segment, segment->phys);
588 }
589 
590 /**
591  * xilinx_cdma_free_tx_segment - Free transaction segment
592  * @chan: Driver specific DMA channel
593  * @segment: DMA transaction segment
594  */
595 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
596 				struct xilinx_cdma_tx_segment *segment)
597 {
598 	dma_pool_free(chan->desc_pool, segment, segment->phys);
599 }
600 
601 /**
602  * xilinx_vdma_free_tx_segment - Free transaction segment
603  * @chan: Driver specific DMA channel
604  * @segment: DMA transaction segment
605  */
606 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
607 					struct xilinx_vdma_tx_segment *segment)
608 {
609 	dma_pool_free(chan->desc_pool, segment, segment->phys);
610 }
611 
612 /**
613  * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
614  * @chan: Driver specific DMA channel
615  *
616  * Return: The allocated descriptor on success and NULL on failure.
617  */
618 static struct xilinx_dma_tx_descriptor *
619 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
620 {
621 	struct xilinx_dma_tx_descriptor *desc;
622 
623 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
624 	if (!desc)
625 		return NULL;
626 
627 	INIT_LIST_HEAD(&desc->segments);
628 
629 	return desc;
630 }
631 
632 /**
633  * xilinx_dma_free_tx_descriptor - Free transaction descriptor
634  * @chan: Driver specific DMA channel
635  * @desc: DMA transaction descriptor
636  */
637 static void
638 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
639 			       struct xilinx_dma_tx_descriptor *desc)
640 {
641 	struct xilinx_vdma_tx_segment *segment, *next;
642 	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
643 	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
644 
645 	if (!desc)
646 		return;
647 
648 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
649 		list_for_each_entry_safe(segment, next, &desc->segments, node) {
650 			list_del(&segment->node);
651 			xilinx_vdma_free_tx_segment(chan, segment);
652 		}
653 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
654 		list_for_each_entry_safe(cdma_segment, cdma_next,
655 					 &desc->segments, node) {
656 			list_del(&cdma_segment->node);
657 			xilinx_cdma_free_tx_segment(chan, cdma_segment);
658 		}
659 	} else {
660 		list_for_each_entry_safe(axidma_segment, axidma_next,
661 					 &desc->segments, node) {
662 			list_del(&axidma_segment->node);
663 			xilinx_dma_free_tx_segment(chan, axidma_segment);
664 		}
665 	}
666 
667 	kfree(desc);
668 }
669 
670 /* Required functions */
671 
672 /**
673  * xilinx_dma_free_desc_list - Free descriptors list
674  * @chan: Driver specific DMA channel
675  * @list: List to parse and delete the descriptor
676  */
677 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
678 					struct list_head *list)
679 {
680 	struct xilinx_dma_tx_descriptor *desc, *next;
681 
682 	list_for_each_entry_safe(desc, next, list, node) {
683 		list_del(&desc->node);
684 		xilinx_dma_free_tx_descriptor(chan, desc);
685 	}
686 }
687 
688 /**
689  * xilinx_dma_free_descriptors - Free channel descriptors
690  * @chan: Driver specific DMA channel
691  */
692 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
693 {
694 	unsigned long flags;
695 
696 	spin_lock_irqsave(&chan->lock, flags);
697 
698 	xilinx_dma_free_desc_list(chan, &chan->pending_list);
699 	xilinx_dma_free_desc_list(chan, &chan->done_list);
700 	xilinx_dma_free_desc_list(chan, &chan->active_list);
701 
702 	spin_unlock_irqrestore(&chan->lock, flags);
703 }
704 
705 /**
706  * xilinx_dma_free_chan_resources - Free channel resources
707  * @dchan: DMA channel
708  */
709 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
710 {
711 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
712 
713 	dev_dbg(chan->dev, "Free all channel resources.\n");
714 
715 	xilinx_dma_free_descriptors(chan);
716 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
717 		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
718 		xilinx_dma_free_tx_segment(chan, chan->seg_v);
719 	}
720 	dma_pool_destroy(chan->desc_pool);
721 	chan->desc_pool = NULL;
722 }
723 
724 /**
725  * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
726  * @chan: Driver specific dma channel
727  * @desc: dma transaction descriptor
728  * @flags: flags for spin lock
729  */
730 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
731 					  struct xilinx_dma_tx_descriptor *desc,
732 					  unsigned long *flags)
733 {
734 	dma_async_tx_callback callback;
735 	void *callback_param;
736 
737 	callback = desc->async_tx.callback;
738 	callback_param = desc->async_tx.callback_param;
739 	if (callback) {
740 		spin_unlock_irqrestore(&chan->lock, *flags);
741 		callback(callback_param);
742 		spin_lock_irqsave(&chan->lock, *flags);
743 	}
744 }
745 
746 /**
747  * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
748  * @chan: Driver specific DMA channel
749  */
750 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
751 {
752 	struct xilinx_dma_tx_descriptor *desc, *next;
753 	unsigned long flags;
754 
755 	spin_lock_irqsave(&chan->lock, flags);
756 
757 	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
758 		dma_async_tx_callback callback;
759 		void *callback_param;
760 
761 		if (desc->cyclic) {
762 			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
763 			break;
764 		}
765 
766 		/* Remove from the list of running transactions */
767 		list_del(&desc->node);
768 
769 		/* Run the link descriptor callback function */
770 		callback = desc->async_tx.callback;
771 		callback_param = desc->async_tx.callback_param;
772 		if (callback) {
773 			spin_unlock_irqrestore(&chan->lock, flags);
774 			callback(callback_param);
775 			spin_lock_irqsave(&chan->lock, flags);
776 		}
777 
778 		/* Run any dependencies, then free the descriptor */
779 		dma_run_dependencies(&desc->async_tx);
780 		xilinx_dma_free_tx_descriptor(chan, desc);
781 	}
782 
783 	spin_unlock_irqrestore(&chan->lock, flags);
784 }
785 
786 /**
787  * xilinx_dma_do_tasklet - Schedule completion tasklet
788  * @data: Pointer to the Xilinx DMA channel structure
789  */
790 static void xilinx_dma_do_tasklet(unsigned long data)
791 {
792 	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
793 
794 	xilinx_dma_chan_desc_cleanup(chan);
795 }
796 
797 /**
798  * xilinx_dma_alloc_chan_resources - Allocate channel resources
799  * @dchan: DMA channel
800  *
801  * Return: '0' on success and failure value on error
802  */
803 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
804 {
805 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
806 
807 	/* Has this channel already been allocated? */
808 	if (chan->desc_pool)
809 		return 0;
810 
811 	/*
812 	 * The descriptors must be 64-byte aligned to meet the
813 	 * Xilinx VDMA specification requirement.
814 	 */
815 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
816 		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
817 				   chan->dev,
818 				   sizeof(struct xilinx_axidma_tx_segment),
819 				   __alignof__(struct xilinx_axidma_tx_segment),
820 				   0);
821 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
822 		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
823 				   chan->dev,
824 				   sizeof(struct xilinx_cdma_tx_segment),
825 				   __alignof__(struct xilinx_cdma_tx_segment),
826 				   0);
827 	} else {
828 		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
829 				     chan->dev,
830 				     sizeof(struct xilinx_vdma_tx_segment),
831 				     __alignof__(struct xilinx_vdma_tx_segment),
832 				     0);
833 	}
834 
835 	if (!chan->desc_pool) {
836 		dev_err(chan->dev,
837 			"unable to allocate channel %d descriptor pool\n",
838 			chan->id);
839 		return -ENOMEM;
840 	}
841 
842 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
843 		/*
844 		 * For AXI DMA case after submitting a pending_list, keep
845 		 * an extra segment allocated so that the "next descriptor"
846 		 * pointer on the tail descriptor always points to a
847 		 * valid descriptor, even when paused after reaching taildesc.
848 		 * This way, it is possible to issue additional
849 		 * transfers without halting and restarting the channel.
850 		 */
851 		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
852 
853 		/*
854 		 * For cyclic DMA mode we need to program the tail descriptor
855 		 * register with a value which is not a part of the BD chain,
856 		 * so allocate a descriptor segment during channel allocation
857 		 * for programming the tail descriptor register.
858 		 */
859 		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
860 	}
861 
862 	dma_cookie_init(dchan);
863 
864 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
865 		/* For AXI DMA, resetting one channel also resets the
866 		 * other channel, so enable the interrupts here.
867 		 */
868 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
869 			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
870 	}
871 
872 	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
873 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
874 			     XILINX_CDMA_CR_SGMODE);
875 
876 	return 0;
877 }
878 
879 /**
880  * xilinx_dma_tx_status - Get DMA transaction status
881  * @dchan: DMA channel
882  * @cookie: Transaction identifier
883  * @txstate: Transaction state
884  *
885  * Return: DMA transaction status
886  */
887 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
888 					dma_cookie_t cookie,
889 					struct dma_tx_state *txstate)
890 {
891 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
892 	struct xilinx_dma_tx_descriptor *desc;
893 	struct xilinx_axidma_tx_segment *segment;
894 	struct xilinx_axidma_desc_hw *hw;
895 	enum dma_status ret;
896 	unsigned long flags;
897 	u32 residue = 0;
898 
899 	ret = dma_cookie_status(dchan, cookie, txstate);
900 	if (ret == DMA_COMPLETE || !txstate)
901 		return ret;
902 
903 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
904 		spin_lock_irqsave(&chan->lock, flags);
905 
906 		desc = list_last_entry(&chan->active_list,
907 				       struct xilinx_dma_tx_descriptor, node);
908 		if (chan->has_sg) {
909 			list_for_each_entry(segment, &desc->segments, node) {
910 				hw = &segment->hw;
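				/*
				 * For each BD, hw->control holds the length
				 * programmed and hw->status the bytes the
				 * hardware has completed, so their difference
				 * is the remaining (residue) byte count.
				 */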
911 				residue += (hw->control - hw->status) &
912 					   XILINX_DMA_MAX_TRANS_LEN;
913 			}
914 		}
915 		spin_unlock_irqrestore(&chan->lock, flags);
916 
917 		chan->residue = residue;
918 		dma_set_residue(txstate, chan->residue);
919 	}
920 
921 	return ret;
922 }
923 
924 /**
925  * xilinx_dma_is_running - Check if DMA channel is running
926  * @chan: Driver specific DMA channel
927  *
928  * Return: 'true' if running, 'false' if not.
929  */
930 static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
931 {
932 	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
933 		 XILINX_DMA_DMASR_HALTED) &&
934 		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
935 		 XILINX_DMA_DMACR_RUNSTOP);
936 }
937 
938 /**
939  * xilinx_dma_is_idle - Check if DMA channel is idle
940  * @chan: Driver specific DMA channel
941  *
942  * Return: 'true' if idle, 'false' if not.
943  */
944 static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
945 {
946 	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
947 		XILINX_DMA_DMASR_IDLE;
948 }
949 
950 /**
951  * xilinx_dma_halt - Halt DMA channel
952  * @chan: Driver specific DMA channel
953  */
954 static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
955 {
956 	int err;
957 	u32 val;
958 
959 	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
960 
961 	/* Wait for the hardware to halt */
962 	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
963 				      (val & XILINX_DMA_DMASR_HALTED), 0,
964 				      XILINX_DMA_LOOP_COUNT);
965 
966 	if (err) {
967 		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
968 			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
969 		chan->err = true;
970 	}
971 }
972 
973 /**
974  * xilinx_dma_start - Start DMA channel
975  * @chan: Driver specific DMA channel
976  */
977 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
978 {
979 	int err;
980 	u32 val;
981 
982 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
983 
984 	/* Wait for the hardware to start */
985 	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
986 				      !(val & XILINX_DMA_DMASR_HALTED), 0,
987 				      XILINX_DMA_LOOP_COUNT);
988 
989 	if (err) {
990 		dev_err(chan->dev, "Cannot start channel %p: %x\n",
991 			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
992 
993 		chan->err = true;
994 	}
995 }
996 
997 /**
998  * xilinx_vdma_start_transfer - Starts VDMA transfer
999  * @chan: Driver specific channel struct pointer
1000  */
1001 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1002 {
1003 	struct xilinx_vdma_config *config = &chan->config;
1004 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
1005 	u32 reg;
1006 	struct xilinx_vdma_tx_segment *tail_segment;
1007 
1008 	/* This function was invoked with lock held */
1009 	if (chan->err)
1010 		return;
1011 
1012 	if (list_empty(&chan->pending_list))
1013 		return;
1014 
1015 	desc = list_first_entry(&chan->pending_list,
1016 				struct xilinx_dma_tx_descriptor, node);
1017 	tail_desc = list_last_entry(&chan->pending_list,
1018 				    struct xilinx_dma_tx_descriptor, node);
1019 
1020 	tail_segment = list_last_entry(&tail_desc->segments,
1021 				       struct xilinx_vdma_tx_segment, node);
1022 
1023 	/* If it is SG mode and hardware is busy, cannot submit */
1024 	if (chan->has_sg && xilinx_dma_is_running(chan) &&
1025 	    !xilinx_dma_is_idle(chan)) {
1026 		dev_dbg(chan->dev, "DMA controller still busy\n");
1027 		return;
1028 	}
1029 
1030 	/*
1031 	 * If hardware is idle, then all descriptors on the running lists are
1032 	 * done, start new transfers
1033 	 */
1034 	if (chan->has_sg)
1035 		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1036 				desc->async_tx.phys);
1037 
1038 	/* Configure the hardware using info in the config structure */
1039 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1040 
1041 	if (config->frm_cnt_en)
1042 		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1043 	else
1044 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1045 
1046 	/* Configure the channel with the number of frame buffers */
1047 	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
1048 			chan->desc_pendingcount);
1049 
1050 	/*
1051 	 * With SG, start with circular mode, so that BDs can be fetched.
1052 	 * In direct register mode, if not parking, enable circular mode
1053 	 */
1054 	if (chan->has_sg || !config->park)
1055 		reg |= XILINX_DMA_DMACR_CIRC_EN;
1056 
1057 	if (config->park)
1058 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1059 
1060 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1061 
1062 	if (config->park && (config->park_frm >= 0) &&
1063 			(config->park_frm < chan->num_frms)) {
1064 		if (chan->direction == DMA_MEM_TO_DEV)
1065 			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
1066 				config->park_frm <<
1067 					XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
1068 		else
1069 			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
1070 				config->park_frm <<
1071 					XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
1072 	}
1073 
1074 	/* Start the hardware */
1075 	xilinx_dma_start(chan);
1076 
1077 	if (chan->err)
1078 		return;
1079 
1080 	/* Start the transfer */
1081 	if (chan->has_sg) {
1082 		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1083 				tail_segment->phys);
1084 	} else {
1085 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
1086 		int i = 0;
1087 
1088 		if (chan->desc_submitcount < chan->num_frms)
1089 			i = chan->desc_submitcount;
1090 
1091 		list_for_each_entry(segment, &desc->segments, node) {
1092 			if (chan->ext_addr)
1093 				vdma_desc_write_64(chan,
1094 					XILINX_VDMA_REG_START_ADDRESS_64(i++),
1095 					segment->hw.buf_addr,
1096 					segment->hw.buf_addr_msb);
1097 			else
1098 				vdma_desc_write(chan,
1099 					XILINX_VDMA_REG_START_ADDRESS(i++),
1100 					segment->hw.buf_addr);
1101 
1102 			last = segment;
1103 		}
1104 
1105 		if (!last)
1106 			return;
1107 
1108 		/* HW expects these parameters to be the same for one transaction */
1109 		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1110 		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1111 				last->hw.stride);
1112 		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1113 	}
1114 
1115 	if (!chan->has_sg) {
1116 		list_del(&desc->node);
1117 		list_add_tail(&desc->node, &chan->active_list);
1118 		chan->desc_submitcount++;
1119 		chan->desc_pendingcount--;
1120 		if (chan->desc_submitcount == chan->num_frms)
1121 			chan->desc_submitcount = 0;
1122 	} else {
1123 		list_splice_tail_init(&chan->pending_list, &chan->active_list);
1124 		chan->desc_pendingcount = 0;
1125 	}
1126 }
1127 
1128 /**
1129  * xilinx_cdma_start_transfer - Starts cdma transfer
1130  * @chan: Driver specific channel struct pointer
1131  */
1132 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1133 {
1134 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1135 	struct xilinx_cdma_tx_segment *tail_segment;
1136 	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1137 
1138 	if (chan->err)
1139 		return;
1140 
1141 	if (list_empty(&chan->pending_list))
1142 		return;
1143 
1144 	head_desc = list_first_entry(&chan->pending_list,
1145 				     struct xilinx_dma_tx_descriptor, node);
1146 	tail_desc = list_last_entry(&chan->pending_list,
1147 				    struct xilinx_dma_tx_descriptor, node);
1148 	tail_segment = list_last_entry(&tail_desc->segments,
1149 				       struct xilinx_cdma_tx_segment, node);
1150 
1151 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1152 		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1153 		ctrl_reg |= chan->desc_pendingcount <<
1154 				XILINX_DMA_CR_COALESCE_SHIFT;
1155 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1156 	}
1157 
1158 	if (chan->has_sg) {
1159 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1160 			     head_desc->async_tx.phys);
1161 
1162 		/* Update tail ptr register which will start the transfer */
1163 		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1164 			     tail_segment->phys);
1165 	} else {
1166 		/* In simple mode */
1167 		struct xilinx_cdma_tx_segment *segment;
1168 		struct xilinx_cdma_desc_hw *hw;
1169 
1170 		segment = list_first_entry(&head_desc->segments,
1171 					   struct xilinx_cdma_tx_segment,
1172 					   node);
1173 
1174 		hw = &segment->hw;
1175 
1176 		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
1177 		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
1178 
1179 		/* Start the transfer */
1180 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1181 				hw->control & XILINX_DMA_MAX_TRANS_LEN);
1182 	}
1183 
1184 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
1185 	chan->desc_pendingcount = 0;
1186 }
1187 
1188 /**
1189  * xilinx_dma_start_transfer - Starts DMA transfer
1190  * @chan: Driver specific channel struct pointer
1191  */
1192 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1193 {
1194 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1195 	struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
1196 	u32 reg;
1197 
1198 	if (chan->err)
1199 		return;
1200 
1201 	if (list_empty(&chan->pending_list))
1202 		return;
1203 
1204 	/* If it is SG mode and hardware is busy, cannot submit */
1205 	if (chan->has_sg && xilinx_dma_is_running(chan) &&
1206 	    !xilinx_dma_is_idle(chan)) {
1207 		dev_dbg(chan->dev, "DMA controller still busy\n");
1208 		return;
1209 	}
1210 
1211 	head_desc = list_first_entry(&chan->pending_list,
1212 				     struct xilinx_dma_tx_descriptor, node);
1213 	tail_desc = list_last_entry(&chan->pending_list,
1214 				    struct xilinx_dma_tx_descriptor, node);
1215 	tail_segment = list_last_entry(&tail_desc->segments,
1216 				       struct xilinx_axidma_tx_segment, node);
1217 
1218 	if (chan->has_sg && !chan->xdev->mcdma) {
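		/*
		 * Swap the pre-allocated reserve segment (seg_v) in as the new
		 * head BD: copy the old head into it and keep the old head as
		 * the next reserve, so the tail BD's next_desc always points
		 * at a valid descriptor.
		 */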
1219 		old_head = list_first_entry(&head_desc->segments,
1220 					struct xilinx_axidma_tx_segment, node);
1221 		new_head = chan->seg_v;
1222 		/* Copy Buffer Descriptor fields. */
1223 		new_head->hw = old_head->hw;
1224 
1225 		/* Swap and save new reserve */
1226 		list_replace_init(&old_head->node, &new_head->node);
1227 		chan->seg_v = old_head;
1228 
1229 		tail_segment->hw.next_desc = chan->seg_v->phys;
1230 		head_desc->async_tx.phys = new_head->phys;
1231 	}
1232 
1233 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1234 
1235 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1236 		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1237 		reg |= chan->desc_pendingcount <<
1238 				  XILINX_DMA_CR_COALESCE_SHIFT;
1239 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1240 	}
1241 
1242 	if (chan->has_sg && !chan->xdev->mcdma)
1243 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1244 			     head_desc->async_tx.phys);
1245 
1246 	if (chan->has_sg && chan->xdev->mcdma) {
1247 		if (chan->direction == DMA_MEM_TO_DEV) {
1248 			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1249 				       head_desc->async_tx.phys);
1250 		} else {
1251 			if (!chan->tdest) {
1252 				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1253 				       head_desc->async_tx.phys);
1254 			} else {
1255 				dma_ctrl_write(chan,
1256 					XILINX_DMA_MCRX_CDESC(chan->tdest),
1257 				       head_desc->async_tx.phys);
1258 			}
1259 		}
1260 	}
1261 
1262 	xilinx_dma_start(chan);
1263 
1264 	if (chan->err)
1265 		return;
1266 
1267 	/* Start the transfer */
1268 	if (chan->has_sg && !chan->xdev->mcdma) {
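		/*
		 * In cyclic mode the tail pointer is programmed with the
		 * reserved cyclic_seg_v segment, which is not part of the BD
		 * chain, so the hardware never reaches the tail and keeps
		 * cycling through the ring of BDs.
		 */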
1269 		if (chan->cyclic)
1270 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1271 				     chan->cyclic_seg_v->phys);
1272 		else
1273 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1274 				     tail_segment->phys);
1275 	} else if (chan->has_sg && chan->xdev->mcdma) {
1276 		if (chan->direction == DMA_MEM_TO_DEV) {
1277 			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1278 			       tail_segment->phys);
1279 		} else {
1280 			if (!chan->tdest) {
1281 				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1282 					       tail_segment->phys);
1283 			} else {
1284 				dma_ctrl_write(chan,
1285 					XILINX_DMA_MCRX_TDESC(chan->tdest),
1286 					tail_segment->phys);
1287 			}
1288 		}
1289 	} else {
1290 		struct xilinx_axidma_tx_segment *segment;
1291 		struct xilinx_axidma_desc_hw *hw;
1292 
1293 		segment = list_first_entry(&head_desc->segments,
1294 					   struct xilinx_axidma_tx_segment,
1295 					   node);
1296 		hw = &segment->hw;
1297 
1298 		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
1299 
1300 		/* Start the transfer */
1301 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1302 			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
1303 	}
1304 
1305 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
1306 	chan->desc_pendingcount = 0;
1307 }
1308 
1309 /**
1310  * xilinx_dma_issue_pending - Issue pending transactions
1311  * @dchan: DMA channel
1312  */
1313 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1314 {
1315 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1316 	unsigned long flags;
1317 
1318 	spin_lock_irqsave(&chan->lock, flags);
1319 	chan->start_transfer(chan);
1320 	spin_unlock_irqrestore(&chan->lock, flags);
1321 }
1322 
1323 /**
1324  * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1325  * @chan: Xilinx DMA channel
1326  *
1327  * CONTEXT: hardirq
1328  */
1329 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1330 {
1331 	struct xilinx_dma_tx_descriptor *desc, *next;
1332 
1333 	/* This function was invoked with lock held */
1334 	if (list_empty(&chan->active_list))
1335 		return;
1336 
1337 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1338 		list_del(&desc->node);
1339 		if (!desc->cyclic)
1340 			dma_cookie_complete(&desc->async_tx);
1341 		list_add_tail(&desc->node, &chan->done_list);
1342 	}
1343 }
1344 
1345 /**
1346  * xilinx_dma_reset - Reset DMA channel
1347  * @chan: Driver specific DMA channel
1348  *
1349  * Return: '0' on success and failure value on error
1350  */
1351 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1352 {
1353 	int err;
1354 	u32 tmp;
1355 
1356 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1357 
1358 	/* Wait for the hardware to finish reset */
1359 	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1360 				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
1361 				      XILINX_DMA_LOOP_COUNT);
1362 
1363 	if (err) {
1364 		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1365 			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1366 			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1367 		return -ETIMEDOUT;
1368 	}
1369 
1370 	chan->err = false;
1371 
1372 	return err;
1373 }
1374 
1375 /**
1376  * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1377  * @chan: Driver specific DMA channel
1378  *
1379  * Return: '0' on success and failure value on error
1380  */
1381 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1382 {
1383 	int err;
1384 
1385 	/* Reset VDMA */
1386 	err = xilinx_dma_reset(chan);
1387 	if (err)
1388 		return err;
1389 
1390 	/* Enable interrupts */
1391 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1392 		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1393 
1394 	return 0;
1395 }
1396 
1397 /**
1398  * xilinx_dma_irq_handler - DMA Interrupt handler
1399  * @irq: IRQ number
1400  * @data: Pointer to the Xilinx DMA channel structure
1401  *
1402  * Return: IRQ_HANDLED/IRQ_NONE
1403  */
1404 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1405 {
1406 	struct xilinx_dma_chan *chan = data;
1407 	u32 status;
1408 
1409 	/* Read the status and ack the interrupts. */
1410 	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1411 	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1412 		return IRQ_NONE;
1413 
1414 	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1415 			status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1416 
1417 	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1418 		/*
1419 		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1420 		 * error is recoverable, ignore it. Otherwise flag the error.
1421 		 *
1422 		 * Only recoverable errors can be cleared in the DMASR register,
1423 		 * so make sure not to write 1 to the other error bits.
1424 		 */
1425 		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1426 
1427 		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1428 				errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1429 
1430 		if (!chan->flush_on_fsync ||
1431 		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1432 			dev_err(chan->dev,
1433 				"Channel %p has errors %x, cdr %x tdr %x\n",
1434 				chan, errors,
1435 				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1436 				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1437 			chan->err = true;
1438 		}
1439 	}
1440 
1441 	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1442 		/*
1443 		 * The device is taking too long to complete the transfer
1444 		 * for the responsiveness the user requires.
1445 		 */
1446 		dev_dbg(chan->dev, "Inter-packet latency too long\n");
1447 	}
1448 
1449 	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1450 		spin_lock(&chan->lock);
1451 		xilinx_dma_complete_descriptor(chan);
1452 		chan->start_transfer(chan);
1453 		spin_unlock(&chan->lock);
1454 	}
1455 
1456 	tasklet_schedule(&chan->tasklet);
1457 	return IRQ_HANDLED;
1458 }
1459 
1460 /**
1461  * append_desc_queue - Append a descriptor to the channel's pending list
1462  * @chan: Driver specific dma channel
1463  * @desc: dma transaction descriptor
1464  */
1465 static void append_desc_queue(struct xilinx_dma_chan *chan,
1466 			      struct xilinx_dma_tx_descriptor *desc)
1467 {
1468 	struct xilinx_vdma_tx_segment *tail_segment;
1469 	struct xilinx_dma_tx_descriptor *tail_desc;
1470 	struct xilinx_axidma_tx_segment *axidma_tail_segment;
1471 	struct xilinx_cdma_tx_segment *cdma_tail_segment;
1472 
1473 	if (list_empty(&chan->pending_list))
1474 		goto append;
1475 
1476 	/*
1477 	 * Add the hardware descriptor to the chain of hardware descriptors
1478 	 * that already exists in memory.
1479 	 */
1480 	tail_desc = list_last_entry(&chan->pending_list,
1481 				    struct xilinx_dma_tx_descriptor, node);
1482 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1483 		tail_segment = list_last_entry(&tail_desc->segments,
1484 					       struct xilinx_vdma_tx_segment,
1485 					       node);
1486 		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1487 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1488 		cdma_tail_segment = list_last_entry(&tail_desc->segments,
1489 						struct xilinx_cdma_tx_segment,
1490 						node);
1491 		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1492 	} else {
1493 		axidma_tail_segment = list_last_entry(&tail_desc->segments,
1494 					       struct xilinx_axidma_tx_segment,
1495 					       node);
1496 		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1497 	}
1498 
1499 	/*
1500 	 * Add the software descriptor and all children to the list
1501 	 * of pending transactions
1502 	 */
1503 append:
1504 	list_add_tail(&desc->node, &chan->pending_list);
1505 	chan->desc_pendingcount++;
1506 
1507 	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1508 	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1509 		dev_dbg(chan->dev, "desc pendingcount is too high\n");
1510 		chan->desc_pendingcount = chan->num_frms;
1511 	}
1512 }
1513 
1514 /**
1515  * xilinx_dma_tx_submit - Submit DMA transaction
1516  * @tx: Async transaction descriptor
1517  *
1518  * Return: cookie value on success and failure value on error
1519  */
1520 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1521 {
1522 	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1523 	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1524 	dma_cookie_t cookie;
1525 	unsigned long flags;
1526 	int err;
1527 
1528 	if (chan->cyclic) {
1529 		xilinx_dma_free_tx_descriptor(chan, desc);
1530 		return -EBUSY;
1531 	}
1532 
1533 	if (chan->err) {
1534 		/*
1535 		 * Try to reset the errored channel. If the reset fails, the
1536 		 * channel is no longer functional and needs a hard system reset.
1537 		 */
1538 		err = xilinx_dma_chan_reset(chan);
1539 		if (err < 0)
1540 			return err;
1541 	}
1542 
1543 	spin_lock_irqsave(&chan->lock, flags);
1544 
1545 	cookie = dma_cookie_assign(tx);
1546 
1547 	/* Put this transaction onto the tail of the pending queue */
1548 	append_desc_queue(chan, desc);
1549 
1550 	if (desc->cyclic)
1551 		chan->cyclic = true;
1552 
1553 	spin_unlock_irqrestore(&chan->lock, flags);
1554 
1555 	return cookie;
1556 }
1557 
1558 /**
1559  * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1560  *	DMA_SLAVE transaction
1561  * @dchan: DMA channel
1562  * @xt: Interleaved template pointer
1563  * @flags: transfer ack flags
1564  *
1565  * Return: Async transaction descriptor on success and NULL on failure
1566  */
1567 static struct dma_async_tx_descriptor *
1568 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1569 				 struct dma_interleaved_template *xt,
1570 				 unsigned long flags)
1571 {
1572 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1573 	struct xilinx_dma_tx_descriptor *desc;
1574 	struct xilinx_vdma_tx_segment *segment, *prev = NULL;
1575 	struct xilinx_vdma_desc_hw *hw;
1576 
1577 	if (!is_slave_direction(xt->dir))
1578 		return NULL;
1579 
1580 	if (!xt->numf || !xt->sgl[0].size)
1581 		return NULL;
1582 
1583 	if (xt->frame_size != 1)
1584 		return NULL;
1585 
1586 	/* Allocate a transaction descriptor. */
1587 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1588 	if (!desc)
1589 		return NULL;
1590 
1591 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1592 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1593 	async_tx_ack(&desc->async_tx);
1594 
1595 	/* Allocate the link descriptor from DMA pool */
1596 	segment = xilinx_vdma_alloc_tx_segment(chan);
1597 	if (!segment)
1598 		goto error;
1599 
1600 	/* Fill in the hardware descriptor */
1601 	hw = &segment->hw;
1602 	hw->vsize = xt->numf;
1603 	hw->hsize = xt->sgl[0].size;
1604 	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1605 			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1606 	hw->stride |= chan->config.frm_dly <<
1607 			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1608 
1609 	if (xt->dir != DMA_MEM_TO_DEV) {
1610 		if (chan->ext_addr) {
1611 			hw->buf_addr = lower_32_bits(xt->dst_start);
1612 			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1613 		} else {
1614 			hw->buf_addr = xt->dst_start;
1615 		}
1616 	} else {
1617 		if (chan->ext_addr) {
1618 			hw->buf_addr = lower_32_bits(xt->src_start);
1619 			hw->buf_addr_msb = upper_32_bits(xt->src_start);
1620 		} else {
1621 			hw->buf_addr = xt->src_start;
1622 		}
1623 	}
1624 
1625 	/* Insert the segment into the descriptor segments list. */
1626 	list_add_tail(&segment->node, &desc->segments);
1627 
1628 	prev = segment;
1629 
1630 	/* Link the last hardware descriptor with the first. */
1631 	segment = list_first_entry(&desc->segments,
1632 				   struct xilinx_vdma_tx_segment, node);
1633 	desc->async_tx.phys = segment->phys;
1634 
1635 	return &desc->async_tx;
1636 
1637 error:
1638 	xilinx_dma_free_tx_descriptor(chan, desc);
1639 	return NULL;
1640 }
1641 
1642 /**
1643  * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1644  * @dchan: DMA channel
1645  * @dma_dst: destination address
1646  * @dma_src: source address
1647  * @len: transfer length
1648  * @flags: transfer ack flags
1649  *
1650  * Return: Async transaction descriptor on success and NULL on failure
1651  */
1652 static struct dma_async_tx_descriptor *
1653 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1654 			dma_addr_t dma_src, size_t len, unsigned long flags)
1655 {
1656 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1657 	struct xilinx_dma_tx_descriptor *desc;
1658 	struct xilinx_cdma_tx_segment *segment, *prev;
1659 	struct xilinx_cdma_desc_hw *hw;
1660 
1661 	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
1662 		return NULL;
1663 
1664 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1665 	if (!desc)
1666 		return NULL;
1667 
1668 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1669 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1670 
1671 	/* Allocate the link descriptor from DMA pool */
1672 	segment = xilinx_cdma_alloc_tx_segment(chan);
1673 	if (!segment)
1674 		goto error;
1675 
1676 	hw = &segment->hw;
1677 	hw->control = len;
1678 	hw->src_addr = dma_src;
1679 	hw->dest_addr = dma_dst;
1680 	if (chan->ext_addr) {
1681 		hw->src_addr_msb = upper_32_bits(dma_src);
1682 		hw->dest_addr_msb = upper_32_bits(dma_dst);
1683 	}
1684 
1685 	/* Fill the previous next descriptor with current */
1686 	prev = list_last_entry(&desc->segments,
1687 			       struct xilinx_cdma_tx_segment, node);
1688 	prev->hw.next_desc = segment->phys;
1689 
1690 	/* Insert the segment into the descriptor segments list. */
1691 	list_add_tail(&segment->node, &desc->segments);
1692 
1693 	prev = segment;
1694 
1695 	/* Link the last hardware descriptor with the first. */
1696 	segment = list_first_entry(&desc->segments,
1697 				struct xilinx_cdma_tx_segment, node);
1698 	desc->async_tx.phys = segment->phys;
1699 	prev->hw.next_desc = segment->phys;
1700 
1701 	return &desc->async_tx;
1702 
1703 error:
1704 	xilinx_dma_free_tx_descriptor(chan, desc);
1705 	return NULL;
1706 }
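/*
 * Example client usage of the CDMA memcpy path (a minimal sketch; error
 * handling is omitted and the channel is assumed to have been obtained
 * with dma_request_chan() or similar):
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */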
1707 
1708 /**
1709  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1710  * @dchan: DMA channel
1711  * @sgl: scatterlist to transfer to/from
1712  * @sg_len: number of entries in @sgl
1713  * @direction: DMA direction
1714  * @flags: transfer ack flags
1715  * @context: APP words of the descriptor
1716  *
1717  * Return: Async transaction descriptor on success and NULL on failure
1718  */
1719 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1720 	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1721 	enum dma_transfer_direction direction, unsigned long flags,
1722 	void *context)
1723 {
1724 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1725 	struct xilinx_dma_tx_descriptor *desc;
1726 	struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
1727 	u32 *app_w = (u32 *)context;
1728 	struct scatterlist *sg;
1729 	size_t copy;
1730 	size_t sg_used;
1731 	unsigned int i;
1732 
1733 	if (!is_slave_direction(direction))
1734 		return NULL;
1735 
1736 	/* Allocate a transaction descriptor. */
1737 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1738 	if (!desc)
1739 		return NULL;
1740 
1741 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1742 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1743 
1744 	/* Build transactions using information in the scatter gather list */
1745 	for_each_sg(sgl, sg, sg_len, i) {
1746 		sg_used = 0;
1747 
1748 		/* Loop until the entire scatterlist entry is used */
1749 		while (sg_used < sg_dma_len(sg)) {
1750 			struct xilinx_axidma_desc_hw *hw;
1751 
1752 			/* Get a free segment */
1753 			segment = xilinx_axidma_alloc_tx_segment(chan);
1754 			if (!segment)
1755 				goto error;
1756 
1757 			/*
1758 			 * Calculate the maximum number of bytes to transfer,
1759 			 * making sure it does not exceed the hw limit
1760 			 */
1761 			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1762 				     XILINX_DMA_MAX_TRANS_LEN);
1763 			hw = &segment->hw;
1764 
1765 			/* Fill in the descriptor */
1766 			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1767 					  sg_used, 0);
1768 
1769 			hw->control = copy;
1770 
1771 			if (chan->direction == DMA_MEM_TO_DEV) {
1772 				if (app_w)
1773 					memcpy(hw->app, app_w, sizeof(u32) *
1774 					       XILINX_DMA_NUM_APP_WORDS);
1775 			}
1776 
1777 			if (prev)
1778 				prev->hw.next_desc = segment->phys;
1779 
1780 			prev = segment;
1781 			sg_used += copy;
1782 
1783 			/*
1784 			 * Insert the segment into the descriptor segments
1785 			 * list.
1786 			 */
1787 			list_add_tail(&segment->node, &desc->segments);
1788 		}
1789 	}
1790 
1791 	segment = list_first_entry(&desc->segments,
1792 				   struct xilinx_axidma_tx_segment, node);
1793 	desc->async_tx.phys = segment->phys;
1794 	prev->hw.next_desc = segment->phys;
1795 
1796 	/* For MEM_TO_DEV, set SOP on the first and EOP on the last BD */
1797 	if (chan->direction == DMA_MEM_TO_DEV) {
1798 		segment->hw.control |= XILINX_DMA_BD_SOP;
1799 		segment = list_last_entry(&desc->segments,
1800 					  struct xilinx_axidma_tx_segment,
1801 					  node);
1802 		segment->hw.control |= XILINX_DMA_BD_EOP;
1803 	}
1804 
1805 	return &desc->async_tx;
1806 
1807 error:
1808 	xilinx_dma_free_tx_descriptor(chan, desc);
1809 	return NULL;
1810 }
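
/*
 * Illustrative sketch (not part of this driver): a client queueing one
 * receive buffer through the slave scatter-gather path above. The helper
 * name and buffer handling are assumptions; the buffer must remain mapped
 * until the completion callback has run.
 */
static int __maybe_unused xilinx_dma_rx_sg_example(struct device *dev,
						   struct dma_chan *chan,
						   void *buf, size_t len,
						   dma_async_tx_callback done,
						   void *done_arg)
{
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;
	dma_cookie_t cookie;
	int nents;

	sg_init_one(&sg, buf, len);
	nents = dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE);
	if (!nents)
		return -ENOMEM;

	/* DMA_DEV_TO_MEM maps onto the S2MM channel of the AXI DMA core */
	tx = dmaengine_prep_slave_sg(chan, &sg, nents, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_unmap_sg(dev, &sg, 1, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	/* 'done' is responsible for unmapping and consuming the buffer */
	tx->callback = done;
	tx->callback_param = done_arg;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dma_unmap_sg(dev, &sg, 1, DMA_FROM_DEVICE);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	return 0;
}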
1811 
1812 /**
1813  * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA cyclic transaction
1814  * @dchan: DMA channel
1815  * @buf_addr: Physical address of the buffer
1816  * @buf_len: Total length of the cyclic buffers
1817  * @period_len: length of individual cyclic buffer
1818  * @direction: DMA direction
1819  * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
1820 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1821 	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1822 	size_t period_len, enum dma_transfer_direction direction,
1823 	unsigned long flags)
1824 {
1825 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1826 	struct xilinx_dma_tx_descriptor *desc;
1827 	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1828 	size_t copy, sg_used;
1829 	unsigned int num_periods;
1830 	int i;
1831 	u32 reg;
1832 
1833 	if (!period_len)
1834 		return NULL;
1835 
1836 	num_periods = buf_len / period_len;
1837 
1838 	if (!num_periods)
1839 		return NULL;
1840 
1841 	if (!is_slave_direction(direction))
1842 		return NULL;
1843 
1844 	/* Allocate a transaction descriptor. */
1845 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1846 	if (!desc)
1847 		return NULL;
1848 
1849 	chan->direction = direction;
1850 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1851 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1852 
1853 	for (i = 0; i < num_periods; ++i) {
1854 		sg_used = 0;
1855 
1856 		while (sg_used < period_len) {
1857 			struct xilinx_axidma_desc_hw *hw;
1858 
1859 			/* Get a free segment */
1860 			segment = xilinx_axidma_alloc_tx_segment(chan);
1861 			if (!segment)
1862 				goto error;
1863 
1864 			/*
1865 			 * Calculate the maximum number of bytes to transfer,
1866 			 * making sure it does not exceed the hw limit
1867 			 */
1868 			copy = min_t(size_t, period_len - sg_used,
1869 				     XILINX_DMA_MAX_TRANS_LEN);
1870 			hw = &segment->hw;
1871 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1872 					  period_len * i);
1873 			hw->control = copy;
1874 
1875 			if (prev)
1876 				prev->hw.next_desc = segment->phys;
1877 
1878 			prev = segment;
1879 			sg_used += copy;
1880 
1881 			/*
1882 			 * Insert the segment into the descriptor segments
1883 			 * list.
1884 			 */
1885 			list_add_tail(&segment->node, &desc->segments);
1886 		}
1887 	}
1888 
1889 	head_segment = list_first_entry(&desc->segments,
1890 				   struct xilinx_axidma_tx_segment, node);
1891 	desc->async_tx.phys = head_segment->phys;
1892 
1893 	desc->cyclic = true;
1894 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1895 	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1896 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1897 
1898 	segment = list_last_entry(&desc->segments,
1899 				  struct xilinx_axidma_tx_segment,
1900 				  node);
1901 	segment->hw.next_desc = (u32) head_segment->phys;
1902 
1903 	/* For MEM_TO_DEV, set SOP on the first and EOP on the last BD */
1904 	if (direction == DMA_MEM_TO_DEV) {
1905 		head_segment->hw.control |= XILINX_DMA_BD_SOP;
1906 		segment->hw.control |= XILINX_DMA_BD_EOP;
1907 	}
1908 
1909 	return &desc->async_tx;
1910 
1911 error:
1912 	xilinx_dma_free_tx_descriptor(chan, desc);
1913 	return NULL;
1914 }
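
/*
 * Illustrative sketch (not part of this driver): a client setting up an
 * audio-style ring buffer on the cyclic path above. The helper and callback
 * names are assumptions; the transfer repeats until the channel is
 * terminated (see xilinx_dma_terminate_all() below).
 */
static int __maybe_unused xilinx_dma_cyclic_example(struct dma_chan *chan,
						    dma_addr_t buf,
						    size_t buf_len,
						    size_t period_len,
						    dma_async_tx_callback period_done,
						    void *arg)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* buf must be a DMA-mapped ring buffer of buf_len bytes */
	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* Called once per completed period until the channel is terminated */
	tx->callback = period_done;
	tx->callback_param = arg;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}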
1915 
1916 /**
1917  * xilinx_dma_prep_interleaved - prepare a descriptor for a
1918  *	DMA_SLAVE transaction
1919  * @dchan: DMA channel
1920  * @xt: Interleaved template pointer
1921  * @flags: transfer ack flags
1922  *
1923  * Return: Async transaction descriptor on success and NULL on failure
1924  */
1925 static struct dma_async_tx_descriptor *
1926 xilinx_dma_prep_interleaved(struct dma_chan *dchan,
1927 				 struct dma_interleaved_template *xt,
1928 				 unsigned long flags)
1929 {
1930 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1931 	struct xilinx_dma_tx_descriptor *desc;
1932 	struct xilinx_axidma_tx_segment *segment;
1933 	struct xilinx_axidma_desc_hw *hw;
1934 
1935 	if (!is_slave_direction(xt->dir))
1936 		return NULL;
1937 
1938 	if (!xt->numf || !xt->sgl[0].size)
1939 		return NULL;
1940 
1941 	if (xt->frame_size != 1)
1942 		return NULL;
1943 
1944 	/* Allocate a transaction descriptor. */
1945 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1946 	if (!desc)
1947 		return NULL;
1948 
1949 	chan->direction = xt->dir;
1950 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1951 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1952 
1953 	/* Get a free segment */
1954 	segment = xilinx_axidma_alloc_tx_segment(chan);
1955 	if (!segment)
1956 		goto error;
1957 
1958 	hw = &segment->hw;
1959 
1960 	/* Fill in the descriptor */
1961 	if (xt->dir != DMA_MEM_TO_DEV)
1962 		hw->buf_addr = xt->dst_start;
1963 	else
1964 		hw->buf_addr = xt->src_start;
1965 
1966 	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
1967 	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
1968 			    XILINX_DMA_BD_VSIZE_MASK;
1969 	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
1970 			    XILINX_DMA_BD_STRIDE_MASK;
1971 	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
1972 
1973 	/*
1974 	 * Insert the segment into the descriptor segments
1975 	 * list.
1976 	 */
1977 	list_add_tail(&segment->node, &desc->segments);
1978 
1980 	segment = list_first_entry(&desc->segments,
1981 				   struct xilinx_axidma_tx_segment, node);
1982 	desc->async_tx.phys = segment->phys;
1983 
1984 	/* For MEM_TO_DEV, set SOP on the first and EOP on the last BD */
1985 	if (xt->dir == DMA_MEM_TO_DEV) {
1986 		segment->hw.control |= XILINX_DMA_BD_SOP;
1987 		segment = list_last_entry(&desc->segments,
1988 					  struct xilinx_axidma_tx_segment,
1989 					  node);
1990 		segment->hw.control |= XILINX_DMA_BD_EOP;
1991 	}
1992 
1993 	return &desc->async_tx;
1994 
1995 error:
1996 	xilinx_dma_free_tx_descriptor(chan, desc);
1997 	return NULL;
1998 }
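
/*
 * Illustrative sketch (not part of this driver): describing a framed
 * transfer for the interleaved path above, i.e. numf lines of hsize bytes,
 * each followed by an icg-byte gap. Names and the allocation scheme are
 * assumptions.
 */
static int __maybe_unused xilinx_dma_interleaved_example(struct dma_chan *chan,
							 dma_addr_t frame,
							 size_t numf,
							 size_t hsize,
							 size_t icg)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int ret = 0;

	/* Room for one data_chunk, since frame_size must be 1 (see above) */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = frame;
	xt->numf = numf;
	xt->frame_size = 1;
	xt->sgl[0].size = hsize;
	xt->sgl[0].icg = icg;

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto out;
	}

	dma_async_issue_pending(chan);
out:
	/* The template is consumed during prep, so it can be freed here */
	kfree(xt);
	return ret;
}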
1999 
2000 /**
2001  * xilinx_dma_terminate_all - Halt the channel and free descriptors
2002  * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
2003  */
2004 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2005 {
2006 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2007 	u32 reg;
2008 
2009 	if (chan->cyclic)
2010 		xilinx_dma_chan_reset(chan);
2011 
2012 	/* Halt the DMA engine */
2013 	xilinx_dma_halt(chan);
2014 
2015 	/* Remove and free all of the descriptors in the lists */
2016 	xilinx_dma_free_descriptors(chan);
2017 
2018 	if (chan->cyclic) {
2019 		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2020 		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2021 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2022 		chan->cyclic = false;
2023 	}
2024 
2025 	return 0;
2026 }
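
/*
 * Illustrative sketch (not part of this driver): how a client typically
 * tears a channel down. dmaengine_terminate_all() ends up in the handler
 * above, which also clears cyclic mode before the channel is released.
 */
static void __maybe_unused xilinx_dma_stop_example(struct dma_chan *chan)
{
	/* Halt the engine and drop all queued and active descriptors */
	dmaengine_terminate_all(chan);

	/* Hand the channel back to the dmaengine core */
	dma_release_channel(chan);
}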
2027 
2028 /**
2029  * xilinx_vdma_channel_set_config - Configure VDMA channel
2030  * Run-time configuration for AXI VDMA, supports:
2031  * . halt the channel
2032  * . configure interrupt coalescing and inter-packet delay threshold
2033  * . start/stop parking
2034  * . enable genlock
2035  *
2036  * @dchan: DMA channel
2037  * @cfg: VDMA device configuration pointer
2038  *
2039  * Return: '0' on success and failure value on error
2040  */
2041 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2042 					struct xilinx_vdma_config *cfg)
2043 {
2044 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2045 	u32 dmacr;
2046 
2047 	if (cfg->reset)
2048 		return xilinx_dma_chan_reset(chan);
2049 
2050 	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2051 
2052 	chan->config.frm_dly = cfg->frm_dly;
2053 	chan->config.park = cfg->park;
2054 
2055 	/* genlock settings */
2056 	chan->config.gen_lock = cfg->gen_lock;
2057 	chan->config.master = cfg->master;
2058 
2059 	if (cfg->gen_lock && chan->genlock) {
2060 		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2061 		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2062 	}
2063 
2064 	chan->config.frm_cnt_en = cfg->frm_cnt_en;
2065 	if (cfg->park)
2066 		chan->config.park_frm = cfg->park_frm;
2067 	else
2068 		chan->config.park_frm = -1;
2069 
2070 	chan->config.coalesc = cfg->coalesc;
2071 	chan->config.delay = cfg->delay;
2072 
2073 	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2074 		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2075 		chan->config.coalesc = cfg->coalesc;
2076 	}
2077 
2078 	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2079 		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2080 		chan->config.delay = cfg->delay;
2081 	}
2082 
2083 	/* FSync Source selection */
2084 	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2085 	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2086 
2087 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2088 
2089 	return 0;
2090 }
2091 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
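
/*
 * Illustrative sketch (not part of this driver): a video client tuning a
 * VDMA channel through the exported helper above. The field values are
 * assumptions; struct xilinx_vdma_config comes from
 * <linux/dma/xilinx_dma.h>.
 */
static int __maybe_unused xilinx_vdma_config_example(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.frm_cnt_en = 1,	/* interrupt every 'coalesc' frames */
		.coalesc = 1,
		.delay = 0,
		.park = 0,		/* circular mode, no frame parking */
		.gen_lock = 0,		/* free running, no genlock master */
		.reset = 0,
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}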
2092 
2093 /* -----------------------------------------------------------------------------
2094  * Probe and remove
2095  */
2096 
2097 /**
2098  * xilinx_dma_chan_remove - Per Channel remove function
2099  * @chan: Driver specific DMA channel
2100  */
2101 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2102 {
2103 	/* Disable all interrupts */
2104 	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2105 		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2106 
2107 	if (chan->irq > 0)
2108 		free_irq(chan->irq, chan);
2109 
2110 	tasklet_kill(&chan->tasklet);
2111 
2112 	list_del(&chan->common.device_node);
2113 }
2114 
2115 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2116 			    struct clk **tx_clk, struct clk **rx_clk,
2117 			    struct clk **sg_clk, struct clk **tmp_clk)
2118 {
2119 	int err;
2120 
2121 	*tmp_clk = NULL;
2122 
2123 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2124 	if (IS_ERR(*axi_clk)) {
2125 		err = PTR_ERR(*axi_clk);
2126 		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2127 		return err;
2128 	}
2129 
2130 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2131 	if (IS_ERR(*tx_clk))
2132 		*tx_clk = NULL;
2133 
2134 	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2135 	if (IS_ERR(*rx_clk))
2136 		*rx_clk = NULL;
2137 
2138 	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2139 	if (IS_ERR(*sg_clk))
2140 		*sg_clk = NULL;
2141 
2142 	err = clk_prepare_enable(*axi_clk);
2143 	if (err) {
2144 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2145 		return err;
2146 	}
2147 
2148 	err = clk_prepare_enable(*tx_clk);
2149 	if (err) {
2150 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2151 		goto err_disable_axiclk;
2152 	}
2153 
2154 	err = clk_prepare_enable(*rx_clk);
2155 	if (err) {
2156 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2157 		goto err_disable_txclk;
2158 	}
2159 
2160 	err = clk_prepare_enable(*sg_clk);
2161 	if (err) {
2162 		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2163 		goto err_disable_rxclk;
2164 	}
2165 
2166 	return 0;
2167 
2168 err_disable_rxclk:
2169 	clk_disable_unprepare(*rx_clk);
2170 err_disable_txclk:
2171 	clk_disable_unprepare(*tx_clk);
2172 err_disable_axiclk:
2173 	clk_disable_unprepare(*axi_clk);
2174 
2175 	return err;
2176 }
2177 
2178 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2179 			    struct clk **dev_clk, struct clk **tmp_clk,
2180 			    struct clk **tmp1_clk, struct clk **tmp2_clk)
2181 {
2182 	int err;
2183 
2184 	*tmp_clk = NULL;
2185 	*tmp1_clk = NULL;
2186 	*tmp2_clk = NULL;
2187 
2188 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2189 	if (IS_ERR(*axi_clk)) {
2190 		err = PTR_ERR(*axi_clk);
2191 		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
2192 		return err;
2193 	}
2194 
2195 	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2196 	if (IS_ERR(*dev_clk)) {
2197 		err = PTR_ERR(*dev_clk);
2198 		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
2199 		return err;
2200 	}
2201 
2202 	err = clk_prepare_enable(*axi_clk);
2203 	if (err) {
2204 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2205 		return err;
2206 	}
2207 
2208 	err = clk_prepare_enable(*dev_clk);
2209 	if (err) {
2210 		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2211 		goto err_disable_axiclk;
2212 	}
2213 
2214 	return 0;
2215 
2216 err_disable_axiclk:
2217 	clk_disable_unprepare(*axi_clk);
2218 
2219 	return err;
2220 }
2221 
2222 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2223 			    struct clk **tx_clk, struct clk **txs_clk,
2224 			    struct clk **rx_clk, struct clk **rxs_clk)
2225 {
2226 	int err;
2227 
2228 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2229 	if (IS_ERR(*axi_clk)) {
2230 		err = PTR_ERR(*axi_clk);
2231 		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2232 		return err;
2233 	}
2234 
2235 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2236 	if (IS_ERR(*tx_clk))
2237 		*tx_clk = NULL;
2238 
2239 	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2240 	if (IS_ERR(*txs_clk))
2241 		*txs_clk = NULL;
2242 
2243 	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2244 	if (IS_ERR(*rx_clk))
2245 		*rx_clk = NULL;
2246 
2247 	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2248 	if (IS_ERR(*rxs_clk))
2249 		*rxs_clk = NULL;
2250 
2251 	err = clk_prepare_enable(*axi_clk);
2252 	if (err) {
2253 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2254 		return err;
2255 	}
2256 
2257 	err = clk_prepare_enable(*tx_clk);
2258 	if (err) {
2259 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2260 		goto err_disable_axiclk;
2261 	}
2262 
2263 	err = clk_prepare_enable(*txs_clk);
2264 	if (err) {
2265 		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2266 		goto err_disable_txclk;
2267 	}
2268 
2269 	err = clk_prepare_enable(*rx_clk);
2270 	if (err) {
2271 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2272 		goto err_disable_txsclk;
2273 	}
2274 
2275 	err = clk_prepare_enable(*rxs_clk);
2276 	if (err) {
2277 		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2278 		goto err_disable_rxclk;
2279 	}
2280 
2281 	return 0;
2282 
2283 err_disable_rxclk:
2284 	clk_disable_unprepare(*rx_clk);
2285 err_disable_txsclk:
2286 	clk_disable_unprepare(*txs_clk);
2287 err_disable_txclk:
2288 	clk_disable_unprepare(*tx_clk);
2289 err_disable_axiclk:
2290 	clk_disable_unprepare(*axi_clk);
2291 
2292 	return err;
2293 }
2294 
2295 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2296 {
2297 	clk_disable_unprepare(xdev->rxs_clk);
2298 	clk_disable_unprepare(xdev->rx_clk);
2299 	clk_disable_unprepare(xdev->txs_clk);
2300 	clk_disable_unprepare(xdev->tx_clk);
2301 	clk_disable_unprepare(xdev->axi_clk);
2302 }
2303 
2304 /**
2305  * xilinx_dma_chan_probe - Per Channel Probing
2306  * It gets channel features from the device tree entry and
2307  * initializes special channel handling routines
2308  *
2309  * @xdev: Driver specific device structure
2310  * @node: Device node
 * @chan_id: DMA Channel id
2311  *
2312  * Return: '0' on success and failure value on error
2313  */
2314 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2315 				  struct device_node *node, int chan_id)
2316 {
2317 	struct xilinx_dma_chan *chan;
2318 	bool has_dre = false;
2319 	u32 value, width;
2320 	int err;
2321 
2322 	/* Allocate and initialize the channel structure */
2323 	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2324 	if (!chan)
2325 		return -ENOMEM;
2326 
2327 	chan->dev = xdev->dev;
2328 	chan->xdev = xdev;
2329 	chan->has_sg = xdev->has_sg;
2330 	chan->desc_pendingcount = 0x0;
2331 	chan->ext_addr = xdev->ext_addr;
2332 
2333 	spin_lock_init(&chan->lock);
2334 	INIT_LIST_HEAD(&chan->pending_list);
2335 	INIT_LIST_HEAD(&chan->done_list);
2336 	INIT_LIST_HEAD(&chan->active_list);
2337 
2338 	/* Retrieve the channel properties from the device tree */
2339 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
2340 
2341 	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2342 
2343 	err = of_property_read_u32(node, "xlnx,datawidth", &value);
2344 	if (err) {
2345 		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2346 		return err;
2347 	}
2348 	width = value >> 3; /* Convert bits to bytes */
2349 
2350 	/* If data width is greater than 8 bytes, DRE is not in hw */
2351 	if (width > 8)
2352 		has_dre = false;
2353 
2354 	if (!has_dre)
2355 		xdev->common.copy_align = fls(width - 1);
2356 
2357 	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2358 	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2359 	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2360 		chan->direction = DMA_MEM_TO_DEV;
2361 		chan->id = chan_id;
2362 		chan->tdest = chan_id;
2363 
2364 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2365 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2366 			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2367 
2368 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2369 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2370 				chan->flush_on_fsync = true;
2371 		}
2372 	} else if (of_device_is_compatible(node,
2373 					   "xlnx,axi-vdma-s2mm-channel") ||
2374 		   of_device_is_compatible(node,
2375 					   "xlnx,axi-dma-s2mm-channel")) {
2376 		chan->direction = DMA_DEV_TO_MEM;
2377 		chan->id = chan_id;
2378 		chan->tdest = chan_id - xdev->nr_channels;
2379 
2380 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2381 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2382 			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2383 
2384 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2385 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2386 				chan->flush_on_fsync = true;
2387 		}
2388 	} else {
2389 		dev_err(xdev->dev, "Invalid channel compatible node\n");
2390 		return -EINVAL;
2391 	}
2392 
2393 	/* Request the interrupt */
2394 	chan->irq = irq_of_parse_and_map(node, 0);
2395 	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2396 			  "xilinx-dma-controller", chan);
2397 	if (err) {
2398 		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2399 		return err;
2400 	}
2401 
2402 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2403 		chan->start_transfer = xilinx_dma_start_transfer;
2404 	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2405 		chan->start_transfer = xilinx_cdma_start_transfer;
2406 	else
2407 		chan->start_transfer = xilinx_vdma_start_transfer;
2408 
2409 	/* Initialize the tasklet */
2410 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
2411 			(unsigned long)chan);
2412 
2413 	/*
2414 	 * Initialize the DMA channel and add it to the DMA engine channels
2415 	 * list.
2416 	 */
2417 	chan->common.device = &xdev->common;
2418 
2419 	list_add_tail(&chan->common.device_node, &xdev->common.channels);
2420 	xdev->chan[chan->id] = chan;
2421 
2422 	/* Reset the channel */
2423 	err = xilinx_dma_chan_reset(chan);
2424 	if (err < 0) {
2425 		dev_err(xdev->dev, "Reset channel failed\n");
2426 		return err;
2427 	}
2428 
2429 	return 0;
2430 }
2431 
2432 /**
2433  * xilinx_dma_child_probe - Per child node probe
2434  * It gets the number of dma-channels per child node from
2435  * the device tree and initializes all the channels.
2436  *
2437  * @xdev: Driver specific device structure
2438  * @node: Device node
2439  *
2440  * Return: 0 always.
2441  */
2442 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2443 				    struct device_node *node)
{
2444 	int ret, i, nr_channels = 1;
2445 
2446 	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2447 	if ((ret < 0) && xdev->mcdma)
2448 		dev_warn(xdev->dev, "missing dma-channels property\n");
2449 
2450 	for (i = 0; i < nr_channels; i++)
2451 		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2452 
2453 	xdev->nr_channels += nr_channels;
2454 
2455 	return 0;
2456 }
2457 
2458 /**
2459  * of_dma_xilinx_xlate - Translation function
2460  * @dma_spec: Pointer to DMA specifier as found in the device tree
2461  * @ofdma: Pointer to DMA controller data
2462  *
2463  * Return: DMA channel pointer on success and NULL on error
2464  */
2465 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2466 						struct of_dma *ofdma)
2467 {
2468 	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2469 	int chan_id = dma_spec->args[0];
2470 
2471 	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
2472 		return NULL;
2473 
2474 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2475 }
2476 
2477 static const struct xilinx_dma_config axidma_config = {
2478 	.dmatype = XDMA_TYPE_AXIDMA,
2479 	.clk_init = axidma_clk_init,
2480 };
2481 
2482 static const struct xilinx_dma_config axicdma_config = {
2483 	.dmatype = XDMA_TYPE_CDMA,
2484 	.clk_init = axicdma_clk_init,
2485 };
2486 
2487 static const struct xilinx_dma_config axivdma_config = {
2488 	.dmatype = XDMA_TYPE_VDMA,
2489 	.clk_init = axivdma_clk_init,
2490 };
2491 
2492 static const struct of_device_id xilinx_dma_of_ids[] = {
2493 	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2494 	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2495 	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2496 	{}
2497 };
2498 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2499 
2500 /**
2501  * xilinx_dma_probe - Driver probe function
2502  * @pdev: Pointer to the platform_device structure
2503  *
2504  * Return: '0' on success and failure value on error
2505  */
2506 static int xilinx_dma_probe(struct platform_device *pdev)
2507 {
2508 	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2509 			struct clk **, struct clk **, struct clk **)
2510 					= axivdma_clk_init;
2511 	struct device_node *node = pdev->dev.of_node;
2512 	struct xilinx_dma_device *xdev;
2513 	struct device_node *child, *np = pdev->dev.of_node;
2514 	struct resource *io;
2515 	u32 num_frames, addr_width;
2516 	int i, err;
2517 
2518 	/* Allocate and initialize the DMA engine structure */
2519 	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2520 	if (!xdev)
2521 		return -ENOMEM;
2522 
2523 	xdev->dev = &pdev->dev;
2524 	if (np) {
2525 		const struct of_device_id *match;
2526 
2527 		match = of_match_node(xilinx_dma_of_ids, np);
2528 		if (match && match->data) {
2529 			xdev->dma_config = match->data;
2530 			clk_init = xdev->dma_config->clk_init;
2531 		}
2532 	}
2533 
2534 	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2535 		       &xdev->rx_clk, &xdev->rxs_clk);
2536 	if (err)
2537 		return err;
2538 
2539 	/* Request and map I/O memory */
2540 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2541 	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
2542 	if (IS_ERR(xdev->regs))
2543 		return PTR_ERR(xdev->regs);
2544 
2545 	/* Retrieve the DMA engine properties from the device tree */
2546 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
2547 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2548 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
2549 
2550 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2551 		err = of_property_read_u32(node, "xlnx,num-fstores",
2552 					   &num_frames);
2553 		if (err < 0) {
2554 			dev_err(xdev->dev,
2555 				"missing xlnx,num-fstores property\n");
2556 			return err;
2557 		}
2558 
2559 		err = of_property_read_u32(node, "xlnx,flush-fsync",
2560 					   &xdev->flush_on_fsync);
2561 		if (err < 0)
2562 			dev_warn(xdev->dev,
2563 				 "missing xlnx,flush-fsync property\n");
2564 	}
2565 
2566 	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
2567 	if (err < 0) {
2568 		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
		/* Default to 32-bit addressing if the property is absent */
		addr_width = 32;
	}
2569 
2570 	if (addr_width > 32)
2571 		xdev->ext_addr = true;
2572 	else
2573 		xdev->ext_addr = false;
2574 
2575 	/* Set the dma mask bits */
2576 	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
2577 
2578 	/* Initialize the DMA engine */
2579 	xdev->common.dev = &pdev->dev;
2580 
2581 	INIT_LIST_HEAD(&xdev->common.channels);
2582 	if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
2583 		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2584 		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2585 	}
2586 
2587 	xdev->common.device_alloc_chan_resources =
2588 				xilinx_dma_alloc_chan_resources;
2589 	xdev->common.device_free_chan_resources =
2590 				xilinx_dma_free_chan_resources;
2591 	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2592 	xdev->common.device_tx_status = xilinx_dma_tx_status;
2593 	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2594 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2595 		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
2596 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2597 		xdev->common.device_prep_dma_cyclic =
2598 					  xilinx_dma_prep_dma_cyclic;
2599 		xdev->common.device_prep_interleaved_dma =
2600 					xilinx_dma_prep_interleaved;
2601 		/* Residue calculation is supported only by AXI DMA */
2602 		xdev->common.residue_granularity =
2603 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
2604 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2605 		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2606 		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
2607 	} else {
2608 		xdev->common.device_prep_interleaved_dma =
2609 				xilinx_vdma_dma_prep_interleaved;
2610 	}
2611 
2612 	platform_set_drvdata(pdev, xdev);
2613 
2614 	/* Initialize the channels */
2615 	for_each_child_of_node(node, child) {
2616 		err = xilinx_dma_child_probe(xdev, child);
2617 		if (err < 0)
2618 			goto disable_clks;
2619 	}
2620 
2621 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2622 		for (i = 0; i < xdev->nr_channels; i++)
2623 			if (xdev->chan[i])
2624 				xdev->chan[i]->num_frms = num_frames;
2625 	}
2626 
2627 	/* Register the DMA engine with the core */
2628 	dma_async_device_register(&xdev->common);
2629 
2630 	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2631 					 xdev);
2632 	if (err < 0) {
2633 		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2634 		dma_async_device_unregister(&xdev->common);
2635 		goto error;
2636 	}
2637 
2638 	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2639 
2640 	return 0;
2641 
2642 disable_clks:
2643 	xdma_disable_allclks(xdev);
2644 error:
2645 	for (i = 0; i < xdev->nr_channels; i++)
2646 		if (xdev->chan[i])
2647 			xilinx_dma_chan_remove(xdev->chan[i]);
2648 
2649 	return err;
2650 }
2651 
2652 /**
2653  * xilinx_dma_remove - Driver remove function
2654  * @pdev: Pointer to the platform_device structure
2655  *
2656  * Return: Always '0'
2657  */
2658 static int xilinx_dma_remove(struct platform_device *pdev)
2659 {
2660 	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2661 	int i;
2662 
2663 	of_dma_controller_free(pdev->dev.of_node);
2664 
2665 	dma_async_device_unregister(&xdev->common);
2666 
2667 	for (i = 0; i < xdev->nr_channels; i++)
2668 		if (xdev->chan[i])
2669 			xilinx_dma_chan_remove(xdev->chan[i]);
2670 
2671 	xdma_disable_allclks(xdev);
2672 
2673 	return 0;
2674 }
2675 
2676 static struct platform_driver xilinx_vdma_driver = {
2677 	.driver = {
2678 		.name = "xilinx-vdma",
2679 		.of_match_table = xilinx_dma_of_ids,
2680 	},
2681 	.probe = xilinx_dma_probe,
2682 	.remove = xilinx_dma_remove,
2683 };
2684 
2685 module_platform_driver(xilinx_vdma_driver);
2686 
2687 MODULE_AUTHOR("Xilinx, Inc.");
2688 MODULE_DESCRIPTION("Xilinx VDMA driver");
2689 MODULE_LICENSE("GPL v2");
2690