xref: /openbmc/linux/drivers/dma/xilinx/xilinx_dma.c (revision 6d99a79c)
1 /*
2  * DMA driver for Xilinx Video DMA Engine
3  *
4  * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
5  *
6  * Based on the Freescale DMA driver.
7  *
8  * Description:
9  * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
10  * core that provides high-bandwidth direct memory access between memory
11  * and AXI4-Stream type video target peripherals. The core provides efficient
12  * two-dimensional DMA operations with independent asynchronous read (MM2S)
13  * and write (S2MM) channel operation. It can be configured to have either
14  * one channel or two channels. If configured as two channels, one is to
15  * transmit to the video device (MM2S) and another is to receive from the
16  * video device (S2MM). Initialization, status, interrupt and management
17  * registers are accessed through an AXI4-Lite slave interface.
18  *
19  * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
20  * provides high-bandwidth one-dimensional direct memory access between memory
21  * and AXI4-Stream target peripherals. It supports one receive and one
22  * transmit channel, both of them optional at synthesis time.
23  *
24  * The AXI CDMA is a soft IP core that provides high-bandwidth Direct Memory
25  * Access (DMA) between a memory-mapped source address and a memory-mapped
26  * destination address.
27  *
28  * This program is free software: you can redistribute it and/or modify
29  * it under the terms of the GNU General Public License as published by
30  * the Free Software Foundation, either version 2 of the License, or
31  * (at your option) any later version.
32  */
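/*
 * Illustrative only: a minimal dmaengine client sketch (the channel name
 * "axidma0", the buffer handle, length and callback are assumptions, and
 * error handling is omitted) showing how a peripheral driver would
 * typically drive an AXI DMA transmit channel exposed by this driver:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "axidma0");
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = my_done_callback;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_release_channel(chan);
 */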
33 
34 #include <linux/bitops.h>
35 #include <linux/dmapool.h>
36 #include <linux/dma/xilinx_dma.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/iopoll.h>
41 #include <linux/module.h>
42 #include <linux/of_address.h>
43 #include <linux/of_dma.h>
44 #include <linux/of_platform.h>
45 #include <linux/of_irq.h>
46 #include <linux/slab.h>
47 #include <linux/clk.h>
48 #include <linux/io-64-nonatomic-lo-hi.h>
49 
50 #include "../dmaengine.h"
51 
52 /* Register/Descriptor Offsets */
53 #define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
54 #define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
55 #define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
56 #define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0
57 
58 /* Control Registers */
59 #define XILINX_DMA_REG_DMACR			0x0000
60 #define XILINX_DMA_DMACR_DELAY_MAX		0xff
61 #define XILINX_DMA_DMACR_DELAY_SHIFT		24
62 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
63 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
64 #define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
65 #define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
66 #define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
67 #define XILINX_DMA_DMACR_MASTER_SHIFT		8
68 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT	5
69 #define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
70 #define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
71 #define XILINX_DMA_DMACR_RESET			BIT(2)
72 #define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
73 #define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
74 #define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
75 
76 #define XILINX_DMA_REG_DMASR			0x0004
77 #define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
78 #define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
79 #define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
80 #define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
81 #define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
82 #define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
83 #define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
84 #define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
85 #define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
86 #define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
87 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
88 #define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
89 #define XILINX_DMA_DMASR_IDLE			BIT(1)
90 #define XILINX_DMA_DMASR_HALTED		BIT(0)
91 #define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
92 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)
93 
94 #define XILINX_DMA_REG_CURDESC			0x0008
95 #define XILINX_DMA_REG_TAILDESC		0x0010
96 #define XILINX_DMA_REG_REG_INDEX		0x0014
97 #define XILINX_DMA_REG_FRMSTORE		0x0018
98 #define XILINX_DMA_REG_THRESHOLD		0x001c
99 #define XILINX_DMA_REG_FRMPTR_STS		0x0024
100 #define XILINX_DMA_REG_PARK_PTR		0x0028
101 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
102 #define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
103 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
104 #define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
105 #define XILINX_DMA_REG_VDMA_VERSION		0x002c
106 
107 /* Register Direct Mode Registers */
108 #define XILINX_DMA_REG_VSIZE			0x0000
109 #define XILINX_DMA_REG_HSIZE			0x0004
110 
111 #define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
112 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
113 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0
114 
115 #define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
116 #define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))
117 
118 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
119 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)
120 
121 /* HW specific definitions */
122 #define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20
123 
124 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
125 		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
126 		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
127 		 XILINX_DMA_DMASR_ERR_IRQ)
128 
129 #define XILINX_DMA_DMASR_ALL_ERR_MASK	\
130 		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
131 		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
132 		 XILINX_DMA_DMASR_SG_DEC_ERR | \
133 		 XILINX_DMA_DMASR_SG_SLV_ERR | \
134 		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
135 		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
136 		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
137 		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
138 		 XILINX_DMA_DMASR_DMA_INT_ERR)
139 
140 /*
141  * Recoverable errors are DMA Internal error, SOF Early, EOF Early
142  * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
143  * is enabled in the h/w system.
144  */
145 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
146 		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
147 		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
148 		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
149 		 XILINX_DMA_DMASR_DMA_INT_ERR)
150 
151 /* Axi VDMA Flush on Fsync bits */
152 #define XILINX_DMA_FLUSH_S2MM		3
153 #define XILINX_DMA_FLUSH_MM2S		2
154 #define XILINX_DMA_FLUSH_BOTH		1
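/* These values match the encoding of the xlnx,flush-fsync device tree property */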
155 
156 /* Delay loop counter to prevent hardware failure */
157 #define XILINX_DMA_LOOP_COUNT		1000000
158 
159 /* AXI DMA Specific Registers/Offsets */
160 #define XILINX_DMA_REG_SRCDSTADDR	0x18
161 #define XILINX_DMA_REG_BTT		0x28
162 
163 /* AXI DMA Specific Masks/Bit fields */
164 #define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
165 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
166 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
167 #define XILINX_DMA_CR_COALESCE_SHIFT	16
168 #define XILINX_DMA_BD_SOP		BIT(27)
169 #define XILINX_DMA_BD_EOP		BIT(26)
170 #define XILINX_DMA_COALESCE_MAX		255
171 #define XILINX_DMA_NUM_DESCS		255
172 #define XILINX_DMA_NUM_APP_WORDS	5
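/*
 * Note: XILINX_DMA_MAX_TRANS_LEN above is a 23-bit length field, so at most
 * 0x7fffff (~8 MiB) bytes can be programmed per descriptor or BTT write.
 */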
173 
174 /* Multi-Channel DMA Descriptor offsets */
175 #define XILINX_DMA_MCRX_CDESC(x)	(0x40 + ((x) - 1) * 0x20)
176 #define XILINX_DMA_MCRX_TDESC(x)	(0x48 + ((x) - 1) * 0x20)
177 
178 /* Multi-Channel DMA Masks/Shifts */
179 #define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
180 #define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
181 #define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
182 #define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
183 #define XILINX_DMA_BD_STRIDE_SHIFT	0
184 #define XILINX_DMA_BD_VSIZE_SHIFT	19
185 
186 /* AXI CDMA Specific Registers/Offsets */
187 #define XILINX_CDMA_REG_SRCADDR		0x18
188 #define XILINX_CDMA_REG_DSTADDR		0x20
189 
190 /* AXI CDMA Specific Masks */
191 #define XILINX_CDMA_CR_SGMODE          BIT(3)
192 
193 /**
194  * struct xilinx_vdma_desc_hw - Hardware Descriptor
195  * @next_desc: Next Descriptor Pointer @0x00
196  * @pad1: Reserved @0x04
197  * @buf_addr: Buffer address @0x08
198  * @buf_addr_msb: MSB of Buffer address @0x0C
199  * @vsize: Vertical Size @0x10
200  * @hsize: Horizontal Size @0x14
201  * @stride: Number of bytes between the first
202  *	    pixels of each horizontal line @0x18
203  */
204 struct xilinx_vdma_desc_hw {
205 	u32 next_desc;
206 	u32 pad1;
207 	u32 buf_addr;
208 	u32 buf_addr_msb;
209 	u32 vsize;
210 	u32 hsize;
211 	u32 stride;
212 } __aligned(64);
213 
214 /**
215  * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
216  * @next_desc: Next Descriptor Pointer @0x00
217  * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
218  * @buf_addr: Buffer address @0x08
219  * @buf_addr_msb: MSB of Buffer address @0x0C
220  * @mcdma_control: Control field for mcdma @0x10
221  * @vsize_stride: Vsize and Stride field for mcdma @0x14
222  * @control: Control field @0x18
223  * @status: Status field @0x1C
224  * @app: APP Fields @0x20 - 0x30
225  */
226 struct xilinx_axidma_desc_hw {
227 	u32 next_desc;
228 	u32 next_desc_msb;
229 	u32 buf_addr;
230 	u32 buf_addr_msb;
231 	u32 mcdma_control;
232 	u32 vsize_stride;
233 	u32 control;
234 	u32 status;
235 	u32 app[XILINX_DMA_NUM_APP_WORDS];
236 } __aligned(64);
237 
238 /**
239  * struct xilinx_cdma_desc_hw - Hardware Descriptor
240  * @next_desc: Next Descriptor Pointer @0x00
241  * @next_desc_msb: Next Descriptor Pointer MSB @0x04
242  * @src_addr: Source address @0x08
243  * @src_addr_msb: Source address MSB @0x0C
244  * @dest_addr: Destination address @0x10
245  * @dest_addr_msb: Destination address MSB @0x14
246  * @control: Control field @0x18
247  * @status: Status field @0x1C
248  */
249 struct xilinx_cdma_desc_hw {
250 	u32 next_desc;
251 	u32 next_desc_msb;
252 	u32 src_addr;
253 	u32 src_addr_msb;
254 	u32 dest_addr;
255 	u32 dest_addr_msb;
256 	u32 control;
257 	u32 status;
258 } __aligned(64);
259 
260 /**
261  * struct xilinx_vdma_tx_segment - Descriptor segment
262  * @hw: Hardware descriptor
263  * @node: Node in the descriptor segments list
264  * @phys: Physical address of segment
265  */
266 struct xilinx_vdma_tx_segment {
267 	struct xilinx_vdma_desc_hw hw;
268 	struct list_head node;
269 	dma_addr_t phys;
270 } __aligned(64);
271 
272 /**
273  * struct xilinx_axidma_tx_segment - Descriptor segment
274  * @hw: Hardware descriptor
275  * @node: Node in the descriptor segments list
276  * @phys: Physical address of segment
277  */
278 struct xilinx_axidma_tx_segment {
279 	struct xilinx_axidma_desc_hw hw;
280 	struct list_head node;
281 	dma_addr_t phys;
282 } __aligned(64);
283 
284 /**
285  * struct xilinx_cdma_tx_segment - Descriptor segment
286  * @hw: Hardware descriptor
287  * @node: Node in the descriptor segments list
288  * @phys: Physical address of segment
289  */
290 struct xilinx_cdma_tx_segment {
291 	struct xilinx_cdma_desc_hw hw;
292 	struct list_head node;
293 	dma_addr_t phys;
294 } __aligned(64);
295 
296 /**
297  * struct xilinx_dma_tx_descriptor - Per Transaction structure
298  * @async_tx: Async transaction descriptor
299  * @segments: TX segments list
300  * @node: Node in the channel descriptors list
301  * @cyclic: Check for cyclic transfers.
302  */
303 struct xilinx_dma_tx_descriptor {
304 	struct dma_async_tx_descriptor async_tx;
305 	struct list_head segments;
306 	struct list_head node;
307 	bool cyclic;
308 };
309 
310 /**
311  * struct xilinx_dma_chan - Driver specific DMA channel structure
312  * @xdev: Driver specific device structure
313  * @ctrl_offset: Control registers offset
314  * @desc_offset: TX descriptor registers offset
315  * @lock: Descriptor operation lock
316  * @pending_list: Descriptors waiting to be submitted to hardware
317  * @active_list: Descriptors submitted to hardware and in flight
318  * @done_list: Complete descriptors
319  * @free_seg_list: Free descriptors
320  * @common: DMA common channel
321  * @desc_pool: Descriptors pool
322  * @dev: The dma device
323  * @irq: Channel IRQ
324  * @id: Channel ID
325  * @direction: Transfer direction
326  * @num_frms: Number of frames
327  * @has_sg: Support scatter transfers
328  * @cyclic: Check for cyclic transfers.
329  * @genlock: Support genlock mode
330  * @err: Channel has errors
331  * @idle: Check for channel idle
332  * @tasklet: Cleanup work after irq
333  * @config: Device configuration info
334  * @flush_on_fsync: Flush on Frame sync
335  * @desc_pendingcount: Descriptor pending count
336  * @ext_addr: Indicates 64 bit addressing is supported by dma channel
337  * @desc_submitcount: Descriptor h/w submitted count
338  * @residue: Residue for AXI DMA
339  * @seg_v: Statically allocated segments base
340  * @seg_p: Physical allocated segments base
341  * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
342  * @cyclic_seg_p: Physical allocated segments base for cyclic dma
343  * @start_transfer: IP specific transfer start routine
344  * @stop_transfer: IP specific transfer quiesce routine
345  * @tdest: TDEST value for mcdma
346  * @has_vflip: S2MM vertical flip
347  */
348 struct xilinx_dma_chan {
349 	struct xilinx_dma_device *xdev;
350 	u32 ctrl_offset;
351 	u32 desc_offset;
352 	spinlock_t lock;
353 	struct list_head pending_list;
354 	struct list_head active_list;
355 	struct list_head done_list;
356 	struct list_head free_seg_list;
357 	struct dma_chan common;
358 	struct dma_pool *desc_pool;
359 	struct device *dev;
360 	int irq;
361 	int id;
362 	enum dma_transfer_direction direction;
363 	int num_frms;
364 	bool has_sg;
365 	bool cyclic;
366 	bool genlock;
367 	bool err;
368 	bool idle;
369 	struct tasklet_struct tasklet;
370 	struct xilinx_vdma_config config;
371 	bool flush_on_fsync;
372 	u32 desc_pendingcount;
373 	bool ext_addr;
374 	u32 desc_submitcount;
375 	u32 residue;
376 	struct xilinx_axidma_tx_segment *seg_v;
377 	dma_addr_t seg_p;
378 	struct xilinx_axidma_tx_segment *cyclic_seg_v;
379 	dma_addr_t cyclic_seg_p;
380 	void (*start_transfer)(struct xilinx_dma_chan *chan);
381 	int (*stop_transfer)(struct xilinx_dma_chan *chan);
382 	u16 tdest;
383 	bool has_vflip;
384 };
385 
386 /**
387  * enum xdma_ip_type - DMA IP type.
388  *
389  * @XDMA_TYPE_AXIDMA: Axi dma ip.
390  * @XDMA_TYPE_CDMA: Axi cdma ip.
391  * @XDMA_TYPE_VDMA: Axi vdma ip.
392  *
393  */
394 enum xdma_ip_type {
395 	XDMA_TYPE_AXIDMA = 0,
396 	XDMA_TYPE_CDMA,
397 	XDMA_TYPE_VDMA,
398 };
399 
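/**
 * struct xilinx_dma_config - DMA IP configuration
 * @dmatype: DMA IP type
 * @clk_init: DMA IP specific clock initialization routine
 */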
400 struct xilinx_dma_config {
401 	enum xdma_ip_type dmatype;
402 	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
403 			struct clk **tx_clk, struct clk **txs_clk,
404 			struct clk **rx_clk, struct clk **rxs_clk);
405 };
406 
407 /**
408  * struct xilinx_dma_device - DMA device structure
409  * @regs: I/O mapped base address
410  * @dev: Device Structure
411  * @common: DMA device structure
412  * @chan: Driver specific DMA channel
413  * @has_sg: Specifies whether Scatter-Gather is present or not
414  * @mcdma: Specifies whether Multi-Channel is present or not
415  * @flush_on_fsync: Flush on frame sync
416  * @ext_addr: Indicates 64 bit addressing is supported by dma device
417  * @pdev: Platform device structure pointer
418  * @dma_config: DMA config structure
419  * @axi_clk: DMA AXI4-Lite interface clock
420  * @tx_clk: DMA mm2s clock
421  * @txs_clk: DMA mm2s stream clock
422  * @rx_clk: DMA s2mm clock
423  * @rxs_clk: DMA s2mm stream clock
424  * @nr_channels: Number of channels DMA device supports
425  * @chan_id: DMA channel identifier
426  */
427 struct xilinx_dma_device {
428 	void __iomem *regs;
429 	struct device *dev;
430 	struct dma_device common;
431 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
432 	bool has_sg;
433 	bool mcdma;
434 	u32 flush_on_fsync;
435 	bool ext_addr;
436 	struct platform_device  *pdev;
437 	const struct xilinx_dma_config *dma_config;
438 	struct clk *axi_clk;
439 	struct clk *tx_clk;
440 	struct clk *txs_clk;
441 	struct clk *rx_clk;
442 	struct clk *rxs_clk;
443 	u32 nr_channels;
444 	u32 chan_id;
445 };
446 
447 /* Macros */
448 #define to_xilinx_chan(chan) \
449 	container_of(chan, struct xilinx_dma_chan, common)
450 #define to_dma_tx_descriptor(tx) \
451 	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
452 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
453 	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
454 			   cond, delay_us, timeout_us)
455 
456 /* IO accessors */
457 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
458 {
459 	return ioread32(chan->xdev->regs + reg);
460 }
461 
462 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
463 {
464 	iowrite32(value, chan->xdev->regs + reg);
465 }
466 
467 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
468 				   u32 value)
469 {
470 	dma_write(chan, chan->desc_offset + reg, value);
471 }
472 
473 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
474 {
475 	return dma_read(chan, chan->ctrl_offset + reg);
476 }
477 
478 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
479 				   u32 value)
480 {
481 	dma_write(chan, chan->ctrl_offset + reg, value);
482 }
483 
484 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
485 				 u32 clr)
486 {
487 	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
488 }
489 
490 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
491 				 u32 set)
492 {
493 	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
494 }
495 
496 /**
497  * vdma_desc_write_64 - 64-bit descriptor write
498  * @chan: Driver specific VDMA channel
499  * @reg: Register to write
500  * @value_lsb: lower address of the descriptor.
501  * @value_msb: upper address of the descriptor.
502  *
503  * Since the VDMA driver has to write to a register offset that is not a
504  * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
505  * 32-bit writes instead of a single 64-bit register write.
506  */
507 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
508 				      u32 value_lsb, u32 value_msb)
509 {
510 	/* Write the lsb 32 bits */
511 	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
512 
513 	/* Write the msb 32 bits */
514 	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
515 }
516 
517 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
518 {
519 	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
520 }
521 
522 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
523 				dma_addr_t addr)
524 {
525 	if (chan->ext_addr)
526 		dma_writeq(chan, reg, addr);
527 	else
528 		dma_ctrl_write(chan, reg, addr);
529 }
530 
531 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
532 				     struct xilinx_axidma_desc_hw *hw,
533 				     dma_addr_t buf_addr, size_t sg_used,
534 				     size_t period_len)
535 {
536 	if (chan->ext_addr) {
537 		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
538 		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
539 						 period_len);
540 	} else {
541 		hw->buf_addr = buf_addr + sg_used + period_len;
542 	}
543 }
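/*
 * For example (illustrative values only), with chan->ext_addr set and
 * buf_addr + sg_used + period_len evaluating to 0x810000000,
 * xilinx_axidma_buf() above stores 0x10000000 in hw->buf_addr and
 * 0x8 in hw->buf_addr_msb.
 */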
544 
545 /* -----------------------------------------------------------------------------
546  * Descriptors and segments alloc and free
547  */
548 
549 /**
550  * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
551  * @chan: Driver specific DMA channel
552  *
553  * Return: The allocated segment on success and NULL on failure.
554  */
555 static struct xilinx_vdma_tx_segment *
556 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
557 {
558 	struct xilinx_vdma_tx_segment *segment;
559 	dma_addr_t phys;
560 
561 	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
562 	if (!segment)
563 		return NULL;
564 
565 	segment->phys = phys;
566 
567 	return segment;
568 }
569 
570 /**
571  * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
572  * @chan: Driver specific DMA channel
573  *
574  * Return: The allocated segment on success and NULL on failure.
575  */
576 static struct xilinx_cdma_tx_segment *
577 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
578 {
579 	struct xilinx_cdma_tx_segment *segment;
580 	dma_addr_t phys;
581 
582 	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
583 	if (!segment)
584 		return NULL;
585 
586 	segment->phys = phys;
587 
588 	return segment;
589 }
590 
591 /**
592  * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
593  * @chan: Driver specific DMA channel
594  *
595  * Return: The allocated segment on success and NULL on failure.
596  */
597 static struct xilinx_axidma_tx_segment *
598 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
599 {
600 	struct xilinx_axidma_tx_segment *segment = NULL;
601 	unsigned long flags;
602 
603 	spin_lock_irqsave(&chan->lock, flags);
604 	if (!list_empty(&chan->free_seg_list)) {
605 		segment = list_first_entry(&chan->free_seg_list,
606 					   struct xilinx_axidma_tx_segment,
607 					   node);
608 		list_del(&segment->node);
609 	}
610 	spin_unlock_irqrestore(&chan->lock, flags);
611 
612 	return segment;
613 }
614 
615 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
616 {
617 	u32 next_desc = hw->next_desc;
618 	u32 next_desc_msb = hw->next_desc_msb;
619 
620 	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
621 
622 	hw->next_desc = next_desc;
623 	hw->next_desc_msb = next_desc_msb;
624 }
625 
626 /**
627  * xilinx_dma_free_tx_segment - Free transaction segment
628  * @chan: Driver specific DMA channel
629  * @segment: DMA transaction segment
630  */
631 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
632 				struct xilinx_axidma_tx_segment *segment)
633 {
634 	xilinx_dma_clean_hw_desc(&segment->hw);
635 
636 	list_add_tail(&segment->node, &chan->free_seg_list);
637 }
638 
639 /**
640  * xilinx_cdma_free_tx_segment - Free transaction segment
641  * @chan: Driver specific DMA channel
642  * @segment: DMA transaction segment
643  */
644 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
645 				struct xilinx_cdma_tx_segment *segment)
646 {
647 	dma_pool_free(chan->desc_pool, segment, segment->phys);
648 }
649 
650 /**
651  * xilinx_vdma_free_tx_segment - Free transaction segment
652  * @chan: Driver specific DMA channel
653  * @segment: DMA transaction segment
654  */
655 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
656 					struct xilinx_vdma_tx_segment *segment)
657 {
658 	dma_pool_free(chan->desc_pool, segment, segment->phys);
659 }
660 
661 /**
662  * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
663  * @chan: Driver specific DMA channel
664  *
665  * Return: The allocated descriptor on success and NULL on failure.
666  */
667 static struct xilinx_dma_tx_descriptor *
668 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
669 {
670 	struct xilinx_dma_tx_descriptor *desc;
671 
672 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
673 	if (!desc)
674 		return NULL;
675 
676 	INIT_LIST_HEAD(&desc->segments);
677 
678 	return desc;
679 }
680 
681 /**
682  * xilinx_dma_free_tx_descriptor - Free transaction descriptor
683  * @chan: Driver specific DMA channel
684  * @desc: DMA transaction descriptor
685  */
686 static void
687 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
688 			       struct xilinx_dma_tx_descriptor *desc)
689 {
690 	struct xilinx_vdma_tx_segment *segment, *next;
691 	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
692 	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
693 
694 	if (!desc)
695 		return;
696 
697 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
698 		list_for_each_entry_safe(segment, next, &desc->segments, node) {
699 			list_del(&segment->node);
700 			xilinx_vdma_free_tx_segment(chan, segment);
701 		}
702 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
703 		list_for_each_entry_safe(cdma_segment, cdma_next,
704 					 &desc->segments, node) {
705 			list_del(&cdma_segment->node);
706 			xilinx_cdma_free_tx_segment(chan, cdma_segment);
707 		}
708 	} else {
709 		list_for_each_entry_safe(axidma_segment, axidma_next,
710 					 &desc->segments, node) {
711 			list_del(&axidma_segment->node);
712 			xilinx_dma_free_tx_segment(chan, axidma_segment);
713 		}
714 	}
715 
716 	kfree(desc);
717 }
718 
719 /* Required functions */
720 
721 /**
722  * xilinx_dma_free_desc_list - Free descriptors list
723  * @chan: Driver specific DMA channel
724  * @list: List to parse and delete the descriptor
725  */
726 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
727 					struct list_head *list)
728 {
729 	struct xilinx_dma_tx_descriptor *desc, *next;
730 
731 	list_for_each_entry_safe(desc, next, list, node) {
732 		list_del(&desc->node);
733 		xilinx_dma_free_tx_descriptor(chan, desc);
734 	}
735 }
736 
737 /**
738  * xilinx_dma_free_descriptors - Free channel descriptors
739  * @chan: Driver specific DMA channel
740  */
741 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
742 {
743 	unsigned long flags;
744 
745 	spin_lock_irqsave(&chan->lock, flags);
746 
747 	xilinx_dma_free_desc_list(chan, &chan->pending_list);
748 	xilinx_dma_free_desc_list(chan, &chan->done_list);
749 	xilinx_dma_free_desc_list(chan, &chan->active_list);
750 
751 	spin_unlock_irqrestore(&chan->lock, flags);
752 }
753 
754 /**
755  * xilinx_dma_free_chan_resources - Free channel resources
756  * @dchan: DMA channel
757  */
758 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
759 {
760 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
761 	unsigned long flags;
762 
763 	dev_dbg(chan->dev, "Free all channel resources.\n");
764 
765 	xilinx_dma_free_descriptors(chan);
766 
767 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
768 		spin_lock_irqsave(&chan->lock, flags);
769 		INIT_LIST_HEAD(&chan->free_seg_list);
770 		spin_unlock_irqrestore(&chan->lock, flags);
771 
772 		/* Free memory that is allocated for BD */
773 		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
774 				  XILINX_DMA_NUM_DESCS, chan->seg_v,
775 				  chan->seg_p);
776 
777 		/* Free Memory that is allocated for cyclic DMA Mode */
778 		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
779 				  chan->cyclic_seg_v, chan->cyclic_seg_p);
780 	}
781 
782 	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
783 		dma_pool_destroy(chan->desc_pool);
784 		chan->desc_pool = NULL;
785 	}
786 }
787 
788 /**
789  * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
790  * @chan: Driver specific dma channel
791  * @desc: dma transaction descriptor
792  * @flags: flags for spin lock
793  */
794 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
795 					  struct xilinx_dma_tx_descriptor *desc,
796 					  unsigned long *flags)
797 {
798 	dma_async_tx_callback callback;
799 	void *callback_param;
800 
801 	callback = desc->async_tx.callback;
802 	callback_param = desc->async_tx.callback_param;
803 	if (callback) {
804 		spin_unlock_irqrestore(&chan->lock, *flags);
805 		callback(callback_param);
806 		spin_lock_irqsave(&chan->lock, *flags);
807 	}
808 }
809 
810 /**
811  * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
812  * @chan: Driver specific DMA channel
813  */
814 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
815 {
816 	struct xilinx_dma_tx_descriptor *desc, *next;
817 	unsigned long flags;
818 
819 	spin_lock_irqsave(&chan->lock, flags);
820 
821 	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
822 		struct dmaengine_desc_callback cb;
823 
824 		if (desc->cyclic) {
825 			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
826 			break;
827 		}
828 
829 		/* Remove from the list of running transactions */
830 		list_del(&desc->node);
831 
832 		/* Run the link descriptor callback function */
833 		dmaengine_desc_get_callback(&desc->async_tx, &cb);
834 		if (dmaengine_desc_callback_valid(&cb)) {
835 			spin_unlock_irqrestore(&chan->lock, flags);
836 			dmaengine_desc_callback_invoke(&cb, NULL);
837 			spin_lock_irqsave(&chan->lock, flags);
838 		}
839 
840 		/* Run any dependencies, then free the descriptor */
841 		dma_run_dependencies(&desc->async_tx);
842 		xilinx_dma_free_tx_descriptor(chan, desc);
843 	}
844 
845 	spin_unlock_irqrestore(&chan->lock, flags);
846 }
847 
848 /**
849  * xilinx_dma_do_tasklet - Completion tasklet
850  * @data: Pointer to the Xilinx DMA channel structure
851  */
852 static void xilinx_dma_do_tasklet(unsigned long data)
853 {
854 	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
855 
856 	xilinx_dma_chan_desc_cleanup(chan);
857 }
858 
859 /**
860  * xilinx_dma_alloc_chan_resources - Allocate channel resources
861  * @dchan: DMA channel
862  *
863  * Return: '0' on success and failure value on error
864  */
865 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
866 {
867 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
868 	int i;
869 
870 	/* Has this channel already been allocated? */
871 	if (chan->desc_pool)
872 		return 0;
873 
874 	/*
875 	 * We need the descriptor to be aligned to 64 bytes
876 	 * to meet the Xilinx VDMA specification requirement.
877 	 */
878 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
879 		/* Allocate the buffer descriptors. */
880 		chan->seg_v = dma_zalloc_coherent(chan->dev,
881 						  sizeof(*chan->seg_v) *
882 						  XILINX_DMA_NUM_DESCS,
883 						  &chan->seg_p, GFP_KERNEL);
884 		if (!chan->seg_v) {
885 			dev_err(chan->dev,
886 				"unable to allocate channel %d descriptors\n",
887 				chan->id);
888 			return -ENOMEM;
889 		}
890 
891 		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
892 			chan->seg_v[i].hw.next_desc =
893 			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
894 				((i + 1) % XILINX_DMA_NUM_DESCS));
895 			chan->seg_v[i].hw.next_desc_msb =
896 			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
897 				((i + 1) % XILINX_DMA_NUM_DESCS));
898 			chan->seg_v[i].phys = chan->seg_p +
899 				sizeof(*chan->seg_v) * i;
900 			list_add_tail(&chan->seg_v[i].node,
901 				      &chan->free_seg_list);
902 		}
903 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
904 		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
905 				   chan->dev,
906 				   sizeof(struct xilinx_cdma_tx_segment),
907 				   __alignof__(struct xilinx_cdma_tx_segment),
908 				   0);
909 	} else {
910 		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
911 				     chan->dev,
912 				     sizeof(struct xilinx_vdma_tx_segment),
913 				     __alignof__(struct xilinx_vdma_tx_segment),
914 				     0);
915 	}
916 
917 	if (!chan->desc_pool &&
918 	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
919 		dev_err(chan->dev,
920 			"unable to allocate channel %d descriptor pool\n",
921 			chan->id);
922 		return -ENOMEM;
923 	}
924 
925 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
926 		/*
927 		 * For cyclic DMA mode we need to program the tail descriptor
928 		 * register with a value that is not a part of the BD chain,
929 		 * so allocate a descriptor segment during channel allocation
930 		 * for programming the tail descriptor.
931 		 */
932 		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
933 					sizeof(*chan->cyclic_seg_v),
934 					&chan->cyclic_seg_p, GFP_KERNEL);
935 		if (!chan->cyclic_seg_v) {
936 			dev_err(chan->dev,
937 				"unable to allocate desc segment for cyclic DMA\n");
938 			return -ENOMEM;
939 		}
940 		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
941 	}
942 
943 	dma_cookie_init(dchan);
944 
945 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
946 		/* For AXI DMA, resetting one channel will reset the
947 		 * other channel as well, so enable the interrupts here.
948 		 */
949 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
950 			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
951 	}
952 
953 	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
954 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
955 			     XILINX_CDMA_CR_SGMODE);
956 
957 	return 0;
958 }
959 
960 /**
961  * xilinx_dma_tx_status - Get DMA transaction status
962  * @dchan: DMA channel
963  * @cookie: Transaction identifier
964  * @txstate: Transaction state
965  *
966  * Return: DMA transaction status
967  */
968 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
969 					dma_cookie_t cookie,
970 					struct dma_tx_state *txstate)
971 {
972 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
973 	struct xilinx_dma_tx_descriptor *desc;
974 	struct xilinx_axidma_tx_segment *segment;
975 	struct xilinx_axidma_desc_hw *hw;
976 	enum dma_status ret;
977 	unsigned long flags;
978 	u32 residue = 0;
979 
980 	ret = dma_cookie_status(dchan, cookie, txstate);
981 	if (ret == DMA_COMPLETE || !txstate)
982 		return ret;
983 
984 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
985 		spin_lock_irqsave(&chan->lock, flags);
986 
987 		desc = list_last_entry(&chan->active_list,
988 				       struct xilinx_dma_tx_descriptor, node);
989 		if (chan->has_sg) {
990 			list_for_each_entry(segment, &desc->segments, node) {
991 				hw = &segment->hw;
992 				residue += (hw->control - hw->status) &
993 					   XILINX_DMA_MAX_TRANS_LEN;
994 			}
995 		}
996 		spin_unlock_irqrestore(&chan->lock, flags);
997 
998 		chan->residue = residue;
999 		dma_set_residue(txstate, chan->residue);
1000 	}
1001 
1002 	return ret;
1003 }
1004 
1005 /**
1006  * xilinx_dma_stop_transfer - Halt DMA channel
1007  * @chan: Driver specific DMA channel
1008  *
1009  * Return: '0' on success and failure value on error
1010  */
1011 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1012 {
1013 	u32 val;
1014 
1015 	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1016 
1017 	/* Wait for the hardware to halt */
1018 	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1019 				       val & XILINX_DMA_DMASR_HALTED, 0,
1020 				       XILINX_DMA_LOOP_COUNT);
1021 }
1022 
1023 /**
1024  * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1025  * @chan: Driver specific DMA channel
1026  *
1027  * Return: '0' on success and failure value on error
1028  */
1029 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1030 {
1031 	u32 val;
1032 
1033 	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1034 				       val & XILINX_DMA_DMASR_IDLE, 0,
1035 				       XILINX_DMA_LOOP_COUNT);
1036 }
1037 
1038 /**
1039  * xilinx_dma_start - Start DMA channel
1040  * @chan: Driver specific DMA channel
1041  */
1042 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1043 {
1044 	int err;
1045 	u32 val;
1046 
1047 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1048 
1049 	/* Wait for the hardware to start */
1050 	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1051 				      !(val & XILINX_DMA_DMASR_HALTED), 0,
1052 				      XILINX_DMA_LOOP_COUNT);
1053 
1054 	if (err) {
1055 		dev_err(chan->dev, "Cannot start channel %p: %x\n",
1056 			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1057 
1058 		chan->err = true;
1059 	}
1060 }
1061 
1062 /**
1063  * xilinx_vdma_start_transfer - Starts VDMA transfer
1064  * @chan: Driver specific channel struct pointer
1065  */
1066 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1067 {
1068 	struct xilinx_vdma_config *config = &chan->config;
1069 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
1070 	u32 reg, j;
1071 	struct xilinx_vdma_tx_segment *tail_segment;
1072 
1073 	/* This function was invoked with lock held */
1074 	if (chan->err)
1075 		return;
1076 
1077 	if (!chan->idle)
1078 		return;
1079 
1080 	if (list_empty(&chan->pending_list))
1081 		return;
1082 
1083 	desc = list_first_entry(&chan->pending_list,
1084 				struct xilinx_dma_tx_descriptor, node);
1085 	tail_desc = list_last_entry(&chan->pending_list,
1086 				    struct xilinx_dma_tx_descriptor, node);
1087 
1088 	tail_segment = list_last_entry(&tail_desc->segments,
1089 				       struct xilinx_vdma_tx_segment, node);
1090 
1091 	/*
1092 	 * If hardware is idle, then all descriptors on the running lists are
1093 	 * done, so start new transfers
1094 	 */
1095 	if (chan->has_sg)
1096 		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1097 				desc->async_tx.phys);
1098 
1099 	/* Configure the hardware using info in the config structure */
1100 	if (chan->has_vflip) {
1101 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1102 		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1103 		reg |= config->vflip_en;
1104 		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1105 			  reg);
1106 	}
1107 
1108 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1109 
1110 	if (config->frm_cnt_en)
1111 		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1112 	else
1113 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1114 
1115 	/*
1116 	 * With SG, start with circular mode, so that BDs can be fetched.
1117 	 * In direct register mode, if not parking, enable circular mode
1118 	 */
1119 	if (chan->has_sg || !config->park)
1120 		reg |= XILINX_DMA_DMACR_CIRC_EN;
1121 
1122 	if (config->park)
1123 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1124 
1125 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1126 
1127 	j = chan->desc_submitcount;
1128 	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1129 	if (chan->direction == DMA_MEM_TO_DEV) {
1130 		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1131 		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1132 	} else {
1133 		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1134 		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1135 	}
1136 	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1137 
1138 	/* Start the hardware */
1139 	xilinx_dma_start(chan);
1140 
1141 	if (chan->err)
1142 		return;
1143 
1144 	/* Start the transfer */
1145 	if (chan->has_sg) {
1146 		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1147 				tail_segment->phys);
1148 		list_splice_tail_init(&chan->pending_list, &chan->active_list);
1149 		chan->desc_pendingcount = 0;
1150 	} else {
1151 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
1152 		int i = 0;
1153 
1154 		if (chan->desc_submitcount < chan->num_frms)
1155 			i = chan->desc_submitcount;
1156 
1157 		list_for_each_entry(segment, &desc->segments, node) {
1158 			if (chan->ext_addr)
1159 				vdma_desc_write_64(chan,
1160 					XILINX_VDMA_REG_START_ADDRESS_64(i++),
1161 					segment->hw.buf_addr,
1162 					segment->hw.buf_addr_msb);
1163 			else
1164 				vdma_desc_write(chan,
1165 					XILINX_VDMA_REG_START_ADDRESS(i++),
1166 					segment->hw.buf_addr);
1167 
1168 			last = segment;
1169 		}
1170 
1171 		if (!last)
1172 			return;
1173 
1174 		/* HW expects these parameters to be the same for one transaction */
1175 		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1176 		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1177 				last->hw.stride);
1178 		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1179 
1180 		chan->desc_submitcount++;
1181 		chan->desc_pendingcount--;
1182 		list_del(&desc->node);
1183 		list_add_tail(&desc->node, &chan->active_list);
1184 		if (chan->desc_submitcount == chan->num_frms)
1185 			chan->desc_submitcount = 0;
1186 	}
1187 
1188 	chan->idle = false;
1189 }
1190 
1191 /**
1192  * xilinx_cdma_start_transfer - Starts cdma transfer
1193  * @chan: Driver specific channel struct pointer
1194  */
1195 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1196 {
1197 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1198 	struct xilinx_cdma_tx_segment *tail_segment;
1199 	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1200 
1201 	if (chan->err)
1202 		return;
1203 
1204 	if (!chan->idle)
1205 		return;
1206 
1207 	if (list_empty(&chan->pending_list))
1208 		return;
1209 
1210 	head_desc = list_first_entry(&chan->pending_list,
1211 				     struct xilinx_dma_tx_descriptor, node);
1212 	tail_desc = list_last_entry(&chan->pending_list,
1213 				    struct xilinx_dma_tx_descriptor, node);
1214 	tail_segment = list_last_entry(&tail_desc->segments,
1215 				       struct xilinx_cdma_tx_segment, node);
1216 
1217 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1218 		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1219 		ctrl_reg |= chan->desc_pendingcount <<
1220 				XILINX_DMA_CR_COALESCE_SHIFT;
1221 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1222 	}
1223 
1224 	if (chan->has_sg) {
1225 		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1226 			     XILINX_CDMA_CR_SGMODE);
1227 
1228 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1229 			     XILINX_CDMA_CR_SGMODE);
1230 
1231 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1232 			     head_desc->async_tx.phys);
1233 
1234 		/* Update tail ptr register which will start the transfer */
1235 		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1236 			     tail_segment->phys);
1237 	} else {
1238 		/* In simple mode */
1239 		struct xilinx_cdma_tx_segment *segment;
1240 		struct xilinx_cdma_desc_hw *hw;
1241 
1242 		segment = list_first_entry(&head_desc->segments,
1243 					   struct xilinx_cdma_tx_segment,
1244 					   node);
1245 
1246 		hw = &segment->hw;
1247 
1248 		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
1249 		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
1250 
1251 		/* Start the transfer */
1252 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1253 				hw->control & XILINX_DMA_MAX_TRANS_LEN);
1254 	}
1255 
1256 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
1257 	chan->desc_pendingcount = 0;
1258 	chan->idle = false;
1259 }
1260 
1261 /**
1262  * xilinx_dma_start_transfer - Starts DMA transfer
1263  * @chan: Driver specific channel struct pointer
1264  */
1265 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1266 {
1267 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1268 	struct xilinx_axidma_tx_segment *tail_segment;
1269 	u32 reg;
1270 
1271 	if (chan->err)
1272 		return;
1273 
1274 	if (list_empty(&chan->pending_list))
1275 		return;
1276 
1277 	if (!chan->idle)
1278 		return;
1279 
1280 	head_desc = list_first_entry(&chan->pending_list,
1281 				     struct xilinx_dma_tx_descriptor, node);
1282 	tail_desc = list_last_entry(&chan->pending_list,
1283 				    struct xilinx_dma_tx_descriptor, node);
1284 	tail_segment = list_last_entry(&tail_desc->segments,
1285 				       struct xilinx_axidma_tx_segment, node);
1286 
1287 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1288 
1289 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1290 		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1291 		reg |= chan->desc_pendingcount <<
1292 				  XILINX_DMA_CR_COALESCE_SHIFT;
1293 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1294 	}
1295 
1296 	if (chan->has_sg && !chan->xdev->mcdma)
1297 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1298 			     head_desc->async_tx.phys);
1299 
1300 	if (chan->has_sg && chan->xdev->mcdma) {
1301 		if (chan->direction == DMA_MEM_TO_DEV) {
1302 			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1303 				       head_desc->async_tx.phys);
1304 		} else {
1305 			if (!chan->tdest) {
1306 				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1307 				       head_desc->async_tx.phys);
1308 			} else {
1309 				dma_ctrl_write(chan,
1310 					XILINX_DMA_MCRX_CDESC(chan->tdest),
1311 				       head_desc->async_tx.phys);
1312 			}
1313 		}
1314 	}
1315 
1316 	xilinx_dma_start(chan);
1317 
1318 	if (chan->err)
1319 		return;
1320 
1321 	/* Start the transfer */
1322 	if (chan->has_sg && !chan->xdev->mcdma) {
1323 		if (chan->cyclic)
1324 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1325 				     chan->cyclic_seg_v->phys);
1326 		else
1327 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1328 				     tail_segment->phys);
1329 	} else if (chan->has_sg && chan->xdev->mcdma) {
1330 		if (chan->direction == DMA_MEM_TO_DEV) {
1331 			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1332 			       tail_segment->phys);
1333 		} else {
1334 			if (!chan->tdest) {
1335 				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1336 					       tail_segment->phys);
1337 			} else {
1338 				dma_ctrl_write(chan,
1339 					XILINX_DMA_MCRX_TDESC(chan->tdest),
1340 					tail_segment->phys);
1341 			}
1342 		}
1343 	} else {
1344 		struct xilinx_axidma_tx_segment *segment;
1345 		struct xilinx_axidma_desc_hw *hw;
1346 
1347 		segment = list_first_entry(&head_desc->segments,
1348 					   struct xilinx_axidma_tx_segment,
1349 					   node);
1350 		hw = &segment->hw;
1351 
1352 		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
1353 
1354 		/* Start the transfer */
1355 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1356 			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
1357 	}
1358 
1359 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
1360 	chan->desc_pendingcount = 0;
1361 	chan->idle = false;
1362 }
1363 
1364 /**
1365  * xilinx_dma_issue_pending - Issue pending transactions
1366  * @dchan: DMA channel
1367  */
1368 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1369 {
1370 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1371 	unsigned long flags;
1372 
1373 	spin_lock_irqsave(&chan->lock, flags);
1374 	chan->start_transfer(chan);
1375 	spin_unlock_irqrestore(&chan->lock, flags);
1376 }
1377 
1378 /**
1379  * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1380  * @chan: Xilinx DMA channel
1381  *
1382  * CONTEXT: hardirq
1383  */
1384 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1385 {
1386 	struct xilinx_dma_tx_descriptor *desc, *next;
1387 
1388 	/* This function was invoked with lock held */
1389 	if (list_empty(&chan->active_list))
1390 		return;
1391 
1392 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1393 		list_del(&desc->node);
1394 		if (!desc->cyclic)
1395 			dma_cookie_complete(&desc->async_tx);
1396 		list_add_tail(&desc->node, &chan->done_list);
1397 	}
1398 }
1399 
1400 /**
1401  * xilinx_dma_reset - Reset DMA channel
1402  * @chan: Driver specific DMA channel
1403  *
1404  * Return: '0' on success and failure value on error
1405  */
1406 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1407 {
1408 	int err;
1409 	u32 tmp;
1410 
1411 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1412 
1413 	/* Wait for the hardware to finish reset */
1414 	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1415 				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
1416 				      XILINX_DMA_LOOP_COUNT);
1417 
1418 	if (err) {
1419 		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1420 			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1421 			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1422 		return -ETIMEDOUT;
1423 	}
1424 
1425 	chan->err = false;
1426 	chan->idle = true;
1427 	chan->desc_submitcount = 0;
1428 
1429 	return err;
1430 }
1431 
1432 /**
1433  * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1434  * @chan: Driver specific DMA channel
1435  *
1436  * Return: '0' on success and failure value on error
1437  */
1438 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1439 {
1440 	int err;
1441 
1442 	/* Reset VDMA */
1443 	err = xilinx_dma_reset(chan);
1444 	if (err)
1445 		return err;
1446 
1447 	/* Enable interrupts */
1448 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1449 		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1450 
1451 	return 0;
1452 }
1453 
1454 /**
1455  * xilinx_dma_irq_handler - DMA Interrupt handler
1456  * @irq: IRQ number
1457  * @data: Pointer to the Xilinx DMA channel structure
1458  *
1459  * Return: IRQ_HANDLED/IRQ_NONE
1460  */
1461 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1462 {
1463 	struct xilinx_dma_chan *chan = data;
1464 	u32 status;
1465 
1466 	/* Read the status and ack the interrupts. */
1467 	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1468 	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1469 		return IRQ_NONE;
1470 
1471 	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1472 			status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1473 
1474 	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1475 		/*
1476 		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1477 		 * error is recoverable, ignore it. Otherwise flag the error.
1478 		 *
1479 		 * Only recoverable errors can be cleared in the DMASR register,
1480 		 * so make sure not to write the other error bits as 1.
1481 		 */
1482 		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1483 
1484 		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1485 				errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1486 
1487 		if (!chan->flush_on_fsync ||
1488 		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1489 			dev_err(chan->dev,
1490 				"Channel %p has errors %x, cdr %x tdr %x\n",
1491 				chan, errors,
1492 				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1493 				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1494 			chan->err = true;
1495 		}
1496 	}
1497 
1498 	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1499 		/*
1500 		 * The device is taking too long to complete the transfer
1501 		 * when the user requires responsiveness.
1502 		 */
1503 		dev_dbg(chan->dev, "Inter-packet latency too long\n");
1504 	}
1505 
1506 	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1507 		spin_lock(&chan->lock);
1508 		xilinx_dma_complete_descriptor(chan);
1509 		chan->idle = true;
1510 		chan->start_transfer(chan);
1511 		spin_unlock(&chan->lock);
1512 	}
1513 
1514 	tasklet_schedule(&chan->tasklet);
1515 	return IRQ_HANDLED;
1516 }
1517 
1518 /**
1519  * append_desc_queue - Queue a descriptor
1520  * @chan: Driver specific dma channel
1521  * @desc: dma transaction descriptor
1522  */
1523 static void append_desc_queue(struct xilinx_dma_chan *chan,
1524 			      struct xilinx_dma_tx_descriptor *desc)
1525 {
1526 	struct xilinx_vdma_tx_segment *tail_segment;
1527 	struct xilinx_dma_tx_descriptor *tail_desc;
1528 	struct xilinx_axidma_tx_segment *axidma_tail_segment;
1529 	struct xilinx_cdma_tx_segment *cdma_tail_segment;
1530 
1531 	if (list_empty(&chan->pending_list))
1532 		goto append;
1533 
1534 	/*
1535 	 * Add the hardware descriptor to the chain of hardware descriptors
1536 	 * that already exists in memory.
1537 	 */
1538 	tail_desc = list_last_entry(&chan->pending_list,
1539 				    struct xilinx_dma_tx_descriptor, node);
1540 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1541 		tail_segment = list_last_entry(&tail_desc->segments,
1542 					       struct xilinx_vdma_tx_segment,
1543 					       node);
1544 		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1545 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1546 		cdma_tail_segment = list_last_entry(&tail_desc->segments,
1547 						struct xilinx_cdma_tx_segment,
1548 						node);
1549 		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1550 	} else {
1551 		axidma_tail_segment = list_last_entry(&tail_desc->segments,
1552 					       struct xilinx_axidma_tx_segment,
1553 					       node);
1554 		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1555 	}
1556 
1557 	/*
1558 	 * Add the software descriptor and all children to the list
1559 	 * of pending transactions
1560 	 */
1561 append:
1562 	list_add_tail(&desc->node, &chan->pending_list);
1563 	chan->desc_pendingcount++;
1564 
1565 	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1566 	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1567 		dev_dbg(chan->dev, "desc pendingcount is too high\n");
1568 		chan->desc_pendingcount = chan->num_frms;
1569 	}
1570 }
1571 
1572 /**
1573  * xilinx_dma_tx_submit - Submit DMA transaction
1574  * @tx: Async transaction descriptor
1575  *
1576  * Return: cookie value on success and failure value on error
1577  */
1578 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1579 {
1580 	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1581 	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1582 	dma_cookie_t cookie;
1583 	unsigned long flags;
1584 	int err;
1585 
1586 	if (chan->cyclic) {
1587 		xilinx_dma_free_tx_descriptor(chan, desc);
1588 		return -EBUSY;
1589 	}
1590 
1591 	if (chan->err) {
1592 		/*
1593 		 * If reset fails, the channel is no longer functional
1594 		 * and the system needs a hard reset.
1595 		 */
1596 		err = xilinx_dma_chan_reset(chan);
1597 		if (err < 0)
1598 			return err;
1599 	}
1600 
1601 	spin_lock_irqsave(&chan->lock, flags);
1602 
1603 	cookie = dma_cookie_assign(tx);
1604 
1605 	/* Put this transaction onto the tail of the pending queue */
1606 	append_desc_queue(chan, desc);
1607 
1608 	if (desc->cyclic)
1609 		chan->cyclic = true;
1610 
1611 	spin_unlock_irqrestore(&chan->lock, flags);
1612 
1613 	return cookie;
1614 }
1615 
1616 /**
1617  * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for an
1618  *	interleaved DMA transaction
1619  * @dchan: DMA channel
1620  * @xt: Interleaved template pointer
1621  * @flags: transfer ack flags
1622  *
1623  * Return: Async transaction descriptor on success and NULL on failure
1624  */
1625 static struct dma_async_tx_descriptor *
1626 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1627 				 struct dma_interleaved_template *xt,
1628 				 unsigned long flags)
1629 {
1630 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1631 	struct xilinx_dma_tx_descriptor *desc;
1632 	struct xilinx_vdma_tx_segment *segment;
1633 	struct xilinx_vdma_desc_hw *hw;
1634 
1635 	if (!is_slave_direction(xt->dir))
1636 		return NULL;
1637 
1638 	if (!xt->numf || !xt->sgl[0].size)
1639 		return NULL;
1640 
1641 	if (xt->frame_size != 1)
1642 		return NULL;
1643 
1644 	/* Allocate a transaction descriptor. */
1645 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1646 	if (!desc)
1647 		return NULL;
1648 
1649 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1650 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1651 	async_tx_ack(&desc->async_tx);
1652 
1653 	/* Allocate the link descriptor from DMA pool */
1654 	segment = xilinx_vdma_alloc_tx_segment(chan);
1655 	if (!segment)
1656 		goto error;
1657 
1658 	/* Fill in the hardware descriptor */
1659 	hw = &segment->hw;
1660 	hw->vsize = xt->numf;
1661 	hw->hsize = xt->sgl[0].size;
1662 	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1663 			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1664 	hw->stride |= chan->config.frm_dly <<
1665 			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1666 
1667 	if (xt->dir != DMA_MEM_TO_DEV) {
1668 		if (chan->ext_addr) {
1669 			hw->buf_addr = lower_32_bits(xt->dst_start);
1670 			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1671 		} else {
1672 			hw->buf_addr = xt->dst_start;
1673 		}
1674 	} else {
1675 		if (chan->ext_addr) {
1676 			hw->buf_addr = lower_32_bits(xt->src_start);
1677 			hw->buf_addr_msb = upper_32_bits(xt->src_start);
1678 		} else {
1679 			hw->buf_addr = xt->src_start;
1680 		}
1681 	}
1682 
1683 	/* Insert the segment into the descriptor segments list. */
1684 	list_add_tail(&segment->node, &desc->segments);
1685 
1686 	/* Link the last hardware descriptor with the first. */
1687 	segment = list_first_entry(&desc->segments,
1688 				   struct xilinx_vdma_tx_segment, node);
1689 	desc->async_tx.phys = segment->phys;
1690 
1691 	return &desc->async_tx;
1692 
1693 error:
1694 	xilinx_dma_free_tx_descriptor(chan, desc);
1695 	return NULL;
1696 }
1697 
1698 /**
1699  * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1700  * @dchan: DMA channel
1701  * @dma_dst: destination address
1702  * @dma_src: source address
1703  * @len: transfer length
1704  * @flags: transfer ack flags
1705  *
1706  * Return: Async transaction descriptor on success and NULL on failure
1707  */
1708 static struct dma_async_tx_descriptor *
1709 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1710 			dma_addr_t dma_src, size_t len, unsigned long flags)
1711 {
1712 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1713 	struct xilinx_dma_tx_descriptor *desc;
1714 	struct xilinx_cdma_tx_segment *segment;
1715 	struct xilinx_cdma_desc_hw *hw;
1716 
1717 	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
1718 		return NULL;
1719 
1720 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1721 	if (!desc)
1722 		return NULL;
1723 
1724 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1725 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1726 
1727 	/* Allocate the link descriptor from DMA pool */
1728 	segment = xilinx_cdma_alloc_tx_segment(chan);
1729 	if (!segment)
1730 		goto error;
1731 
1732 	hw = &segment->hw;
1733 	hw->control = len;
1734 	hw->src_addr = dma_src;
1735 	hw->dest_addr = dma_dst;
1736 	if (chan->ext_addr) {
1737 		hw->src_addr_msb = upper_32_bits(dma_src);
1738 		hw->dest_addr_msb = upper_32_bits(dma_dst);
1739 	}
1740 
1741 	/* Insert the segment into the descriptor segments list. */
1742 	list_add_tail(&segment->node, &desc->segments);
1743 
1744 	desc->async_tx.phys = segment->phys;
1745 	hw->next_desc = segment->phys;
1746 
1747 	return &desc->async_tx;
1748 
1749 error:
1750 	xilinx_dma_free_tx_descriptor(chan, desc);
1751 	return NULL;
1752 }
1753 
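/*
 * Illustrative client usage (sketch only, not part of this driver): a memcpy
 * offload through the generic dmaengine API would look roughly like
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, flags);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * dst_dma, src_dma and len are placeholders; len must not exceed
 * XILINX_DMA_MAX_TRANS_LEN per descriptor here.
 */
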
1754 /**
1755  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1756  * @dchan: DMA channel
1757  * @sgl: scatterlist to transfer to/from
1758  * @sg_len: number of entries in @sgl
1759  * @direction: DMA direction
1760  * @flags: transfer ack flags
1761  * @context: APP words of the descriptor
1762  *
1763  * Return: Async transaction descriptor on success and NULL on failure
1764  */
1765 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1766 	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1767 	enum dma_transfer_direction direction, unsigned long flags,
1768 	void *context)
1769 {
1770 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1771 	struct xilinx_dma_tx_descriptor *desc;
1772 	struct xilinx_axidma_tx_segment *segment = NULL;
1773 	u32 *app_w = (u32 *)context;
1774 	struct scatterlist *sg;
1775 	size_t copy;
1776 	size_t sg_used;
1777 	unsigned int i;
1778 
1779 	if (!is_slave_direction(direction))
1780 		return NULL;
1781 
1782 	/* Allocate a transaction descriptor. */
1783 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1784 	if (!desc)
1785 		return NULL;
1786 
1787 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1788 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1789 
1790 	/* Build transactions using information in the scatter gather list */
1791 	for_each_sg(sgl, sg, sg_len, i) {
1792 		sg_used = 0;
1793 
1794 		/* Loop until the entire scatterlist entry is used */
1795 		while (sg_used < sg_dma_len(sg)) {
1796 			struct xilinx_axidma_desc_hw *hw;
1797 
1798 			/* Get a free segment */
1799 			segment = xilinx_axidma_alloc_tx_segment(chan);
1800 			if (!segment)
1801 				goto error;
1802 
1803 			/*
1804 			 * Calculate the maximum number of bytes to transfer,
1805 			 * making sure it does not exceed the hw limit
1806 			 */
1807 			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1808 				     XILINX_DMA_MAX_TRANS_LEN);
1809 			hw = &segment->hw;
1810 
1811 			/* Fill in the descriptor */
1812 			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1813 					  sg_used, 0);
1814 
1815 			hw->control = copy;
1816 
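			/*
			 * Optional APP words supplied through the context
			 * pointer are programmed for MM2S transfers only.
			 */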
1817 			if (chan->direction == DMA_MEM_TO_DEV) {
1818 				if (app_w)
1819 					memcpy(hw->app, app_w, sizeof(u32) *
1820 					       XILINX_DMA_NUM_APP_WORDS);
1821 			}
1822 
1823 			sg_used += copy;
1824 
1825 			/*
1826 			 * Insert the segment into the descriptor segments
1827 			 * list.
1828 			 */
1829 			list_add_tail(&segment->node, &desc->segments);
1830 		}
1831 	}
1832 
1833 	segment = list_first_entry(&desc->segments,
1834 				   struct xilinx_axidma_tx_segment, node);
1835 	desc->async_tx.phys = segment->phys;
1836 
1837 	/* For DMA_MEM_TO_DEV, set SOP on the first segment and EOP on the last */
1838 	if (chan->direction == DMA_MEM_TO_DEV) {
1839 		segment->hw.control |= XILINX_DMA_BD_SOP;
1840 		segment = list_last_entry(&desc->segments,
1841 					  struct xilinx_axidma_tx_segment,
1842 					  node);
1843 		segment->hw.control |= XILINX_DMA_BD_EOP;
1844 	}
1845 
1846 	return &desc->async_tx;
1847 
1848 error:
1849 	xilinx_dma_free_tx_descriptor(chan, desc);
1850 	return NULL;
1851 }
1852 
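/*
 * Illustrative client usage (sketch only, not part of this driver): a
 * peripheral driver with a DMA-mapped scatterlist would typically do
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	desc->callback = my_done_cb;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * sgl, nents and my_done_cb are placeholders.
 */
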
1853 /**
1854  * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
1855  * @dchan: DMA channel
1856  * @buf_addr: Physical address of the buffer
1857  * @buf_len: Total length of the cyclic buffers
1858  * @period_len: length of individual cyclic buffer
1859  * @direction: DMA direction
1860  * @flags: transfer ack flags
1861  *
1862  * Return: Async transaction descriptor on success and NULL on failure
1863  */
1864 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1865 	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1866 	size_t period_len, enum dma_transfer_direction direction,
1867 	unsigned long flags)
1868 {
1869 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1870 	struct xilinx_dma_tx_descriptor *desc;
1871 	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1872 	size_t copy, sg_used;
1873 	unsigned int num_periods;
1874 	int i;
1875 	u32 reg;
1876 
1877 	if (!period_len)
1878 		return NULL;
1879 
1880 	num_periods = buf_len / period_len;
1881 
1882 	if (!num_periods)
1883 		return NULL;
1884 
1885 	if (!is_slave_direction(direction))
1886 		return NULL;
1887 
1888 	/* Allocate a transaction descriptor. */
1889 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1890 	if (!desc)
1891 		return NULL;
1892 
1893 	chan->direction = direction;
1894 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1895 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1896 
1897 	for (i = 0; i < num_periods; ++i) {
1898 		sg_used = 0;
1899 
1900 		while (sg_used < period_len) {
1901 			struct xilinx_axidma_desc_hw *hw;
1902 
1903 			/* Get a free segment */
1904 			segment = xilinx_axidma_alloc_tx_segment(chan);
1905 			if (!segment)
1906 				goto error;
1907 
1908 			/*
1909 			 * Calculate the maximum number of bytes to transfer,
1910 			 * making sure it does not exceed the hw limit
1911 			 */
1912 			copy = min_t(size_t, period_len - sg_used,
1913 				     XILINX_DMA_MAX_TRANS_LEN);
1914 			hw = &segment->hw;
1915 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1916 					  period_len * i);
1917 			hw->control = copy;
1918 
1919 			if (prev)
1920 				prev->hw.next_desc = segment->phys;
1921 
1922 			prev = segment;
1923 			sg_used += copy;
1924 
1925 			/*
1926 			 * Insert the segment into the descriptor segments
1927 			 * list.
1928 			 */
1929 			list_add_tail(&segment->node, &desc->segments);
1930 		}
1931 	}
1932 
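	/*
	 * Close the ring: cyclic BD mode is enabled in DMACR below and the
	 * last segment's next-descriptor pointer is set back to the first
	 * segment, so the engine keeps looping over the periods.
	 */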
1933 	head_segment = list_first_entry(&desc->segments,
1934 				   struct xilinx_axidma_tx_segment, node);
1935 	desc->async_tx.phys = head_segment->phys;
1936 
1937 	desc->cyclic = true;
1938 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1939 	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1940 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1941 
1942 	segment = list_last_entry(&desc->segments,
1943 				  struct xilinx_axidma_tx_segment,
1944 				  node);
1945 	segment->hw.next_desc = (u32) head_segment->phys;
1946 
1947 	/* For DMA_MEM_TO_DEV, set SOP on the first segment and EOP on the last */
1948 	if (direction == DMA_MEM_TO_DEV) {
1949 		head_segment->hw.control |= XILINX_DMA_BD_SOP;
1950 		segment->hw.control |= XILINX_DMA_BD_EOP;
1951 	}
1952 
1953 	return &desc->async_tx;
1954 
1955 error:
1956 	xilinx_dma_free_tx_descriptor(chan, desc);
1957 	return NULL;
1958 }
1959 
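/*
 * Illustrative client usage (sketch only, not part of this driver): cyclic
 * mode is what an audio or continuous-capture client would request, e.g.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *
 * buf_dma, buf_len and period_len are placeholders; the completion callback
 * is then typically invoked once per period.
 */
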
1960 /**
1961  * xilinx_dma_prep_interleaved - prepare a descriptor for a
1962  *	DMA_SLAVE transaction
1963  * @dchan: DMA channel
1964  * @xt: Interleaved template pointer
1965  * @flags: transfer ack flags
1966  *
1967  * Return: Async transaction descriptor on success and NULL on failure
1968  */
1969 static struct dma_async_tx_descriptor *
1970 xilinx_dma_prep_interleaved(struct dma_chan *dchan,
1971 				 struct dma_interleaved_template *xt,
1972 				 unsigned long flags)
1973 {
1974 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1975 	struct xilinx_dma_tx_descriptor *desc;
1976 	struct xilinx_axidma_tx_segment *segment;
1977 	struct xilinx_axidma_desc_hw *hw;
1978 
1979 	if (!is_slave_direction(xt->dir))
1980 		return NULL;
1981 
1982 	if (!xt->numf || !xt->sgl[0].size)
1983 		return NULL;
1984 
1985 	if (xt->frame_size != 1)
1986 		return NULL;
1987 
1988 	/* Allocate a transaction descriptor. */
1989 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1990 	if (!desc)
1991 		return NULL;
1992 
1993 	chan->direction = xt->dir;
1994 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1995 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1996 
1997 	/* Get a free segment */
1998 	segment = xilinx_axidma_alloc_tx_segment(chan);
1999 	if (!segment)
2000 		goto error;
2001 
2002 	hw = &segment->hw;
2003 
2004 	/* Fill in the descriptor */
2005 	if (xt->dir != DMA_MEM_TO_DEV)
2006 		hw->buf_addr = xt->dst_start;
2007 	else
2008 		hw->buf_addr = xt->src_start;
2009 
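	/*
	 * Route the transfer to this channel's TDEST (relevant in
	 * multi-channel configurations) and pack the 2D parameters
	 * (vsize, stride, hsize) into the buffer descriptor control words.
	 */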
2010 	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2011 	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2012 			    XILINX_DMA_BD_VSIZE_MASK;
2013 	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2014 			    XILINX_DMA_BD_STRIDE_MASK;
2015 	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2016 
2017 	/*
2018 	 * Insert the segment into the descriptor segments
2019 	 * list.
2020 	 */
2021 	list_add_tail(&segment->node, &desc->segments);
2022 
2023 
2024 	segment = list_first_entry(&desc->segments,
2025 				   struct xilinx_axidma_tx_segment, node);
2026 	desc->async_tx.phys = segment->phys;
2027 
2028 	/* For DMA_MEM_TO_DEV, set SOP on the first segment and EOP on the last */
2029 	if (xt->dir == DMA_MEM_TO_DEV) {
2030 		segment->hw.control |= XILINX_DMA_BD_SOP;
2031 		segment = list_last_entry(&desc->segments,
2032 					  struct xilinx_axidma_tx_segment,
2033 					  node);
2034 		segment->hw.control |= XILINX_DMA_BD_EOP;
2035 	}
2036 
2037 	return &desc->async_tx;
2038 
2039 error:
2040 	xilinx_dma_free_tx_descriptor(chan, desc);
2041 	return NULL;
2042 }
2043 
2044 /**
2045  * xilinx_dma_terminate_all - Halt the channel and free descriptors
2046  * @dchan: Driver specific DMA Channel pointer
2047  *
2048  * Return: '0' always.
2049  */
2050 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2051 {
2052 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2053 	u32 reg;
2054 	int err;
2055 
2056 	if (chan->cyclic)
2057 		xilinx_dma_chan_reset(chan);
2058 
2059 	err = chan->stop_transfer(chan);
2060 	if (err) {
2061 		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2062 			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
2063 		chan->err = true;
2064 	}
2065 
2066 	/* Remove and free all of the descriptors in the lists */
2067 	xilinx_dma_free_descriptors(chan);
2068 	chan->idle = true;
2069 
2070 	if (chan->cyclic) {
2071 		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2072 		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2073 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2074 		chan->cyclic = false;
2075 	}
2076 
2077 	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2078 		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2079 			     XILINX_CDMA_CR_SGMODE);
2080 
2081 	return 0;
2082 }
2083 
2084 /**
2085  * xilinx_vdma_channel_set_config - Configure VDMA channel
2086  * Run-time configuration for AXI VDMA, supports:
2087  * . halt the channel
2088  * . configure interrupt coalescing and inter-packet delay threshold
2089  * . start/stop parking
2090  * . enable genlock
2091  *
2092  * @dchan: DMA channel
2093  * @cfg: VDMA device configuration pointer
2094  *
2095  * Return: '0' on success and failure value on error
2096  */
2097 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2098 					struct xilinx_vdma_config *cfg)
2099 {
2100 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2101 	u32 dmacr;
2102 
2103 	if (cfg->reset)
2104 		return xilinx_dma_chan_reset(chan);
2105 
2106 	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2107 
2108 	chan->config.frm_dly = cfg->frm_dly;
2109 	chan->config.park = cfg->park;
2110 
2111 	/* genlock settings */
2112 	chan->config.gen_lock = cfg->gen_lock;
2113 	chan->config.master = cfg->master;
2114 
2115 	if (cfg->gen_lock && chan->genlock) {
2116 		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2117 		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2118 	}
2119 
2120 	chan->config.frm_cnt_en = cfg->frm_cnt_en;
2121 	chan->config.vflip_en = cfg->vflip_en;
2122 
2123 	if (cfg->park)
2124 		chan->config.park_frm = cfg->park_frm;
2125 	else
2126 		chan->config.park_frm = -1;
2127 
2128 	chan->config.coalesc = cfg->coalesc;
2129 	chan->config.delay = cfg->delay;
2130 
2131 	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2132 		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2133 		chan->config.coalesc = cfg->coalesc;
2134 	}
2135 
2136 	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2137 		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2138 		chan->config.delay = cfg->delay;
2139 	}
2140 
2141 	/* FSync Source selection */
2142 	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2143 	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2144 
2145 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2146 
2147 	return 0;
2148 }
2149 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
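
/*
 * Illustrative client usage (sketch only, not part of this driver): a display
 * or capture driver that owns a VDMA channel can tune it at run time, e.g.
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,
 *		.coalesc = 1,
 *	};
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 *
 * The field values above are placeholders.
 */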
2150 
2151 /* -----------------------------------------------------------------------------
2152  * Probe and remove
2153  */
2154 
2155 /**
2156  * xilinx_dma_chan_remove - Per Channel remove function
2157  * @chan: Driver specific DMA channel
2158  */
2159 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2160 {
2161 	/* Disable all interrupts */
2162 	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2163 		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2164 
2165 	if (chan->irq > 0)
2166 		free_irq(chan->irq, chan);
2167 
2168 	tasklet_kill(&chan->tasklet);
2169 
2170 	list_del(&chan->common.device_node);
2171 }
2172 
2173 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2174 			    struct clk **tx_clk, struct clk **rx_clk,
2175 			    struct clk **sg_clk, struct clk **tmp_clk)
2176 {
2177 	int err;
2178 
2179 	*tmp_clk = NULL;
2180 
2181 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2182 	if (IS_ERR(*axi_clk)) {
2183 		err = PTR_ERR(*axi_clk);
2184 		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2185 		return err;
2186 	}
2187 
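	/*
	 * The MM2S, S2MM and SG clocks are optional: the corresponding
	 * interfaces may be absent in a given synthesis configuration, so a
	 * missing clock is treated as not present rather than as an error.
	 */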
2188 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2189 	if (IS_ERR(*tx_clk))
2190 		*tx_clk = NULL;
2191 
2192 	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2193 	if (IS_ERR(*rx_clk))
2194 		*rx_clk = NULL;
2195 
2196 	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2197 	if (IS_ERR(*sg_clk))
2198 		*sg_clk = NULL;
2199 
2200 	err = clk_prepare_enable(*axi_clk);
2201 	if (err) {
2202 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2203 		return err;
2204 	}
2205 
2206 	err = clk_prepare_enable(*tx_clk);
2207 	if (err) {
2208 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2209 		goto err_disable_axiclk;
2210 	}
2211 
2212 	err = clk_prepare_enable(*rx_clk);
2213 	if (err) {
2214 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2215 		goto err_disable_txclk;
2216 	}
2217 
2218 	err = clk_prepare_enable(*sg_clk);
2219 	if (err) {
2220 		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2221 		goto err_disable_rxclk;
2222 	}
2223 
2224 	return 0;
2225 
2226 err_disable_rxclk:
2227 	clk_disable_unprepare(*rx_clk);
2228 err_disable_txclk:
2229 	clk_disable_unprepare(*tx_clk);
2230 err_disable_axiclk:
2231 	clk_disable_unprepare(*axi_clk);
2232 
2233 	return err;
2234 }
2235 
2236 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2237 			    struct clk **dev_clk, struct clk **tmp_clk,
2238 			    struct clk **tmp1_clk, struct clk **tmp2_clk)
2239 {
2240 	int err;
2241 
2242 	*tmp_clk = NULL;
2243 	*tmp1_clk = NULL;
2244 	*tmp2_clk = NULL;
2245 
2246 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2247 	if (IS_ERR(*axi_clk)) {
2248 		err = PTR_ERR(*axi_clk);
2249 		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
2250 		return err;
2251 	}
2252 
2253 	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2254 	if (IS_ERR(*dev_clk)) {
2255 		err = PTR_ERR(*dev_clk);
2256 		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
2257 		return err;
2258 	}
2259 
2260 	err = clk_prepare_enable(*axi_clk);
2261 	if (err) {
2262 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2263 		return err;
2264 	}
2265 
2266 	err = clk_prepare_enable(*dev_clk);
2267 	if (err) {
2268 		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2269 		goto err_disable_axiclk;
2270 	}
2271 
2272 	return 0;
2273 
2274 err_disable_axiclk:
2275 	clk_disable_unprepare(*axi_clk);
2276 
2277 	return err;
2278 }
2279 
2280 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2281 			    struct clk **tx_clk, struct clk **txs_clk,
2282 			    struct clk **rx_clk, struct clk **rxs_clk)
2283 {
2284 	int err;
2285 
2286 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2287 	if (IS_ERR(*axi_clk)) {
2288 		err = PTR_ERR(*axi_clk);
2289 		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2290 		return err;
2291 	}
2292 
2293 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2294 	if (IS_ERR(*tx_clk))
2295 		*tx_clk = NULL;
2296 
2297 	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2298 	if (IS_ERR(*txs_clk))
2299 		*txs_clk = NULL;
2300 
2301 	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2302 	if (IS_ERR(*rx_clk))
2303 		*rx_clk = NULL;
2304 
2305 	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2306 	if (IS_ERR(*rxs_clk))
2307 		*rxs_clk = NULL;
2308 
2309 	err = clk_prepare_enable(*axi_clk);
2310 	if (err) {
2311 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2312 		return err;
2313 	}
2314 
2315 	err = clk_prepare_enable(*tx_clk);
2316 	if (err) {
2317 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2318 		goto err_disable_axiclk;
2319 	}
2320 
2321 	err = clk_prepare_enable(*txs_clk);
2322 	if (err) {
2323 		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2324 		goto err_disable_txclk;
2325 	}
2326 
2327 	err = clk_prepare_enable(*rx_clk);
2328 	if (err) {
2329 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2330 		goto err_disable_txsclk;
2331 	}
2332 
2333 	err = clk_prepare_enable(*rxs_clk);
2334 	if (err) {
2335 		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2336 		goto err_disable_rxclk;
2337 	}
2338 
2339 	return 0;
2340 
2341 err_disable_rxclk:
2342 	clk_disable_unprepare(*rx_clk);
2343 err_disable_txsclk:
2344 	clk_disable_unprepare(*txs_clk);
2345 err_disable_txclk:
2346 	clk_disable_unprepare(*tx_clk);
2347 err_disable_axiclk:
2348 	clk_disable_unprepare(*axi_clk);
2349 
2350 	return err;
2351 }
2352 
2353 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2354 {
2355 	clk_disable_unprepare(xdev->rxs_clk);
2356 	clk_disable_unprepare(xdev->rx_clk);
2357 	clk_disable_unprepare(xdev->txs_clk);
2358 	clk_disable_unprepare(xdev->tx_clk);
2359 	clk_disable_unprepare(xdev->axi_clk);
2360 }
2361 
2362 /**
2363  * xilinx_dma_chan_probe - Per Channel Probing
2364  * It gets channel features from the device tree entry and
2365  * initializes special channel handling routines
2366  *
2367  * @xdev: Driver specific device structure
2368  * @node: Device node
2369  * @chan_id: DMA Channel id
2370  *
2371  * Return: '0' on success and failure value on error
2372  */
2373 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2374 				  struct device_node *node, int chan_id)
2375 {
2376 	struct xilinx_dma_chan *chan;
2377 	bool has_dre = false;
2378 	u32 value, width;
2379 	int err;
2380 
2381 	/* Allocate and initialize the channel structure */
2382 	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2383 	if (!chan)
2384 		return -ENOMEM;
2385 
2386 	chan->dev = xdev->dev;
2387 	chan->xdev = xdev;
2388 	chan->has_sg = xdev->has_sg;
2389 	chan->desc_pendingcount = 0x0;
2390 	chan->ext_addr = xdev->ext_addr;
2391 	/*
2392 	 * This flag ensures that descriptors are not submitted while the
2393 	 * DMA engine is busy. It avoids polling a status register bit to
2394 	 * determine the DMA state in the driver hot path.
2395 	 */
2396 	chan->idle = true;
2397 
2398 	spin_lock_init(&chan->lock);
2399 	INIT_LIST_HEAD(&chan->pending_list);
2400 	INIT_LIST_HEAD(&chan->done_list);
2401 	INIT_LIST_HEAD(&chan->active_list);
2402 	INIT_LIST_HEAD(&chan->free_seg_list);
2403 
2404 	/* Retrieve the channel properties from the device tree */
2405 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
2406 
2407 	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2408 
2409 	err = of_property_read_u32(node, "xlnx,datawidth", &value);
2410 	if (err) {
2411 		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2412 		return err;
2413 	}
2414 	width = value >> 3; /* Convert bits to bytes */
2415 
2416 	/* If data width is greater than 8 bytes, DRE is not in hw */
2417 	if (width > 8)
2418 		has_dre = false;
2419 
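	/*
	 * Without a DRE the hardware cannot realign data, so advertise the
	 * bus-width alignment requirement (log2 of the width in bytes)
	 * through copy_align.
	 */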
2420 	if (!has_dre)
2421 		xdev->common.copy_align = fls(width - 1);
2422 
2423 	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2424 	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2425 	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2426 		chan->direction = DMA_MEM_TO_DEV;
2427 		chan->id = chan_id;
2428 		chan->tdest = chan_id;
2429 
2430 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2431 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2432 			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2433 			chan->config.park = 1;
2434 
2435 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2436 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2437 				chan->flush_on_fsync = true;
2438 		}
2439 	} else if (of_device_is_compatible(node,
2440 					   "xlnx,axi-vdma-s2mm-channel") ||
2441 		   of_device_is_compatible(node,
2442 					   "xlnx,axi-dma-s2mm-channel")) {
2443 		chan->direction = DMA_DEV_TO_MEM;
2444 		chan->id = chan_id;
2445 		chan->tdest = chan_id - xdev->nr_channels;
2446 		chan->has_vflip = of_property_read_bool(node,
2447 					"xlnx,enable-vert-flip");
2448 		if (chan->has_vflip) {
2449 			chan->config.vflip_en = dma_read(chan,
2450 				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2451 				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2452 		}
2453 
2454 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2455 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2456 			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2457 			chan->config.park = 1;
2458 
2459 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2460 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2461 				chan->flush_on_fsync = true;
2462 		}
2463 	} else {
2464 		dev_err(xdev->dev, "Invalid channel compatible node\n");
2465 		return -EINVAL;
2466 	}
2467 
2468 	/* Request the interrupt */
2469 	chan->irq = irq_of_parse_and_map(node, 0);
2470 	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2471 			  "xilinx-dma-controller", chan);
2472 	if (err) {
2473 		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2474 		return err;
2475 	}
2476 
2477 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2478 		chan->start_transfer = xilinx_dma_start_transfer;
2479 		chan->stop_transfer = xilinx_dma_stop_transfer;
2480 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2481 		chan->start_transfer = xilinx_cdma_start_transfer;
2482 		chan->stop_transfer = xilinx_cdma_stop_transfer;
2483 	} else {
2484 		chan->start_transfer = xilinx_vdma_start_transfer;
2485 		chan->stop_transfer = xilinx_dma_stop_transfer;
2486 	}
2487 
2488 	/* Initialize the tasklet */
2489 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
2490 			(unsigned long)chan);
2491 
2492 	/*
2493 	 * Initialize the DMA channel and add it to the DMA engine channels
2494 	 * list.
2495 	 */
2496 	chan->common.device = &xdev->common;
2497 
2498 	list_add_tail(&chan->common.device_node, &xdev->common.channels);
2499 	xdev->chan[chan->id] = chan;
2500 
2501 	/* Reset the channel */
2502 	err = xilinx_dma_chan_reset(chan);
2503 	if (err < 0) {
2504 		dev_err(xdev->dev, "Reset channel failed\n");
2505 		return err;
2506 	}
2507 
2508 	return 0;
2509 }
2510 
2511 /**
2512  * xilinx_dma_child_probe - Per child node probe
2513  * It gets the number of dma-channels per child node from the
2514  * device tree and initializes all the channels.
2515  *
2516  * @xdev: Driver specific device structure
2517  * @node: Device node
2518  *
2519  * Return: 0 always.
2520  */
2521 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2522 				    struct device_node *node)
2523 {
2524 	int ret, i, nr_channels = 1;
2525 
2526 	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2527 	if ((ret < 0) && xdev->mcdma)
2528 		dev_warn(xdev->dev, "missing dma-channels property\n");
2529 
2530 	for (i = 0; i < nr_channels; i++)
2531 		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2532 
2533 	xdev->nr_channels += nr_channels;
2534 
2535 	return 0;
2536 }
2537 
2538 /**
2539  * of_dma_xilinx_xlate - Translation function
2540  * @dma_spec: Pointer to DMA specifier as found in the device tree
2541  * @ofdma: Pointer to DMA controller data
2542  *
2543  * Return: DMA channel pointer on success and NULL on error
2544  */
2545 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2546 						struct of_dma *ofdma)
2547 {
2548 	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2549 	int chan_id = dma_spec->args[0];
2550 
2551 	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
2552 		return NULL;
2553 
2554 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2555 }
2556 
2557 static const struct xilinx_dma_config axidma_config = {
2558 	.dmatype = XDMA_TYPE_AXIDMA,
2559 	.clk_init = axidma_clk_init,
2560 };
2561 
2562 static const struct xilinx_dma_config axicdma_config = {
2563 	.dmatype = XDMA_TYPE_CDMA,
2564 	.clk_init = axicdma_clk_init,
2565 };
2566 
2567 static const struct xilinx_dma_config axivdma_config = {
2568 	.dmatype = XDMA_TYPE_VDMA,
2569 	.clk_init = axivdma_clk_init,
2570 };
2571 
2572 static const struct of_device_id xilinx_dma_of_ids[] = {
2573 	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2574 	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2575 	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2576 	{}
2577 };
2578 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2579 
2580 /**
2581  * xilinx_dma_probe - Driver probe function
2582  * @pdev: Pointer to the platform_device structure
2583  *
2584  * Return: '0' on success and failure value on error
2585  */
2586 static int xilinx_dma_probe(struct platform_device *pdev)
2587 {
2588 	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2589 			struct clk **, struct clk **, struct clk **)
2590 					= axivdma_clk_init;
2591 	struct device_node *node = pdev->dev.of_node;
2592 	struct xilinx_dma_device *xdev;
2593 	struct device_node *child, *np = pdev->dev.of_node;
2594 	struct resource *io;
2595 	u32 num_frames, addr_width;
2596 	int i, err;
2597 
2598 	/* Allocate and initialize the DMA engine structure */
2599 	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2600 	if (!xdev)
2601 		return -ENOMEM;
2602 
2603 	xdev->dev = &pdev->dev;
2604 	if (np) {
2605 		const struct of_device_id *match;
2606 
2607 		match = of_match_node(xilinx_dma_of_ids, np);
2608 		if (match && match->data) {
2609 			xdev->dma_config = match->data;
2610 			clk_init = xdev->dma_config->clk_init;
2611 		}
2612 	}
2613 
2614 	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2615 		       &xdev->rx_clk, &xdev->rxs_clk);
2616 	if (err)
2617 		return err;
2618 
2619 	/* Request and map I/O memory */
2620 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2621 	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
2622 	if (IS_ERR(xdev->regs))
2623 		return PTR_ERR(xdev->regs);
2624 
2625 	/* Retrieve the DMA engine properties from the device tree */
2626 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
2627 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2628 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
2629 
2630 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2631 		err = of_property_read_u32(node, "xlnx,num-fstores",
2632 					   &num_frames);
2633 		if (err < 0) {
2634 			dev_err(xdev->dev,
2635 				"missing xlnx,num-fstores property\n");
2636 			return err;
2637 		}
2638 
2639 		err = of_property_read_u32(node, "xlnx,flush-fsync",
2640 					   &xdev->flush_on_fsync);
2641 		if (err < 0)
2642 			dev_warn(xdev->dev,
2643 				 "missing xlnx,flush-fsync property\n");
2644 	}
2645 
2646 	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
2647 	if (err < 0)
2648 		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2649 
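	/*
	 * Address widths above 32 bits need the extended (MSB) address
	 * fields in the descriptors and registers; ext_addr selects that
	 * path throughout the driver.
	 */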
2650 	if (addr_width > 32)
2651 		xdev->ext_addr = true;
2652 	else
2653 		xdev->ext_addr = false;
2654 
2655 	/* Set the dma mask bits */
2656 	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
2657 
2658 	/* Initialize the DMA engine */
2659 	xdev->common.dev = &pdev->dev;
2660 
2661 	INIT_LIST_HEAD(&xdev->common.channels);
2662 	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
2663 		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2664 		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2665 	}
2666 
2667 	xdev->common.device_alloc_chan_resources =
2668 				xilinx_dma_alloc_chan_resources;
2669 	xdev->common.device_free_chan_resources =
2670 				xilinx_dma_free_chan_resources;
2671 	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2672 	xdev->common.device_tx_status = xilinx_dma_tx_status;
2673 	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2674 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2675 		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
2676 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2677 		xdev->common.device_prep_dma_cyclic =
2678 					  xilinx_dma_prep_dma_cyclic;
2679 		xdev->common.device_prep_interleaved_dma =
2680 					xilinx_dma_prep_interleaved;
2681 		/* Residue calculation is supported only by AXI DMA */
2682 		xdev->common.residue_granularity =
2683 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
2684 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2685 		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2686 		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
2687 	} else {
2688 		xdev->common.device_prep_interleaved_dma =
2689 				xilinx_vdma_dma_prep_interleaved;
2690 	}
2691 
2692 	platform_set_drvdata(pdev, xdev);
2693 
2694 	/* Initialize the channels */
2695 	for_each_child_of_node(node, child) {
2696 		err = xilinx_dma_child_probe(xdev, child);
2697 		if (err < 0)
2698 			goto disable_clks;
2699 	}
2700 
2701 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2702 		for (i = 0; i < xdev->nr_channels; i++)
2703 			if (xdev->chan[i])
2704 				xdev->chan[i]->num_frms = num_frames;
2705 	}
2706 
2707 	/* Register the DMA engine with the core */
2708 	dma_async_device_register(&xdev->common);
2709 
2710 	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2711 					 xdev);
2712 	if (err < 0) {
2713 		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2714 		dma_async_device_unregister(&xdev->common);
2715 		goto error;
2716 	}
2717 
2718 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2719 		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
2720 	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2721 		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
2722 	else
2723 		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2724 
2725 	return 0;
2726 
2727 disable_clks:
2728 	xdma_disable_allclks(xdev);
2729 error:
2730 	for (i = 0; i < xdev->nr_channels; i++)
2731 		if (xdev->chan[i])
2732 			xilinx_dma_chan_remove(xdev->chan[i]);
2733 
2734 	return err;
2735 }
2736 
2737 /**
2738  * xilinx_dma_remove - Driver remove function
2739  * @pdev: Pointer to the platform_device structure
2740  *
2741  * Return: Always '0'
2742  */
2743 static int xilinx_dma_remove(struct platform_device *pdev)
2744 {
2745 	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2746 	int i;
2747 
2748 	of_dma_controller_free(pdev->dev.of_node);
2749 
2750 	dma_async_device_unregister(&xdev->common);
2751 
2752 	for (i = 0; i < xdev->nr_channels; i++)
2753 		if (xdev->chan[i])
2754 			xilinx_dma_chan_remove(xdev->chan[i]);
2755 
2756 	xdma_disable_allclks(xdev);
2757 
2758 	return 0;
2759 }
2760 
2761 static struct platform_driver xilinx_vdma_driver = {
2762 	.driver = {
2763 		.name = "xilinx-vdma",
2764 		.of_match_table = xilinx_dma_of_ids,
2765 	},
2766 	.probe = xilinx_dma_probe,
2767 	.remove = xilinx_dma_remove,
2768 };
2769 
2770 module_platform_driver(xilinx_vdma_driver);
2771 
2772 MODULE_AUTHOR("Xilinx, Inc.");
2773 MODULE_DESCRIPTION("Xilinx VDMA driver");
2774 MODULE_LICENSE("GPL v2");
2775