// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx ZynqMP DPDMA Engine driver
 *
 * Copyright (C) 2015 - 2020 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* DPDMA registers */
#define XILINX_DPDMA_ERR_CTRL				0x000
#define XILINX_DPDMA_ISR				0x004
#define XILINX_DPDMA_IMR				0x008
#define XILINX_DPDMA_IEN				0x00c
#define XILINX_DPDMA_IDS				0x010
#define XILINX_DPDMA_INTR_DESC_DONE(n)			BIT((n) + 0)
#define XILINX_DPDMA_INTR_DESC_DONE_MASK		GENMASK(5, 0)
#define XILINX_DPDMA_INTR_NO_OSTAND(n)			BIT((n) + 6)
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK		GENMASK(11, 6)
#define XILINX_DPDMA_INTR_AXI_ERR(n)			BIT((n) + 12)
#define XILINX_DPDMA_INTR_AXI_ERR_MASK			GENMASK(17, 12)
#define XILINX_DPDMA_INTR_DESC_ERR(n)			BIT((n) + 18)
#define XILINX_DPDMA_INTR_DESC_ERR_MASK			GENMASK(23, 18)
#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL		BIT(24)
#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL		BIT(25)
#define XILINX_DPDMA_INTR_AXI_4K_CROSS			BIT(26)
#define XILINX_DPDMA_INTR_VSYNC				BIT(27)
#define XILINX_DPDMA_INTR_CHAN_ERR_MASK			0x00041000
#define XILINX_DPDMA_INTR_CHAN_ERR			0x00fff000
#define XILINX_DPDMA_INTR_GLOBAL_ERR			0x07000000
#define XILINX_DPDMA_INTR_ERR_ALL			0x07fff000
#define XILINX_DPDMA_INTR_CHAN_MASK			0x00041041
#define XILINX_DPDMA_INTR_GLOBAL_MASK			0x0f000000
#define XILINX_DPDMA_INTR_ALL				0x0fffffff
#define XILINX_DPDMA_EISR				0x014
#define XILINX_DPDMA_EIMR				0x018
#define XILINX_DPDMA_EIEN				0x01c
#define XILINX_DPDMA_EIDS				0x020
#define XILINX_DPDMA_EINTR_INV_APB			BIT(0)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR(n)		BIT((n) + 1)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK		GENMASK(6, 1)
#define XILINX_DPDMA_EINTR_PRE_ERR(n)			BIT((n) + 7)
#define XILINX_DPDMA_EINTR_PRE_ERR_MASK			GENMASK(12, 7)
#define XILINX_DPDMA_EINTR_CRC_ERR(n)			BIT((n) + 13)
#define XILINX_DPDMA_EINTR_CRC_ERR_MASK			GENMASK(18, 13)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR(n)		BIT((n) + 19)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK		GENMASK(24, 19)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n)		BIT((n) + 25)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK		GENMASK(30, 25)
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL		BIT(31)
#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK		0x02082082
#define XILINX_DPDMA_EINTR_CHAN_ERR			0x7ffffffe
#define XILINX_DPDMA_EINTR_GLOBAL_ERR			0x80000001
#define XILINX_DPDMA_EINTR_ALL				0xffffffff
#define XILINX_DPDMA_CNTL				0x100
#define XILINX_DPDMA_GBL				0x104
#define XILINX_DPDMA_GBL_TRIG_MASK(n)			((n) << 0)
#define XILINX_DPDMA_GBL_RETRIG_MASK(n)			((n) << 6)
#define XILINX_DPDMA_ALC0_CNTL				0x108
#define XILINX_DPDMA_ALC0_STATUS			0x10c
#define XILINX_DPDMA_ALC0_MAX				0x110
#define XILINX_DPDMA_ALC0_MIN				0x114
#define XILINX_DPDMA_ALC0_ACC				0x118
#define XILINX_DPDMA_ALC0_ACC_TRAN			0x11c
#define XILINX_DPDMA_ALC1_CNTL				0x120
#define XILINX_DPDMA_ALC1_STATUS			0x124
#define XILINX_DPDMA_ALC1_MAX				0x128
#define XILINX_DPDMA_ALC1_MIN				0x12c
#define XILINX_DPDMA_ALC1_ACC				0x130
#define XILINX_DPDMA_ALC1_ACC_TRAN			0x134

/* Channel register */
#define XILINX_DPDMA_CH_BASE				0x200
#define XILINX_DPDMA_CH_OFFSET				0x100
#define XILINX_DPDMA_CH_DESC_START_ADDRE		0x000
#define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK		GENMASK(15, 0)
#define XILINX_DPDMA_CH_DESC_START_ADDR			0x004
#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE			0x008
#define XILINX_DPDMA_CH_DESC_NEXT_ADDR			0x00c
#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE			0x010
#define XILINX_DPDMA_CH_PYLD_CUR_ADDR			0x014
#define XILINX_DPDMA_CH_CNTL				0x018
#define XILINX_DPDMA_CH_CNTL_ENABLE			BIT(0)
#define XILINX_DPDMA_CH_CNTL_PAUSE			BIT(1)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK		GENMASK(5, 2)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK		GENMASK(9, 6)
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK		GENMASK(13, 10)
#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS		11
#define XILINX_DPDMA_CH_STATUS				0x01c
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK		GENMASK(24, 21)
#define XILINX_DPDMA_CH_VDO				0x020
#define XILINX_DPDMA_CH_PYLD_SZ				0x024
#define XILINX_DPDMA_CH_DESC_ID				0x028
#define XILINX_DPDMA_CH_DESC_ID_MASK			GENMASK(15, 0)

/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE		0xa5
#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR		BIT(8)
#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE		BIT(9)
#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE		BIT(10)
#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE		BIT(18)
#define XILINX_DPDMA_DESC_CONTROL_LAST			BIT(19)
#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC		BIT(20)
#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME		BIT(21)
#define XILINX_DPDMA_DESC_ID_MASK			GENMASK(15, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK	GENMASK(17, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK	GENMASK(31, 18)
#define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK	GENMASK(15, 0)
#define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK	GENMASK(31, 16)

#define XILINX_DPDMA_ALIGN_BYTES			256
#define XILINX_DPDMA_LINESIZE_ALIGN_BITS		128

#define XILINX_DPDMA_NUM_CHAN				6

struct xilinx_dpdma_chan;

/**
 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 * @control: control configuration field
 * @desc_id: descriptor ID
 * @xfer_size: transfer size
 * @hsize_stride: horizontal size and stride
 * @timestamp_lsb: LSB of time stamp
 * @timestamp_msb: MSB of time stamp
 * @addr_ext: upper 16 bits of the 48-bit addresses (next_desc and src_addr)
 * @next_desc: next descriptor 32 bit address
 * @src_addr: payload source address (1st page, 32 LSB)
 * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
 * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
 * @src_addr2: payload source address (2nd page, 32 LSB)
 * @src_addr3: payload source address (3rd page, 32 LSB)
 * @src_addr4: payload source address (4th page, 32 LSB)
 * @src_addr5: payload source address (5th page, 32 LSB)
 * @crc: descriptor CRC
 */
struct xilinx_dpdma_hw_desc {
	u32 control;
	u32 desc_id;
	u32 xfer_size;
	u32 hsize_stride;
	u32 timestamp_lsb;
	u32 timestamp_msb;
	u32 addr_ext;
	u32 next_desc;
	u32 src_addr;
	u32 addr_ext_23;
	u32 addr_ext_45;
	u32 src_addr2;
	u32 src_addr3;
	u32 src_addr4;
	u32 src_addr5;
	u32 crc;
} __aligned(XILINX_DPDMA_ALIGN_BYTES);
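
/*
 * Illustrative example (not part of the driver): with ext_addr set, a 48-bit
 * source address such as 0x0001_8000_1000 is split by
 * xilinx_dpdma_sw_desc_set_dma_addrs() below as
 *
 *	hw.src_addr = 0x80001000;		// lower 32 bits
 *	hw.addr_ext |= FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
 *				  0x0001);	// upper 16 bits
 *
 * The lower 32 bits of the addresses for pages 2 to 5 go to src_addr2 to
 * src_addr5, and their 16-bit extensions are packed in pairs into
 * addr_ext_23 and addr_ext_45.
 */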

/**
 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 * @hw: DPDMA hardware descriptor
 * @node: list node for software descriptors
 * @dma_addr: DMA address of the software descriptor
 */
struct xilinx_dpdma_sw_desc {
	struct xilinx_dpdma_hw_desc hw;
	struct list_head node;
	dma_addr_t dma_addr;
};

/**
 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 * @vdesc: virtual DMA descriptor
 * @chan: DMA channel
 * @descriptors: list of software descriptors
 * @error: an error has been detected with this descriptor
 */
struct xilinx_dpdma_tx_desc {
	struct virt_dma_desc vdesc;
	struct xilinx_dpdma_chan *chan;
	struct list_head descriptors;
	bool error;
};

#define to_dpdma_tx_desc(_desc) \
	container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)

/**
 * struct xilinx_dpdma_chan - DPDMA channel
 * @vchan: virtual DMA channel
 * @reg: register base address
 * @id: channel ID
 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 * @running: true if the channel is running
 * @first_frame: flag for the first frame of stream
 * @video_group: flag if multi-channel operation is needed for video channels
 * @lock: lock to access struct xilinx_dpdma_chan
 * @desc_pool: descriptor allocation pool
 * @err_task: error IRQ bottom half handler
 * @desc: References to descriptors being processed
 * @desc.pending: Descriptor scheduled to the hardware, pending execution
 * @desc.active: Descriptor being executed by the hardware
 * @xdev: DPDMA device
 */
struct xilinx_dpdma_chan {
	struct virt_dma_chan vchan;
	void __iomem *reg;
	unsigned int id;

	wait_queue_head_t wait_to_stop;
	bool running;
	bool first_frame;
	bool video_group;

	spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
	struct dma_pool *desc_pool;
	struct tasklet_struct err_task;

	struct {
		struct xilinx_dpdma_tx_desc *pending;
		struct xilinx_dpdma_tx_desc *active;
	} desc;

	struct xilinx_dpdma_device *xdev;
};

#define to_xilinx_chan(_chan) \
	container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)

/**
 * struct xilinx_dpdma_device - DPDMA device
 * @common: generic dma device structure
 * @reg: register base address
 * @dev: generic device structure
 * @irq: the interrupt number
 * @axi_clk: axi clock
 * @chan: DPDMA channels
 * @ext_addr: flag for 64 bit system (48 bit addressing)
 */
struct xilinx_dpdma_device {
	struct dma_device common;
	void __iomem *reg;
	struct device *dev;
	int irq;

	struct clk *axi_clk;
	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];

	bool ext_addr;
};

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS

#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
enum xilinx_dpdma_testcases {
	DPDMA_TC_INTR_DONE,
	DPDMA_TC_NONE
};

struct xilinx_dpdma_debugfs {
	enum xilinx_dpdma_testcases testcase;
	u16 xilinx_dpdma_irq_done_count;
	unsigned int chan_id;
};

static struct xilinx_dpdma_debugfs dpdma_debugfs;

struct xilinx_dpdma_debugfs_request {
	const char *name;
	enum xilinx_dpdma_testcases tc;
	ssize_t (*read)(char *buf);
	int (*write)(char *args);
};

static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
	if (chan->id == dpdma_debugfs.chan_id)
		dpdma_debugfs.xilinx_dpdma_irq_done_count++;
}

static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
{
	size_t out_str_len;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	/* Leave room for the terminating NUL so a five-digit count fits. */
	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR) + 1;
	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
			    out_str_len);
	snprintf(buf, out_str_len, "%d",
		 dpdma_debugfs.xilinx_dpdma_irq_done_count);

	return 0;
}

static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
{
	char *arg;
	int ret;
	u32 id;

	arg = strsep(&args, " ");
	if (!arg || strncasecmp(arg, "start", 5))
		return -EINVAL;

	arg = strsep(&args, " ");
	if (!arg)
		return -EINVAL;

	ret = kstrtou32(arg, 0, &id);
	if (ret < 0)
		return ret;

	if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
		return -EINVAL;

	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
	dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
	dpdma_debugfs.chan_id = id;

	return 0;
}
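
/*
 * Illustrative shell usage of the testcase interface (the exact location of
 * the "testcase" file under debugfs depends on where the dmaengine debug
 * root for this device lives, so the path below is an assumption):
 *
 *	echo "DESCRIPTOR_DONE_INTR start 0" > .../dma/testcase
 *	... let a few frames scan out ...
 *	cat .../dma/testcase
 *
 * The write arms the done-IRQ counter for channel 0 (ZYNQMP_DPDMA_VIDEO0),
 * and the read reports how many descriptor done interrupts were received on
 * that channel since.
 */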

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
	{
		.name = "DESCRIPTOR_DONE_INTR",
		.tc = DPDMA_TC_INTR_DONE,
		.read = xilinx_dpdma_debugfs_desc_done_irq_read,
		.write = xilinx_dpdma_debugfs_desc_done_irq_write,
	},
};

static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	enum xilinx_dpdma_testcases testcase;
	char *kern_buff;
	int ret = 0;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
	if (!kern_buff) {
		dpdma_debugfs.testcase = DPDMA_TC_NONE;
		return -ENOMEM;
	}

	testcase = READ_ONCE(dpdma_debugfs.testcase);
	if (testcase != DPDMA_TC_NONE) {
		ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
		if (ret < 0)
			goto done;
	} else {
		strlcpy(kern_buff, "No testcase executed",
			XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
	}

	size = min(size, strlen(kern_buff));
	if (copy_to_user(buf, kern_buff, size))
		ret = -EFAULT;

done:
	kfree(kern_buff);
	if (ret)
		return ret;

	*pos = size + 1;
	return size;
}

static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
					  const char __user *buf, size_t size,
					  loff_t *pos)
{
	char *kern_buff, *kern_buff_start;
	char *testcase;
	unsigned int i;
	int ret;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	/* Only a single test instance is supported at a time. */
	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
		return -EBUSY;

	kern_buff = kzalloc(size, GFP_KERNEL);
	if (!kern_buff)
		return -ENOMEM;
	kern_buff_start = kern_buff;

	ret = strncpy_from_user(kern_buff, buf, size);
	if (ret < 0)
		goto done;

	/* Read the testcase name from a user request. */
	testcase = strsep(&kern_buff, " ");

	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
		if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
			break;
	}

	if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
		ret = -EINVAL;
		goto done;
	}

	ret = dpdma_debugfs_reqs[i].write(kern_buff);
	if (ret < 0)
		goto done;

	ret = size;

done:
	kfree(kern_buff_start);
	return ret;
}

static const struct file_operations fops_xilinx_dpdma_dbgfs = {
	.owner = THIS_MODULE,
	.read = xilinx_dpdma_debugfs_read,
	.write = xilinx_dpdma_debugfs_write,
};

static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
	struct dentry *dent;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
				   NULL, &fops_xilinx_dpdma_dbgfs);
	if (IS_ERR(dent))
		dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
}

#else
static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
}

static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
}
#endif /* CONFIG_DEBUG_FS */

/* -----------------------------------------------------------------------------
 * I/O Accessors
 */

static inline u32 dpdma_read(void __iomem *base, u32 offset)
{
	return ioread32(base + offset);
}

static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}

static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
{
	dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
}

static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
{
	dpdma_write(base, offset, dpdma_read(base, offset) | set);
}

/* -----------------------------------------------------------------------------
 * Descriptor Operations
 */

/**
 * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
 * @xdev: DPDMA device
 * @sw_desc: The software descriptor in which to set DMA addresses
 * @prev: The previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 *
 * Set all the DMA addresses in the hardware descriptor of @sw_desc
 * from @dma_addr. If a previous descriptor is specified in @prev, its next
 * descriptor DMA address is set to the DMA address of @sw_desc. @prev may be
 * identical to @sw_desc for cyclic transfers.
 */
static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
					       struct xilinx_dpdma_sw_desc *sw_desc,
					       struct xilinx_dpdma_sw_desc *prev,
					       dma_addr_t dma_addr[],
					       unsigned int num_src_addr)
{
	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
	unsigned int i;

	hw_desc->src_addr = lower_32_bits(dma_addr[0]);
	if (xdev->ext_addr)
		hw_desc->addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
				   upper_32_bits(dma_addr[0]));

	for (i = 1; i < num_src_addr; i++) {
		u32 *addr = &hw_desc->src_addr2;

		addr[i - 1] = lower_32_bits(dma_addr[i]);

		if (xdev->ext_addr) {
			u32 *addr_ext = &hw_desc->addr_ext_23;
			u32 addr_msb;

			addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
			addr_msb <<= 16 * ((i - 1) % 2);
			addr_ext[(i - 1) / 2] |= addr_msb;
		}
	}

	if (!prev)
		return;

	prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
	if (xdev->ext_addr)
		prev->hw.addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
				   upper_32_bits(sw_desc->dma_addr));
}
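
/*
 * For the single-descriptor cyclic transfers used by this driver, a
 * descriptor is chained to itself by passing it as its own predecessor, as
 * done in xilinx_dpdma_chan_prep_interleaved_dma() below:
 *
 *	xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
 *					   &xt->src_start, 1);
 *
 * This makes hw.next_desc point back at the descriptor itself, so the
 * hardware re-fetches the same descriptor for every frame.
 */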

/**
 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 * @chan: DPDMA channel
 *
 * Allocate a software descriptor from the channel's descriptor pool.
 *
 * Return: a software descriptor or NULL.
 */
static struct xilinx_dpdma_sw_desc *
xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	dma_addr_t dma_addr;

	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
	if (!sw_desc)
		return NULL;

	sw_desc->dma_addr = dma_addr;

	return sw_desc;
}

/**
 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 * @chan: DPDMA channel
 * @sw_desc: software descriptor to free
 *
 * Free a software descriptor from the channel's descriptor pool.
 */
static void
xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
			       struct xilinx_dpdma_sw_desc *sw_desc)
{
	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
}

/**
 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor to dump
 *
 * Dump contents of a tx descriptor
 */
static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
					   struct xilinx_dpdma_tx_desc *tx_desc)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct device *dev = chan->xdev->dev;
	unsigned int i = 0;

	dev_dbg(dev, "------- TX descriptor dump start -------\n");
	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);

	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;

		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
		dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
	}

	dev_dbg(dev, "------- TX descriptor dump end -------\n");
}

/**
 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 * @chan: DPDMA channel
 *
 * Allocate a tx descriptor.
 *
 * Return: a tx descriptor or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *tx_desc;

	tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
	if (!tx_desc)
		return NULL;

	INIT_LIST_HEAD(&tx_desc->descriptors);
	tx_desc->chan = chan;
	tx_desc->error = false;

	return tx_desc;
}

/**
 * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
 * @vdesc: virtual DMA descriptor
 *
 * Free the virtual DMA descriptor @vdesc including its software descriptors.
 */
static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
{
	struct xilinx_dpdma_sw_desc *sw_desc, *next;
	struct xilinx_dpdma_tx_desc *desc;

	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);

	list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
		list_del(&sw_desc->node);
		xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
	}

	kfree(desc);
}

/**
 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
 *					    descriptor
 * @chan: DPDMA channel
 * @xt: dma interleaved template
 *
 * Prepare a tx descriptor including internal software/hardware descriptors
 * based on @xt.
 *
 * Return: A DPDMA TX descriptor on success, or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
				       struct dma_interleaved_template *xt)
{
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_hw_desc *hw_desc;
	size_t hsize = xt->sgl[0].size;
	size_t stride = hsize + xt->sgl[0].icg;

	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
		dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
			XILINX_DPDMA_ALIGN_BYTES);
		return NULL;
	}

	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	if (!tx_desc)
		return NULL;

	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
	if (!sw_desc) {
		xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
		return NULL;
	}

	xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
					   &xt->src_start, 1);

	hw_desc = &sw_desc->hw;
	hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
	hw_desc->xfer_size = hsize * xt->numf;
	hw_desc->hsize_stride =
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
			   stride / 16);
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

	list_add_tail(&sw_desc->node, &tx_desc->descriptors);

	return tx_desc;
}
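
/*
 * Illustrative template geometry (example values, not requirements beyond
 * the alignment rules checked above) for a 1920x1080 XRGB8888 frame buffer
 * with a padded line stride:
 *
 *	xt->numf        = 1080;			// lines per frame
 *	xt->sgl[0].size = 1920 * 4;		// bytes per line (hsize)
 *	xt->sgl[0].icg  = stride - 1920 * 4;	// inter-line padding
 *
 * hsize is rounded up to XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8 = 16 bytes,
 * and the stride is programmed in units of 16 bytes (stride / 16).
 */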

/* -----------------------------------------------------------------------------
 * DPDMA Channel Operations
 */

/**
 * xilinx_dpdma_chan_enable - Enable the channel
 * @chan: DPDMA channel
 *
 * Enable the channel and its interrupts. Set the QoS values for video class.
 */
static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_MASK;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
	    | XILINX_DPDMA_EINTR_GLOBAL_ERR;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	reg = XILINX_DPDMA_CH_CNTL_ENABLE
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
}

/**
 * xilinx_dpdma_chan_disable - Disable the channel
 * @chan: DPDMA channel
 *
 * Disable the channel and its interrupts.
 */
static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
}

/**
 * xilinx_dpdma_chan_pause - Pause the channel
 * @chan: DPDMA channel
 *
 * Pause the channel.
 */
static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
{
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

/**
 * xilinx_dpdma_chan_unpause - Unpause the channel
 * @chan: DPDMA channel
 *
 * Unpause the channel.
 */
static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
{
	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	u32 channels = 0;
	unsigned int i;

	for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
		if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
			return 0;

		if (xdev->chan[i]->video_group)
			channels |= BIT(i);
	}

	return channels;
}

/**
 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
 * @chan: DPDMA channel
 *
 * Queue the next descriptor, if any, to the hardware. If the channel is
 * stopped, start it first. Otherwise retrigger it with the next descriptor.
 */
static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_tx_desc *desc;
	struct virt_dma_desc *vdesc;
	u32 reg, channels;
	bool first_frame;

	lockdep_assert_held(&chan->lock);

	if (chan->desc.pending)
		return;

	if (!chan->running) {
		xilinx_dpdma_chan_unpause(chan);
		xilinx_dpdma_chan_enable(chan);
		chan->first_frame = true;
		chan->running = true;
	}

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);
	chan->desc.pending = desc;
	list_del(&desc->vdesc.node);

	/*
	 * Assign the cookie to descriptors in this transaction. Only 16 bit
	 * will be used, but it should be enough.
	 */
	list_for_each_entry(sw_desc, &desc->descriptors, node)
		sw_desc->hw.desc_id = desc->vdesc.tx.cookie
				    & XILINX_DPDMA_CH_DESC_ID_MASK;

	sw_desc = list_first_entry(&desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
		    lower_32_bits(sw_desc->dma_addr));
	if (xdev->ext_addr)
		dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
			    FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
				       upper_32_bits(sw_desc->dma_addr)));

	first_frame = chan->first_frame;
	chan->first_frame = false;

	if (chan->video_group) {
		channels = xilinx_dpdma_chan_video_group_ready(chan);
		/*
		 * Trigger the transfer only when all channels in the group are
		 * ready.
		 */
		if (!channels)
			return;
	} else {
		channels = BIT(chan->id);
	}

	if (first_frame)
		reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
	else
		reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}

/**
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 *
 * Read and return the number of outstanding transactions from register.
 *
 * Return: Number of outstanding transactions from the status register.
 */
static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
{
	return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
			 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
}

/**
 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
 * @chan: DPDMA channel
 *
 * Notify waiters for no outstanding event, so waiters can stop the channel
 * safely. This function is supposed to be called when 'no outstanding'
 * interrupt is generated. The 'no outstanding' interrupt is disabled and
 * should be re-enabled when this event is handled. If the channel status
 * register still shows some number of outstanding transactions, the interrupt
 * remains enabled.
 *
 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
 * transaction(s).
 */
static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt;

	cnt = xilinx_dpdma_chan_ostand(chan);
	if (cnt) {
		dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
		return -EWOULDBLOCK;
	}

	/* Disable 'no outstanding' interrupt */
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
		    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
	wake_up(&chan->wait_to_stop);

	return 0;
}

/**
 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
 * @chan: DPDMA channel
 *
 * Wait for the no outstanding transaction interrupt. This function can sleep
 * for 50ms.
 *
 * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
 * from wait_event_interruptible_timeout().
 */
static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
{
	int ret;

	/* Wait for a 'no outstanding transaction' interrupt for up to 50 ms. */
	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
					       !xilinx_dpdma_chan_ostand(chan),
					       msecs_to_jiffies(50));
	if (ret > 0) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));

	if (ret == 0)
		return -ETIMEDOUT;

	return ret;
}

/**
 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
 * @chan: DPDMA channel
 *
 * Poll the outstanding transaction status, and return when there's no
 * outstanding transaction. This function can be used in interrupt context or
 * where atomicity is required. The calling thread may wait more than 50ms.
 *
 * Return: 0 on success, or -ETIMEDOUT.
 */
static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt, loop = 50000;

	/* Poll at least for 50ms (20 fps). */
	do {
		cnt = xilinx_dpdma_chan_ostand(chan);
		udelay(1);
	} while (loop-- > 0 && cnt);

	/*
	 * Test the transaction count, not the loop counter: the unsigned
	 * counter wraps around when the poll times out.
	 */
	if (!cnt) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));

	return -ETIMEDOUT;
}

/**
 * xilinx_dpdma_chan_stop - Stop the channel
 * @chan: DPDMA channel
 *
 * Stop a previously paused channel by first waiting for completion of all
 * outstanding transaction and then disabling the channel.
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
{
	unsigned long flags;
	int ret;

	ret = xilinx_dpdma_chan_wait_no_ostand(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_disable(chan);
	chan->running = false;
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/**
 * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
 * @chan: DPDMA channel
 *
 * Handle completion of the currently active descriptor (@chan->desc.active). As
 * we currently support cyclic transfers only, this just invokes the cyclic
 * callback. The descriptor will be completed at the VSYNC interrupt when a new
 * descriptor replaces it.
 */
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dpdma_debugfs_desc_done_irq(chan);

	active = chan->desc.active;
	if (active)
		vchan_cyclic_callback(&active->vdesc);
	else
		dev_warn(chan->xdev->dev,
			 "DONE IRQ with no active descriptor!\n");

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
 * @chan: DPDMA channel
 *
 * At VSYNC the active descriptor may have been replaced by the pending
 * descriptor. Detect this through the DESC_ID and perform appropriate
 * bookkeeping.
 */
static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *pending;
	struct xilinx_dpdma_sw_desc *sw_desc;
	unsigned long flags;
	u32 desc_id;

	spin_lock_irqsave(&chan->lock, flags);

	pending = chan->desc.pending;
	if (!chan->running || !pending)
		goto out;

	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
		& XILINX_DPDMA_CH_DESC_ID_MASK;

	/* If the retrigger raced with vsync, retry at the next frame. */
	sw_desc = list_first_entry(&pending->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	if (sw_desc->hw.desc_id != desc_id)
		goto out;

	/*
	 * Complete the active descriptor, if any, promote the pending
	 * descriptor to active, and queue the next transfer, if any.
	 */
	if (chan->desc.active)
		vchan_cookie_complete(&chan->desc.active->vdesc);
	chan->desc.active = pending;
	chan->desc.pending = NULL;

	xilinx_dpdma_chan_queue_transfer(chan);

out:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_err - Detect any channel error
 * @chan: DPDMA channel
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: true if any channel error occurs, or false otherwise.
 */
static bool
xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
{
	if (!chan)
		return false;

	if (chan->running &&
	    ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
	    (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
		return true;

	return false;
}

/**
 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
 * @chan: DPDMA channel
 *
 * This function is called when any channel error or any global error occurs.
 * The function disables the channel that was paused due to the error, and
 * determines whether the currently active descriptor can be rescheduled,
 * based on the descriptor status.
 */
static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	dev_dbg(xdev->dev, "cur desc addr = 0x%04x%08x\n",
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
	dev_dbg(xdev->dev, "cur payload addr = 0x%04x%08x\n",
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));

	xilinx_dpdma_chan_disable(chan);
	chan->running = false;

	if (!chan->desc.active)
		goto out_unlock;

	active = chan->desc.active;
	chan->desc.active = NULL;

	xilinx_dpdma_chan_dump_tx_desc(chan, active);

	if (active->error)
		dev_dbg(xdev->dev, "repeated error on desc\n");

	/* Reschedule if there's no new descriptor */
	if (!chan->desc.pending &&
	    list_empty(&chan->vchan.desc_issued)) {
		active->error = true;
		list_add_tail(&active->vdesc.node,
			      &chan->vchan.desc_issued);
	} else {
		xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/* -----------------------------------------------------------------------------
 * DMA Engine Operations
 */

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
				  struct dma_interleaved_template *xt,
				  unsigned long flags)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_tx_desc *desc;

	if (xt->dir != DMA_MEM_TO_DEV)
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
		return NULL;

	desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);

	return &desc->vdesc.tx;
}
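
/*
 * Sketch of the client-side call sequence (a hypothetical consumer such as a
 * display driver; error handling omitted):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_interleaved_dma(dchan, xt, DMA_PREP_REPEAT |
 *					    DMA_PREP_LOAD_EOT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(dchan);
 *
 * Both DMA_PREP_REPEAT and DMA_PREP_LOAD_EOT are mandatory, as the checks
 * above reject anything but repeated (cyclic) transfers that are replaced at
 * the end of the current frame.
 */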

/**
 * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
 * @dchan: DMA channel
 *
 * Allocate a descriptor pool for the channel.
 *
 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
 */
static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	size_t align = __alignof__(struct xilinx_dpdma_sw_desc);

	chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
					  chan->xdev->dev,
					  sizeof(struct xilinx_dpdma_sw_desc),
					  align, 0);
	if (!chan->desc_pool) {
		dev_err(chan->xdev->dev,
			"failed to allocate a descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * xilinx_dpdma_free_chan_resources - Free all resources for the channel
 * @dchan: DMA channel
 *
 * Free resources associated with the virtual DMA channel, and destroy the
 * descriptor pool.
 */
static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

	vchan_free_chan_resources(&chan->vchan);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int xilinx_dpdma_config(struct dma_chan *dchan,
			       struct dma_slave_config *config)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	/*
	 * The destination address doesn't need to be specified as the DPDMA is
	 * hardwired to the destination (the DP controller). The transfer
	 * width, burst size and port window size are thus meaningless, they're
	 * fixed both on the DPDMA side and on the DP controller side.
	 */

	spin_lock_irqsave(&chan->lock, flags);

	/*
	 * Abuse the slave_id to indicate that the channel is part of a video
	 * group.
	 */
	if (chan->id <= ZYNQMP_DPDMA_VIDEO2)
		chan->video_group = config->slave_id != 0;

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
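
/*
 * Illustrative client-side configuration (hypothetical consumer) to make a
 * video channel join the video group:
 *
 *	struct dma_slave_config config = {
 *		.slave_id = 1,		// non-zero: part of the video group
 *	};
 *
 *	dmaengine_slave_config(dchan, &config);
 */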

static int xilinx_dpdma_pause(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));

	return 0;
}

static int xilinx_dpdma_resume(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));

	return 0;
}

/**
 * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
 * @dchan: DMA channel
 *
 * Pause the channel without waiting for ongoing transfers to complete. Waiting
 * for completion is performed by xilinx_dpdma_synchronize() that will disable
 * the channel to complete the stop.
 *
 * All the descriptors associated with the channel that are guaranteed not to
 * be touched by the hardware are freed. The pending and active descriptors
 * are not touched, and will be freed either upon completion, or by
 * xilinx_dpdma_synchronize().
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	LIST_HEAD(descriptors);
	unsigned long flags;
	unsigned int i;

	/* Pause the channel (including the whole video group if applicable). */
	if (chan->video_group) {
		for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
			if (xdev->chan[i]->video_group &&
			    xdev->chan[i]->running) {
				xilinx_dpdma_chan_pause(xdev->chan[i]);
				xdev->chan[i]->video_group = false;
			}
		}
	} else {
		xilinx_dpdma_chan_pause(chan);
	}

	/* Gather all the descriptors we can free and free them. */
	spin_lock_irqsave(&chan->vchan.lock, flags);
	vchan_get_all_descriptors(&chan->vchan, &descriptors);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &descriptors);

	return 0;
}

/**
 * xilinx_dpdma_synchronize - Synchronize callback execution
 * @dchan: DMA channel
 *
 * Synchronizing callback execution ensures that all previously issued
 * transfers have completed and all associated callbacks have been called and
 * have returned.
 *
 * This function waits for the DMA channel to stop. It assumes it has been
 * paused by a previous call to dmaengine_terminate_async(), and that no new
 * pending descriptors have been issued with dma_async_issue_pending(). The
 * behaviour is undefined otherwise.
 */
static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	xilinx_dpdma_chan_stop(chan);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (chan->desc.pending) {
		vchan_terminate_vdesc(&chan->desc.pending->vdesc);
		chan->desc.pending = NULL;
	}
	if (chan->desc.active) {
		vchan_terminate_vdesc(&chan->desc.active->vdesc);
		chan->desc.active = NULL;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_synchronize(&chan->vchan);
}
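
/*
 * Sketch of the expected client-side teardown sequence, matching the
 * assumptions documented above:
 *
 *	dmaengine_terminate_async(dchan);	// pauses the channel
 *	dmaengine_synchronize(dchan);		// stops it and reaps descriptors
 */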

/* -----------------------------------------------------------------------------
 * Interrupt and Tasklet Handling
 */

/**
 * xilinx_dpdma_err - Detect any global error
 * @isr: Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: True if any global error occurs, or false otherwise.
 */
static bool xilinx_dpdma_err(u32 isr, u32 eisr)
{
	if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
	    eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
		return true;

	return false;
}

/**
 * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
 * @xdev: DPDMA device
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Handle if any error occurs based on @isr and @eisr. This function disables
 * corresponding error interrupts, and those should be re-enabled once handling
 * is done.
 */
static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
					u32 isr, u32 eisr)
{
	bool err = xilinx_dpdma_err(isr, eisr);
	unsigned int i;

	dev_dbg_ratelimited(xdev->dev,
			    "error irq: isr = 0x%08x, eisr = 0x%08x\n",
			    isr, eisr);

	/* Disable channel error interrupts until errors are handled. */
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
			tasklet_schedule(&xdev->chan[i]->err_task);
}

/**
 * xilinx_dpdma_enable_irq - Enable interrupts
 * @xdev: DPDMA device
 *
 * Enable interrupts.
 */
static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_disable_irq - Disable interrupts
 * @xdev: DPDMA device
 *
 * Disable interrupts.
 */
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
 * @t: pointer to the tasklet associated with this handler
 *
 * Per channel error handling tasklet. This function waits for the outstanding
 * transaction to complete and triggers error handling. After error handling,
 * re-enable channel error interrupts, and restart the channel if needed.
 */
static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
{
	struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	unsigned long flags;

	/* Proceed error handling even when polling fails. */
	xilinx_dpdma_chan_poll_no_ostand(chan);

	xilinx_dpdma_chan_handle_err(chan);

	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
{
	struct xilinx_dpdma_device *xdev = data;
	unsigned long mask;
	unsigned int i;
	u32 status;
	u32 error;

	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
	if (!status && !error)
		return IRQ_NONE;

	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);

	if (status & XILINX_DPDMA_INTR_VSYNC) {
		/*
		 * There's a single VSYNC interrupt that needs to be processed
		 * by each running channel to update the active descriptor.
		 */
		for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
			struct xilinx_dpdma_chan *chan = xdev->chan[i];

			if (chan)
				xilinx_dpdma_chan_vsync_irq(chan);
		}
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_done_irq(xdev->chan[i]);
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
	}

	mask = status & XILINX_DPDMA_INTR_ERR_ALL;
	if (mask || error)
		xilinx_dpdma_handle_err_irq(xdev, mask, error);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * Initialization & Cleanup
 */

static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
				  unsigned int chan_id)
{
	struct xilinx_dpdma_chan *chan;

	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->id = chan_id;
	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
		  + XILINX_DPDMA_CH_OFFSET * chan->id;
	chan->running = false;
	chan->xdev = xdev;

	spin_lock_init(&chan->lock);
	init_waitqueue_head(&chan->wait_to_stop);

	tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);

	chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
	vchan_init(&chan->vchan, &xdev->common);

	xdev->chan[chan->id] = chan;

	return 0;
}

static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
{
	if (!chan)
		return;

	tasklet_kill(&chan->err_task);
	list_del(&chan->vchan.chan.device_node);
}

static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
	uint32_t chan_id = dma_spec->args[0];

	if (chan_id >= ARRAY_SIZE(xdev->chan))
		return NULL;

	if (!xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}
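
/*
 * Illustrative device tree usage (a hypothetical consumer node; the single
 * "dmas" cell is the channel ID matched against xdev->chan[] above):
 *
 *	dpdma: dma-controller@fd4c0000 {
 *		compatible = "xlnx,zynqmp-dpdma";
 *		...
 *		#dma-cells = <1>;
 *	};
 *
 *	client {
 *		dmas = <&dpdma ZYNQMP_DPDMA_VIDEO0>;
 *		dma-names = "vid0";
 *	};
 */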

static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
{
	unsigned int i;
	void __iomem *reg;

	/* Disable all interrupts */
	xilinx_dpdma_disable_irq(xdev);

	/* Stop all channels */
	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
		reg = xdev->reg + XILINX_DPDMA_CH_BASE
				+ XILINX_DPDMA_CH_OFFSET * i;
		dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
	}

	/* Clear the interrupt status registers */
	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
}

static int xilinx_dpdma_probe(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev;
	struct dma_device *ddev;
	unsigned int i;
	int ret;

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	xdev->ext_addr = sizeof(dma_addr_t) > 4;

	INIT_LIST_HEAD(&xdev->common.channels);

	platform_set_drvdata(pdev, xdev);

	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
	if (IS_ERR(xdev->axi_clk))
		return PTR_ERR(xdev->axi_clk);

	xdev->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg))
		return PTR_ERR(xdev->reg);

	dpdma_hw_init(xdev);

	xdev->irq = platform_get_irq(pdev, 0);
	if (xdev->irq < 0) {
		dev_err(xdev->dev, "failed to get platform irq\n");
		return xdev->irq;
	}

	ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
			  dev_name(xdev->dev), xdev);
	if (ret) {
		dev_err(xdev->dev, "failed to request IRQ\n");
		return ret;
	}

	ddev = &xdev->common;
	ddev->dev = &pdev->dev;

	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);

	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
	/* TODO: Can we achieve better granularity ? */
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = xilinx_dpdma_issue_pending;
	ddev->device_config = xilinx_dpdma_config;
	ddev->device_pause = xilinx_dpdma_pause;
	ddev->device_resume = xilinx_dpdma_resume;
	ddev->device_terminate_all = xilinx_dpdma_terminate_all;
	ddev->device_synchronize = xilinx_dpdma_synchronize;
	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
	ddev->directions = BIT(DMA_MEM_TO_DEV);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
		ret = xilinx_dpdma_chan_init(xdev, i);
		if (ret < 0) {
			dev_err(xdev->dev, "failed to initialize channel %u\n",
				i);
			goto error;
		}
	}

	ret = clk_prepare_enable(xdev->axi_clk);
	if (ret) {
		dev_err(xdev->dev, "failed to enable the axi clock\n");
		goto error;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error_dma_async;
	}

	ret = of_dma_controller_register(xdev->dev->of_node,
					 of_dma_xilinx_xlate, ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
		goto error_of_dma;
	}

	xilinx_dpdma_enable_irq(xdev);

	xilinx_dpdma_debugfs_init(xdev);

	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");

	return 0;

error_of_dma:
	dma_async_device_unregister(ddev);
error_dma_async:
	clk_disable_unprepare(xdev->axi_clk);
error:
	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

	free_irq(xdev->irq, xdev);

	return ret;
}

static int xilinx_dpdma_remove(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
	unsigned int i;

	/* Start by disabling the IRQ to avoid races during cleanup. */
	free_irq(xdev->irq, xdev);

	xilinx_dpdma_disable_irq(xdev);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);
	clk_disable_unprepare(xdev->axi_clk);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

	return 0;
}

static const struct of_device_id xilinx_dpdma_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dpdma",},
	{ /* end of table */ },
};
MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);

static struct platform_driver xilinx_dpdma_driver = {
	.probe			= xilinx_dpdma_probe,
	.remove			= xilinx_dpdma_remove,
	.driver			= {
		.name		= "xilinx-zynqmp-dpdma",
		.of_match_table	= xilinx_dpdma_of_match,
	},
};

module_platform_driver(xilinx_dpdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
MODULE_LICENSE("GPL v2");