xref: /openbmc/linux/drivers/dma/xilinx/xdma.c (revision 17ce2522)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx DMA/Bridge Subsystem
 *
 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
 * Copyright (C) 2022, Advanced Micro Devices, Inc.
 */

/*
 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
 * between Host memory and the DMA subsystem. It does this by operating on
 * 'descriptors' that contain information about the source, destination and
 * amount of data to transfer. These direct memory transfers can run in both
 * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
 * configured to have a single AXI4 Master interface shared by all channels
 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
 * specified on a per-channel basis in descriptor linked lists, which the DMA
 * fetches from host memory and processes. Events such as descriptor completion
 * and errors are signaled using interrupts. The core also provides up to 16
 * user interrupt wires that generate interrupts to the host.
 */
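
/*
 * Usage sketch (illustrative only, not part of this driver): a hypothetical
 * dmaengine client could drive an H2C channel roughly as below. The device
 * pointer "dev", the slave name "h2c0", the scatter table "sgt", the card
 * address "card_addr" and the callback "my_done" are assumptions made for
 * this sketch, not names defined here.
 *
 *	struct dma_slave_config cfg = { .dst_addr = card_addr };
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "h2c0");
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
 *				     DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	tx->callback = my_done;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */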

#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/regmap.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_xdma.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "../virt-dma.h"
#include "xdma-regs.h"

/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = XDMA_REG_SPACE_LEN,
};

/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 */
struct xdma_desc_block {
	void		*virt_addr;
	dma_addr_t	dma_addr;
};

/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure
 * @base: Offset of channel registers
 * @desc_pool: Descriptor pool
 * @busy: Busy flag of the channel
 * @dir: Transfer direction of the channel
 * @cfg: Transfer configuration of the channel
 * @irq: IRQ assigned to the channel
 */
struct xdma_chan {
	struct virt_dma_chan		vchan;
	void				*xdev_hdl;
	u32				base;
	struct dma_pool			*desc_pool;
	bool				busy;
	enum dma_transfer_direction	dir;
	struct dma_slave_config		cfg;
	u32				irq;
};

/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transfer direction of the request
 * @dev_addr: Physical address on DMA device side
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors
 */
struct xdma_desc {
	struct virt_dma_desc		vdesc;
	struct xdma_chan		*chan;
	enum dma_transfer_direction	dir;
	u64				dev_addr;
	struct xdma_desc_block		*desc_blocks;
	u32				dblk_num;
	u32				desc_num;
	u32				completed_desc_num;
};

#define XDMA_DEV_STATUS_REG_DMA		BIT(0)
#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)

/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels
 * @c2h_chans: Card to Host channels
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQs assigned to device
 * @status: Initialization status
 */
struct xdma_device {
	struct platform_device	*pdev;
	struct dma_device	dma_dev;
	struct regmap		*rmap;
	struct xdma_chan	*h2c_chans;
	struct xdma_chan	*c2h_chans;
	u32			h2c_chan_num;
	u32			c2h_chan_num;
	u32			irq_start;
	u32			irq_num;
	u32			status;
};

#define xdma_err(xdev, fmt, args...)					\
	dev_err(&(xdev)->pdev->dev, fmt, ##args)
#define XDMA_CHAN_NUM(_xd) ({						\
	typeof(_xd) (xd) = (_xd);					\
	((xd)->h2c_chan_num + (xd)->c2h_chan_num); })

/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
	return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}

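/*
 * Descriptor chaining, as built by xdma_link_desc_blocks() below (simplified
 * sketch derived from the code): each block holds XDMA_DESC_ADJACENT
 * contiguous hardware descriptors, the last descriptor of a block carries the
 * DMA address of the next block in its next_desc field, and the final
 * descriptor of the transfer is marked with XDMA_DESC_CONTROL_LAST.
 *
 *	block[0]: desc[0] ... desc[N-1] --next_desc--> block[1]
 *	block[1]: desc[0] ... desc[N-1] --next_desc--> block[2]
 *	...
 *	block[last]: desc[0] ... desc[last]            (XDMA_DESC_CONTROL_LAST)
 */
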
/**
 * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	u32 last_blk_desc, desc_control;
	struct xdma_hw_desc *desc;
	int i;

	desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
	for (i = 1; i < sw_desc->dblk_num; i++) {
		block = &sw_desc->desc_blocks[i - 1];
		desc = xdma_blk_last_desc(block);

		if (!(i & XDMA_DESC_BLOCK_MASK)) {
			desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
			continue;
		}
		desc->control = cpu_to_le32(desc_control);
		desc->next_desc = cpu_to_le64(block[1].dma_addr);
	}

	/* update the last block */
	last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
		block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
		desc = xdma_blk_last_desc(block);
		desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
		desc->control = cpu_to_le32(desc_control);
	}

	block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
	desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}

static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct xdma_chan, vchan.chan);
}

static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct xdma_desc, vdesc);
}

/**
 * xdma_channel_init - Initialize DMA channel registers
 * @chan: DMA channel pointer
 */
static int xdma_channel_init(struct xdma_chan *chan)
{
	struct xdma_device *xdev = chan->xdev_hdl;
	int ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_NON_INCR_ADDR);
	if (ret)
		return ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
			   CHAN_IM_ALL);
	if (ret)
		return ret;

	return 0;
}

/**
 * xdma_free_desc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct xdma_desc *sw_desc;
	int i;

	sw_desc = to_xdma_desc(vdesc);
	for (i = 0; i < sw_desc->dblk_num; i++) {
		if (!sw_desc->desc_blocks[i].virt_addr)
			break;
		dma_pool_free(sw_desc->chan->desc_pool,
			      sw_desc->desc_blocks[i].virt_addr,
			      sw_desc->desc_blocks[i].dma_addr);
	}
	kfree(sw_desc->desc_blocks);
	kfree(sw_desc);
}

/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
{
	struct xdma_desc *sw_desc;
	struct xdma_hw_desc *desc;
	dma_addr_t dma_addr;
	u32 dblk_num;
	void *addr;
	int i, j;

	sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
	if (!sw_desc)
		return NULL;

	sw_desc->chan = chan;
	sw_desc->desc_num = desc_num;
	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
				       GFP_NOWAIT);
	if (!sw_desc->desc_blocks)
		goto failed;

	sw_desc->dblk_num = dblk_num;
	for (i = 0; i < sw_desc->dblk_num; i++) {
		addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
		if (!addr)
			goto failed;

		sw_desc->desc_blocks[i].virt_addr = addr;
		sw_desc->desc_blocks[i].dma_addr = dma_addr;
		for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
			desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0));
	}

	xdma_link_desc_blocks(sw_desc);

	return sw_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct xdma_desc_block *block;
	u32 val, completed_blocks;
	struct xdma_desc *desc;
	int ret;

	/*
	 * Bail out if there is no submitted descriptor or the channel is
	 * busy. The vchan lock must be held by the caller.
	 */
	if (!vd || xchan->busy)
		return -EINVAL;

	/* clear run stop bit to get ready for transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_RUN_STOP);
	if (ret)
		return ret;

	desc = to_xdma_desc(vd);
	if (desc->dir != xchan->dir) {
		xdma_err(xdev, "incorrect request direction");
		return -EINVAL;
	}

	/* set DMA engine to the first descriptor block */
	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
	block = &desc->desc_blocks[completed_blocks];
	val = lower_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
	if (ret)
		return ret;

	val = upper_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
	if (ret)
		return ret;

	if (completed_blocks + 1 == desc->dblk_num)
		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	else
		val = XDMA_DESC_ADJACENT - 1;
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
	if (ret)
		return ret;

	/* kick off DMA transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
			   CHAN_CTRL_START);
	if (ret)
		return ret;

	xchan->busy = true;
	return 0;
}

/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
			       enum dma_transfer_direction dir)
{
	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
	struct xdma_chan **chans, *xchan;
	u32 base, identifier, target;
	u32 *chan_num;
	int i, j, ret;

	if (dir == DMA_MEM_TO_DEV) {
		base = XDMA_CHAN_H2C_OFFSET;
		target = XDMA_CHAN_H2C_TARGET;
		chans = &xdev->h2c_chans;
		chan_num = &xdev->h2c_chan_num;
	} else if (dir == DMA_DEV_TO_MEM) {
		base = XDMA_CHAN_C2H_OFFSET;
		target = XDMA_CHAN_C2H_TARGET;
		chans = &xdev->c2h_chans;
		chan_num = &xdev->c2h_chan_num;
	} else {
		xdma_err(xdev, "invalid direction specified");
		return -EINVAL;
	}

	/* detect the number of available DMA channels */
	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		/* check if this is an available DMA channel */
		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
			(*chan_num)++;
	}

	if (!*chan_num) {
		xdma_err(xdev, "no available channel found");
		return -EINVAL;
	}

	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
			      GFP_KERNEL);
	if (!*chans)
		return -ENOMEM;

	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
			continue;

		if (j == *chan_num) {
			xdma_err(xdev, "invalid channel number");
			return -EIO;
		}

		/* init channel structure and hardware */
		xchan = &(*chans)[j];
		xchan->xdev_hdl = xdev;
		xchan->base = base + i * XDMA_CHAN_STRIDE;
		xchan->dir = dir;

		ret = xdma_channel_init(xchan);
		if (ret)
			return ret;
		xchan->vchan.desc_free = xdma_free_desc;
		vchan_init(&xchan->vchan, &xdev->dma_dev);

		j++;
	}

	dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
		 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

	return 0;
}

/**
 * xdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void xdma_issue_pending(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
	if (vchan_issue_pending(&xdma_chan->vchan))
		xdma_xfer_start(xdma_chan);
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}

/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct dma_async_tx_descriptor *tx_desc;
	u32 desc_num = 0, i, len, rest;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;
	struct xdma_desc *sw_desc;
	u64 dev_addr, *src, *dst;
	struct scatterlist *sg;
	u64 addr;

	for_each_sg(sgl, sg, sg_len, i)
		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = dir;

	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	dblk = sw_desc->desc_blocks;
	desc = dblk->virt_addr;
	desc_num = 1;
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		rest = sg_dma_len(sg);

		do {
			len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
			/* set hardware descriptor */
			desc->bytes = cpu_to_le32(len);
			desc->src_addr = cpu_to_le64(*src);
			desc->dst_addr = cpu_to_le64(*dst);

			if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
				dblk++;
				desc = dblk->virt_addr;
			} else {
				desc++;
			}

			desc_num++;
			dev_addr += len;
			addr += len;
			rest -= len;
		} while (rest);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}

/**
 * xdma_device_config - Configure the DMA channel
 * @chan: DMA channel
 * @cfg: channel configuration
 */
static int xdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));

	return 0;
}
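
/*
 * Note: in this revision only the src_addr/dst_addr members of the saved
 * configuration are consumed (by xdma_prep_device_sg(), as the card-side
 * address of the transfer). A minimal client sketch, assuming "chan" was
 * obtained from dma_request_chan() and "card_offset" is a device-specific
 * address chosen by the caller:
 *
 *	struct dma_slave_config cfg = { .dst_addr = card_offset };
 *
 *	dmaengine_slave_config(chan, &cfg);
 */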

/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	vchan_free_chan_resources(&xdma_chan->vchan);
	dma_pool_destroy(xdma_chan->desc_pool);
	xdma_chan->desc_pool = NULL;
}

/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	struct device *dev = xdev->dma_dev.dev;

	while (dev && !dev_is_pci(dev))
		dev = dev->parent;
	if (!dev) {
		xdma_err(xdev, "unable to find pci device");
		return -EINVAL;
	}

	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
					       dev, XDMA_DESC_BLOCK_SIZE,
					       XDMA_DESC_BLOCK_ALIGN, 0);
	if (!xdma_chan->desc_pool) {
		xdma_err(xdev, "unable to allocate descriptor pool");
		return -ENOMEM;
	}

	return 0;
}

/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev;
	struct virt_dma_desc *vd;
	struct xdma_desc *desc;
	int ret;

	spin_lock(&xchan->vchan.lock);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	xchan->busy = false;
	desc = to_xdma_desc(vd);
	xdev = xchan->xdev_hdl;

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

	desc->completed_desc_num += complete_desc_num;
	/*
	 * if all data blocks are transferred, remove and complete the request
	 */
	if (desc->completed_desc_num == desc->desc_num) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
		goto out;
	}

	if (desc->completed_desc_num > desc->desc_num ||
	    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
		goto out;

	/* transfer the rest of data */
	xdma_xfer_start(xchan);

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}

/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
	int i;

	/* disable interrupt */
	regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

	/* free irq handler */
	for (i = 0; i < xdev->h2c_chan_num; i++)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	for (i = 0; i < xdev->c2h_chan_num; i++)
		free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}

/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQs
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
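	/*
	 * Illustrative example (assuming XDMA_IRQ_VEC_SHIFT is 8, i.e. one
	 * byte per vector): with irq_start = 0, the first register is
	 * written as 0x03020100, packing vectors 0..3 for the first four
	 * channels.
	 */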
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}

/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}

static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_chan_info *chan_info = param;

	return chan_info->dir == xdma_chan->dir;
}
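
/*
 * Channels are typically handed out through the filter map that xdma_probe()
 * registers from the platform data (pdata->device_map), with xdma_filter_fn()
 * matching the requested transfer direction. A hypothetical consumer could
 * therefore simply do:
 *
 *	chan = dma_request_chan(&consumer_dev, "h2c");
 *
 * where "consumer_dev" and the slave name "h2c" are assumptions that depend
 * on the slave map supplied by the parent PCIe driver.
 */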

/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_remove(struct platform_device *pdev)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
		xdma_irq_fini(xdev);

	if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
		dma_async_device_unregister(&xdev->dma_dev);

	return 0;
}

/**
 * xdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct xdma_device *xdev;
	void __iomem *reg_base;
	struct resource *res;
	int ret = -ENODEV;

	if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
		dev_err(&pdev->dev, "invalid max dma channels %d",
			pdata->max_dma_channels);
		return -EINVAL;
	}

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, xdev);
	xdev->pdev = pdev;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		xdma_err(xdev, "failed to get irq resource");
		goto failed;
	}
	xdev->irq_start = res->start;
	xdev->irq_num = res->end - res->start + 1;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		xdma_err(xdev, "failed to get io resource");
		goto failed;
	}

	reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg_base)) {
		ret = PTR_ERR(reg_base);
		xdma_err(xdev, "ioremap failed: %d", ret);
		goto failed;
	}

	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
					   &xdma_regmap_config);
	if (IS_ERR(xdev->rmap)) {
		ret = PTR_ERR(xdev->rmap);
		xdma_err(xdev, "config regmap failed: %d", ret);
		goto failed;
	}
	INIT_LIST_HEAD(&xdev->dma_dev.channels);

	ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
	if (ret) {
		xdma_err(xdev, "config H2C channels failed: %d", ret);
		goto failed;
	}

	ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
	if (ret) {
		xdma_err(xdev, "config C2H channels failed: %d", ret);
		goto failed;
	}

	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);

	xdev->dma_dev.dev = &pdev->dev;
	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
	xdev->dma_dev.device_tx_status = dma_cookie_status;
	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
	xdev->dma_dev.device_config = xdma_device_config;
	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
	xdev->dma_dev.filter.map = pdata->device_map;
	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
	xdev->dma_dev.filter.fn = xdma_filter_fn;

	ret = dma_async_device_register(&xdev->dma_dev);
	if (ret) {
		xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_REG_DMA;

	ret = xdma_irq_init(xdev);
	if (ret) {
		xdma_err(xdev, "failed to init msix: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;

	return 0;

failed:
	xdma_remove(pdev);

	return ret;
}

static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0},
	{ },
};

static struct platform_driver xdma_driver = {
	.driver		= {
		.name = "xdma",
	},
	.id_table	= xdma_id_table,
	.probe		= xdma_probe,
	.remove		= xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");