// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx DMA/Bridge Subsystem
 *
 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
 * Copyright (C) 2022, Advanced Micro Devices, Inc.
 */

/*
 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
 * between Host memory and the DMA subsystem. It does this by operating on
 * 'descriptors' that contain information about the source, destination and
 * amount of data to transfer. These direct memory transfers can be in both
 * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
 * configured to have a single AXI4 Master interface shared by all channels
 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
 * specified on a per-channel basis in descriptor linked lists, which the DMA
 * fetches from host memory and processes. Events such as descriptor completion
 * and errors are signaled using interrupts. The core also provides up to 16
 * user interrupt wires that generate interrupts to the host.
 */
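
/*
 * Illustrative only (not part of this driver): a client sitting on top of
 * the generic dmaengine API would typically drive a channel exposed here
 * roughly as follows, assuming the platform data device map contains an
 * entry for the client ("h2c" and card_addr below are hypothetical):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "h2c");
 *	struct dma_slave_config cfg = { .dst_addr = card_addr };
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */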

#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/regmap.h>
#include <linux/dmaengine.h>
#include <linux/dma/amd_xdma.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_xdma.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "../virt-dma.h"
#include "xdma-regs.h"

/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = XDMA_REG_SPACE_LEN,
};

/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 */
struct xdma_desc_block {
	void *virt_addr;
	dma_addr_t dma_addr;
};

/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure
 * @base: Offset of channel registers
 * @desc_pool: Descriptor pool
 * @busy: Busy flag of the channel
 * @dir: Transferring direction of the channel
 * @cfg: Transferring config of the channel
 * @irq: IRQ assigned to the channel
 */
struct xdma_chan {
	struct virt_dma_chan		vchan;
	void				*xdev_hdl;
	u32				base;
	struct dma_pool			*desc_pool;
	bool				busy;
	enum dma_transfer_direction	dir;
	struct dma_slave_config		cfg;
	u32				irq;
};

/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transferring direction of the request
 * @dev_addr: Physical address on DMA device side
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors
 */
struct xdma_desc {
	struct virt_dma_desc		vdesc;
	struct xdma_chan		*chan;
	enum dma_transfer_direction	dir;
	u64				dev_addr;
	struct xdma_desc_block		*desc_blocks;
	u32				dblk_num;
	u32				desc_num;
	u32				completed_desc_num;
};

#define XDMA_DEV_STATUS_REG_DMA		BIT(0)
#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)
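
/*
 * The status bits above record how far probe progressed, so that
 * xdma_remove() only releases what was actually set up.
 */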

/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels
 * @c2h_chans: Card to Host channels
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQ assigned to device
 * @status: Initialization status
 */
struct xdma_device {
	struct platform_device	*pdev;
	struct dma_device	dma_dev;
	struct regmap		*rmap;
	struct xdma_chan	*h2c_chans;
	struct xdma_chan	*c2h_chans;
	u32			h2c_chan_num;
	u32			c2h_chan_num;
	u32			irq_start;
	u32			irq_num;
	u32			status;
};

#define xdma_err(xdev, fmt, args...)					\
	dev_err(&(xdev)->pdev->dev, fmt, ##args)
#define XDMA_CHAN_NUM(_xd) ({						\
	typeof(_xd) (xd) = (_xd);					\
	((xd)->h2c_chan_num + (xd)->c2h_chan_num); })

/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
	return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}

/**
 * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	u32 last_blk_desc, desc_control;
	struct xdma_hw_desc *desc;
	int i;

	desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
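	/*
	 * Make the last descriptor of each block point at the start of the
	 * next block.  At every XDMA_DESC_BLOCK_MASK boundary the chain is
	 * terminated with a LAST control word instead, so the engine stops
	 * once it has consumed that group of blocks.
	 */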
	for (i = 1; i < sw_desc->dblk_num; i++) {
		block = &sw_desc->desc_blocks[i - 1];
		desc = xdma_blk_last_desc(block);

		if (!(i & XDMA_DESC_BLOCK_MASK)) {
			desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
			continue;
		}
		desc->control = cpu_to_le32(desc_control);
		desc->next_desc = cpu_to_le64(block[1].dma_addr);
	}

	/* update the last block */
	last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
		block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
		desc = xdma_blk_last_desc(block);
		desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
		desc->control = cpu_to_le32(desc_control);
	}

	block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
	desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}

static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct xdma_chan, vchan.chan);
}

static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct xdma_desc, vdesc);
}

/**
 * xdma_channel_init - Initialize DMA channel registers
 * @chan: DMA channel pointer
 */
static int xdma_channel_init(struct xdma_chan *chan)
{
	struct xdma_device *xdev = chan->xdev_hdl;
	int ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_NON_INCR_ADDR);
	if (ret)
		return ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
			   CHAN_IM_ALL);
	if (ret)
		return ret;

	return 0;
}

/**
 * xdma_free_desc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct xdma_desc *sw_desc;
	int i;

	sw_desc = to_xdma_desc(vdesc);
	for (i = 0; i < sw_desc->dblk_num; i++) {
		if (!sw_desc->desc_blocks[i].virt_addr)
			break;
		dma_pool_free(sw_desc->chan->desc_pool,
			      sw_desc->desc_blocks[i].virt_addr,
			      sw_desc->desc_blocks[i].dma_addr);
	}
	kfree(sw_desc->desc_blocks);
	kfree(sw_desc);
}

/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
{
	struct xdma_desc *sw_desc;
	struct xdma_hw_desc *desc;
	dma_addr_t dma_addr;
	u32 dblk_num;
	void *addr;
	int i, j;

	sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
	if (!sw_desc)
		return NULL;

	sw_desc->chan = chan;
	sw_desc->desc_num = desc_num;
	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
				       GFP_NOWAIT);
	if (!sw_desc->desc_blocks)
		goto failed;

	sw_desc->dblk_num = dblk_num;
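	/*
	 * Each pool block holds XDMA_DESC_ADJACENT hardware descriptors.
	 * Initialize every descriptor as a standalone single-descriptor
	 * entry; xdma_link_desc_blocks() below chains them together.
	 */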
	for (i = 0; i < sw_desc->dblk_num; i++) {
		addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
		if (!addr)
			goto failed;

		sw_desc->desc_blocks[i].virt_addr = addr;
		sw_desc->desc_blocks[i].dma_addr = dma_addr;
		for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
			desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0));
	}

	xdma_link_desc_blocks(sw_desc);

	return sw_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct xdma_desc_block *block;
	u32 val, completed_blocks;
	struct xdma_desc *desc;
	int ret;

	/*
	 * Return if there is no submitted descriptor or the channel is busy.
	 * The vchan lock must be held by the caller.
	 */
	if (!vd || xchan->busy)
		return -EINVAL;

	/* clear run stop bit to get ready for transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_RUN_STOP);
	if (ret)
		return ret;

	desc = to_xdma_desc(vd);
	if (desc->dir != xchan->dir) {
		xdma_err(xdev, "incorrect request direction");
		return -EINVAL;
	}

	/* point DMA engine at the first remaining descriptor block */
	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
	block = &desc->desc_blocks[completed_blocks];
	val = lower_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
	if (ret)
		return ret;

	val = upper_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
	if (ret)
		return ret;

	if (completed_blocks + 1 == desc->dblk_num)
		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	else
		val = XDMA_DESC_ADJACENT - 1;
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
	if (ret)
		return ret;

	/* kick off DMA transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
			   CHAN_CTRL_START);
	if (ret)
		return ret;

	xchan->busy = true;
	return 0;
}

/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
			       enum dma_transfer_direction dir)
{
	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
	struct xdma_chan **chans, *xchan;
	u32 base, identifier, target;
	u32 *chan_num;
	int i, j, ret;

	if (dir == DMA_MEM_TO_DEV) {
		base = XDMA_CHAN_H2C_OFFSET;
		target = XDMA_CHAN_H2C_TARGET;
		chans = &xdev->h2c_chans;
		chan_num = &xdev->h2c_chan_num;
	} else if (dir == DMA_DEV_TO_MEM) {
		base = XDMA_CHAN_C2H_OFFSET;
		target = XDMA_CHAN_C2H_TARGET;
		chans = &xdev->c2h_chans;
		chan_num = &xdev->c2h_chan_num;
	} else {
		xdma_err(xdev, "invalid direction specified");
		return -EINVAL;
	}

	/* detect number of available DMA channels */
	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		/* check if this is an available DMA channel */
		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
			(*chan_num)++;
	}

	if (!*chan_num) {
		xdma_err(xdev, "no available channel detected");
		return -EINVAL;
	}

	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
			      GFP_KERNEL);
	if (!*chans)
		return -ENOMEM;

	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
			continue;

		if (j == *chan_num) {
			xdma_err(xdev, "invalid channel number");
			return -EIO;
		}

		/* init channel structure and hardware */
		xchan = &(*chans)[j];
		xchan->xdev_hdl = xdev;
		xchan->base = base + i * XDMA_CHAN_STRIDE;
		xchan->dir = dir;

		ret = xdma_channel_init(xchan);
		if (ret)
			return ret;
		xchan->vchan.desc_free = xdma_free_desc;
		vchan_init(&xchan->vchan, &xdev->dma_dev);

		j++;
	}

	dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
		 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

	return 0;
}

/**
 * xdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void xdma_issue_pending(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
	if (vchan_issue_pending(&xdma_chan->vchan))
		xdma_xfer_start(xdma_chan);
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}

/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct dma_async_tx_descriptor *tx_desc;
	u32 desc_num = 0, i, len, rest;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;
	struct xdma_desc *sw_desc;
	u64 dev_addr, *src, *dst;
	struct scatterlist *sg;
	u64 addr;

	for_each_sg(sgl, sg, sg_len, i)
		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = dir;

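	/*
	 * One endpoint of the copy is the fixed device address from the
	 * slave config, the other walks the scatterlist.  Alias src/dst to
	 * the right variables once so the loop below handles both
	 * directions.
	 */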
	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	dblk = sw_desc->desc_blocks;
	desc = dblk->virt_addr;
	desc_num = 1;
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		rest = sg_dma_len(sg);

		do {
			len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
			/* set hardware descriptor */
			desc->bytes = cpu_to_le32(len);
			desc->src_addr = cpu_to_le64(*src);
			desc->dst_addr = cpu_to_le64(*dst);

			if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
				dblk++;
				desc = dblk->virt_addr;
			} else {
				desc++;
			}

			desc_num++;
			dev_addr += len;
			addr += len;
			rest -= len;
		} while (rest);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}

/**
 * xdma_device_config - Configure the DMA channel
 * @chan: DMA channel
 * @cfg: channel configuration
 */
static int xdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	vchan_free_chan_resources(&xdma_chan->vchan);
	dma_pool_destroy(xdma_chan->desc_pool);
	xdma_chan->desc_pool = NULL;
}

/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	struct device *dev = xdev->dma_dev.dev;

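	/*
	 * The descriptor pool has to be created against the underlying PCI
	 * device rather than the platform device, so walk up the device
	 * hierarchy to find it.
	 */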
	while (dev && !dev_is_pci(dev))
		dev = dev->parent;
	if (!dev) {
		xdma_err(xdev, "unable to find pci device");
		return -EINVAL;
	}

	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
					       dev, XDMA_DESC_BLOCK_SIZE,
					       XDMA_DESC_BLOCK_ALIGN, 0);
	if (!xdma_chan->desc_pool) {
		xdma_err(xdev, "unable to allocate descriptor pool");
		return -ENOMEM;
	}

	return 0;
}

/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev;
	struct virt_dma_desc *vd;
	struct xdma_desc *desc;
	int ret;

	spin_lock(&xchan->vchan.lock);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	xchan->busy = false;
	desc = to_xdma_desc(vd);
	xdev = xchan->xdev_hdl;

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

	desc->completed_desc_num += complete_desc_num;
	/*
	 * if all data blocks are transferred, remove and complete the request
	 */
	if (desc->completed_desc_num == desc->desc_num) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
		goto out;
	}

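	/*
	 * The request is not complete yet.  Restart the engine only if a
	 * full group of descriptor blocks was consumed; an over-count or a
	 * partial group means something went wrong, so bail out and leave
	 * the request pending.
	 */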
	if (desc->completed_desc_num > desc->desc_num ||
	    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
		goto out;

	/* transfer the rest of data */
	xdma_xfer_start(xchan);

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}

/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
	int i;

	/* disable interrupt */
	regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

	/* free irq handler */
	for (i = 0; i < xdev->h2c_chan_num; i++)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	for (i = 0; i < xdev->c2h_chan_num; i++)
		free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}

/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQ
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
			if (!irq_num)
				break;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}

/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	u32 user_irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* config user IRQ registers if needed */
	user_irq_start = XDMA_CHAN_NUM(xdev);
	if (xdev->irq_num > user_irq_start) {
		ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
					  user_irq_start,
					  xdev->irq_num - user_irq_start);
		if (ret) {
			xdma_err(xdev, "failed to set user vectors: %d", ret);
			goto failed_init_c2h;
		}
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}

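/*
 * dmaengine filter callback: match a channel whose fixed transfer
 * direction equals the direction requested in struct xdma_chan_info,
 * which clients pass as the filter parameter via the device map in the
 * platform data.
 */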
static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_chan_info *chan_info = param;

	return chan_info->dir == xdma_chan->dir;
}

/**
 * xdma_disable_user_irq - Disable user interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return;
	}
	index -= XDMA_CHAN_NUM(xdev);

	regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);

/**
 * xdma_enable_user_irq - Enable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;
	int ret;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return -EINVAL;
	}
	index -= XDMA_CHAN_NUM(xdev);

	ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(xdma_enable_user_irq);

/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * Return: The system IRQ number allocated for the given wire index.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq index");
		return -EINVAL;
	}

	return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);

/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_remove(struct platform_device *pdev)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
		xdma_irq_fini(xdev);

	if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
		dma_async_device_unregister(&xdev->dma_dev);

	return 0;
}

/**
 * xdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct xdma_device *xdev;
	void __iomem *reg_base;
	struct resource *res;
	int ret = -ENODEV;

	if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
		dev_err(&pdev->dev, "invalid max dma channels %d",
			pdata->max_dma_channels);
		return -EINVAL;
	}

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, xdev);
	xdev->pdev = pdev;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		xdma_err(xdev, "failed to get irq resource");
		goto failed;
	}
	xdev->irq_start = res->start;
	xdev->irq_num = res->end - res->start + 1;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		xdma_err(xdev, "failed to get io resource");
		goto failed;
	}

	reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg_base)) {
		ret = PTR_ERR(reg_base);
		xdma_err(xdev, "ioremap failed");
		goto failed;
	}

	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
					   &xdma_regmap_config);
	if (IS_ERR(xdev->rmap)) {
		ret = PTR_ERR(xdev->rmap);
		xdma_err(xdev, "config regmap failed: %d", ret);
		goto failed;
	}
	INIT_LIST_HEAD(&xdev->dma_dev.channels);

	ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
	if (ret) {
		xdma_err(xdev, "config H2C channels failed: %d", ret);
		goto failed;
	}

	ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
	if (ret) {
		xdma_err(xdev, "config C2H channels failed: %d", ret);
		goto failed;
	}

	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);

	xdev->dma_dev.dev = &pdev->dev;
	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
	xdev->dma_dev.device_tx_status = dma_cookie_status;
	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
	xdev->dma_dev.device_config = xdma_device_config;
	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
	xdev->dma_dev.filter.map = pdata->device_map;
	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
	xdev->dma_dev.filter.fn = xdma_filter_fn;

	ret = dma_async_device_register(&xdev->dma_dev);
	if (ret) {
		xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_REG_DMA;

	ret = xdma_irq_init(xdev);
	if (ret) {
		xdma_err(xdev, "failed to init msix: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;

	return 0;

failed:
	xdma_remove(pdev);

	return ret;
}

static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0},
	{ },
};

static struct platform_driver xdma_driver = {
	.driver = {
		.name = "xdma",
	},
	.id_table = xdma_id_table,
	.probe = xdma_probe,
	.remove = xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");