1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * DMA driver for Xilinx DMA/Bridge Subsystem 4 * 5 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved. 6 * Copyright (C) 2022, Advanced Micro Devices, Inc. 7 */ 8 9 /* 10 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data 11 * between Host memory and the DMA subsystem. It does this by operating on 12 * 'descriptors' that contain information about the source, destination and 13 * amount of data to transfer. These direct memory transfers can be both in 14 * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be 15 * configured to have a single AXI4 Master interface shared by all channels 16 * or one AXI4-Stream interface for each channel enabled. Memory transfers are 17 * specified on a per-channel basis in descriptor linked lists, which the DMA 18 * fetches from host memory and processes. Events such as descriptor completion 19 * and errors are signaled using interrupts. The core also provides up to 16 20 * user interrupt wires that generate interrupts to the host. 21 */ 22 23 #include <linux/mod_devicetable.h> 24 #include <linux/bitfield.h> 25 #include <linux/dmapool.h> 26 #include <linux/regmap.h> 27 #include <linux/dmaengine.h> 28 #include <linux/dma/amd_xdma.h> 29 #include <linux/platform_device.h> 30 #include <linux/platform_data/amd_xdma.h> 31 #include <linux/dma-mapping.h> 32 #include <linux/pci.h> 33 #include "../virt-dma.h" 34 #include "xdma-regs.h" 35 36 /* mmio regmap config for all XDMA registers */ 37 static const struct regmap_config xdma_regmap_config = { 38 .reg_bits = 32, 39 .val_bits = 32, 40 .reg_stride = 4, 41 .max_register = XDMA_REG_SPACE_LEN, 42 }; 43 44 /** 45 * struct xdma_desc_block - Descriptor block 46 * @virt_addr: Virtual address of block start 47 * @dma_addr: DMA address of block start 48 */ 49 struct xdma_desc_block { 50 void *virt_addr; 51 dma_addr_t dma_addr; 52 }; 53 54 /** 55 * struct xdma_chan - Driver specific DMA channel structure 56 * @vchan: Virtual channel 57 * @xdev_hdl: Pointer to DMA device structure 58 * @base: Offset of channel registers 59 * @desc_pool: Descriptor pool 60 * @busy: Busy flag of the channel 61 * @dir: Transferring direction of the channel 62 * @cfg: Transferring config of the channel 63 * @irq: IRQ assigned to the channel 64 */ 65 struct xdma_chan { 66 struct virt_dma_chan vchan; 67 void *xdev_hdl; 68 u32 base; 69 struct dma_pool *desc_pool; 70 bool busy; 71 enum dma_transfer_direction dir; 72 struct dma_slave_config cfg; 73 u32 irq; 74 }; 75 76 /** 77 * struct xdma_desc - DMA desc structure 78 * @vdesc: Virtual DMA descriptor 79 * @chan: DMA channel pointer 80 * @dir: Transferring direction of the request 81 * @dev_addr: Physical address on DMA device side 82 * @desc_blocks: Hardware descriptor blocks 83 * @dblk_num: Number of hardware descriptor blocks 84 * @desc_num: Number of hardware descriptors 85 * @completed_desc_num: Completed hardware descriptors 86 */ 87 struct xdma_desc { 88 struct virt_dma_desc vdesc; 89 struct xdma_chan *chan; 90 enum dma_transfer_direction dir; 91 u64 dev_addr; 92 struct xdma_desc_block *desc_blocks; 93 u32 dblk_num; 94 u32 desc_num; 95 u32 completed_desc_num; 96 }; 97 98 #define XDMA_DEV_STATUS_REG_DMA BIT(0) 99 #define XDMA_DEV_STATUS_INIT_MSIX BIT(1) 100 101 /** 102 * struct xdma_device - DMA device structure 103 * @pdev: Platform device pointer 104 * @dma_dev: DMA device structure 105 * @rmap: MMIO regmap for DMA registers 106 * @h2c_chans: Host to Card channels 107 * @c2h_chans: Card to Host channels 108 * @h2c_chan_num: Number of H2C channels 109 * @c2h_chan_num: Number of C2H channels 110 * @irq_start: Start IRQ assigned to device 111 * @irq_num: Number of IRQ assigned to device 112 * @status: Initialization status 113 */ 114 struct xdma_device { 115 struct platform_device *pdev; 116 struct dma_device dma_dev; 117 struct regmap *rmap; 118 struct xdma_chan *h2c_chans; 119 struct xdma_chan *c2h_chans; 120 u32 h2c_chan_num; 121 u32 c2h_chan_num; 122 u32 irq_start; 123 u32 irq_num; 124 u32 status; 125 }; 126 127 #define xdma_err(xdev, fmt, args...) \ 128 dev_err(&(xdev)->pdev->dev, fmt, ##args) 129 #define XDMA_CHAN_NUM(_xd) ({ \ 130 typeof(_xd) (xd) = (_xd); \ 131 ((xd)->h2c_chan_num + (xd)->c2h_chan_num); }) 132 133 /* Get the last desc in a desc block */ 134 static inline void *xdma_blk_last_desc(struct xdma_desc_block *block) 135 { 136 return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE; 137 } 138 139 /** 140 * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer 141 * @sw_desc: Tx descriptor pointer 142 */ 143 static void xdma_link_desc_blocks(struct xdma_desc *sw_desc) 144 { 145 struct xdma_desc_block *block; 146 u32 last_blk_desc, desc_control; 147 struct xdma_hw_desc *desc; 148 int i; 149 150 desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0); 151 for (i = 1; i < sw_desc->dblk_num; i++) { 152 block = &sw_desc->desc_blocks[i - 1]; 153 desc = xdma_blk_last_desc(block); 154 155 if (!(i & XDMA_DESC_BLOCK_MASK)) { 156 desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); 157 continue; 158 } 159 desc->control = cpu_to_le32(desc_control); 160 desc->next_desc = cpu_to_le64(block[1].dma_addr); 161 } 162 163 /* update the last block */ 164 last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK; 165 if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) { 166 block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2]; 167 desc = xdma_blk_last_desc(block); 168 desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0); 169 desc->control = cpu_to_le32(desc_control); 170 } 171 172 block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1]; 173 desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE; 174 desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); 175 } 176 177 static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan) 178 { 179 return container_of(chan, struct xdma_chan, vchan.chan); 180 } 181 182 static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc) 183 { 184 return container_of(vdesc, struct xdma_desc, vdesc); 185 } 186 187 /** 188 * xdma_channel_init - Initialize DMA channel registers 189 * @chan: DMA channel pointer 190 */ 191 static int xdma_channel_init(struct xdma_chan *chan) 192 { 193 struct xdma_device *xdev = chan->xdev_hdl; 194 int ret; 195 196 ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C, 197 CHAN_CTRL_NON_INCR_ADDR); 198 if (ret) 199 return ret; 200 201 ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE, 202 CHAN_IM_ALL); 203 if (ret) 204 return ret; 205 206 return 0; 207 } 208 209 /** 210 * xdma_free_desc - Free descriptor 211 * @vdesc: Virtual DMA descriptor 212 */ 213 static void xdma_free_desc(struct virt_dma_desc *vdesc) 214 { 215 struct xdma_desc *sw_desc; 216 int i; 217 218 sw_desc = to_xdma_desc(vdesc); 219 for (i = 0; i < sw_desc->dblk_num; i++) { 220 if (!sw_desc->desc_blocks[i].virt_addr) 221 break; 222 dma_pool_free(sw_desc->chan->desc_pool, 223 sw_desc->desc_blocks[i].virt_addr, 224 sw_desc->desc_blocks[i].dma_addr); 225 } 226 kfree(sw_desc->desc_blocks); 227 kfree(sw_desc); 228 } 229 230 /** 231 * xdma_alloc_desc - Allocate descriptor 232 * @chan: DMA channel pointer 233 * @desc_num: Number of hardware descriptors 234 */ 235 static struct xdma_desc * 236 xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num) 237 { 238 struct xdma_desc *sw_desc; 239 struct xdma_hw_desc *desc; 240 dma_addr_t dma_addr; 241 u32 dblk_num; 242 void *addr; 243 int i, j; 244 245 sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT); 246 if (!sw_desc) 247 return NULL; 248 249 sw_desc->chan = chan; 250 sw_desc->desc_num = desc_num; 251 dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT); 252 sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks), 253 GFP_NOWAIT); 254 if (!sw_desc->desc_blocks) 255 goto failed; 256 257 sw_desc->dblk_num = dblk_num; 258 for (i = 0; i < sw_desc->dblk_num; i++) { 259 addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr); 260 if (!addr) 261 goto failed; 262 263 sw_desc->desc_blocks[i].virt_addr = addr; 264 sw_desc->desc_blocks[i].dma_addr = dma_addr; 265 for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++) 266 desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0)); 267 } 268 269 xdma_link_desc_blocks(sw_desc); 270 271 return sw_desc; 272 273 failed: 274 xdma_free_desc(&sw_desc->vdesc); 275 return NULL; 276 } 277 278 /** 279 * xdma_xfer_start - Start DMA transfer 280 * @xchan: DMA channel pointer 281 */ 282 static int xdma_xfer_start(struct xdma_chan *xchan) 283 { 284 struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan); 285 struct xdma_device *xdev = xchan->xdev_hdl; 286 struct xdma_desc_block *block; 287 u32 val, completed_blocks; 288 struct xdma_desc *desc; 289 int ret; 290 291 /* 292 * check if there is not any submitted descriptor or channel is busy. 293 * vchan lock should be held where this function is called. 294 */ 295 if (!vd || xchan->busy) 296 return -EINVAL; 297 298 /* clear run stop bit to get ready for transfer */ 299 ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C, 300 CHAN_CTRL_RUN_STOP); 301 if (ret) 302 return ret; 303 304 desc = to_xdma_desc(vd); 305 if (desc->dir != xchan->dir) { 306 xdma_err(xdev, "incorrect request direction"); 307 return -EINVAL; 308 } 309 310 /* set DMA engine to the first descriptor block */ 311 completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT; 312 block = &desc->desc_blocks[completed_blocks]; 313 val = lower_32_bits(block->dma_addr); 314 ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val); 315 if (ret) 316 return ret; 317 318 val = upper_32_bits(block->dma_addr); 319 ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val); 320 if (ret) 321 return ret; 322 323 if (completed_blocks + 1 == desc->dblk_num) 324 val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK; 325 else 326 val = XDMA_DESC_ADJACENT - 1; 327 ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val); 328 if (ret) 329 return ret; 330 331 /* kick off DMA transfer */ 332 ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL, 333 CHAN_CTRL_START); 334 if (ret) 335 return ret; 336 337 xchan->busy = true; 338 return 0; 339 } 340 341 /** 342 * xdma_alloc_channels - Detect and allocate DMA channels 343 * @xdev: DMA device pointer 344 * @dir: Channel direction 345 */ 346 static int xdma_alloc_channels(struct xdma_device *xdev, 347 enum dma_transfer_direction dir) 348 { 349 struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev); 350 struct xdma_chan **chans, *xchan; 351 u32 base, identifier, target; 352 u32 *chan_num; 353 int i, j, ret; 354 355 if (dir == DMA_MEM_TO_DEV) { 356 base = XDMA_CHAN_H2C_OFFSET; 357 target = XDMA_CHAN_H2C_TARGET; 358 chans = &xdev->h2c_chans; 359 chan_num = &xdev->h2c_chan_num; 360 } else if (dir == DMA_DEV_TO_MEM) { 361 base = XDMA_CHAN_C2H_OFFSET; 362 target = XDMA_CHAN_C2H_TARGET; 363 chans = &xdev->c2h_chans; 364 chan_num = &xdev->c2h_chan_num; 365 } else { 366 xdma_err(xdev, "invalid direction specified"); 367 return -EINVAL; 368 } 369 370 /* detect number of available DMA channels */ 371 for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) { 372 ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE, 373 &identifier); 374 if (ret) 375 return ret; 376 377 /* check if it is available DMA channel */ 378 if (XDMA_CHAN_CHECK_TARGET(identifier, target)) 379 (*chan_num)++; 380 } 381 382 if (!*chan_num) { 383 xdma_err(xdev, "does not probe any channel"); 384 return -EINVAL; 385 } 386 387 *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans), 388 GFP_KERNEL); 389 if (!*chans) 390 return -ENOMEM; 391 392 for (i = 0, j = 0; i < pdata->max_dma_channels; i++) { 393 ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE, 394 &identifier); 395 if (ret) 396 return ret; 397 398 if (!XDMA_CHAN_CHECK_TARGET(identifier, target)) 399 continue; 400 401 if (j == *chan_num) { 402 xdma_err(xdev, "invalid channel number"); 403 return -EIO; 404 } 405 406 /* init channel structure and hardware */ 407 xchan = &(*chans)[j]; 408 xchan->xdev_hdl = xdev; 409 xchan->base = base + i * XDMA_CHAN_STRIDE; 410 xchan->dir = dir; 411 412 ret = xdma_channel_init(xchan); 413 if (ret) 414 return ret; 415 xchan->vchan.desc_free = xdma_free_desc; 416 vchan_init(&xchan->vchan, &xdev->dma_dev); 417 418 j++; 419 } 420 421 dev_info(&xdev->pdev->dev, "configured %d %s channels", j, 422 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H"); 423 424 return 0; 425 } 426 427 /** 428 * xdma_issue_pending - Issue pending transactions 429 * @chan: DMA channel pointer 430 */ 431 static void xdma_issue_pending(struct dma_chan *chan) 432 { 433 struct xdma_chan *xdma_chan = to_xdma_chan(chan); 434 unsigned long flags; 435 436 spin_lock_irqsave(&xdma_chan->vchan.lock, flags); 437 if (vchan_issue_pending(&xdma_chan->vchan)) 438 xdma_xfer_start(xdma_chan); 439 spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); 440 } 441 442 /** 443 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction 444 * @chan: DMA channel pointer 445 * @sgl: Transfer scatter gather list 446 * @sg_len: Length of scatter gather list 447 * @dir: Transfer direction 448 * @flags: transfer ack flags 449 * @context: APP words of the descriptor 450 */ 451 static struct dma_async_tx_descriptor * 452 xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl, 453 unsigned int sg_len, enum dma_transfer_direction dir, 454 unsigned long flags, void *context) 455 { 456 struct xdma_chan *xdma_chan = to_xdma_chan(chan); 457 struct dma_async_tx_descriptor *tx_desc; 458 u32 desc_num = 0, i, len, rest; 459 struct xdma_desc_block *dblk; 460 struct xdma_hw_desc *desc; 461 struct xdma_desc *sw_desc; 462 u64 dev_addr, *src, *dst; 463 struct scatterlist *sg; 464 u64 addr; 465 466 for_each_sg(sgl, sg, sg_len, i) 467 desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX); 468 469 sw_desc = xdma_alloc_desc(xdma_chan, desc_num); 470 if (!sw_desc) 471 return NULL; 472 sw_desc->dir = dir; 473 474 if (dir == DMA_MEM_TO_DEV) { 475 dev_addr = xdma_chan->cfg.dst_addr; 476 src = &addr; 477 dst = &dev_addr; 478 } else { 479 dev_addr = xdma_chan->cfg.src_addr; 480 src = &dev_addr; 481 dst = &addr; 482 } 483 484 dblk = sw_desc->desc_blocks; 485 desc = dblk->virt_addr; 486 desc_num = 1; 487 for_each_sg(sgl, sg, sg_len, i) { 488 addr = sg_dma_address(sg); 489 rest = sg_dma_len(sg); 490 491 do { 492 len = min_t(u32, rest, XDMA_DESC_BLEN_MAX); 493 /* set hardware descriptor */ 494 desc->bytes = cpu_to_le32(len); 495 desc->src_addr = cpu_to_le64(*src); 496 desc->dst_addr = cpu_to_le64(*dst); 497 498 if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) { 499 dblk++; 500 desc = dblk->virt_addr; 501 } else { 502 desc++; 503 } 504 505 desc_num++; 506 dev_addr += len; 507 addr += len; 508 rest -= len; 509 } while (rest); 510 } 511 512 tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); 513 if (!tx_desc) 514 goto failed; 515 516 return tx_desc; 517 518 failed: 519 xdma_free_desc(&sw_desc->vdesc); 520 521 return NULL; 522 } 523 524 /** 525 * xdma_device_config - Configure the DMA channel 526 * @chan: DMA channel 527 * @cfg: channel configuration 528 */ 529 static int xdma_device_config(struct dma_chan *chan, 530 struct dma_slave_config *cfg) 531 { 532 struct xdma_chan *xdma_chan = to_xdma_chan(chan); 533 534 memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg)); 535 536 return 0; 537 } 538 539 /** 540 * xdma_free_chan_resources - Free channel resources 541 * @chan: DMA channel 542 */ 543 static void xdma_free_chan_resources(struct dma_chan *chan) 544 { 545 struct xdma_chan *xdma_chan = to_xdma_chan(chan); 546 547 vchan_free_chan_resources(&xdma_chan->vchan); 548 dma_pool_destroy(xdma_chan->desc_pool); 549 xdma_chan->desc_pool = NULL; 550 } 551 552 /** 553 * xdma_alloc_chan_resources - Allocate channel resources 554 * @chan: DMA channel 555 */ 556 static int xdma_alloc_chan_resources(struct dma_chan *chan) 557 { 558 struct xdma_chan *xdma_chan = to_xdma_chan(chan); 559 struct xdma_device *xdev = xdma_chan->xdev_hdl; 560 struct device *dev = xdev->dma_dev.dev; 561 562 while (dev && !dev_is_pci(dev)) 563 dev = dev->parent; 564 if (!dev) { 565 xdma_err(xdev, "unable to find pci device"); 566 return -EINVAL; 567 } 568 569 xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), 570 dev, XDMA_DESC_BLOCK_SIZE, 571 XDMA_DESC_BLOCK_ALIGN, 0); 572 if (!xdma_chan->desc_pool) { 573 xdma_err(xdev, "unable to allocate descriptor pool"); 574 return -ENOMEM; 575 } 576 577 return 0; 578 } 579 580 /** 581 * xdma_channel_isr - XDMA channel interrupt handler 582 * @irq: IRQ number 583 * @dev_id: Pointer to the DMA channel structure 584 */ 585 static irqreturn_t xdma_channel_isr(int irq, void *dev_id) 586 { 587 struct xdma_chan *xchan = dev_id; 588 u32 complete_desc_num = 0; 589 struct xdma_device *xdev; 590 struct virt_dma_desc *vd; 591 struct xdma_desc *desc; 592 int ret; 593 594 spin_lock(&xchan->vchan.lock); 595 596 /* get submitted request */ 597 vd = vchan_next_desc(&xchan->vchan); 598 if (!vd) 599 goto out; 600 601 xchan->busy = false; 602 desc = to_xdma_desc(vd); 603 xdev = xchan->xdev_hdl; 604 605 ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC, 606 &complete_desc_num); 607 if (ret) 608 goto out; 609 610 desc->completed_desc_num += complete_desc_num; 611 /* 612 * if all data blocks are transferred, remove and complete the request 613 */ 614 if (desc->completed_desc_num == desc->desc_num) { 615 list_del(&vd->node); 616 vchan_cookie_complete(vd); 617 goto out; 618 } 619 620 if (desc->completed_desc_num > desc->desc_num || 621 complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) 622 goto out; 623 624 /* transfer the rest of data */ 625 xdma_xfer_start(xchan); 626 627 out: 628 spin_unlock(&xchan->vchan.lock); 629 return IRQ_HANDLED; 630 } 631 632 /** 633 * xdma_irq_fini - Uninitialize IRQ 634 * @xdev: DMA device pointer 635 */ 636 static void xdma_irq_fini(struct xdma_device *xdev) 637 { 638 int i; 639 640 /* disable interrupt */ 641 regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0); 642 643 /* free irq handler */ 644 for (i = 0; i < xdev->h2c_chan_num; i++) 645 free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); 646 647 for (i = 0; i < xdev->c2h_chan_num; i++) 648 free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]); 649 } 650 651 /** 652 * xdma_set_vector_reg - configure hardware IRQ registers 653 * @xdev: DMA device pointer 654 * @vec_tbl_start: Start of IRQ registers 655 * @irq_start: Start of IRQ 656 * @irq_num: Number of IRQ 657 */ 658 static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start, 659 u32 irq_start, u32 irq_num) 660 { 661 u32 shift, i, val = 0; 662 int ret; 663 664 /* Each IRQ register is 32 bit and contains 4 IRQs */ 665 while (irq_num > 0) { 666 for (i = 0; i < 4; i++) { 667 shift = XDMA_IRQ_VEC_SHIFT * i; 668 val |= irq_start << shift; 669 irq_start++; 670 irq_num--; 671 } 672 673 /* write IRQ register */ 674 ret = regmap_write(xdev->rmap, vec_tbl_start, val); 675 if (ret) 676 return ret; 677 vec_tbl_start += sizeof(u32); 678 val = 0; 679 } 680 681 return 0; 682 } 683 684 /** 685 * xdma_irq_init - initialize IRQs 686 * @xdev: DMA device pointer 687 */ 688 static int xdma_irq_init(struct xdma_device *xdev) 689 { 690 u32 irq = xdev->irq_start; 691 u32 user_irq_start; 692 int i, j, ret; 693 694 /* return failure if there are not enough IRQs */ 695 if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) { 696 xdma_err(xdev, "not enough irq"); 697 return -EINVAL; 698 } 699 700 /* setup H2C interrupt handler */ 701 for (i = 0; i < xdev->h2c_chan_num; i++) { 702 ret = request_irq(irq, xdma_channel_isr, 0, 703 "xdma-h2c-channel", &xdev->h2c_chans[i]); 704 if (ret) { 705 xdma_err(xdev, "H2C channel%d request irq%d failed: %d", 706 i, irq, ret); 707 goto failed_init_h2c; 708 } 709 xdev->h2c_chans[i].irq = irq; 710 irq++; 711 } 712 713 /* setup C2H interrupt handler */ 714 for (j = 0; j < xdev->c2h_chan_num; j++) { 715 ret = request_irq(irq, xdma_channel_isr, 0, 716 "xdma-c2h-channel", &xdev->c2h_chans[j]); 717 if (ret) { 718 xdma_err(xdev, "H2C channel%d request irq%d failed: %d", 719 j, irq, ret); 720 goto failed_init_c2h; 721 } 722 xdev->c2h_chans[j].irq = irq; 723 irq++; 724 } 725 726 /* config hardware IRQ registers */ 727 ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0, 728 XDMA_CHAN_NUM(xdev)); 729 if (ret) { 730 xdma_err(xdev, "failed to set channel vectors: %d", ret); 731 goto failed_init_c2h; 732 } 733 734 /* config user IRQ registers if needed */ 735 user_irq_start = XDMA_CHAN_NUM(xdev); 736 if (xdev->irq_num > user_irq_start) { 737 ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM, 738 user_irq_start, 739 xdev->irq_num - user_irq_start); 740 if (ret) { 741 xdma_err(xdev, "failed to set user vectors: %d", ret); 742 goto failed_init_c2h; 743 } 744 } 745 746 /* enable interrupt */ 747 ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0); 748 if (ret) 749 goto failed_init_c2h; 750 751 return 0; 752 753 failed_init_c2h: 754 while (j--) 755 free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]); 756 failed_init_h2c: 757 while (i--) 758 free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]); 759 760 return ret; 761 } 762 763 static bool xdma_filter_fn(struct dma_chan *chan, void *param) 764 { 765 struct xdma_chan *xdma_chan = to_xdma_chan(chan); 766 struct xdma_chan_info *chan_info = param; 767 768 return chan_info->dir == xdma_chan->dir; 769 } 770 771 /** 772 * xdma_disable_user_irq - Disable user interrupt 773 * @pdev: Pointer to the platform_device structure 774 * @irq_num: System IRQ number 775 */ 776 void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num) 777 { 778 struct xdma_device *xdev = platform_get_drvdata(pdev); 779 u32 index; 780 781 index = irq_num - xdev->irq_start; 782 if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { 783 xdma_err(xdev, "invalid user irq number"); 784 return; 785 } 786 index -= XDMA_CHAN_NUM(xdev); 787 788 regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index); 789 } 790 EXPORT_SYMBOL(xdma_disable_user_irq); 791 792 /** 793 * xdma_enable_user_irq - Enable user logic interrupt 794 * @pdev: Pointer to the platform_device structure 795 * @irq_num: System IRQ number 796 */ 797 int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num) 798 { 799 struct xdma_device *xdev = platform_get_drvdata(pdev); 800 u32 index; 801 int ret; 802 803 index = irq_num - xdev->irq_start; 804 if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) { 805 xdma_err(xdev, "invalid user irq number"); 806 return -EINVAL; 807 } 808 index -= XDMA_CHAN_NUM(xdev); 809 810 ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index); 811 if (ret) 812 return ret; 813 814 return 0; 815 } 816 EXPORT_SYMBOL(xdma_enable_user_irq); 817 818 /** 819 * xdma_get_user_irq - Get system IRQ number 820 * @pdev: Pointer to the platform_device structure 821 * @user_irq_index: User logic IRQ wire index 822 * 823 * Return: The system IRQ number allocated for the given wire index. 824 */ 825 int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index) 826 { 827 struct xdma_device *xdev = platform_get_drvdata(pdev); 828 829 if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) { 830 xdma_err(xdev, "invalid user irq index"); 831 return -EINVAL; 832 } 833 834 return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index; 835 } 836 EXPORT_SYMBOL(xdma_get_user_irq); 837 838 /** 839 * xdma_remove - Driver remove function 840 * @pdev: Pointer to the platform_device structure 841 */ 842 static int xdma_remove(struct platform_device *pdev) 843 { 844 struct xdma_device *xdev = platform_get_drvdata(pdev); 845 846 if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX) 847 xdma_irq_fini(xdev); 848 849 if (xdev->status & XDMA_DEV_STATUS_REG_DMA) 850 dma_async_device_unregister(&xdev->dma_dev); 851 852 return 0; 853 } 854 855 /** 856 * xdma_probe - Driver probe function 857 * @pdev: Pointer to the platform_device structure 858 */ 859 static int xdma_probe(struct platform_device *pdev) 860 { 861 struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev); 862 struct xdma_device *xdev; 863 void __iomem *reg_base; 864 struct resource *res; 865 int ret = -ENODEV; 866 867 if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) { 868 dev_err(&pdev->dev, "invalid max dma channels %d", 869 pdata->max_dma_channels); 870 return -EINVAL; 871 } 872 873 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); 874 if (!xdev) 875 return -ENOMEM; 876 877 platform_set_drvdata(pdev, xdev); 878 xdev->pdev = pdev; 879 880 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 881 if (!res) { 882 xdma_err(xdev, "failed to get irq resource"); 883 goto failed; 884 } 885 xdev->irq_start = res->start; 886 xdev->irq_num = res->end - res->start + 1; 887 888 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 889 if (!res) { 890 xdma_err(xdev, "failed to get io resource"); 891 goto failed; 892 } 893 894 reg_base = devm_ioremap_resource(&pdev->dev, res); 895 if (!reg_base) { 896 xdma_err(xdev, "ioremap failed"); 897 goto failed; 898 } 899 900 xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base, 901 &xdma_regmap_config); 902 if (!xdev->rmap) { 903 xdma_err(xdev, "config regmap failed: %d", ret); 904 goto failed; 905 } 906 INIT_LIST_HEAD(&xdev->dma_dev.channels); 907 908 ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV); 909 if (ret) { 910 xdma_err(xdev, "config H2C channels failed: %d", ret); 911 goto failed; 912 } 913 914 ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM); 915 if (ret) { 916 xdma_err(xdev, "config C2H channels failed: %d", ret); 917 goto failed; 918 } 919 920 dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask); 921 dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask); 922 923 xdev->dma_dev.dev = &pdev->dev; 924 xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources; 925 xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources; 926 xdev->dma_dev.device_tx_status = dma_cookie_status; 927 xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg; 928 xdev->dma_dev.device_config = xdma_device_config; 929 xdev->dma_dev.device_issue_pending = xdma_issue_pending; 930 xdev->dma_dev.filter.map = pdata->device_map; 931 xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt; 932 xdev->dma_dev.filter.fn = xdma_filter_fn; 933 934 ret = dma_async_device_register(&xdev->dma_dev); 935 if (ret) { 936 xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret); 937 goto failed; 938 } 939 xdev->status |= XDMA_DEV_STATUS_REG_DMA; 940 941 ret = xdma_irq_init(xdev); 942 if (ret) { 943 xdma_err(xdev, "failed to init msix: %d", ret); 944 goto failed; 945 } 946 xdev->status |= XDMA_DEV_STATUS_INIT_MSIX; 947 948 return 0; 949 950 failed: 951 xdma_remove(pdev); 952 953 return ret; 954 } 955 956 static const struct platform_device_id xdma_id_table[] = { 957 { "xdma", 0}, 958 { }, 959 }; 960 961 static struct platform_driver xdma_driver = { 962 .driver = { 963 .name = "xdma", 964 }, 965 .id_table = xdma_id_table, 966 .probe = xdma_probe, 967 .remove = xdma_remove, 968 }; 969 970 module_platform_driver(xdma_driver); 971 972 MODULE_DESCRIPTION("AMD XDMA driver"); 973 MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>"); 974 MODULE_LICENSE("GPL"); 975