// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma *dw = chan->chip->dw;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/* Toggling change bit (CB) in each chunk, this is a mechanism to
	 * inform the eDMA HW block that this is a new linked list ready
	 * to be consumed.
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
	chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;
	LIST_HEAD(head);

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, probably didn't
		 * receive or lost an interrupt
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}

static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt;
	int i;

	if (!chan->configured)
		return NULL;

	switch (chan->config.direction) {
	case DMA_DEV_TO_MEM: /* local dma */
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
			break;
		return NULL;
	case DMA_MEM_TO_DEV: /* local dma */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	default: /* remote dma */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
			break;
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	}

	if (xfer->cyclic) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	src_addr = chan->config.src_addr;
	dst_addr = chan->config.dst_addr;

	if (xfer->cyclic) {
		cnt = xfer->xfer.cyclic.cnt;
	} else {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	}

	for (i = 0; i < cnt; i++) {
		if (!xfer->cyclic && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->cyclic)
			burst->sz = xfer->xfer.cyclic.len;
		else
			burst->sz = sg_dma_len(sg);

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (chan->dir == EDMA_DIR_WRITE) {
			burst->sar = src_addr;
			if (xfer->cyclic) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else {
				burst->dar = dst_addr;
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it is linear
				 * memory, which is why the source and
				 * destination addresses are increased by
				 * the same portion (data length)
				 */
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->cyclic) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else {
				burst->sar = src_addr;
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it is linear
				 * memory, which is why the source and
				 * destination addresses are increased by
				 * the same portion (data length)
				 */
			}
		}

		if (!xfer->cyclic) {
			src_addr += sg_dma_len(sg);
			dst_addr += sg_dma_len(sg);
			sg = sg_next(sg);
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.cyclic = false;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.cyclic = true;

	return dw_edma_device_transfer(&xfer);
}

static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
					      EDMA_DIR_WRITE :
					      EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
					       EDMA_DIR_WRITE :
					       EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->chip->dev);
}

static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	struct dw_edma_chan *chan;
	size_t ll_chunk, dt_chunk;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 i, j, cnt, ch_cnt;
	u32 alloc, off_alloc;
	int err = 0;
	u32 pos;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
	ll_chunk = dw->ll_region.sz;
	dt_chunk = dw->dt_region.sz;

	/* Calculate linked list chunk for each channel */
	ll_chunk /= roundup_pow_of_two(ch_cnt);

	/* Calculate data chunk for each channel */
	dt_chunk /= roundup_pow_of_two(ch_cnt);

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->chip = chip;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		chan->ll_off = (ll_chunk * i);
		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;

		chan->dt_off = (dt_chunk * i);

		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
			 write ? "write" : "read", j,
			 chan->ll_off, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
		dt_region->sz = dt_chunk;

		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
			 write ? "write" : "read", j, chan->dt_off);

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}

static int dw_edma_irq_request(struct dw_edma_chip *chip,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (dw->nr_irqs < 1)
		return -EINVAL;

	if (dw->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = dw->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = dw->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = dw->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
					  dw_edma_interrupt_write :
					  dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}

int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev)
		return -EINVAL;

	dw = chip->dw;
	if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
		return -EINVAL;

	raw_spin_lock_init(&dw->lock);

	/* Find out how many write channels are supported by hardware */
	dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
	if (!dw->wr_ch_cnt)
		return -EINVAL;

	/* Find out how many read channels are supported by hardware */
	dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
	if (!dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Setup read channels */
	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(chip);

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	dw->nr_irqs = 0;

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);

int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	dma_async_device_unregister(&dw->rd_edma);

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off();

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
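
/*
 * Editor's illustrative sketch, not part of the driver: a bus glue driver is
 * expected to fill in a struct dw_edma_chip and hand it to dw_edma_probe(),
 * and later tear it down with dw_edma_remove(). The outline below only relies
 * on fields referenced in this file (chip->dev, chip->id, chip->dw,
 * dw->nr_irqs, dw->ops->irq_vector, dw->irq, dw->ll_region, dw->dt_region);
 * the probe function, its parameters and "glue_ops" are hypothetical names
 * used purely for illustration.
 *
 *	static int hypothetical_glue_probe(struct device *dev, int id,
 *					   struct dw_edma_region *ll,
 *					   struct dw_edma_region *dt,
 *					   int nr_irqs)
 *	{
 *		struct dw_edma_chip *chip;
 *		struct dw_edma *dw;
 *
 *		chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
 *		dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
 *		if (!chip || !dw)
 *			return -ENOMEM;
 *
 *		chip->dev = dev;
 *		chip->id = id;
 *		chip->dw = dw;
 *
 *		dw->ll_region = *ll;	// linked-list region (paddr/vaddr/sz)
 *		dw->dt_region = *dt;	// data region (paddr/vaddr/sz)
 *		dw->nr_irqs = nr_irqs;
 *		dw->ops = glue_ops;	// must provide ->irq_vector(dev, nr)
 *		dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq),
 *				       GFP_KERNEL);
 *		if (!dw->irq)
 *			return -ENOMEM;
 *
 *		return dw_edma_probe(chip);
 *	}
 */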