// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}
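/*
 * Added note (derived from the code below): transfers are described by a
 * three-level structure. A dw_edma_desc owns a list of dw_edma_chunk
 * elements, and each chunk owns a list of dw_edma_burst elements. One chunk
 * maps onto one hardware linked list (holding at most chan->ll_max bursts),
 * so a single descriptor may be consumed by the engine as a sequence of
 * chunks.
 */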
static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chip *chip = desc->chan->dw->chip;
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/* Toggling the change bit (CB) in each chunk is the mechanism used to
	 * inform the eDMA HW block that a new linked list is ready to be
	 * consumed.
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	if (chan->dir == EDMA_DIR_WRITE) {
		chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
	} else {
		chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
	}

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}
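/*
 * Added note (derived from the code below): resume is only accepted for a
 * configured channel that is currently paused and has no other request
 * pending; it marks the channel busy again and kicks the next chunk of the
 * in-flight descriptor.
 */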
static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, probably because it
		 * missed or lost an interrupt
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}
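/*
 * Added note (derived from the code below): dw_edma_device_transfer() is the
 * common preparation path shared by the slave_sg, cyclic and interleaved prep
 * callbacks further down. It validates the requested transfer direction
 * against the channel direction and then builds a descriptor by splitting the
 * request into chunks of at most chan->ll_max bursts each.
 */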
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt = 0;
	int i;

	if (!chan->configured)
		return NULL;

	/*
	 * Local Root Port/End-point              Remote End-point
	 * +-----------------------+ PCIe bus +----------------------+
	 * |                       |    +-+   |                      |
	 * |    DEV_TO_MEM   Rx Ch <----+ +---+ Tx Ch  DEV_TO_MEM    |
	 * |                       |    | |   |                      |
	 * |    MEM_TO_DEV   Tx Ch +----+ +---> Rx Ch  MEM_TO_DEV    |
	 * |                       |    +-+   |                      |
	 * +-----------------------+          +----------------------+
	 *
	 * 1. Normal logic:
	 * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
	 * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
	 * for the device read operations (DEV_TO_MEM) and the Tx channel
	 * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV).
	 *
	 * 2. Inverted logic:
	 * If eDMA is embedded into a Remote PCIe EP and is controlled by the
	 * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
	 * channel (EDMA_DIR_WRITE) will be used for the device read operations
	 * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write
	 * operations (MEM_TO_DEV).
	 *
	 * It is the client driver's responsibility to choose a proper channel
	 * for the DMA transfers.
	 */
	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
			return NULL;
	} else {
		if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
			return NULL;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (!xfer->xfer.il->numf)
			return NULL;
		if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
			return NULL;
	} else {
		return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	if (xfer->type == EDMA_XFER_INTERLEAVED) {
		src_addr = xfer->xfer.il->src_start;
		dst_addr = xfer->xfer.il->dst_start;
	} else {
		src_addr = chan->config.src_addr;
		dst_addr = chan->config.dst_addr;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		cnt = xfer->xfer.cyclic.cnt;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (xfer->xfer.il->numf > 0)
			cnt = xfer->xfer.il->numf;
		else
			cnt = xfer->xfer.il->frame_size;
	}

	for (i = 0; i < cnt; i++) {
		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->type == EDMA_XFER_CYCLIC)
			burst->sz = xfer->xfer.cyclic.len;
		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
			burst->sz = sg_dma_len(sg);
		else if (xfer->type == EDMA_XFER_INTERLEAVED)
			burst->sz = xfer->xfer.il->sgl[i].size;

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (dir == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				src_addr += sg_dma_len(sg);
				burst->dar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO; in this case it's linear memory,
				 * which is why the source and destination
				 * addresses are advanced by the same amount
				 * (the data length).
				 */
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				dst_addr += sg_dma_len(sg);
				burst->sar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO; in this case it's linear memory,
				 * which is why the source and destination
				 * addresses are advanced by the same amount
				 * (the data length).
				 */
			}
		}

		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
			sg = sg_next(sg);
		} else if (xfer->type == EDMA_XFER_INTERLEAVED &&
			   xfer->xfer.il->frame_size > 0) {
			struct dma_interleaved_template *il = xfer->xfer.il;
			struct data_chunk *dc = &il->sgl[i];

			if (il->src_sgl) {
				src_addr += burst->sz;
				src_addr += dmaengine_get_src_icg(il, dc);
			}

			if (il->dst_sgl) {
				dst_addr += burst->sz;
				dst_addr += dmaengine_get_dst_icg(il, dc);
			}
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}
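/*
 * Added note: the three dmaengine prep callbacks below only translate their
 * arguments into a struct dw_edma_transfer and delegate to
 * dw_edma_device_transfer(). As an illustrative (not authoritative) sketch of
 * the client-side sequence that ends up in dw_edma_device_prep_slave_sg(),
 * with error handling omitted and remote_paddr being a hypothetical address:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = remote_paddr,
 *	};
 *
 *	dmaengine_slave_config(dchan, &cfg);
 *	txd = dmaengine_prep_slave_sg(dchan, sgl, nents, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(dchan);
 */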
static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_SCATTER_GATHER;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_CYCLIC;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
				    struct dma_interleaved_template *ilt,
				    unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = ilt->dir;
	xfer.xfer.il = ilt;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_INTERLEAVED;

	return dw_edma_device_transfer(&xfer);
}

static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
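/*
 * Added note (derived from the code below): an abort interrupt terminates
 * whatever descriptor is at the head of the issued list; the descriptor is
 * completed towards the client and the channel is put back to idle with any
 * pending request cleared.
 */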
static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
						  EDMA_DIR_WRITE :
						  EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
						   EDMA_DIR_WRITE :
						   EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->dw->chip->dev);

	return 0;
}

static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->dw->chip->dev);
}
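/*
 * Added note (derived from the code below): one dma_device is registered per
 * direction (dw->wr_edma for write channels, dw->rd_edma for read channels).
 * Each channel gets a virt-dma channel, an IRQ mask bit on the vector it was
 * assigned to, and a data/target region copied from the chip description into
 * the channel's private data.
 */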
static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma_chan *chan;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 alloc, off_alloc;
	u32 i, j, cnt;
	int err = 0;
	u32 pos;

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->dw = dw;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		if (write)
			chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
		else
			chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
		chan->ll_max -= 1;

		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
			 write ? "write" : "read", j, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		if (write) {
			dt_region->paddr = chip->dt_region_wr[j].paddr;
			dt_region->vaddr = chip->dt_region_wr[j].vaddr;
			dt_region->sz = chip->dt_region_wr[j].sz;
		} else {
			dt_region->paddr = chip->dt_region_rd[j].paddr;
			dt_region->vaddr = chip->dt_region_rd[j].vaddr;
			dt_region->sz = chip->dt_region_rd[j].sz;
		}

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}
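/*
 * Added note (derived from the code below): with a single vector, one common
 * handler services both write and read channels. With more vectors,
 * dw_edma_dec_irq_alloc() splits the available vectors between write and read
 * channels, and a dedicated write or read handler is requested for each of
 * them.
 */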
static int dw_edma_irq_request(struct dw_edma *dw,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct device *dev = dw->chip->dev;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
		return -EINVAL;

	dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
	if (!dw->irq)
		return -ENOMEM;

	if (chip->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = chip->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);

		dw->nr_irqs = 1;
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = chip->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = chip->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}

int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev || !chip->ops)
		return -EINVAL;

	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->chip = chip;

	raw_spin_lock_init(&dw->lock);

	dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

	dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Setup read channels */
	err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(dw);

	chip->dw = dw;

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);
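/*
 * Added note (assumption based on how this core is typically wired up, e.g.
 * by the PCIe glue in dw-edma-pcie.c): a glue driver fills in struct
 * dw_edma_chip with the device, the ops->irq_vector() callback, the number of
 * IRQs and the linked-list/data region addresses before calling
 * dw_edma_probe(), and calls dw_edma_remove() on teardown.
 */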
int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	dma_async_device_unregister(&dw->rd_edma);
	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off(dw);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");