1 /* 2 * Freescale MPC85xx, MPC83xx DMA Engine support 3 * 4 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. 5 * 6 * Author: 7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 8 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 9 * 10 * Description: 11 * DMA engine driver for Freescale MPC8540 DMA controller, which is 12 * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. 13 * The support for MPC8349 DMA controller is also added. 14 * 15 * This driver instructs the DMA controller to issue the PCI Read Multiple 16 * command for PCI read operations, instead of using the default PCI Read Line 17 * command. Please be aware that this setting may result in read pre-fetching 18 * on some platforms. 19 * 20 * This is free software; you can redistribute it and/or modify 21 * it under the terms of the GNU General Public License as published by 22 * the Free Software Foundation; either version 2 of the License, or 23 * (at your option) any later version. 24 * 25 */ 26 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/pci.h> 30 #include <linux/slab.h> 31 #include <linux/interrupt.h> 32 #include <linux/dmaengine.h> 33 #include <linux/delay.h> 34 #include <linux/dma-mapping.h> 35 #include <linux/dmapool.h> 36 #include <linux/of_address.h> 37 #include <linux/of_irq.h> 38 #include <linux/of_platform.h> 39 40 #include "dmaengine.h" 41 #include "fsldma.h" 42 43 #define chan_dbg(chan, fmt, arg...) \ 44 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) 45 #define chan_err(chan, fmt, arg...) \ 46 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) 47 48 static const char msg_ld_oom[] = "No free memory for link descriptor"; 49 50 /* 51 * Register Helpers 52 */ 53 54 static void set_sr(struct fsldma_chan *chan, u32 val) 55 { 56 DMA_OUT(chan, &chan->regs->sr, val, 32); 57 } 58 59 static u32 get_sr(struct fsldma_chan *chan) 60 { 61 return DMA_IN(chan, &chan->regs->sr, 32); 62 } 63 64 static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) 65 { 66 DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); 67 } 68 69 static dma_addr_t get_cdar(struct fsldma_chan *chan) 70 { 71 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; 72 } 73 74 static u32 get_bcr(struct fsldma_chan *chan) 75 { 76 return DMA_IN(chan, &chan->regs->bcr, 32); 77 } 78 79 /* 80 * Descriptor Helpers 81 */ 82 83 static void set_desc_cnt(struct fsldma_chan *chan, 84 struct fsl_dma_ld_hw *hw, u32 count) 85 { 86 hw->count = CPU_TO_DMA(chan, count, 32); 87 } 88 89 static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) 90 { 91 return DMA_TO_CPU(chan, desc->hw.count, 32); 92 } 93 94 static void set_desc_src(struct fsldma_chan *chan, 95 struct fsl_dma_ld_hw *hw, dma_addr_t src) 96 { 97 u64 snoop_bits; 98 99 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 100 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; 101 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); 102 } 103 104 static dma_addr_t get_desc_src(struct fsldma_chan *chan, 105 struct fsl_desc_sw *desc) 106 { 107 u64 snoop_bits; 108 109 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 110 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; 111 return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; 112 } 113 114 static void set_desc_dst(struct fsldma_chan *chan, 115 struct fsl_dma_ld_hw *hw, dma_addr_t dst) 116 { 117 u64 snoop_bits; 118 119 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 120 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; 121 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); 122 } 123 124 static dma_addr_t get_desc_dst(struct fsldma_chan *chan, 125 struct fsl_desc_sw *desc) 126 { 127 u64 snoop_bits; 128 129 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 130 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; 131 return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; 132 } 133 134 static void set_desc_next(struct fsldma_chan *chan, 135 struct fsl_dma_ld_hw *hw, dma_addr_t next) 136 { 137 u64 snoop_bits; 138 139 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) 140 ? FSL_DMA_SNEN : 0; 141 hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); 142 } 143 144 static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) 145 { 146 u64 snoop_bits; 147 148 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) 149 ? FSL_DMA_SNEN : 0; 150 151 desc->hw.next_ln_addr = CPU_TO_DMA(chan, 152 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL 153 | snoop_bits, 64); 154 } 155 156 /* 157 * DMA Engine Hardware Control Helpers 158 */ 159 160 static void dma_init(struct fsldma_chan *chan) 161 { 162 /* Reset the channel */ 163 DMA_OUT(chan, &chan->regs->mr, 0, 32); 164 165 switch (chan->feature & FSL_DMA_IP_MASK) { 166 case FSL_DMA_IP_85XX: 167 /* Set the channel to below modes: 168 * EIE - Error interrupt enable 169 * EOLNIE - End of links interrupt enable 170 * BWC - Bandwidth sharing among channels 171 */ 172 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC 173 | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32); 174 break; 175 case FSL_DMA_IP_83XX: 176 /* Set the channel to below modes: 177 * EOTIE - End-of-transfer interrupt enable 178 * PRC_RM - PCI read multiple 179 */ 180 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE 181 | FSL_DMA_MR_PRC_RM, 32); 182 break; 183 } 184 } 185 186 static int dma_is_idle(struct fsldma_chan *chan) 187 { 188 u32 sr = get_sr(chan); 189 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); 190 } 191 192 /* 193 * Start the DMA controller 194 * 195 * Preconditions: 196 * - the CDAR register must point to the start descriptor 197 * - the MRn[CS] bit must be cleared 198 */ 199 static void dma_start(struct fsldma_chan *chan) 200 { 201 u32 mode; 202 203 mode = DMA_IN(chan, &chan->regs->mr, 32); 204 205 if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { 206 DMA_OUT(chan, &chan->regs->bcr, 0, 32); 207 mode |= FSL_DMA_MR_EMP_EN; 208 } else { 209 mode &= ~FSL_DMA_MR_EMP_EN; 210 } 211 212 if (chan->feature & FSL_DMA_CHAN_START_EXT) { 213 mode |= FSL_DMA_MR_EMS_EN; 214 } else { 215 mode &= ~FSL_DMA_MR_EMS_EN; 216 mode |= FSL_DMA_MR_CS; 217 } 218 219 DMA_OUT(chan, &chan->regs->mr, mode, 32); 220 } 221 222 static void dma_halt(struct fsldma_chan *chan) 223 { 224 u32 mode; 225 int i; 226 227 /* read the mode register */ 228 mode = DMA_IN(chan, &chan->regs->mr, 32); 229 230 /* 231 * The 85xx controller supports channel abort, which will stop 232 * the current transfer. On 83xx, this bit is the transfer error 233 * mask bit, which should not be changed. 234 */ 235 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 236 mode |= FSL_DMA_MR_CA; 237 DMA_OUT(chan, &chan->regs->mr, mode, 32); 238 239 mode &= ~FSL_DMA_MR_CA; 240 } 241 242 /* stop the DMA controller */ 243 mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); 244 DMA_OUT(chan, &chan->regs->mr, mode, 32); 245 246 /* wait for the DMA controller to become idle */ 247 for (i = 0; i < 100; i++) { 248 if (dma_is_idle(chan)) 249 return; 250 251 udelay(10); 252 } 253 254 if (!dma_is_idle(chan)) 255 chan_err(chan, "DMA halt timeout!\n"); 256 } 257 258 /** 259 * fsl_chan_set_src_loop_size - Set source address hold transfer size 260 * @chan : Freescale DMA channel 261 * @size : Address loop size, 0 for disable loop 262 * 263 * The set source address hold transfer size. The source 264 * address hold or loop transfer size is when the DMA transfer 265 * data from source address (SA), if the loop size is 4, the DMA will 266 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, 267 * SA + 1 ... and so on. 268 */ 269 static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) 270 { 271 u32 mode; 272 273 mode = DMA_IN(chan, &chan->regs->mr, 32); 274 275 switch (size) { 276 case 0: 277 mode &= ~FSL_DMA_MR_SAHE; 278 break; 279 case 1: 280 case 2: 281 case 4: 282 case 8: 283 mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14); 284 break; 285 } 286 287 DMA_OUT(chan, &chan->regs->mr, mode, 32); 288 } 289 290 /** 291 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size 292 * @chan : Freescale DMA channel 293 * @size : Address loop size, 0 for disable loop 294 * 295 * The set destination address hold transfer size. The destination 296 * address hold or loop transfer size is when the DMA transfer 297 * data to destination address (TA), if the loop size is 4, the DMA will 298 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, 299 * TA + 1 ... and so on. 300 */ 301 static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) 302 { 303 u32 mode; 304 305 mode = DMA_IN(chan, &chan->regs->mr, 32); 306 307 switch (size) { 308 case 0: 309 mode &= ~FSL_DMA_MR_DAHE; 310 break; 311 case 1: 312 case 2: 313 case 4: 314 case 8: 315 mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16); 316 break; 317 } 318 319 DMA_OUT(chan, &chan->regs->mr, mode, 32); 320 } 321 322 /** 323 * fsl_chan_set_request_count - Set DMA Request Count for external control 324 * @chan : Freescale DMA channel 325 * @size : Number of bytes to transfer in a single request 326 * 327 * The Freescale DMA channel can be controlled by the external signal DREQ#. 328 * The DMA request count is how many bytes are allowed to transfer before 329 * pausing the channel, after which a new assertion of DREQ# resumes channel 330 * operation. 331 * 332 * A size of 0 disables external pause control. The maximum size is 1024. 333 */ 334 static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) 335 { 336 u32 mode; 337 338 BUG_ON(size > 1024); 339 340 mode = DMA_IN(chan, &chan->regs->mr, 32); 341 mode |= (__ilog2(size) << 24) & 0x0f000000; 342 343 DMA_OUT(chan, &chan->regs->mr, mode, 32); 344 } 345 346 /** 347 * fsl_chan_toggle_ext_pause - Toggle channel external pause status 348 * @chan : Freescale DMA channel 349 * @enable : 0 is disabled, 1 is enabled. 350 * 351 * The Freescale DMA channel can be controlled by the external signal DREQ#. 352 * The DMA Request Count feature should be used in addition to this feature 353 * to set the number of bytes to transfer before pausing the channel. 354 */ 355 static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) 356 { 357 if (enable) 358 chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; 359 else 360 chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; 361 } 362 363 /** 364 * fsl_chan_toggle_ext_start - Toggle channel external start status 365 * @chan : Freescale DMA channel 366 * @enable : 0 is disabled, 1 is enabled. 367 * 368 * If enable the external start, the channel can be started by an 369 * external DMA start pin. So the dma_start() does not start the 370 * transfer immediately. The DMA channel will wait for the 371 * control pin asserted. 372 */ 373 static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) 374 { 375 if (enable) 376 chan->feature |= FSL_DMA_CHAN_START_EXT; 377 else 378 chan->feature &= ~FSL_DMA_CHAN_START_EXT; 379 } 380 381 static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) 382 { 383 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); 384 385 if (list_empty(&chan->ld_pending)) 386 goto out_splice; 387 388 /* 389 * Add the hardware descriptor to the chain of hardware descriptors 390 * that already exists in memory. 391 * 392 * This will un-set the EOL bit of the existing transaction, and the 393 * last link in this transaction will become the EOL descriptor. 394 */ 395 set_desc_next(chan, &tail->hw, desc->async_tx.phys); 396 397 /* 398 * Add the software descriptor and all children to the list 399 * of pending transactions 400 */ 401 out_splice: 402 list_splice_tail_init(&desc->tx_list, &chan->ld_pending); 403 } 404 405 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) 406 { 407 struct fsldma_chan *chan = to_fsl_chan(tx->chan); 408 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); 409 struct fsl_desc_sw *child; 410 unsigned long flags; 411 dma_cookie_t cookie; 412 413 spin_lock_irqsave(&chan->desc_lock, flags); 414 415 /* 416 * assign cookies to all of the software descriptors 417 * that make up this transaction 418 */ 419 list_for_each_entry(child, &desc->tx_list, node) { 420 cookie = dma_cookie_assign(&child->async_tx); 421 } 422 423 /* put this transaction onto the tail of the pending queue */ 424 append_ld_queue(chan, desc); 425 426 spin_unlock_irqrestore(&chan->desc_lock, flags); 427 428 return cookie; 429 } 430 431 /** 432 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. 433 * @chan : Freescale DMA channel 434 * 435 * Return - The descriptor allocated. NULL for failed. 436 */ 437 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) 438 { 439 struct fsl_desc_sw *desc; 440 dma_addr_t pdesc; 441 442 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); 443 if (!desc) { 444 chan_dbg(chan, "out of memory for link descriptor\n"); 445 return NULL; 446 } 447 448 memset(desc, 0, sizeof(*desc)); 449 INIT_LIST_HEAD(&desc->tx_list); 450 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 451 desc->async_tx.tx_submit = fsl_dma_tx_submit; 452 desc->async_tx.phys = pdesc; 453 454 #ifdef FSL_DMA_LD_DEBUG 455 chan_dbg(chan, "LD %p allocated\n", desc); 456 #endif 457 458 return desc; 459 } 460 461 /** 462 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. 463 * @chan : Freescale DMA channel 464 * 465 * This function will create a dma pool for descriptor allocation. 466 * 467 * Return - The number of descriptors allocated. 468 */ 469 static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) 470 { 471 struct fsldma_chan *chan = to_fsl_chan(dchan); 472 473 /* Has this channel already been allocated? */ 474 if (chan->desc_pool) 475 return 1; 476 477 /* 478 * We need the descriptor to be aligned to 32bytes 479 * for meeting FSL DMA specification requirement. 480 */ 481 chan->desc_pool = dma_pool_create(chan->name, chan->dev, 482 sizeof(struct fsl_desc_sw), 483 __alignof__(struct fsl_desc_sw), 0); 484 if (!chan->desc_pool) { 485 chan_err(chan, "unable to allocate descriptor pool\n"); 486 return -ENOMEM; 487 } 488 489 /* there is at least one descriptor free to be allocated */ 490 return 1; 491 } 492 493 /** 494 * fsldma_free_desc_list - Free all descriptors in a queue 495 * @chan: Freescae DMA channel 496 * @list: the list to free 497 * 498 * LOCKING: must hold chan->desc_lock 499 */ 500 static void fsldma_free_desc_list(struct fsldma_chan *chan, 501 struct list_head *list) 502 { 503 struct fsl_desc_sw *desc, *_desc; 504 505 list_for_each_entry_safe(desc, _desc, list, node) { 506 list_del(&desc->node); 507 #ifdef FSL_DMA_LD_DEBUG 508 chan_dbg(chan, "LD %p free\n", desc); 509 #endif 510 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 511 } 512 } 513 514 static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, 515 struct list_head *list) 516 { 517 struct fsl_desc_sw *desc, *_desc; 518 519 list_for_each_entry_safe_reverse(desc, _desc, list, node) { 520 list_del(&desc->node); 521 #ifdef FSL_DMA_LD_DEBUG 522 chan_dbg(chan, "LD %p free\n", desc); 523 #endif 524 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 525 } 526 } 527 528 /** 529 * fsl_dma_free_chan_resources - Free all resources of the channel. 530 * @chan : Freescale DMA channel 531 */ 532 static void fsl_dma_free_chan_resources(struct dma_chan *dchan) 533 { 534 struct fsldma_chan *chan = to_fsl_chan(dchan); 535 unsigned long flags; 536 537 chan_dbg(chan, "free all channel resources\n"); 538 spin_lock_irqsave(&chan->desc_lock, flags); 539 fsldma_free_desc_list(chan, &chan->ld_pending); 540 fsldma_free_desc_list(chan, &chan->ld_running); 541 spin_unlock_irqrestore(&chan->desc_lock, flags); 542 543 dma_pool_destroy(chan->desc_pool); 544 chan->desc_pool = NULL; 545 } 546 547 static struct dma_async_tx_descriptor * 548 fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) 549 { 550 struct fsldma_chan *chan; 551 struct fsl_desc_sw *new; 552 553 if (!dchan) 554 return NULL; 555 556 chan = to_fsl_chan(dchan); 557 558 new = fsl_dma_alloc_descriptor(chan); 559 if (!new) { 560 chan_err(chan, "%s\n", msg_ld_oom); 561 return NULL; 562 } 563 564 new->async_tx.cookie = -EBUSY; 565 new->async_tx.flags = flags; 566 567 /* Insert the link descriptor to the LD ring */ 568 list_add_tail(&new->node, &new->tx_list); 569 570 /* Set End-of-link to the last link descriptor of new list */ 571 set_ld_eol(chan, new); 572 573 return &new->async_tx; 574 } 575 576 static struct dma_async_tx_descriptor * 577 fsl_dma_prep_memcpy(struct dma_chan *dchan, 578 dma_addr_t dma_dst, dma_addr_t dma_src, 579 size_t len, unsigned long flags) 580 { 581 struct fsldma_chan *chan; 582 struct fsl_desc_sw *first = NULL, *prev = NULL, *new; 583 size_t copy; 584 585 if (!dchan) 586 return NULL; 587 588 if (!len) 589 return NULL; 590 591 chan = to_fsl_chan(dchan); 592 593 do { 594 595 /* Allocate the link descriptor from DMA pool */ 596 new = fsl_dma_alloc_descriptor(chan); 597 if (!new) { 598 chan_err(chan, "%s\n", msg_ld_oom); 599 goto fail; 600 } 601 602 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); 603 604 set_desc_cnt(chan, &new->hw, copy); 605 set_desc_src(chan, &new->hw, dma_src); 606 set_desc_dst(chan, &new->hw, dma_dst); 607 608 if (!first) 609 first = new; 610 else 611 set_desc_next(chan, &prev->hw, new->async_tx.phys); 612 613 new->async_tx.cookie = 0; 614 async_tx_ack(&new->async_tx); 615 616 prev = new; 617 len -= copy; 618 dma_src += copy; 619 dma_dst += copy; 620 621 /* Insert the link descriptor to the LD ring */ 622 list_add_tail(&new->node, &first->tx_list); 623 } while (len); 624 625 new->async_tx.flags = flags; /* client is in control of this ack */ 626 new->async_tx.cookie = -EBUSY; 627 628 /* Set End-of-link to the last link descriptor of new list */ 629 set_ld_eol(chan, new); 630 631 return &first->async_tx; 632 633 fail: 634 if (!first) 635 return NULL; 636 637 fsldma_free_desc_list_reverse(chan, &first->tx_list); 638 return NULL; 639 } 640 641 static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, 642 struct scatterlist *dst_sg, unsigned int dst_nents, 643 struct scatterlist *src_sg, unsigned int src_nents, 644 unsigned long flags) 645 { 646 struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; 647 struct fsldma_chan *chan = to_fsl_chan(dchan); 648 size_t dst_avail, src_avail; 649 dma_addr_t dst, src; 650 size_t len; 651 652 /* basic sanity checks */ 653 if (dst_nents == 0 || src_nents == 0) 654 return NULL; 655 656 if (dst_sg == NULL || src_sg == NULL) 657 return NULL; 658 659 /* 660 * TODO: should we check that both scatterlists have the same 661 * TODO: number of bytes in total? Is that really an error? 662 */ 663 664 /* get prepared for the loop */ 665 dst_avail = sg_dma_len(dst_sg); 666 src_avail = sg_dma_len(src_sg); 667 668 /* run until we are out of scatterlist entries */ 669 while (true) { 670 671 /* create the largest transaction possible */ 672 len = min_t(size_t, src_avail, dst_avail); 673 len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); 674 if (len == 0) 675 goto fetch; 676 677 dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; 678 src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; 679 680 /* allocate and populate the descriptor */ 681 new = fsl_dma_alloc_descriptor(chan); 682 if (!new) { 683 chan_err(chan, "%s\n", msg_ld_oom); 684 goto fail; 685 } 686 687 set_desc_cnt(chan, &new->hw, len); 688 set_desc_src(chan, &new->hw, src); 689 set_desc_dst(chan, &new->hw, dst); 690 691 if (!first) 692 first = new; 693 else 694 set_desc_next(chan, &prev->hw, new->async_tx.phys); 695 696 new->async_tx.cookie = 0; 697 async_tx_ack(&new->async_tx); 698 prev = new; 699 700 /* Insert the link descriptor to the LD ring */ 701 list_add_tail(&new->node, &first->tx_list); 702 703 /* update metadata */ 704 dst_avail -= len; 705 src_avail -= len; 706 707 fetch: 708 /* fetch the next dst scatterlist entry */ 709 if (dst_avail == 0) { 710 711 /* no more entries: we're done */ 712 if (dst_nents == 0) 713 break; 714 715 /* fetch the next entry: if there are no more: done */ 716 dst_sg = sg_next(dst_sg); 717 if (dst_sg == NULL) 718 break; 719 720 dst_nents--; 721 dst_avail = sg_dma_len(dst_sg); 722 } 723 724 /* fetch the next src scatterlist entry */ 725 if (src_avail == 0) { 726 727 /* no more entries: we're done */ 728 if (src_nents == 0) 729 break; 730 731 /* fetch the next entry: if there are no more: done */ 732 src_sg = sg_next(src_sg); 733 if (src_sg == NULL) 734 break; 735 736 src_nents--; 737 src_avail = sg_dma_len(src_sg); 738 } 739 } 740 741 new->async_tx.flags = flags; /* client is in control of this ack */ 742 new->async_tx.cookie = -EBUSY; 743 744 /* Set End-of-link to the last link descriptor of new list */ 745 set_ld_eol(chan, new); 746 747 return &first->async_tx; 748 749 fail: 750 if (!first) 751 return NULL; 752 753 fsldma_free_desc_list_reverse(chan, &first->tx_list); 754 return NULL; 755 } 756 757 /** 758 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 759 * @chan: DMA channel 760 * @sgl: scatterlist to transfer to/from 761 * @sg_len: number of entries in @scatterlist 762 * @direction: DMA direction 763 * @flags: DMAEngine flags 764 * @context: transaction context (ignored) 765 * 766 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the 767 * DMA_SLAVE API, this gets the device-specific information from the 768 * chan->private variable. 769 */ 770 static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( 771 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 772 enum dma_transfer_direction direction, unsigned long flags, 773 void *context) 774 { 775 /* 776 * This operation is not supported on the Freescale DMA controller 777 * 778 * However, we need to provide the function pointer to allow the 779 * device_control() method to work. 780 */ 781 return NULL; 782 } 783 784 static int fsl_dma_device_control(struct dma_chan *dchan, 785 enum dma_ctrl_cmd cmd, unsigned long arg) 786 { 787 struct dma_slave_config *config; 788 struct fsldma_chan *chan; 789 unsigned long flags; 790 int size; 791 792 if (!dchan) 793 return -EINVAL; 794 795 chan = to_fsl_chan(dchan); 796 797 switch (cmd) { 798 case DMA_TERMINATE_ALL: 799 spin_lock_irqsave(&chan->desc_lock, flags); 800 801 /* Halt the DMA engine */ 802 dma_halt(chan); 803 804 /* Remove and free all of the descriptors in the LD queue */ 805 fsldma_free_desc_list(chan, &chan->ld_pending); 806 fsldma_free_desc_list(chan, &chan->ld_running); 807 chan->idle = true; 808 809 spin_unlock_irqrestore(&chan->desc_lock, flags); 810 return 0; 811 812 case DMA_SLAVE_CONFIG: 813 config = (struct dma_slave_config *)arg; 814 815 /* make sure the channel supports setting burst size */ 816 if (!chan->set_request_count) 817 return -ENXIO; 818 819 /* we set the controller burst size depending on direction */ 820 if (config->direction == DMA_MEM_TO_DEV) 821 size = config->dst_addr_width * config->dst_maxburst; 822 else 823 size = config->src_addr_width * config->src_maxburst; 824 825 chan->set_request_count(chan, size); 826 return 0; 827 828 case FSLDMA_EXTERNAL_START: 829 830 /* make sure the channel supports external start */ 831 if (!chan->toggle_ext_start) 832 return -ENXIO; 833 834 chan->toggle_ext_start(chan, arg); 835 return 0; 836 837 default: 838 return -ENXIO; 839 } 840 841 return 0; 842 } 843 844 /** 845 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor 846 * @chan: Freescale DMA channel 847 * @desc: descriptor to cleanup and free 848 * 849 * This function is used on a descriptor which has been executed by the DMA 850 * controller. It will run any callbacks, submit any dependencies, and then 851 * free the descriptor. 852 */ 853 static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, 854 struct fsl_desc_sw *desc) 855 { 856 struct dma_async_tx_descriptor *txd = &desc->async_tx; 857 struct device *dev = chan->common.device->dev; 858 dma_addr_t src = get_desc_src(chan, desc); 859 dma_addr_t dst = get_desc_dst(chan, desc); 860 u32 len = get_desc_cnt(chan, desc); 861 862 /* Run the link descriptor callback function */ 863 if (txd->callback) { 864 #ifdef FSL_DMA_LD_DEBUG 865 chan_dbg(chan, "LD %p callback\n", desc); 866 #endif 867 txd->callback(txd->callback_param); 868 } 869 870 /* Run any dependencies */ 871 dma_run_dependencies(txd); 872 873 dma_descriptor_unmap(txd); 874 #ifdef FSL_DMA_LD_DEBUG 875 chan_dbg(chan, "LD %p free\n", desc); 876 #endif 877 dma_pool_free(chan->desc_pool, desc, txd->phys); 878 } 879 880 /** 881 * fsl_chan_xfer_ld_queue - transfer any pending transactions 882 * @chan : Freescale DMA channel 883 * 884 * HARDWARE STATE: idle 885 * LOCKING: must hold chan->desc_lock 886 */ 887 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) 888 { 889 struct fsl_desc_sw *desc; 890 891 /* 892 * If the list of pending descriptors is empty, then we 893 * don't need to do any work at all 894 */ 895 if (list_empty(&chan->ld_pending)) { 896 chan_dbg(chan, "no pending LDs\n"); 897 return; 898 } 899 900 /* 901 * The DMA controller is not idle, which means that the interrupt 902 * handler will start any queued transactions when it runs after 903 * this transaction finishes 904 */ 905 if (!chan->idle) { 906 chan_dbg(chan, "DMA controller still busy\n"); 907 return; 908 } 909 910 /* 911 * If there are some link descriptors which have not been 912 * transferred, we need to start the controller 913 */ 914 915 /* 916 * Move all elements from the queue of pending transactions 917 * onto the list of running transactions 918 */ 919 chan_dbg(chan, "idle, starting controller\n"); 920 desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); 921 list_splice_tail_init(&chan->ld_pending, &chan->ld_running); 922 923 /* 924 * The 85xx DMA controller doesn't clear the channel start bit 925 * automatically at the end of a transfer. Therefore we must clear 926 * it in software before starting the transfer. 927 */ 928 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 929 u32 mode; 930 931 mode = DMA_IN(chan, &chan->regs->mr, 32); 932 mode &= ~FSL_DMA_MR_CS; 933 DMA_OUT(chan, &chan->regs->mr, mode, 32); 934 } 935 936 /* 937 * Program the descriptor's address into the DMA controller, 938 * then start the DMA transaction 939 */ 940 set_cdar(chan, desc->async_tx.phys); 941 get_cdar(chan); 942 943 dma_start(chan); 944 chan->idle = false; 945 } 946 947 /** 948 * fsl_dma_memcpy_issue_pending - Issue the DMA start command 949 * @chan : Freescale DMA channel 950 */ 951 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) 952 { 953 struct fsldma_chan *chan = to_fsl_chan(dchan); 954 unsigned long flags; 955 956 spin_lock_irqsave(&chan->desc_lock, flags); 957 fsl_chan_xfer_ld_queue(chan); 958 spin_unlock_irqrestore(&chan->desc_lock, flags); 959 } 960 961 /** 962 * fsl_tx_status - Determine the DMA status 963 * @chan : Freescale DMA channel 964 */ 965 static enum dma_status fsl_tx_status(struct dma_chan *dchan, 966 dma_cookie_t cookie, 967 struct dma_tx_state *txstate) 968 { 969 return dma_cookie_status(dchan, cookie, txstate); 970 } 971 972 /*----------------------------------------------------------------------------*/ 973 /* Interrupt Handling */ 974 /*----------------------------------------------------------------------------*/ 975 976 static irqreturn_t fsldma_chan_irq(int irq, void *data) 977 { 978 struct fsldma_chan *chan = data; 979 u32 stat; 980 981 /* save and clear the status register */ 982 stat = get_sr(chan); 983 set_sr(chan, stat); 984 chan_dbg(chan, "irq: stat = 0x%x\n", stat); 985 986 /* check that this was really our device */ 987 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); 988 if (!stat) 989 return IRQ_NONE; 990 991 if (stat & FSL_DMA_SR_TE) 992 chan_err(chan, "Transfer Error!\n"); 993 994 /* 995 * Programming Error 996 * The DMA_INTERRUPT async_tx is a NULL transfer, which will 997 * trigger a PE interrupt. 998 */ 999 if (stat & FSL_DMA_SR_PE) { 1000 chan_dbg(chan, "irq: Programming Error INT\n"); 1001 stat &= ~FSL_DMA_SR_PE; 1002 if (get_bcr(chan) != 0) 1003 chan_err(chan, "Programming Error!\n"); 1004 } 1005 1006 /* 1007 * For MPC8349, EOCDI event need to update cookie 1008 * and start the next transfer if it exist. 1009 */ 1010 if (stat & FSL_DMA_SR_EOCDI) { 1011 chan_dbg(chan, "irq: End-of-Chain link INT\n"); 1012 stat &= ~FSL_DMA_SR_EOCDI; 1013 } 1014 1015 /* 1016 * If it current transfer is the end-of-transfer, 1017 * we should clear the Channel Start bit for 1018 * prepare next transfer. 1019 */ 1020 if (stat & FSL_DMA_SR_EOLNI) { 1021 chan_dbg(chan, "irq: End-of-link INT\n"); 1022 stat &= ~FSL_DMA_SR_EOLNI; 1023 } 1024 1025 /* check that the DMA controller is really idle */ 1026 if (!dma_is_idle(chan)) 1027 chan_err(chan, "irq: controller not idle!\n"); 1028 1029 /* check that we handled all of the bits */ 1030 if (stat) 1031 chan_err(chan, "irq: unhandled sr 0x%08x\n", stat); 1032 1033 /* 1034 * Schedule the tasklet to handle all cleanup of the current 1035 * transaction. It will start a new transaction if there is 1036 * one pending. 1037 */ 1038 tasklet_schedule(&chan->tasklet); 1039 chan_dbg(chan, "irq: Exit\n"); 1040 return IRQ_HANDLED; 1041 } 1042 1043 static void dma_do_tasklet(unsigned long data) 1044 { 1045 struct fsldma_chan *chan = (struct fsldma_chan *)data; 1046 struct fsl_desc_sw *desc, *_desc; 1047 LIST_HEAD(ld_cleanup); 1048 unsigned long flags; 1049 1050 chan_dbg(chan, "tasklet entry\n"); 1051 1052 spin_lock_irqsave(&chan->desc_lock, flags); 1053 1054 /* update the cookie if we have some descriptors to cleanup */ 1055 if (!list_empty(&chan->ld_running)) { 1056 dma_cookie_t cookie; 1057 1058 desc = to_fsl_desc(chan->ld_running.prev); 1059 cookie = desc->async_tx.cookie; 1060 dma_cookie_complete(&desc->async_tx); 1061 1062 chan_dbg(chan, "completed_cookie=%d\n", cookie); 1063 } 1064 1065 /* 1066 * move the descriptors to a temporary list so we can drop the lock 1067 * during the entire cleanup operation 1068 */ 1069 list_splice_tail_init(&chan->ld_running, &ld_cleanup); 1070 1071 /* the hardware is now idle and ready for more */ 1072 chan->idle = true; 1073 1074 /* 1075 * Start any pending transactions automatically 1076 * 1077 * In the ideal case, we keep the DMA controller busy while we go 1078 * ahead and free the descriptors below. 1079 */ 1080 fsl_chan_xfer_ld_queue(chan); 1081 spin_unlock_irqrestore(&chan->desc_lock, flags); 1082 1083 /* Run the callback for each descriptor, in order */ 1084 list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { 1085 1086 /* Remove from the list of transactions */ 1087 list_del(&desc->node); 1088 1089 /* Run all cleanup for this descriptor */ 1090 fsldma_cleanup_descriptor(chan, desc); 1091 } 1092 1093 chan_dbg(chan, "tasklet exit\n"); 1094 } 1095 1096 static irqreturn_t fsldma_ctrl_irq(int irq, void *data) 1097 { 1098 struct fsldma_device *fdev = data; 1099 struct fsldma_chan *chan; 1100 unsigned int handled = 0; 1101 u32 gsr, mask; 1102 int i; 1103 1104 gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs) 1105 : in_le32(fdev->regs); 1106 mask = 0xff000000; 1107 dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr); 1108 1109 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1110 chan = fdev->chan[i]; 1111 if (!chan) 1112 continue; 1113 1114 if (gsr & mask) { 1115 dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); 1116 fsldma_chan_irq(irq, chan); 1117 handled++; 1118 } 1119 1120 gsr &= ~mask; 1121 mask >>= 8; 1122 } 1123 1124 return IRQ_RETVAL(handled); 1125 } 1126 1127 static void fsldma_free_irqs(struct fsldma_device *fdev) 1128 { 1129 struct fsldma_chan *chan; 1130 int i; 1131 1132 if (fdev->irq != NO_IRQ) { 1133 dev_dbg(fdev->dev, "free per-controller IRQ\n"); 1134 free_irq(fdev->irq, fdev); 1135 return; 1136 } 1137 1138 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1139 chan = fdev->chan[i]; 1140 if (chan && chan->irq != NO_IRQ) { 1141 chan_dbg(chan, "free per-channel IRQ\n"); 1142 free_irq(chan->irq, chan); 1143 } 1144 } 1145 } 1146 1147 static int fsldma_request_irqs(struct fsldma_device *fdev) 1148 { 1149 struct fsldma_chan *chan; 1150 int ret; 1151 int i; 1152 1153 /* if we have a per-controller IRQ, use that */ 1154 if (fdev->irq != NO_IRQ) { 1155 dev_dbg(fdev->dev, "request per-controller IRQ\n"); 1156 ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, 1157 "fsldma-controller", fdev); 1158 return ret; 1159 } 1160 1161 /* no per-controller IRQ, use the per-channel IRQs */ 1162 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1163 chan = fdev->chan[i]; 1164 if (!chan) 1165 continue; 1166 1167 if (chan->irq == NO_IRQ) { 1168 chan_err(chan, "interrupts property missing in device tree\n"); 1169 ret = -ENODEV; 1170 goto out_unwind; 1171 } 1172 1173 chan_dbg(chan, "request per-channel IRQ\n"); 1174 ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, 1175 "fsldma-chan", chan); 1176 if (ret) { 1177 chan_err(chan, "unable to request per-channel IRQ\n"); 1178 goto out_unwind; 1179 } 1180 } 1181 1182 return 0; 1183 1184 out_unwind: 1185 for (/* none */; i >= 0; i--) { 1186 chan = fdev->chan[i]; 1187 if (!chan) 1188 continue; 1189 1190 if (chan->irq == NO_IRQ) 1191 continue; 1192 1193 free_irq(chan->irq, chan); 1194 } 1195 1196 return ret; 1197 } 1198 1199 /*----------------------------------------------------------------------------*/ 1200 /* OpenFirmware Subsystem */ 1201 /*----------------------------------------------------------------------------*/ 1202 1203 static int fsl_dma_chan_probe(struct fsldma_device *fdev, 1204 struct device_node *node, u32 feature, const char *compatible) 1205 { 1206 struct fsldma_chan *chan; 1207 struct resource res; 1208 int err; 1209 1210 /* alloc channel */ 1211 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1212 if (!chan) { 1213 dev_err(fdev->dev, "no free memory for DMA channels!\n"); 1214 err = -ENOMEM; 1215 goto out_return; 1216 } 1217 1218 /* ioremap registers for use */ 1219 chan->regs = of_iomap(node, 0); 1220 if (!chan->regs) { 1221 dev_err(fdev->dev, "unable to ioremap registers\n"); 1222 err = -ENOMEM; 1223 goto out_free_chan; 1224 } 1225 1226 err = of_address_to_resource(node, 0, &res); 1227 if (err) { 1228 dev_err(fdev->dev, "unable to find 'reg' property\n"); 1229 goto out_iounmap_regs; 1230 } 1231 1232 chan->feature = feature; 1233 if (!fdev->feature) 1234 fdev->feature = chan->feature; 1235 1236 /* 1237 * If the DMA device's feature is different than the feature 1238 * of its channels, report the bug 1239 */ 1240 WARN_ON(fdev->feature != chan->feature); 1241 1242 chan->dev = fdev->dev; 1243 chan->id = (res.start & 0xfff) < 0x300 ? 1244 ((res.start - 0x100) & 0xfff) >> 7 : 1245 ((res.start - 0x200) & 0xfff) >> 7; 1246 if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { 1247 dev_err(fdev->dev, "too many channels for device\n"); 1248 err = -EINVAL; 1249 goto out_iounmap_regs; 1250 } 1251 1252 fdev->chan[chan->id] = chan; 1253 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); 1254 snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id); 1255 1256 /* Initialize the channel */ 1257 dma_init(chan); 1258 1259 /* Clear cdar registers */ 1260 set_cdar(chan, 0); 1261 1262 switch (chan->feature & FSL_DMA_IP_MASK) { 1263 case FSL_DMA_IP_85XX: 1264 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; 1265 case FSL_DMA_IP_83XX: 1266 chan->toggle_ext_start = fsl_chan_toggle_ext_start; 1267 chan->set_src_loop_size = fsl_chan_set_src_loop_size; 1268 chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; 1269 chan->set_request_count = fsl_chan_set_request_count; 1270 } 1271 1272 spin_lock_init(&chan->desc_lock); 1273 INIT_LIST_HEAD(&chan->ld_pending); 1274 INIT_LIST_HEAD(&chan->ld_running); 1275 chan->idle = true; 1276 1277 chan->common.device = &fdev->common; 1278 dma_cookie_init(&chan->common); 1279 1280 /* find the IRQ line, if it exists in the device tree */ 1281 chan->irq = irq_of_parse_and_map(node, 0); 1282 1283 /* Add the channel to DMA device channel list */ 1284 list_add_tail(&chan->common.device_node, &fdev->common.channels); 1285 fdev->common.chancnt++; 1286 1287 dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, 1288 chan->irq != NO_IRQ ? chan->irq : fdev->irq); 1289 1290 return 0; 1291 1292 out_iounmap_regs: 1293 iounmap(chan->regs); 1294 out_free_chan: 1295 kfree(chan); 1296 out_return: 1297 return err; 1298 } 1299 1300 static void fsl_dma_chan_remove(struct fsldma_chan *chan) 1301 { 1302 irq_dispose_mapping(chan->irq); 1303 list_del(&chan->common.device_node); 1304 iounmap(chan->regs); 1305 kfree(chan); 1306 } 1307 1308 static int fsldma_of_probe(struct platform_device *op) 1309 { 1310 struct fsldma_device *fdev; 1311 struct device_node *child; 1312 int err; 1313 1314 fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); 1315 if (!fdev) { 1316 dev_err(&op->dev, "No enough memory for 'priv'\n"); 1317 err = -ENOMEM; 1318 goto out_return; 1319 } 1320 1321 fdev->dev = &op->dev; 1322 INIT_LIST_HEAD(&fdev->common.channels); 1323 1324 /* ioremap the registers for use */ 1325 fdev->regs = of_iomap(op->dev.of_node, 0); 1326 if (!fdev->regs) { 1327 dev_err(&op->dev, "unable to ioremap registers\n"); 1328 err = -ENOMEM; 1329 goto out_free_fdev; 1330 } 1331 1332 /* map the channel IRQ if it exists, but don't hookup the handler yet */ 1333 fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); 1334 1335 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); 1336 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); 1337 dma_cap_set(DMA_SG, fdev->common.cap_mask); 1338 dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); 1339 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; 1340 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1341 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; 1342 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1343 fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; 1344 fdev->common.device_tx_status = fsl_tx_status; 1345 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1346 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; 1347 fdev->common.device_control = fsl_dma_device_control; 1348 fdev->common.dev = &op->dev; 1349 1350 dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); 1351 1352 platform_set_drvdata(op, fdev); 1353 1354 /* 1355 * We cannot use of_platform_bus_probe() because there is no 1356 * of_platform_bus_remove(). Instead, we manually instantiate every DMA 1357 * channel object. 1358 */ 1359 for_each_child_of_node(op->dev.of_node, child) { 1360 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { 1361 fsl_dma_chan_probe(fdev, child, 1362 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, 1363 "fsl,eloplus-dma-channel"); 1364 } 1365 1366 if (of_device_is_compatible(child, "fsl,elo-dma-channel")) { 1367 fsl_dma_chan_probe(fdev, child, 1368 FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, 1369 "fsl,elo-dma-channel"); 1370 } 1371 } 1372 1373 /* 1374 * Hookup the IRQ handler(s) 1375 * 1376 * If we have a per-controller interrupt, we prefer that to the 1377 * per-channel interrupts to reduce the number of shared interrupt 1378 * handlers on the same IRQ line 1379 */ 1380 err = fsldma_request_irqs(fdev); 1381 if (err) { 1382 dev_err(fdev->dev, "unable to request IRQs\n"); 1383 goto out_free_fdev; 1384 } 1385 1386 dma_async_device_register(&fdev->common); 1387 return 0; 1388 1389 out_free_fdev: 1390 irq_dispose_mapping(fdev->irq); 1391 kfree(fdev); 1392 out_return: 1393 return err; 1394 } 1395 1396 static int fsldma_of_remove(struct platform_device *op) 1397 { 1398 struct fsldma_device *fdev; 1399 unsigned int i; 1400 1401 fdev = platform_get_drvdata(op); 1402 dma_async_device_unregister(&fdev->common); 1403 1404 fsldma_free_irqs(fdev); 1405 1406 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1407 if (fdev->chan[i]) 1408 fsl_dma_chan_remove(fdev->chan[i]); 1409 } 1410 1411 iounmap(fdev->regs); 1412 kfree(fdev); 1413 1414 return 0; 1415 } 1416 1417 static const struct of_device_id fsldma_of_ids[] = { 1418 { .compatible = "fsl,elo3-dma", }, 1419 { .compatible = "fsl,eloplus-dma", }, 1420 { .compatible = "fsl,elo-dma", }, 1421 {} 1422 }; 1423 1424 static struct platform_driver fsldma_of_driver = { 1425 .driver = { 1426 .name = "fsl-elo-dma", 1427 .owner = THIS_MODULE, 1428 .of_match_table = fsldma_of_ids, 1429 }, 1430 .probe = fsldma_of_probe, 1431 .remove = fsldma_of_remove, 1432 }; 1433 1434 /*----------------------------------------------------------------------------*/ 1435 /* Module Init / Exit */ 1436 /*----------------------------------------------------------------------------*/ 1437 1438 static __init int fsldma_init(void) 1439 { 1440 pr_info("Freescale Elo series DMA driver\n"); 1441 return platform_driver_register(&fsldma_of_driver); 1442 } 1443 1444 static void __exit fsldma_exit(void) 1445 { 1446 platform_driver_unregister(&fsldma_of_driver); 1447 } 1448 1449 subsys_initcall(fsldma_init); 1450 module_exit(fsldma_exit); 1451 1452 MODULE_DESCRIPTION("Freescale Elo series DMA driver"); 1453 MODULE_LICENSE("GPL"); 1454