/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, etc.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include <asm/fsldma.h>
#include "fsldma.h"

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}
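
/*
 * Note on the two helpers above: the hardware descriptor's src_addr and
 * dst_addr fields are 64-bit words whose lower half carries the DMA
 * address and whose upper half carries the transfer attributes (the
 * SATR/DATR words, as the constant names indicate). That is why the
 * 85xx snoop-enable attribute bits are pre-shifted up by 32 before
 * being OR'd with the address.
 */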

static void set_desc_next(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		dev_err(chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsldma_chan *chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the loop is
 * active, the DMA reads from a small window starting at the source
 * address (SA): with a loop size of 4, the DMA reads from SA, SA + 1,
 * SA + 2, SA + 3, then wraps back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
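
/*
 * Worked example for fsl_chan_set_src_loop_size() (illustrative): a call
 * with size == 4 computes __ilog2(4) == 2, so the function ORs
 * FSL_DMA_MR_SAHE into the mode register and programs the value 2 into
 * the hold-size field at bit 14 (the field encodes the window as a power
 * of two: 1, 2, 4, or 8 bytes). A size of 0 clears SAHE and disables the
 * hold window entirely.
 */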

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the loop
 * is active, the DMA writes to a small window starting at the destination
 * address (TA): with a loop size of 4, the DMA writes to TA, TA + 1,
 * TA + 2, TA + 3, then wraps back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel is started by an external
 * DMA start pin, so dma_start() does not begin the transfer immediately;
 * the channel waits until the control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
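
/*
 * Illustrative use of the external-control hooks above (sketch only; the
 * byte count is a hypothetical example): a board driver that wants the
 * channel to pause after every 64 bytes until DREQ# is re-asserted would
 * combine the two features:
 *
 *	fsl_chan_set_request_count(chan, 64);
 *	fsl_chan_toggle_ext_pause(chan, 1);
 *
 * The pause takes effect in dma_start(), which sets FSL_DMA_MR_EMP_EN
 * only when FSL_DMA_CHAN_PAUSE_EXT is set in chan->feature.
 */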

static void append_ld_queue(struct fsldma_chan *chan,
			    struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - the allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_dbg(chan->dev, "out of memory for link desc\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}
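
/*
 * Client-side sketch (illustrative, not part of this driver): how the
 * descriptor/cookie machinery above is normally driven through the
 * generic dmaengine API of this kernel generation. "my_callback" is a
 * hypothetical completion handler supplied by the client.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	tx->callback = my_callback;
 *	cookie = tx->tx_submit(tx);	-- lands in fsl_dma_tx_submit()
 *	chan->device->device_issue_pending(chan);
 */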

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
					  chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate channel %d "
				   "descriptor pool\n", chan->id);
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		dev_err(chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}
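
/*
 * Note: the DMA_INTERRUPT descriptor prepared above is a NULL transfer;
 * it carries no payload, so executing it raises a Programming Error with
 * BCR == 0. fsldma_chan_irq() recognizes exactly that combination and
 * treats it as the completion of an interrupt descriptor.
 */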

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev,
				"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
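
/*
 * Worked example (illustrative): a memcpy longer than FSL_DMA_BCR_MAX_CNT
 * is split by the loop above into a chain of link descriptors, each
 * moving at most FSL_DMA_BCR_MAX_CNT bytes and each pointing at the next
 * via next_ln_addr. Only the final descriptor is marked EOL, carries the
 * client's flags, and is left un-acked; the intermediate descriptors are
 * acked immediately since the client never sees them.
 */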

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsl_dma_slave *slave;
	size_t copy;

	int i;
	struct scatterlist *sg;
	size_t sg_used;
	size_t hw_used;
	struct fsl_dma_hw_addr *hw;
	dma_addr_t dma_dst, dma_src;

	if (!dchan)
		return NULL;

	if (!dchan->private)
		return NULL;

	chan = to_fsl_chan(dchan);
	slave = dchan->private;

	if (list_empty(&slave->addresses))
		return NULL;

	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
	hw_used = 0;

	/*
	 * Build the hardware transaction to copy from the scatterlist to
	 * the hardware, or from the hardware to the scatterlist
	 *
	 * If you are copying from the hardware to the scatterlist and it
	 * takes two hardware entries to fill an entire page, then both
	 * hardware entries will be coalesced into the same page
	 *
	 * If you are copying from the scatterlist to the hardware and a
	 * single page can fill two hardware entries, then the data will
	 * be read out of the page into the first hardware entry, and so on
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/*
			 * If we've used up the current hardware address/length
			 * pair, we need to load a new one
			 *
			 * This is done in a while loop so that descriptors with
			 * length == 0 will be skipped
			 */
			while (hw_used >= hw->length) {

				/*
				 * If the current hardware entry is the last
				 * entry in the list, we're finished
				 */
				if (list_is_last(&hw->entry, &slave->addresses))
					goto finished;

				/* Get the next hardware address/length pair */
				hw = list_entry(hw->entry.next,
						struct fsl_dma_hw_addr, entry);
				hw_used = 0;
			}

			/* Allocate the link descriptor from the DMA pool */
			new = fsl_dma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "No free memory for "
						   "link descriptor\n");
				goto fail;
			}
#ifdef FSL_DMA_LD_DEBUG
			dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     hw->length - hw_used);
			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

			/*
			 * DMA_FROM_DEVICE
			 * from the hardware to the scatterlist
			 *
			 * DMA_TO_DEVICE
			 * from the scatterlist to the hardware
			 */
			if (direction == DMA_FROM_DEVICE) {
				dma_src = hw->address + hw_used;
				dma_dst = sg_dma_address(sg) + sg_used;
			} else {
				dma_src = sg_dma_address(sg) + sg_used;
				dma_dst = hw->address + hw_used;
			}

			/* Fill in the descriptor */
			set_desc_cnt(chan, &new->hw, copy);
			set_desc_src(chan, &new->hw, dma_src);
			set_desc_dst(chan, &new->hw, dma_dst);

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 */
			if (!first) {
				first = new;
			} else {
				set_desc_next(chan, &prev->hw,
					      new->async_tx.phys);
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;
			hw_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

finished:

	/* All of the hardware address/length pairs had length == 0 */
	if (!first || !new)
		return NULL;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of the new list */
	set_ld_eol(chan, new);

	/* Enable extra controller features */
	if (chan->set_src_loop_size)
		chan->set_src_loop_size(chan, slave->src_loop_size);

	if (chan->set_dst_loop_size)
		chan->set_dst_loop_size(chan, slave->dst_loop_size);

	if (chan->toggle_ext_start)
		chan->toggle_ext_start(chan, slave->external_start);

	if (chan->toggle_ext_pause)
		chan->toggle_ext_pause(chan, slave->external_pause);

	if (chan->set_request_count)
		chan->set_request_count(chan, slave->request_count);

	return &first->async_tx;

fail:
	/*
	 * If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done
	 */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 *
	 * We're re-using variables for the loop, oh well
	 */
	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
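
/*
 * Client-side sketch for the DMA_SLAVE path (illustrative; the field
 * names are the ones referenced above from <asm/fsldma.h>, the values
 * are hypothetical): the client describes the device-side window with
 * one or more fsl_dma_hw_addr entries, hangs the fsl_dma_slave off
 * chan->private, and then prepares the transfer:
 *
 *	struct fsl_dma_slave *slave = ...;	-- allocated by the client
 *	struct fsl_dma_hw_addr *hw = ...;
 *
 *	hw->address = fifo_phys_addr;		-- device-side DMA address
 *	hw->length = fifo_size;
 *	list_add_tail(&hw->entry, &slave->addresses);
 *	slave->src_loop_size = 4;		-- hold the read window
 *
 *	dchan->private = slave;
 *	tx = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
 *						 DMA_FROM_DEVICE, 0);
 */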

static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct fsldma_chan *chan;
	unsigned long flags;

	if (!dchan)
		return;

	chan = to_fsl_chan(dchan);

	/* Halt the DMA engine */
	dma_halt(chan);

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* Remove and free all of the descriptors in the LD queue */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @chan : Freescale DMA channel
 *
 * CONTEXT: hardirq
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	if (list_empty(&chan->ld_running)) {
		dev_dbg(chan->dev, "no running descriptors\n");
		goto out_unlock;
	}

	/* Get the last descriptor, update the cookie to that */
	desc = to_fsl_desc(chan->ld_running.prev);
	if (dma_is_idle(chan))
		cookie = desc->async_tx.cookie;
	else {
		cookie = desc->async_tx.cookie - 1;
		if (unlikely(cookie < DMA_MIN_COOKIE))
			cookie = DMA_MAX_COOKIE;
	}

	chan->completed_cookie = cookie;

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsldma_desc_status - Check the status of a descriptor
 * @chan: Freescale DMA channel
 * @desc: DMA SW descriptor
 *
 * This function will return the status of the given descriptor
 */
static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
					  struct fsl_desc_sw *desc)
{
	return dma_async_is_complete(desc->async_tx.cookie,
				     chan->completed_cookie,
				     chan->common.cookie);
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function cleans up the list of running descriptors (ld_running)
 * of the DMA channel.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
			break;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->desc_lock, flags);
			dev_dbg(chan->dev, "LD %p callback\n", desc);
			callback(callback_param);
			spin_lock_irqsave(&chan->desc_lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * This will make sure that any pending transactions will be run.
 * If the DMA controller is idle, it will be started. Otherwise,
 * the DMA controller's interrupt handler will start any pending
 * transactions when it becomes idle.
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		dev_dbg(chan->dev, "no pending LDs\n");
		goto out_unlock;
	}

	/*
	 * The DMA controller is not idle, which means the interrupt
	 * handler will start any queued transactions when it runs
	 * at the end of the current transaction
	 */
	if (!dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * TODO:
	 * make sure the dma_halt() function really un-wedges the
	 * controller as much as possible
	 */
	dma_halt(chan);

	/*
	 * There are link descriptors that have not been transferred yet,
	 * so we need to start the controller. Move all elements from the
	 * queue of pending transactions onto the list of running
	 * transactions.
	 */
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	fsl_chan_xfer_ld_queue(chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
					   dma_cookie_t cookie,
					   dma_cookie_t *done,
					   dma_cookie_t *used)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(chan);

	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
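
/*
 * Polling sketch (illustrative, not part of this driver): a client that
 * does not use a completion callback can poll the state exported above
 * through the generic dmaengine helper of this kernel generation:
 *
 *	while (dma_async_is_tx_complete(dchan, cookie, NULL, NULL)
 *						== DMA_IN_PROGRESS)
 *		cpu_relax();
 */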

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                          */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	int update_cookie = 0;
	int xfer_ld_q = 0;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(chan->dev, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(chan->dev, "irq: Programming Error INT\n");
		if (get_bcr(chan) == 0) {
			/*
			 * The BCR register is 0, so this is a DMA_INTERRUPT
			 * async_tx. Update the completed cookie, and continue
			 * with the next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/*
	 * If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(chan->dev, "irq: End-of-segments INT\n");
		dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(chan),
			(unsigned long long)get_ndar(chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/*
	 * On the MPC8349, an EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/*
	 * If the current transfer is the end-of-transfer, we should
	 * clear the Channel Start bit to prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(chan->dev, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(chan);
	if (stat)
		dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);

	dev_dbg(chan->dev, "irq: Exit\n");
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(chan);
}
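
/*
 * The controller-level handler below demultiplexes per-channel
 * interrupts: each channel's status occupies one byte of the 32-bit
 * global status register, with channel 0 in the most significant byte.
 * Hence the initial 0xff000000 mask, shifted right by 8 per channel.
 */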
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			dev_err(fdev->dev, "no interrupts property defined for "
					   "DMA channel %d. Please fix your "
					   "device tree\n", chan->id);
			ret = -ENODEV;
			goto out_unwind;
		}

		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			dev_err(fdev->dev, "unable to request IRQ for DMA "
					   "channel %d\n", chan->id);
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	/* channel "i" never got its IRQ, so only unwind the channels before it */
	for (i = i - 1; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                      */
/*----------------------------------------------------------------------------*/
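
/*
 * Illustrative device tree fragment matched by the probe code below
 * (register offsets and interrupt specifiers are board-specific
 * examples, not requirements of this driver):
 *
 *	dma@21300 {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
 *		reg = <0x21300 0x4>;
 *		ranges = <0x0 0x21100 0x200>;
 *
 *		dma-channel@0 {
 *			compatible = "fsl,mpc8548-dma-channel",
 *				     "fsl,eloplus-dma-channel";
 *			reg = <0x0 0x80>;
 *			interrupt-parent = <&mpic>;
 *			interrupts = <20 2>;
 *		};
 *	};
 */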

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: the 85xx also gets the common callbacks */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to the DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct of_device *op,
				     const struct of_device_id *match)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &op->dev;

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hook up the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct of_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver fsldma_of_driver = {
	.name = "fsl-elo-dma",
	.match_table = fsldma_of_ids,
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                          */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");