/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   covers the MPC8560, MPC8555, MPC8548, MPC8641, and similar parts.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA reads
 * data from the source address (SA), a loop size of 4 makes it read from
 * SA, SA + 1, SA + 2, SA + 3, then loop back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * writes data to the destination address (TA), a loop size of 4 makes it
 * write to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, TA + 1, and
 * so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

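/*
 * Note on the mode register encodings used by the three helpers above. This
 * summary is inferred from the shifts in this file; the reference manual
 * remains the authoritative description of the register layout.
 *
 *   - source address hold size:      MR[SAHE] plus (__ilog2(size) << 14)
 *   - destination address hold size: MR[DAHE] plus (__ilog2(size) << 16)
 *   - external request count:        (__ilog2(size) << 24) & 0x0f000000
 *
 * For example, a request count of 1024 bytes encodes as __ilog2(1024) = 10,
 * i.e. 0x0a000000 in the mode register.
 */
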
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an external
 * DMA start pin, so dma_start() does not begin the transfer immediately.
 * The DMA channel waits until the control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}

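/*
 * Descriptor lifecycle overview, summarized from the code in this file; the
 * flow is the generic dmaengine model rather than anything fsldma-specific:
 *
 *  1. fsl_dma_alloc_chan_resources() creates the per-channel dma_pool.
 *  2. The prep_*() callbacks allocate link descriptors from the pool, chain
 *     them with set_desc_next() and mark the last one with set_ld_eol().
 *  3. fsl_dma_tx_submit() assigns cookies and appends the chain to ld_pending.
 *  4. device_issue_pending() programs CDAR and starts the hardware.
 *  5. The interrupt handler schedules dma_do_tasklet(), which completes the
 *     cookies, runs callbacks and returns the descriptors to the pool.
 */
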
/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * The descriptor must be 32-byte aligned to meet the FSL DMA
	 * specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: DMAEngine flags
 * @context: transaction context (ignored)
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_MEM_TO_DEV)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}

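/*
 * Illustrative only: a client wanting to reach the DMA_SLAVE_CONFIG and
 * FSLDMA_EXTERNAL_START paths above would normally go through the generic
 * dmaengine wrappers of this kernel generation, roughly as follows (the
 * values shown are an example, not taken from an in-tree user):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	dmaengine_device_control(chan, FSLDMA_EXTERNAL_START, 1);
 */
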
/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	struct device *dev = chan->common.device->dev;
	dma_addr_t src = get_desc_src(chan, desc);
	dma_addr_t dst = get_desc_dst(chan, desc);
	u32 len = get_desc_cnt(chan, desc);

	/* Run the link descriptor callback function */
	if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p callback\n", desc);
#endif
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	/* Unmap the dst buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
	}

	/* Unmap the src buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
	}

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p free\n", desc);
#endif
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = DMA_IN(chan, &chan->regs->mr, 32);
		mode &= ~FSL_DMA_MR_CS;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                          */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * On the MPC8349, an EOCDI event requires updating the cookie
	 * and starting the next transfer, if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end of the transfer, clear the
	 * Channel Start bit to prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}

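/*
 * Controller-level interrupt handler. Judging from the mask handling below
 * (initial mask 0xff000000, shifted right by 8 bits per channel), the general
 * status register packs one status byte per channel, with channel 0 in the
 * most significant byte. This summary is inferred from the code; consult the
 * reference manual for the authoritative register layout.
 */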
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                      */
/*----------------------------------------------------------------------------*/

static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also get the 83xx callbacks */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	chan->idle = true;

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                          */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo / Elo Plus DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");