/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

static const char msg_ld_oom[] = "No free memory for link descriptor\n";

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
				| FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		dev_err(chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsldma_chan *chan,
		       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold transfer size. While the source address
 * hold (or loop) is active, the DMA engine reads from a small window of
 * addresses: if the loop size is 4, it reads from SA, SA + 1, SA + 2,
 * SA + 3, then wraps back to SA, SA + 1, ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold transfer size. While the destination
 * address hold (or loop) is active, the DMA engine writes to a small
 * window of addresses: if the loop size is 4, it writes to TA, TA + 1,
 * TA + 2, TA + 3, then wraps back to TA, TA + 1, ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
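/*
 * Worked example (illustrative, not from the original sources): for
 * size = 4, __ilog2(4) = 2, so the two-bit transfer-size field is
 * programmed with 2: bits 15:14 of the mode register for the source
 * hold, bits 17:16 for the destination hold. A size of 0 simply clears
 * the respective hold-enable bit (SAHE/DAHE) instead.
 */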
/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count sets how many bytes the channel is allowed to
 * transfer before pausing, after which a new assertion of DREQ# resumes
 * channel operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
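/*
 * Worked example (illustrative): for size = 64, __ilog2(64) = 6, so
 * bits 27:24 of the mode register are programmed with 6 and the channel
 * pauses after every 64 bytes until DREQ# is asserted again. The
 * 0x0f000000 mask confines the write to that four-bit field, which is
 * large enough for the maximum size of 1024 (__ilog2(1024) = 10).
 */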
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If the external start is enabled, the channel can be started by the
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately. The DMA channel waits until the control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static void append_ld_queue(struct fsldma_chan *chan,
			    struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
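/*
 * Cookie numbering, illustrated (not from the original sources): cookies
 * are signed 32-bit values handed out one per link descriptor, growing
 * monotonically. If the counter ever overflows to a negative value it
 * restarts at 1, because negative cookies are reserved for special
 * states; for example, -EBUSY marks a descriptor that has been prepared
 * but not yet assigned a cookie by tx_submit().
 */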
/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
						struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_dbg(chan->dev, "out of memory for link desc\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
					  chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate channel %d "
				   "descriptor pool\n", chan->id);
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		dev_err(chan->dev, msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD list */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, msg_ld_oom);
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor into the LD list */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
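/*
 * Illustrative client-side sketch (not part of this driver): a memcpy
 * through the generic dmaengine API would be prepared, submitted and
 * completed roughly as follows, assuming @dst and @src are DMA-mapped
 * bus addresses and @len is the number of bytes to copy:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *			== DMA_IN_PROGRESS)
 *		cpu_relax();
 *	dma_release_channel(chan);
 */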
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, msg_ld_oom);
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor into the LD list */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Halt the DMA engine */
		dma_halt(chan);

		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_TO_DEVICE)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}
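/*
 * Illustrative example (not from the original sources): a client that
 * wants the channel to pause after each 32-byte burst would issue a
 * DMA_SLAVE_CONFIG such as the following, which ends up calling
 * fsl_chan_set_request_count(chan, 4 * 8):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_TO_DEVICE,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long)&cfg);
 */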
/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @chan : Freescale DMA channel
 *
 * CONTEXT: hardirq
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	if (list_empty(&chan->ld_running)) {
		dev_dbg(chan->dev, "no running descriptors\n");
		goto out_unlock;
	}

	/* Get the last descriptor, update the cookie to that */
	desc = to_fsl_desc(chan->ld_running.prev);
	if (dma_is_idle(chan))
		cookie = desc->async_tx.cookie;
	else {
		cookie = desc->async_tx.cookie - 1;
		if (unlikely(cookie < DMA_MIN_COOKIE))
			cookie = DMA_MAX_COOKIE;
	}

	chan->completed_cookie = cookie;

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsldma_desc_status - Check the status of a descriptor
 * @chan: Freescale DMA channel
 * @desc: DMA SW descriptor
 *
 * This function will return the status of the given descriptor
 */
static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
					  struct fsl_desc_sw *desc)
{
	return dma_async_is_complete(desc->async_tx.cookie,
				     chan->completed_cookie,
				     chan->common.cookie);
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function cleans up completed descriptors on the running list of
 * the DMA channel.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
			break;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->desc_lock, flags);
			dev_dbg(chan->dev, "LD %p callback\n", desc);
			callback(callback_param);
			spin_lock_irqsave(&chan->desc_lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * This will make sure that any pending transactions will be run.
 * If the DMA controller is idle, it will be started. Otherwise,
 * the DMA controller's interrupt handler will start any pending
 * transactions when it becomes idle.
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		dev_dbg(chan->dev, "no pending LDs\n");
		goto out_unlock;
	}

	/*
	 * The DMA controller is not idle, which means the interrupt
	 * handler will start any queued transactions when it runs
	 * at the end of the current transaction
	 */
	if (!dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * TODO:
	 * make sure the dma_halt() function really un-wedges the
	 * controller as much as possible
	 */
	dma_halt(chan);

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	fsl_chan_xfer_ld_queue(chan);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(chan);

	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	int update_cookie = 0;
	int xfer_ld_q = 0;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(chan->dev, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(chan->dev, "irq: Programming Error INT\n");
		if (get_bcr(chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/*
	 * If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(chan->dev, "irq: End-of-segments INT\n");
		dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(chan),
			(unsigned long long)get_ndar(chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/*
	 * On the MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(chan->dev, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(chan);
	if (stat)
		dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);

	dev_dbg(chan->dev, "irq: Exit\n");
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(chan);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			dev_err(fdev->dev, "no interrupts property defined for "
					   "DMA channel %d. Please fix your "
					   "device tree\n", chan->id);
			ret = -ENODEV;
			goto out_unwind;
		}

		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			dev_err(fdev->dev, "unable to request IRQ for DMA "
					   "channel %d\n", chan->id);
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	/* unwind only the IRQs that were actually requested */
	for (i--; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: the 85xx also supports the 83xx callbacks */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	/* unmap the controller registers if they were mapped */
	if (fdev->regs)
		iounmap(fdev->regs);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo / Elo Plus DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");