/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which
 *   also fits the MPC8560, MPC8555, MPC8548, MPC8641, and similar parts.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 *   EIE - Error interrupt enable
		 *   EOSIE - End of segments interrupt enable (basic mode)
		 *   EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 *   EOTIE - End-of-transfer interrupt enable
		 *   PRC_RM - PCI read multiple
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}
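
/*
 * Illustrative sketch (not part of the driver): how the helpers above
 * combine to fill in one hardware link descriptor on an 85xx part, for a
 * hypothetical 256-byte segment. The snoop attributes are OR'ed into the
 * upper 32 bits of the 64-bit address fields:
 *
 *	struct fsl_dma_ld_hw hw;
 *
 *	set_desc_cnt(fsl_chan, &hw, 256);	// hw.count = 256
 *	set_desc_src(fsl_chan, &hw, src);	// SREADTYPE_SNOOP_READ | src
 *	set_desc_dest(fsl_chan, &hw, dest);	// DWRITETYPE_SNOOP_WRITE | dest
 *
 * On 83xx parts the snoop attribute words are 0 and the addresses are
 * stored unmodified.
 */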

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);
	}

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}

static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(fsl_chan))
			break;
		udelay(10);
	}
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
		| snoop_bits, 64);
}

static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
				struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link the new descriptor's physical address into the previous
	 * tail's next-link pointer, and enable the End-of-segment
	 * interrupt on that last link descriptor.
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit also needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		new_desc->async_tx.phys | FSL_DMA_EOSIE |
		(((fsl_chan->feature & FSL_DMA_IP_MASK)
			== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}
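
/*
 * Illustrative sketch (not part of the driver): what a two-segment chain
 * looks like after the helpers above have run, assuming hypothetical
 * descriptors d1 and d2 on an 83xx part (next_ln_addr starts out zeroed):
 *
 *	set_desc_next(fsl_chan, &d1->hw, d2->async_tx.phys);
 *	set_ld_eol(fsl_chan, d2);
 *
 *	d1->hw.next_ln_addr == FSL_DMA_SNEN | physical address of d2
 *	d2->hw.next_ln_addr == FSL_DMA_SNEN | FSL_DMA_EOL
 *
 * append_ld_queue() performs the same next-link update on the old tail
 * to splice a newly submitted chain onto a channel's pending list.
 */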

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold transfer size. The source address hold
 * or loop transfer size is in effect while the DMA transfers data from
 * the source address (SA). If the loop size is 4, the DMA will read
 * data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
 * SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold transfer size. The destination
 * address hold or loop transfer size is in effect while the DMA
 * transfers data to the destination address (TA). If the loop size is
 * 4, the DMA will write data to TA, TA + 1, TA + 2, TA + 3, then loop
 * back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @fsl_chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
{
	BUG_ON(size > 1024);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| ((__ilog2(size) << 24) & 0x0f000000),
		32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
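
/*
 * Illustrative sketch (not part of the driver): a board-specific client
 * could combine the external-control hooks above through the function
 * pointers that fsl_dma_chan_probe() installs on each channel, e.g. to
 * pause after every 64-byte burst until DREQ# is asserted again:
 *
 *	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
 *
 *	fsl_chan->set_request_count(fsl_chan, 64);
 *	fsl_chan->toggle_ext_pause(fsl_chan, 1);
 *	fsl_chan->toggle_ext_start(fsl_chan, 1);
 *
 * The 64-byte burst is a hypothetical value; note that toggle_ext_pause
 * is only installed on 85xx parts (see fsl_dma_chan_probe() below).
 */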

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately. The DMA channel will wait for the control pin to be
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	fsl_chan->common.cookie = cookie;
	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}
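
/*
 * Worked example (hypothetical numbers) for the cookie assignment above:
 * if common.cookie is 41 and a three-segment chain is submitted, the
 * descriptors receive cookies 42, 43 and 44 in list order, common.cookie
 * becomes 44, and 44 is returned to the client. On signed overflow the
 * counter wraps back to 1, keeping valid cookies positive.
 */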

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The descriptor allocated, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		INIT_LIST_HEAD(&desc_sw->tx_list);
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	struct list_head *list;
	size_t copy;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate the link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, list, node) {
		list_del(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}
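
/*
 * Illustrative client-side sketch (not part of the driver): a typical
 * dmaengine memcpy through this driver. "chan" is assumed to have been
 * obtained via dma_request_channel() with DMA_MEMCPY capability, and
 * dma_src/dma_dest are assumed to be DMA-mapped already:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
 *						   len, DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = tx->tx_submit(tx);		// fsl_dma_tx_submit()
 *	dma_async_issue_pending(chan);		// fsl_dma_memcpy_issue_pending()
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *			!= DMA_SUCCESS)
 *		cpu_relax();			// polling just for illustration
 */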

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: every link
 * descriptor that has completed is removed from the queue, its callback
 * is run, and it is returned to the descriptor pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}
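
/*
 * Worked example of the dma_async_is_complete() test above (hypothetical
 * numbers): with completed_cookie = 10 and common.cookie = 15, cookies
 * 11..15 report DMA_IN_PROGRESS and the cleanup loop stops at the first
 * such descriptor; cookies <= 10 report DMA_SUCCESS and are recycled.
 */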

/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in the channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	if (!dma_is_idle(fsl_chan))
		goto out_unlock;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred yet, we need to start the transfer.
	 */

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from the ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n",
				(unsigned long long)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
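
/*
 * Transfer lifecycle overview (a summary of the routines above and the
 * interrupt path below): the client submits a prepared chain with
 * tx_submit(), which appends it to ld_queue; dma_async_issue_pending()
 * points CDAR at the first untransferred descriptor and starts the
 * channel; the interrupt handler updates the completed cookie and, if
 * needed, restarts the queue; the tasklet then runs fsl_chan_ld_cleanup()
 * to invoke callbacks and recycle finished descriptors.
 */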

static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
			fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT
			 * async_tx. Update the completed cookie, and
			 * continue with the next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(fsl_chan),
			(unsigned long long)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	/* Each channel owns an 8-bit field in the general status register,
	 * with channel 0 in the most significant byte.
	 */
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}
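
/*
 * Worked example of the decode above: ffs() returns the 1-based position
 * of the least significant set bit. If channel 0 raised an event, its
 * status bits live in the top byte of gsr, so ffs(gsr) falls between 25
 * and 32 and (32 - ffs(gsr)) / 8 evaluates to 0. If only channel 3's
 * byte is set, ffs(gsr) falls between 1 and 8 and the result is 3.
 */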

static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}

static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from the feature
	 * of its channels, report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = fdev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	/* The channel index is derived from the register offset: each
	 * channel occupies 0x80 bytes, starting at offset 0x100.
	 */
	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: the hooks below apply to both IP blocks */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
		new_fsl_chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
				&fsl_dma_chan_do_interrupt, IRQF_SHARED,
				"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
				"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
		 compatible,
		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}

static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
{
	if (fchan->irq != NO_IRQ)
		free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}

static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsl_dma_device *fdev;
	struct device_node *child;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at 0x%llx...\n",
			match->compatible, (unsigned long long)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}
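
/*
 * Illustrative device tree sketch (values are hypothetical; see the
 * fsl,elo-dma / fsl,eloplus-dma bindings for the authoritative layout).
 * The probe above matches a controller node like this and then walks its
 * dma-channel children:
 *
 *	dma@21300 {
 *		compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
 *		reg = <0x21300 0x4>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges = <0x0 0x21100 0x200>;
 *
 *		dma-channel@0 {
 *			compatible = "fsl,mpc8548-dma-channel",
 *				     "fsl,eloplus-dma-channel";
 *			reg = <0x0 0x80>;
 *			interrupts = <20 2>;
 *		};
 *	};
 */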

static int of_fsl_dma_remove(struct of_device *of_dev)
{
	struct fsl_dma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}

static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "fsl-elo-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
	.remove = of_fsl_dma_remove,
};

static __init int of_fsl_dma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&of_fsl_dma_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit of_fsl_dma_exit(void)
{
	of_unregister_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_init);
module_exit(of_fsl_dma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");