/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}

}

static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}
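
/*
 * Illustrative sketch (not part of the driver): the helpers above fill in one
 * hardware link descriptor.  A hypothetical caller building a single segment
 * might combine them roughly like this, with 'chan', 'hw', 'src', 'dst',
 * 'len' and 'next_desc_phys' standing in for real values:
 *
 *	set_desc_cnt(chan, hw, len);             // transfer byte count
 *	set_desc_src(chan, hw, src);             // source address (+ snoop bits on 85xx)
 *	set_desc_dest(chan, hw, dst);            // destination address (+ snoop bits on 85xx)
 *	set_desc_next(chan, hw, next_desc_phys); // link to the next descriptor, or
 *	                                         // mark the end with set_ld_eol()
 *
 * fsl_dma_prep_memcpy() below is the real user of these helpers.
 */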

static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);
	}

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}

static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(fsl_chan))
			break;
		udelay(10);
	}
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link the new descriptor's physical address into, and enable the
	 * End-of-segment interrupt on, the last link descriptor already in
	 * the queue (the previous node's next link descriptor).
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit also needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		new_desc->async_tx.phys | FSL_DMA_EOSIE |
		(((fsl_chan->feature & FSL_DMA_IP_MASK)
			== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size.  While the DMA engine
 * transfers data from the source address (SA), it cycles through a window
 * of this size: with a loop size of 4, the DMA reads data from SA, SA + 1,
 * SA + 2, SA + 3, then loops back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size.  While the DMA
 * engine transfers data to the destination address (TA), it cycles through
 * a window of this size: with a loop size of 4, the DMA writes data to TA,
 * TA + 1, TA + 2, TA + 3, then loops back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}
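
/*
 * Worked example (illustration only): the loop size is encoded as a power of
 * two in the mode register.  For a 4-byte loop,
 *
 *	__ilog2(4) == 2, so the two helpers above write
 *	MR |= FSL_DMA_MR_SAHE | (2 << 14)	for the source side, or
 *	MR |= FSL_DMA_MR_DAHE | (2 << 16)	for the destination side.
 *
 * A size of 0 clears the hold-enable bit again.  Only 1, 2, 4 and 8 byte
 * windows are accepted; other values fall through the switch unchanged.
 */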

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size     : Pause control size, 0 to disable external pause control.
 *             The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The pause control size is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If the external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately.  The DMA channel will wait until the control pin is
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	fsl_chan->common.cookie = cookie;
	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		INIT_LIST_HEAD(&desc_sw->tx_list);
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}


/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* The descriptors must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	struct list_head *list;
	size_t copy;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {

		/* Allocate the link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, list, node) {
		list_del(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}
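
/*
 * Usage sketch (illustration only, not part of the driver): a dmaengine
 * client would drive the memcpy path above roughly as follows.  The names
 * 'chan', 'dst', 'src', 'len', 'my_callback' and 'my_data' are placeholders;
 * DMA mapping of the buffers and error handling are omitted.
 *
 *	struct dma_device *dev = chan->device;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dev->device_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	tx->callback = my_callback;		// optional completion callback
 *	tx->callback_param = my_data;
 *	cookie = tx->tx_submit(tx);		// lands in fsl_dma_tx_submit()
 *	dma_async_issue_pending(chan);		// kicks fsl_dma_memcpy_issue_pending()
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
 *		cpu_relax();			// or simply wait for the callback
 */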

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: every link
 * descriptor whose cookie has completed is removed from the queue, its
 * callback (if any) is run, and the descriptor is returned to the DMA pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in the channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	if (!dma_is_idle(fsl_chan))
		goto out_unlock;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred yet, we need to start the transfer.
	 */

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from the ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n",
				(unsigned long long)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* The BCR register is 0, so this is a DMA_INTERRUPT
			 * async_tx.  Update the completed cookie and continue
			 * with the next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(fsl_chan),
			(unsigned long long)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}
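
/*
 * Worked example (illustration only): in the controller-level handler above,
 * each channel owns one byte of the 32-bit general status register, with
 * channel 0 in the most significant byte.  If, say, channel 1 has raised an
 * event, only bits 23..16 can be set; ffs() then returns a value between 17
 * and 24, and (32 - ffs(gsr)) / 8 evaluates to 1, selecting fdev->chan[1].
 */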

static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}

static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = fdev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
	case FSL_DMA_IP_83XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
					"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "request_irq for DMA channel %s "
					"failed with %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
				compatible,
				new_fsl_chan->irq != NO_IRQ ?
					new_fsl_chan->irq : fdev->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}

static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
{
	if (fchan->irq != NO_IRQ)
		free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}

static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsl_dma_device *fdev;
	struct device_node *child;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at 0x%llx...\n",
			match->compatible, (unsigned long long)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq failed "
					"with %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove().  Instead, we manually instantiate every
	 * DMA channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}
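
/*
 * Illustrative device tree fragment (not part of the driver): the probe
 * routine above expects a controller node with an "fsl,eloplus-dma"
 * (85xx/86xx) or "fsl,elo-dma" (83xx) compatible string and one child node
 * per channel.  The addresses and interrupt specifier below are placeholders;
 * the real values come from the SoC .dtsi and the fsl,dma binding
 * documentation.
 *
 *	dma@21300 {
 *		compatible = "fsl,eloplus-dma";
 *		reg = <0x21300 0x4>;
 *		ranges = <0x0 0x21100 0x200>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		dma-channel@0 {
 *			compatible = "fsl,eloplus-dma-channel";
 *			reg = <0x0 0x80>;
 *			interrupts = <20 2>;
 *		};
 *		// ... dma-channel@80, @100 and @180 follow the same pattern
 *	};
 */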

static int of_fsl_dma_remove(struct of_device *of_dev)
{
	struct fsl_dma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}

static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "fsl-elo-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
	.remove = of_fsl_dma_remove,
};

static __init int of_fsl_dma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&of_fsl_dma_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit of_fsl_dma_exit(void)
{
	of_unregister_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_init);
module_exit(of_fsl_dma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");