/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

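/*
 * Descriptor bookkeeping in a nutshell: descriptors start out on a
 * channel's free_list, get chained together by the prep_* functions,
 * move to active_list (or to queue when the channel is already busy)
 * in atc_tx_submit(), and return to free_list once the completion
 * tasklet has processed them.
 */
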
/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc *desc = NULL;
	struct at_dma *atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
			"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
				"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

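/*
 * The chain built by atc_desc_chain() is mirrored in hardware: each
 * lli.dscr holds the bus address of the next hardware descriptor, and
 * the prep_* functions mark the last link with set_desc_eol() so the
 * controller stops fetching there.
 */
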
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			" channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

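/*
 * Note on atc_dostart(): only DSCR needs to point at the first hardware
 * descriptor; SADDR/DADDR/CTRLA/CTRLB are cleared because the controller
 * reloads them from the linked list item once the channel is enabled
 * through CHER.
 */
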
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback callback = txd->callback;
		void *param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
		"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
		" cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc *first = atc_first_active(atchan);
	struct dma_async_tx_descriptor *txd = &first->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma *atdma = (struct at_dma *)dev_id;
	struct at_dma_chan *atchan;
	int i;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}

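/*
 * The interrupt handler above keeps re-reading EBCIMR/EBCISR until no
 * masked event is pending, so buffer-transfer-complete and error events
 * that arrive while it runs are not lost; the actual descriptor
 * housekeeping is deferred to the per-channel tasklet.
 */
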
/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc *desc = txd_to_at_desc(tx);
	struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctrla;
	u32 ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

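/*
 * Example of the chunking above: for a word-aligned copy the word width
 * is selected (src_width = dst_width = 2), each link descriptor then
 * covers up to ATC_BTSIZE_MAX transfers of 4 bytes, and the loop keeps
 * emitting link descriptors until offset reaches len.
 */
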
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	u32 ctrla;
	u32 ctrlb;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |= ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |= ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_transfer_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

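/*
 * In other words: a period may not exceed ATC_BTSIZE_MAX transfer-width
 * units (the BTSIZE field programmed into CTRLA counts transfers of
 * reg_width bytes, not bytes), and both the buffer address and the
 * period length must be aligned to the transfer width.
 */
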
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	u32 ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr,
					period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc *desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

static int set_runtime_config(struct dma_chan *chan,
			      struct dma_slave_config *sconfig)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}

static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc *desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	unsigned long flags;
	enum dma_status ret;

	spin_lock_irqsave(&atchan->lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

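/*
 * Note: the residue reported above is coarse: it is the total length of
 * the first active transfer, not the number of bytes still outstanding
 * in hardware.
 */
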
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan)) {
		atc_advance_work(atchan);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	struct at_dma_slave *atslave;
	unsigned long flags;
	int i;
	u32 cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource *io;
	struct at_dma *atdma;
	size_t size;
	int irq;
	int err;
	int i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan *atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;
	struct resource *io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan *chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");