/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This can
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient.  This bit will be cleared
 * and control will be given to the client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain by adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Submit queued descriptors, if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

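/*
 * Illustrative sketch (not part of this driver): a mem-to-mem client would
 * normally reach atc_prep_dma_memcpy() above through the generic dmaengine
 * API rather than call it directly.  The buffer handles (dst_phys, src_phys,
 * len) below are placeholders and must already be DMA-mapped.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan) {
 *		tx = chan->device->device_prep_dma_memcpy(chan, dst_phys,
 *							   src_phys, len,
 *							   DMA_CTRL_ACK);
 *		if (tx) {
 *			dmaengine_submit(tx);
 *			dma_async_issue_pending(chan);
 *		}
 *	}
 */
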
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

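/*
 * Illustrative sketch (not part of this driver): a peripheral driver usually
 * reaches atc_prep_slave_sg() through the generic slave API.  The channel is
 * expected to carry a struct at_dma_slave in chan->private (platform data);
 * the FIFO address, scatterlist and callback names below are placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= periph_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 1,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (tx) {
 *		tx->callback = my_xfer_done;
 *		tx->callback_param = my_data;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */
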
/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_transfer_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr,
					period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

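/*
 * Illustrative sketch (not part of this driver): audio-style clients set up
 * a cyclic transfer once and then only react to the per-period callback
 * delivered via atc_handle_cyclic().  Buffer, period and callback names are
 * placeholders; the call matches this driver's device_prep_dma_cyclic hook.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = chan->device->device_prep_dma_cyclic(chan, buf_phys, buf_len,
 *						   period_len, DMA_MEM_TO_DEV,
 *						   NULL);
 *	if (tx) {
 *		tx->callback = my_period_elapsed;
 *		tx->callback_param = my_data;
 *		dmaengine_submit(tx);
 *	}
 */
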
static int set_runtime_config(struct dma_chan *chan,
			      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	/* Check if this channel is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}


static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

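/*
 * Illustrative sketch (not part of this driver): clients reach atc_control()
 * through the generic dmaengine wrappers, for example to suspend a slave
 * channel around a peripheral reconfiguration, or to abort all outstanding
 * work on error.  "chan" is assumed to be a channel of this controller.
 *
 *	dmaengine_pause(chan);
 *	...reprogram the peripheral...
 *	dmaengine_resume(chan);
 *
 *	dmaengine_terminate_all(chan);	(drops queued and active descriptors)
 */
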
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

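/*
 * Illustrative sketch (not part of this driver): a client that polls instead
 * of using completion callbacks can check a cookie returned by
 * dmaengine_submit(); atc_tx_status() is reached through the
 * device_tx_status hook.
 *
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	if (status == DMA_SUCCESS)
 *		...transfer finished...
 *	else if (status == DMA_PAUSED)
 *		...channel is suspended...
 */
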
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan)) {
		atc_advance_work(atchan);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should be paused by the user;
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");