1 /* 2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) 3 * 4 * Copyright (C) 2008 Atmel Corporation 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * 12 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. 13 * The only Atmel DMA Controller that is not covered by this driver is the one 14 * found on AT91SAM9263. 15 */ 16 17 #include <linux/clk.h> 18 #include <linux/dmaengine.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/dmapool.h> 21 #include <linux/interrupt.h> 22 #include <linux/module.h> 23 #include <linux/platform_device.h> 24 #include <linux/slab.h> 25 #include <linux/of.h> 26 #include <linux/of_device.h> 27 #include <linux/of_dma.h> 28 29 #include "at_hdmac_regs.h" 30 #include "dmaengine.h" 31 32 /* 33 * Glossary 34 * -------- 35 * 36 * at_hdmac : Name of the ATmel AHB DMA Controller 37 * at_dma_ / atdma : ATmel DMA controller entity related 38 * atc_ / atchan : ATmel DMA Channel entity related 39 */ 40 41 #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) 42 #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ 43 |ATC_DIF(AT_DMA_MEM_IF)) 44 45 /* 46 * Initial number of descriptors to allocate for each channel. This could 47 * be increased during dma usage. 48 */ 49 static unsigned int init_nr_desc_per_channel = 64; 50 module_param(init_nr_desc_per_channel, uint, 0644); 51 MODULE_PARM_DESC(init_nr_desc_per_channel, 52 "initial descriptors per channel (default: 64)"); 53 54 55 /* prototypes */ 56 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); 57 58 59 /*----------------------------------------------------------------------*/ 60 61 static struct at_desc *atc_first_active(struct at_dma_chan *atchan) 62 { 63 return list_first_entry(&atchan->active_list, 64 struct at_desc, desc_node); 65 } 66 67 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan) 68 { 69 return list_first_entry(&atchan->queue, 70 struct at_desc, desc_node); 71 } 72 73 /** 74 * atc_alloc_descriptor - allocate and return an initialized descriptor 75 * @chan: the channel to allocate descriptors for 76 * @gfp_flags: GFP allocation flags 77 * 78 * Note: The ack-bit is positioned in the descriptor flag at creation time 79 * to make initial allocation more convenient. This bit will be cleared 80 * and control will be given to client at usage time (during 81 * preparation functions). 82 */ 83 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, 84 gfp_t gfp_flags) 85 { 86 struct at_desc *desc = NULL; 87 struct at_dma *atdma = to_at_dma(chan->device); 88 dma_addr_t phys; 89 90 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); 91 if (desc) { 92 memset(desc, 0, sizeof(struct at_desc)); 93 INIT_LIST_HEAD(&desc->tx_list); 94 dma_async_tx_descriptor_init(&desc->txd, chan); 95 /* txd.flags will be overwritten in prep functions */ 96 desc->txd.flags = DMA_CTRL_ACK; 97 desc->txd.tx_submit = atc_tx_submit; 98 desc->txd.phys = phys; 99 } 100 101 return desc; 102 } 103 104 /** 105 * atc_desc_get - get an unused descriptor from free_list 106 * @atchan: channel we want a new descriptor for 107 */ 108 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) 109 { 110 struct at_desc *desc, *_desc; 111 struct at_desc *ret = NULL; 112 unsigned long flags; 113 unsigned int i = 0; 114 LIST_HEAD(tmp_list); 115 116 spin_lock_irqsave(&atchan->lock, flags); 117 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 118 i++; 119 if (async_tx_test_ack(&desc->txd)) { 120 list_del(&desc->desc_node); 121 ret = desc; 122 break; 123 } 124 dev_dbg(chan2dev(&atchan->chan_common), 125 "desc %p not ACKed\n", desc); 126 } 127 spin_unlock_irqrestore(&atchan->lock, flags); 128 dev_vdbg(chan2dev(&atchan->chan_common), 129 "scanned %u descriptors on freelist\n", i); 130 131 /* no more descriptor available in initial pool: create one more */ 132 if (!ret) { 133 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); 134 if (ret) { 135 spin_lock_irqsave(&atchan->lock, flags); 136 atchan->descs_allocated++; 137 spin_unlock_irqrestore(&atchan->lock, flags); 138 } else { 139 dev_err(chan2dev(&atchan->chan_common), 140 "not enough descriptors available\n"); 141 } 142 } 143 144 return ret; 145 } 146 147 /** 148 * atc_desc_put - move a descriptor, including any children, to the free list 149 * @atchan: channel we work on 150 * @desc: descriptor, at the head of a chain, to move to free list 151 */ 152 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) 153 { 154 if (desc) { 155 struct at_desc *child; 156 unsigned long flags; 157 158 spin_lock_irqsave(&atchan->lock, flags); 159 list_for_each_entry(child, &desc->tx_list, desc_node) 160 dev_vdbg(chan2dev(&atchan->chan_common), 161 "moving child desc %p to freelist\n", 162 child); 163 list_splice_init(&desc->tx_list, &atchan->free_list); 164 dev_vdbg(chan2dev(&atchan->chan_common), 165 "moving desc %p to freelist\n", desc); 166 list_add(&desc->desc_node, &atchan->free_list); 167 spin_unlock_irqrestore(&atchan->lock, flags); 168 } 169 } 170 171 /** 172 * atc_desc_chain - build chain adding a descriptor 173 * @first: address of first descriptor of the chain 174 * @prev: address of previous descriptor of the chain 175 * @desc: descriptor to queue 176 * 177 * Called from prep_* functions 178 */ 179 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, 180 struct at_desc *desc) 181 { 182 if (!(*first)) { 183 *first = desc; 184 } else { 185 /* inform the HW lli about chaining */ 186 (*prev)->lli.dscr = desc->txd.phys; 187 /* insert the link descriptor to the LD ring */ 188 list_add_tail(&desc->desc_node, 189 &(*first)->tx_list); 190 } 191 *prev = desc; 192 } 193 194 /** 195 * atc_dostart - starts the DMA engine for real 196 * @atchan: the channel we want to start 197 * @first: first descriptor in the list we want to begin with 198 * 199 * Called with atchan->lock held and bh disabled 200 */ 201 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) 202 { 203 struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 204 205 /* ASSERT: channel is idle */ 206 if (atc_chan_is_enabled(atchan)) { 207 dev_err(chan2dev(&atchan->chan_common), 208 "BUG: Attempted to start non-idle channel\n"); 209 dev_err(chan2dev(&atchan->chan_common), 210 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", 211 channel_readl(atchan, SADDR), 212 channel_readl(atchan, DADDR), 213 channel_readl(atchan, CTRLA), 214 channel_readl(atchan, CTRLB), 215 channel_readl(atchan, DSCR)); 216 217 /* The tasklet will hopefully advance the queue... */ 218 return; 219 } 220 221 vdbg_dump_regs(atchan); 222 223 channel_writel(atchan, SADDR, 0); 224 channel_writel(atchan, DADDR, 0); 225 channel_writel(atchan, CTRLA, 0); 226 channel_writel(atchan, CTRLB, 0); 227 channel_writel(atchan, DSCR, first->txd.phys); 228 dma_writel(atdma, CHER, atchan->mask); 229 230 vdbg_dump_regs(atchan); 231 } 232 233 /** 234 * atc_chain_complete - finish work for one transaction chain 235 * @atchan: channel we work on 236 * @desc: descriptor at the head of the chain we want do complete 237 * 238 * Called with atchan->lock held and bh disabled */ 239 static void 240 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) 241 { 242 struct dma_async_tx_descriptor *txd = &desc->txd; 243 244 dev_vdbg(chan2dev(&atchan->chan_common), 245 "descriptor %u complete\n", txd->cookie); 246 247 /* mark the descriptor as complete for non cyclic cases only */ 248 if (!atc_chan_is_cyclic(atchan)) 249 dma_cookie_complete(txd); 250 251 /* move children to free_list */ 252 list_splice_init(&desc->tx_list, &atchan->free_list); 253 /* move myself to free_list */ 254 list_move(&desc->desc_node, &atchan->free_list); 255 256 /* unmap dma addresses (not on slave channels) */ 257 if (!atchan->chan_common.private) { 258 struct device *parent = chan2parent(&atchan->chan_common); 259 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 260 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) 261 dma_unmap_single(parent, 262 desc->lli.daddr, 263 desc->len, DMA_FROM_DEVICE); 264 else 265 dma_unmap_page(parent, 266 desc->lli.daddr, 267 desc->len, DMA_FROM_DEVICE); 268 } 269 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 270 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) 271 dma_unmap_single(parent, 272 desc->lli.saddr, 273 desc->len, DMA_TO_DEVICE); 274 else 275 dma_unmap_page(parent, 276 desc->lli.saddr, 277 desc->len, DMA_TO_DEVICE); 278 } 279 } 280 281 /* for cyclic transfers, 282 * no need to replay callback function while stopping */ 283 if (!atc_chan_is_cyclic(atchan)) { 284 dma_async_tx_callback callback = txd->callback; 285 void *param = txd->callback_param; 286 287 /* 288 * The API requires that no submissions are done from a 289 * callback, so we don't need to drop the lock here 290 */ 291 if (callback) 292 callback(param); 293 } 294 295 dma_run_dependencies(txd); 296 } 297 298 /** 299 * atc_complete_all - finish work for all transactions 300 * @atchan: channel to complete transactions for 301 * 302 * Eventually submit queued descriptors if any 303 * 304 * Assume channel is idle while calling this function 305 * Called with atchan->lock held and bh disabled 306 */ 307 static void atc_complete_all(struct at_dma_chan *atchan) 308 { 309 struct at_desc *desc, *_desc; 310 LIST_HEAD(list); 311 312 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); 313 314 /* 315 * Submit queued descriptors ASAP, i.e. before we go through 316 * the completed ones. 317 */ 318 if (!list_empty(&atchan->queue)) 319 atc_dostart(atchan, atc_first_queued(atchan)); 320 /* empty active_list now it is completed */ 321 list_splice_init(&atchan->active_list, &list); 322 /* empty queue list by moving descriptors (if any) to active_list */ 323 list_splice_init(&atchan->queue, &atchan->active_list); 324 325 list_for_each_entry_safe(desc, _desc, &list, desc_node) 326 atc_chain_complete(atchan, desc); 327 } 328 329 /** 330 * atc_cleanup_descriptors - cleanup up finished descriptors in active_list 331 * @atchan: channel to be cleaned up 332 * 333 * Called with atchan->lock held and bh disabled 334 */ 335 static void atc_cleanup_descriptors(struct at_dma_chan *atchan) 336 { 337 struct at_desc *desc, *_desc; 338 struct at_desc *child; 339 340 dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n"); 341 342 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { 343 if (!(desc->lli.ctrla & ATC_DONE)) 344 /* This one is currently in progress */ 345 return; 346 347 list_for_each_entry(child, &desc->tx_list, desc_node) 348 if (!(child->lli.ctrla & ATC_DONE)) 349 /* Currently in progress */ 350 return; 351 352 /* 353 * No descriptors so far seem to be in progress, i.e. 354 * this chain must be done. 355 */ 356 atc_chain_complete(atchan, desc); 357 } 358 } 359 360 /** 361 * atc_advance_work - at the end of a transaction, move forward 362 * @atchan: channel where the transaction ended 363 * 364 * Called with atchan->lock held and bh disabled 365 */ 366 static void atc_advance_work(struct at_dma_chan *atchan) 367 { 368 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); 369 370 if (atc_chan_is_enabled(atchan)) 371 return; 372 373 if (list_empty(&atchan->active_list) || 374 list_is_singular(&atchan->active_list)) { 375 atc_complete_all(atchan); 376 } else { 377 atc_chain_complete(atchan, atc_first_active(atchan)); 378 /* advance work */ 379 atc_dostart(atchan, atc_first_active(atchan)); 380 } 381 } 382 383 384 /** 385 * atc_handle_error - handle errors reported by DMA controller 386 * @atchan: channel where error occurs 387 * 388 * Called with atchan->lock held and bh disabled 389 */ 390 static void atc_handle_error(struct at_dma_chan *atchan) 391 { 392 struct at_desc *bad_desc; 393 struct at_desc *child; 394 395 /* 396 * The descriptor currently at the head of the active list is 397 * broked. Since we don't have any way to report errors, we'll 398 * just have to scream loudly and try to carry on. 399 */ 400 bad_desc = atc_first_active(atchan); 401 list_del_init(&bad_desc->desc_node); 402 403 /* As we are stopped, take advantage to push queued descriptors 404 * in active_list */ 405 list_splice_init(&atchan->queue, atchan->active_list.prev); 406 407 /* Try to restart the controller */ 408 if (!list_empty(&atchan->active_list)) 409 atc_dostart(atchan, atc_first_active(atchan)); 410 411 /* 412 * KERN_CRITICAL may seem harsh, but since this only happens 413 * when someone submits a bad physical address in a 414 * descriptor, we should consider ourselves lucky that the 415 * controller flagged an error instead of scribbling over 416 * random memory locations. 417 */ 418 dev_crit(chan2dev(&atchan->chan_common), 419 "Bad descriptor submitted for DMA!\n"); 420 dev_crit(chan2dev(&atchan->chan_common), 421 " cookie: %d\n", bad_desc->txd.cookie); 422 atc_dump_lli(atchan, &bad_desc->lli); 423 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 424 atc_dump_lli(atchan, &child->lli); 425 426 /* Pretend the descriptor completed successfully */ 427 atc_chain_complete(atchan, bad_desc); 428 } 429 430 /** 431 * atc_handle_cyclic - at the end of a period, run callback function 432 * @atchan: channel used for cyclic operations 433 * 434 * Called with atchan->lock held and bh disabled 435 */ 436 static void atc_handle_cyclic(struct at_dma_chan *atchan) 437 { 438 struct at_desc *first = atc_first_active(atchan); 439 struct dma_async_tx_descriptor *txd = &first->txd; 440 dma_async_tx_callback callback = txd->callback; 441 void *param = txd->callback_param; 442 443 dev_vdbg(chan2dev(&atchan->chan_common), 444 "new cyclic period llp 0x%08x\n", 445 channel_readl(atchan, DSCR)); 446 447 if (callback) 448 callback(param); 449 } 450 451 /*-- IRQ & Tasklet ---------------------------------------------------*/ 452 453 static void atc_tasklet(unsigned long data) 454 { 455 struct at_dma_chan *atchan = (struct at_dma_chan *)data; 456 unsigned long flags; 457 458 spin_lock_irqsave(&atchan->lock, flags); 459 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) 460 atc_handle_error(atchan); 461 else if (atc_chan_is_cyclic(atchan)) 462 atc_handle_cyclic(atchan); 463 else 464 atc_advance_work(atchan); 465 466 spin_unlock_irqrestore(&atchan->lock, flags); 467 } 468 469 static irqreturn_t at_dma_interrupt(int irq, void *dev_id) 470 { 471 struct at_dma *atdma = (struct at_dma *)dev_id; 472 struct at_dma_chan *atchan; 473 int i; 474 u32 status, pending, imr; 475 int ret = IRQ_NONE; 476 477 do { 478 imr = dma_readl(atdma, EBCIMR); 479 status = dma_readl(atdma, EBCISR); 480 pending = status & imr; 481 482 if (!pending) 483 break; 484 485 dev_vdbg(atdma->dma_common.dev, 486 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", 487 status, imr, pending); 488 489 for (i = 0; i < atdma->dma_common.chancnt; i++) { 490 atchan = &atdma->chan[i]; 491 if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { 492 if (pending & AT_DMA_ERR(i)) { 493 /* Disable channel on AHB error */ 494 dma_writel(atdma, CHDR, 495 AT_DMA_RES(i) | atchan->mask); 496 /* Give information to tasklet */ 497 set_bit(ATC_IS_ERROR, &atchan->status); 498 } 499 tasklet_schedule(&atchan->tasklet); 500 ret = IRQ_HANDLED; 501 } 502 } 503 504 } while (pending); 505 506 return ret; 507 } 508 509 510 /*-- DMA Engine API --------------------------------------------------*/ 511 512 /** 513 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine 514 * @desc: descriptor at the head of the transaction chain 515 * 516 * Queue chain if DMA engine is working already 517 * 518 * Cookie increment and adding to active_list or queue must be atomic 519 */ 520 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) 521 { 522 struct at_desc *desc = txd_to_at_desc(tx); 523 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); 524 dma_cookie_t cookie; 525 unsigned long flags; 526 527 spin_lock_irqsave(&atchan->lock, flags); 528 cookie = dma_cookie_assign(tx); 529 530 if (list_empty(&atchan->active_list)) { 531 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 532 desc->txd.cookie); 533 atc_dostart(atchan, desc); 534 list_add_tail(&desc->desc_node, &atchan->active_list); 535 } else { 536 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 537 desc->txd.cookie); 538 list_add_tail(&desc->desc_node, &atchan->queue); 539 } 540 541 spin_unlock_irqrestore(&atchan->lock, flags); 542 543 return cookie; 544 } 545 546 /** 547 * atc_prep_dma_memcpy - prepare a memcpy operation 548 * @chan: the channel to prepare operation on 549 * @dest: operation virtual destination address 550 * @src: operation virtual source address 551 * @len: operation length 552 * @flags: tx descriptor status flags 553 */ 554 static struct dma_async_tx_descriptor * 555 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 556 size_t len, unsigned long flags) 557 { 558 struct at_dma_chan *atchan = to_at_dma_chan(chan); 559 struct at_desc *desc = NULL; 560 struct at_desc *first = NULL; 561 struct at_desc *prev = NULL; 562 size_t xfer_count; 563 size_t offset; 564 unsigned int src_width; 565 unsigned int dst_width; 566 u32 ctrla; 567 u32 ctrlb; 568 569 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 570 dest, src, len, flags); 571 572 if (unlikely(!len)) { 573 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 574 return NULL; 575 } 576 577 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN 578 | ATC_SRC_ADDR_MODE_INCR 579 | ATC_DST_ADDR_MODE_INCR 580 | ATC_FC_MEM2MEM; 581 582 /* 583 * We can be a lot more clever here, but this should take care 584 * of the most common optimization. 585 */ 586 if (!((src | dest | len) & 3)) { 587 ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; 588 src_width = dst_width = 2; 589 } else if (!((src | dest | len) & 1)) { 590 ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; 591 src_width = dst_width = 1; 592 } else { 593 ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; 594 src_width = dst_width = 0; 595 } 596 597 for (offset = 0; offset < len; offset += xfer_count << src_width) { 598 xfer_count = min_t(size_t, (len - offset) >> src_width, 599 ATC_BTSIZE_MAX); 600 601 desc = atc_desc_get(atchan); 602 if (!desc) 603 goto err_desc_get; 604 605 desc->lli.saddr = src + offset; 606 desc->lli.daddr = dest + offset; 607 desc->lli.ctrla = ctrla | xfer_count; 608 desc->lli.ctrlb = ctrlb; 609 610 desc->txd.cookie = 0; 611 612 atc_desc_chain(&first, &prev, desc); 613 } 614 615 /* First descriptor of the chain embedds additional information */ 616 first->txd.cookie = -EBUSY; 617 first->len = len; 618 619 /* set end-of-link to the last link descriptor of list*/ 620 set_desc_eol(desc); 621 622 first->txd.flags = flags; /* client is in control of this ack */ 623 624 return &first->txd; 625 626 err_desc_get: 627 atc_desc_put(atchan, first); 628 return NULL; 629 } 630 631 632 /** 633 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 634 * @chan: DMA channel 635 * @sgl: scatterlist to transfer to/from 636 * @sg_len: number of entries in @scatterlist 637 * @direction: DMA direction 638 * @flags: tx descriptor status flags 639 * @context: transaction context (ignored) 640 */ 641 static struct dma_async_tx_descriptor * 642 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 643 unsigned int sg_len, enum dma_transfer_direction direction, 644 unsigned long flags, void *context) 645 { 646 struct at_dma_chan *atchan = to_at_dma_chan(chan); 647 struct at_dma_slave *atslave = chan->private; 648 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 649 struct at_desc *first = NULL; 650 struct at_desc *prev = NULL; 651 u32 ctrla; 652 u32 ctrlb; 653 dma_addr_t reg; 654 unsigned int reg_width; 655 unsigned int mem_width; 656 unsigned int i; 657 struct scatterlist *sg; 658 size_t total_len = 0; 659 660 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", 661 sg_len, 662 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 663 flags); 664 665 if (unlikely(!atslave || !sg_len)) { 666 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); 667 return NULL; 668 } 669 670 ctrla = ATC_SCSIZE(sconfig->src_maxburst) 671 | ATC_DCSIZE(sconfig->dst_maxburst); 672 ctrlb = ATC_IEN; 673 674 switch (direction) { 675 case DMA_MEM_TO_DEV: 676 reg_width = convert_buswidth(sconfig->dst_addr_width); 677 ctrla |= ATC_DST_WIDTH(reg_width); 678 ctrlb |= ATC_DST_ADDR_MODE_FIXED 679 | ATC_SRC_ADDR_MODE_INCR 680 | ATC_FC_MEM2PER 681 | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); 682 reg = sconfig->dst_addr; 683 for_each_sg(sgl, sg, sg_len, i) { 684 struct at_desc *desc; 685 u32 len; 686 u32 mem; 687 688 desc = atc_desc_get(atchan); 689 if (!desc) 690 goto err_desc_get; 691 692 mem = sg_dma_address(sg); 693 len = sg_dma_len(sg); 694 if (unlikely(!len)) { 695 dev_dbg(chan2dev(chan), 696 "prep_slave_sg: sg(%d) data length is zero\n", i); 697 goto err; 698 } 699 mem_width = 2; 700 if (unlikely(mem & 3 || len & 3)) 701 mem_width = 0; 702 703 desc->lli.saddr = mem; 704 desc->lli.daddr = reg; 705 desc->lli.ctrla = ctrla 706 | ATC_SRC_WIDTH(mem_width) 707 | len >> mem_width; 708 desc->lli.ctrlb = ctrlb; 709 710 atc_desc_chain(&first, &prev, desc); 711 total_len += len; 712 } 713 break; 714 case DMA_DEV_TO_MEM: 715 reg_width = convert_buswidth(sconfig->src_addr_width); 716 ctrla |= ATC_SRC_WIDTH(reg_width); 717 ctrlb |= ATC_DST_ADDR_MODE_INCR 718 | ATC_SRC_ADDR_MODE_FIXED 719 | ATC_FC_PER2MEM 720 | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); 721 722 reg = sconfig->src_addr; 723 for_each_sg(sgl, sg, sg_len, i) { 724 struct at_desc *desc; 725 u32 len; 726 u32 mem; 727 728 desc = atc_desc_get(atchan); 729 if (!desc) 730 goto err_desc_get; 731 732 mem = sg_dma_address(sg); 733 len = sg_dma_len(sg); 734 if (unlikely(!len)) { 735 dev_dbg(chan2dev(chan), 736 "prep_slave_sg: sg(%d) data length is zero\n", i); 737 goto err; 738 } 739 mem_width = 2; 740 if (unlikely(mem & 3 || len & 3)) 741 mem_width = 0; 742 743 desc->lli.saddr = reg; 744 desc->lli.daddr = mem; 745 desc->lli.ctrla = ctrla 746 | ATC_DST_WIDTH(mem_width) 747 | len >> reg_width; 748 desc->lli.ctrlb = ctrlb; 749 750 atc_desc_chain(&first, &prev, desc); 751 total_len += len; 752 } 753 break; 754 default: 755 return NULL; 756 } 757 758 /* set end-of-link to the last link descriptor of list*/ 759 set_desc_eol(prev); 760 761 /* First descriptor of the chain embedds additional information */ 762 first->txd.cookie = -EBUSY; 763 first->len = total_len; 764 765 /* first link descriptor of list is responsible of flags */ 766 first->txd.flags = flags; /* client is in control of this ack */ 767 768 return &first->txd; 769 770 err_desc_get: 771 dev_err(chan2dev(chan), "not enough descriptors available\n"); 772 err: 773 atc_desc_put(atchan, first); 774 return NULL; 775 } 776 777 /** 778 * atc_dma_cyclic_check_values 779 * Check for too big/unaligned periods and unaligned DMA buffer 780 */ 781 static int 782 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, 783 size_t period_len) 784 { 785 if (period_len > (ATC_BTSIZE_MAX << reg_width)) 786 goto err_out; 787 if (unlikely(period_len & ((1 << reg_width) - 1))) 788 goto err_out; 789 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 790 goto err_out; 791 792 return 0; 793 794 err_out: 795 return -EINVAL; 796 } 797 798 /** 799 * atc_dma_cyclic_fill_desc - Fill one period descriptor 800 */ 801 static int 802 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, 803 unsigned int period_index, dma_addr_t buf_addr, 804 unsigned int reg_width, size_t period_len, 805 enum dma_transfer_direction direction) 806 { 807 struct at_dma_chan *atchan = to_at_dma_chan(chan); 808 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 809 u32 ctrla; 810 811 /* prepare common CRTLA value */ 812 ctrla = ATC_SCSIZE(sconfig->src_maxburst) 813 | ATC_DCSIZE(sconfig->dst_maxburst) 814 | ATC_DST_WIDTH(reg_width) 815 | ATC_SRC_WIDTH(reg_width) 816 | period_len >> reg_width; 817 818 switch (direction) { 819 case DMA_MEM_TO_DEV: 820 desc->lli.saddr = buf_addr + (period_len * period_index); 821 desc->lli.daddr = sconfig->dst_addr; 822 desc->lli.ctrla = ctrla; 823 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED 824 | ATC_SRC_ADDR_MODE_INCR 825 | ATC_FC_MEM2PER 826 | ATC_SIF(atchan->mem_if) 827 | ATC_DIF(atchan->per_if); 828 break; 829 830 case DMA_DEV_TO_MEM: 831 desc->lli.saddr = sconfig->src_addr; 832 desc->lli.daddr = buf_addr + (period_len * period_index); 833 desc->lli.ctrla = ctrla; 834 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR 835 | ATC_SRC_ADDR_MODE_FIXED 836 | ATC_FC_PER2MEM 837 | ATC_SIF(atchan->per_if) 838 | ATC_DIF(atchan->mem_if); 839 break; 840 841 default: 842 return -EINVAL; 843 } 844 845 return 0; 846 } 847 848 /** 849 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer 850 * @chan: the DMA channel to prepare 851 * @buf_addr: physical DMA address where the buffer starts 852 * @buf_len: total number of bytes for the entire buffer 853 * @period_len: number of bytes for each period 854 * @direction: transfer direction, to or from device 855 * @flags: tx descriptor status flags 856 * @context: transfer context (ignored) 857 */ 858 static struct dma_async_tx_descriptor * 859 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 860 size_t period_len, enum dma_transfer_direction direction, 861 unsigned long flags, void *context) 862 { 863 struct at_dma_chan *atchan = to_at_dma_chan(chan); 864 struct at_dma_slave *atslave = chan->private; 865 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 866 struct at_desc *first = NULL; 867 struct at_desc *prev = NULL; 868 unsigned long was_cyclic; 869 unsigned int reg_width; 870 unsigned int periods = buf_len / period_len; 871 unsigned int i; 872 873 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 874 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 875 buf_addr, 876 periods, buf_len, period_len); 877 878 if (unlikely(!atslave || !buf_len || !period_len)) { 879 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); 880 return NULL; 881 } 882 883 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); 884 if (was_cyclic) { 885 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); 886 return NULL; 887 } 888 889 if (unlikely(!is_slave_direction(direction))) 890 goto err_out; 891 892 if (sconfig->direction == DMA_MEM_TO_DEV) 893 reg_width = convert_buswidth(sconfig->dst_addr_width); 894 else 895 reg_width = convert_buswidth(sconfig->src_addr_width); 896 897 /* Check for too big/unaligned periods and unaligned DMA buffer */ 898 if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) 899 goto err_out; 900 901 /* build cyclic linked list */ 902 for (i = 0; i < periods; i++) { 903 struct at_desc *desc; 904 905 desc = atc_desc_get(atchan); 906 if (!desc) 907 goto err_desc_get; 908 909 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, 910 reg_width, period_len, direction)) 911 goto err_desc_get; 912 913 atc_desc_chain(&first, &prev, desc); 914 } 915 916 /* lets make a cyclic list */ 917 prev->lli.dscr = first->txd.phys; 918 919 /* First descriptor of the chain embedds additional information */ 920 first->txd.cookie = -EBUSY; 921 first->len = buf_len; 922 923 return &first->txd; 924 925 err_desc_get: 926 dev_err(chan2dev(chan), "not enough descriptors available\n"); 927 atc_desc_put(atchan, first); 928 err_out: 929 clear_bit(ATC_IS_CYCLIC, &atchan->status); 930 return NULL; 931 } 932 933 static int set_runtime_config(struct dma_chan *chan, 934 struct dma_slave_config *sconfig) 935 { 936 struct at_dma_chan *atchan = to_at_dma_chan(chan); 937 938 /* Check if it is chan is configured for slave transfers */ 939 if (!chan->private) 940 return -EINVAL; 941 942 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); 943 944 convert_burst(&atchan->dma_sconfig.src_maxburst); 945 convert_burst(&atchan->dma_sconfig.dst_maxburst); 946 947 return 0; 948 } 949 950 951 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 952 unsigned long arg) 953 { 954 struct at_dma_chan *atchan = to_at_dma_chan(chan); 955 struct at_dma *atdma = to_at_dma(chan->device); 956 int chan_id = atchan->chan_common.chan_id; 957 unsigned long flags; 958 959 LIST_HEAD(list); 960 961 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 962 963 if (cmd == DMA_PAUSE) { 964 spin_lock_irqsave(&atchan->lock, flags); 965 966 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 967 set_bit(ATC_IS_PAUSED, &atchan->status); 968 969 spin_unlock_irqrestore(&atchan->lock, flags); 970 } else if (cmd == DMA_RESUME) { 971 if (!atc_chan_is_paused(atchan)) 972 return 0; 973 974 spin_lock_irqsave(&atchan->lock, flags); 975 976 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 977 clear_bit(ATC_IS_PAUSED, &atchan->status); 978 979 spin_unlock_irqrestore(&atchan->lock, flags); 980 } else if (cmd == DMA_TERMINATE_ALL) { 981 struct at_desc *desc, *_desc; 982 /* 983 * This is only called when something went wrong elsewhere, so 984 * we don't really care about the data. Just disable the 985 * channel. We still have to poll the channel enable bit due 986 * to AHB/HSB limitations. 987 */ 988 spin_lock_irqsave(&atchan->lock, flags); 989 990 /* disabling channel: must also remove suspend state */ 991 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); 992 993 /* confirm that this channel is disabled */ 994 while (dma_readl(atdma, CHSR) & atchan->mask) 995 cpu_relax(); 996 997 /* active_list entries will end up before queued entries */ 998 list_splice_init(&atchan->queue, &list); 999 list_splice_init(&atchan->active_list, &list); 1000 1001 /* Flush all pending and queued descriptors */ 1002 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1003 atc_chain_complete(atchan, desc); 1004 1005 clear_bit(ATC_IS_PAUSED, &atchan->status); 1006 /* if channel dedicated to cyclic operations, free it */ 1007 clear_bit(ATC_IS_CYCLIC, &atchan->status); 1008 1009 spin_unlock_irqrestore(&atchan->lock, flags); 1010 } else if (cmd == DMA_SLAVE_CONFIG) { 1011 return set_runtime_config(chan, (struct dma_slave_config *)arg); 1012 } else { 1013 return -ENXIO; 1014 } 1015 1016 return 0; 1017 } 1018 1019 /** 1020 * atc_tx_status - poll for transaction completion 1021 * @chan: DMA channel 1022 * @cookie: transaction identifier to check status of 1023 * @txstate: if not %NULL updated with transaction state 1024 * 1025 * If @txstate is passed in, upon return it reflect the driver 1026 * internal state and can be used with dma_async_is_complete() to check 1027 * the status of multiple cookies without re-checking hardware state. 1028 */ 1029 static enum dma_status 1030 atc_tx_status(struct dma_chan *chan, 1031 dma_cookie_t cookie, 1032 struct dma_tx_state *txstate) 1033 { 1034 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1035 dma_cookie_t last_used; 1036 dma_cookie_t last_complete; 1037 unsigned long flags; 1038 enum dma_status ret; 1039 1040 spin_lock_irqsave(&atchan->lock, flags); 1041 1042 ret = dma_cookie_status(chan, cookie, txstate); 1043 if (ret != DMA_SUCCESS) { 1044 atc_cleanup_descriptors(atchan); 1045 1046 ret = dma_cookie_status(chan, cookie, txstate); 1047 } 1048 1049 last_complete = chan->completed_cookie; 1050 last_used = chan->cookie; 1051 1052 spin_unlock_irqrestore(&atchan->lock, flags); 1053 1054 if (ret != DMA_SUCCESS) 1055 dma_set_residue(txstate, atc_first_active(atchan)->len); 1056 1057 if (atc_chan_is_paused(atchan)) 1058 ret = DMA_PAUSED; 1059 1060 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", 1061 ret, cookie, last_complete ? last_complete : 0, 1062 last_used ? last_used : 0); 1063 1064 return ret; 1065 } 1066 1067 /** 1068 * atc_issue_pending - try to finish work 1069 * @chan: target DMA channel 1070 */ 1071 static void atc_issue_pending(struct dma_chan *chan) 1072 { 1073 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1074 unsigned long flags; 1075 1076 dev_vdbg(chan2dev(chan), "issue_pending\n"); 1077 1078 /* Not needed for cyclic transfers */ 1079 if (atc_chan_is_cyclic(atchan)) 1080 return; 1081 1082 spin_lock_irqsave(&atchan->lock, flags); 1083 atc_advance_work(atchan); 1084 spin_unlock_irqrestore(&atchan->lock, flags); 1085 } 1086 1087 /** 1088 * atc_alloc_chan_resources - allocate resources for DMA channel 1089 * @chan: allocate descriptor resources for this channel 1090 * @client: current client requesting the channel be ready for requests 1091 * 1092 * return - the number of allocated descriptors 1093 */ 1094 static int atc_alloc_chan_resources(struct dma_chan *chan) 1095 { 1096 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1097 struct at_dma *atdma = to_at_dma(chan->device); 1098 struct at_desc *desc; 1099 struct at_dma_slave *atslave; 1100 unsigned long flags; 1101 int i; 1102 u32 cfg; 1103 LIST_HEAD(tmp_list); 1104 1105 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 1106 1107 /* ASSERT: channel is idle */ 1108 if (atc_chan_is_enabled(atchan)) { 1109 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); 1110 return -EIO; 1111 } 1112 1113 cfg = ATC_DEFAULT_CFG; 1114 1115 atslave = chan->private; 1116 if (atslave) { 1117 /* 1118 * We need controller-specific data to set up slave 1119 * transfers. 1120 */ 1121 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); 1122 1123 /* if cfg configuration specified take it instad of default */ 1124 if (atslave->cfg) 1125 cfg = atslave->cfg; 1126 } 1127 1128 /* have we already been set up? 1129 * reconfigure channel but no need to reallocate descriptors */ 1130 if (!list_empty(&atchan->free_list)) 1131 return atchan->descs_allocated; 1132 1133 /* Allocate initial pool of descriptors */ 1134 for (i = 0; i < init_nr_desc_per_channel; i++) { 1135 desc = atc_alloc_descriptor(chan, GFP_KERNEL); 1136 if (!desc) { 1137 dev_err(atdma->dma_common.dev, 1138 "Only %d initial descriptors\n", i); 1139 break; 1140 } 1141 list_add_tail(&desc->desc_node, &tmp_list); 1142 } 1143 1144 spin_lock_irqsave(&atchan->lock, flags); 1145 atchan->descs_allocated = i; 1146 list_splice(&tmp_list, &atchan->free_list); 1147 dma_cookie_init(chan); 1148 spin_unlock_irqrestore(&atchan->lock, flags); 1149 1150 /* channel parameters */ 1151 channel_writel(atchan, CFG, cfg); 1152 1153 dev_dbg(chan2dev(chan), 1154 "alloc_chan_resources: allocated %d descriptors\n", 1155 atchan->descs_allocated); 1156 1157 return atchan->descs_allocated; 1158 } 1159 1160 /** 1161 * atc_free_chan_resources - free all channel resources 1162 * @chan: DMA channel 1163 */ 1164 static void atc_free_chan_resources(struct dma_chan *chan) 1165 { 1166 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1167 struct at_dma *atdma = to_at_dma(chan->device); 1168 struct at_desc *desc, *_desc; 1169 LIST_HEAD(list); 1170 1171 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", 1172 atchan->descs_allocated); 1173 1174 /* ASSERT: channel is idle */ 1175 BUG_ON(!list_empty(&atchan->active_list)); 1176 BUG_ON(!list_empty(&atchan->queue)); 1177 BUG_ON(atc_chan_is_enabled(atchan)); 1178 1179 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 1180 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1181 list_del(&desc->desc_node); 1182 /* free link descriptor */ 1183 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); 1184 } 1185 list_splice_init(&atchan->free_list, &list); 1186 atchan->descs_allocated = 0; 1187 atchan->status = 0; 1188 1189 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1190 } 1191 1192 #ifdef CONFIG_OF 1193 static bool at_dma_filter(struct dma_chan *chan, void *slave) 1194 { 1195 struct at_dma_slave *atslave = slave; 1196 1197 if (atslave->dma_dev == chan->device->dev) { 1198 chan->private = atslave; 1199 return true; 1200 } else { 1201 return false; 1202 } 1203 } 1204 1205 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1206 struct of_dma *of_dma) 1207 { 1208 struct dma_chan *chan; 1209 struct at_dma_chan *atchan; 1210 struct at_dma_slave *atslave; 1211 dma_cap_mask_t mask; 1212 unsigned int per_id; 1213 struct platform_device *dmac_pdev; 1214 1215 if (dma_spec->args_count != 2) 1216 return NULL; 1217 1218 dmac_pdev = of_find_device_by_node(dma_spec->np); 1219 1220 dma_cap_zero(mask); 1221 dma_cap_set(DMA_SLAVE, mask); 1222 1223 atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); 1224 if (!atslave) 1225 return NULL; 1226 /* 1227 * We can fill both SRC_PER and DST_PER, one of these fields will be 1228 * ignored depending on DMA transfer direction. 1229 */ 1230 per_id = dma_spec->args[1]; 1231 atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW 1232 | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) 1233 | ATC_SRC_PER(per_id); 1234 atslave->dma_dev = &dmac_pdev->dev; 1235 1236 chan = dma_request_channel(mask, at_dma_filter, atslave); 1237 if (!chan) 1238 return NULL; 1239 1240 atchan = to_at_dma_chan(chan); 1241 atchan->per_if = dma_spec->args[0] & 0xff; 1242 atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; 1243 1244 return chan; 1245 } 1246 #else 1247 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1248 struct of_dma *of_dma) 1249 { 1250 return NULL; 1251 } 1252 #endif 1253 1254 /*-- Module Management -----------------------------------------------*/ 1255 1256 /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */ 1257 static struct at_dma_platform_data at91sam9rl_config = { 1258 .nr_channels = 2, 1259 }; 1260 static struct at_dma_platform_data at91sam9g45_config = { 1261 .nr_channels = 8, 1262 }; 1263 1264 #if defined(CONFIG_OF) 1265 static const struct of_device_id atmel_dma_dt_ids[] = { 1266 { 1267 .compatible = "atmel,at91sam9rl-dma", 1268 .data = &at91sam9rl_config, 1269 }, { 1270 .compatible = "atmel,at91sam9g45-dma", 1271 .data = &at91sam9g45_config, 1272 }, { 1273 /* sentinel */ 1274 } 1275 }; 1276 1277 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids); 1278 #endif 1279 1280 static const struct platform_device_id atdma_devtypes[] = { 1281 { 1282 .name = "at91sam9rl_dma", 1283 .driver_data = (unsigned long) &at91sam9rl_config, 1284 }, { 1285 .name = "at91sam9g45_dma", 1286 .driver_data = (unsigned long) &at91sam9g45_config, 1287 }, { 1288 /* sentinel */ 1289 } 1290 }; 1291 1292 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data( 1293 struct platform_device *pdev) 1294 { 1295 if (pdev->dev.of_node) { 1296 const struct of_device_id *match; 1297 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node); 1298 if (match == NULL) 1299 return NULL; 1300 return match->data; 1301 } 1302 return (struct at_dma_platform_data *) 1303 platform_get_device_id(pdev)->driver_data; 1304 } 1305 1306 /** 1307 * at_dma_off - disable DMA controller 1308 * @atdma: the Atmel HDAMC device 1309 */ 1310 static void at_dma_off(struct at_dma *atdma) 1311 { 1312 dma_writel(atdma, EN, 0); 1313 1314 /* disable all interrupts */ 1315 dma_writel(atdma, EBCIDR, -1L); 1316 1317 /* confirm that all channels are disabled */ 1318 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) 1319 cpu_relax(); 1320 } 1321 1322 static int __init at_dma_probe(struct platform_device *pdev) 1323 { 1324 struct resource *io; 1325 struct at_dma *atdma; 1326 size_t size; 1327 int irq; 1328 int err; 1329 int i; 1330 const struct at_dma_platform_data *plat_dat; 1331 1332 /* setup platform data for each SoC */ 1333 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); 1334 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); 1335 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); 1336 1337 /* get DMA parameters from controller type */ 1338 plat_dat = at_dma_get_driver_data(pdev); 1339 if (!plat_dat) 1340 return -ENODEV; 1341 1342 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1343 if (!io) 1344 return -EINVAL; 1345 1346 irq = platform_get_irq(pdev, 0); 1347 if (irq < 0) 1348 return irq; 1349 1350 size = sizeof(struct at_dma); 1351 size += plat_dat->nr_channels * sizeof(struct at_dma_chan); 1352 atdma = kzalloc(size, GFP_KERNEL); 1353 if (!atdma) 1354 return -ENOMEM; 1355 1356 /* discover transaction capabilities */ 1357 atdma->dma_common.cap_mask = plat_dat->cap_mask; 1358 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; 1359 1360 size = resource_size(io); 1361 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { 1362 err = -EBUSY; 1363 goto err_kfree; 1364 } 1365 1366 atdma->regs = ioremap(io->start, size); 1367 if (!atdma->regs) { 1368 err = -ENOMEM; 1369 goto err_release_r; 1370 } 1371 1372 atdma->clk = clk_get(&pdev->dev, "dma_clk"); 1373 if (IS_ERR(atdma->clk)) { 1374 err = PTR_ERR(atdma->clk); 1375 goto err_clk; 1376 } 1377 clk_enable(atdma->clk); 1378 1379 /* force dma off, just in case */ 1380 at_dma_off(atdma); 1381 1382 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); 1383 if (err) 1384 goto err_irq; 1385 1386 platform_set_drvdata(pdev, atdma); 1387 1388 /* create a pool of consistent memory blocks for hardware descriptors */ 1389 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", 1390 &pdev->dev, sizeof(struct at_desc), 1391 4 /* word alignment */, 0); 1392 if (!atdma->dma_desc_pool) { 1393 dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); 1394 err = -ENOMEM; 1395 goto err_pool_create; 1396 } 1397 1398 /* clear any pending interrupt */ 1399 while (dma_readl(atdma, EBCISR)) 1400 cpu_relax(); 1401 1402 /* initialize channels related values */ 1403 INIT_LIST_HEAD(&atdma->dma_common.channels); 1404 for (i = 0; i < plat_dat->nr_channels; i++) { 1405 struct at_dma_chan *atchan = &atdma->chan[i]; 1406 1407 atchan->mem_if = AT_DMA_MEM_IF; 1408 atchan->per_if = AT_DMA_PER_IF; 1409 atchan->chan_common.device = &atdma->dma_common; 1410 dma_cookie_init(&atchan->chan_common); 1411 list_add_tail(&atchan->chan_common.device_node, 1412 &atdma->dma_common.channels); 1413 1414 atchan->ch_regs = atdma->regs + ch_regs(i); 1415 spin_lock_init(&atchan->lock); 1416 atchan->mask = 1 << i; 1417 1418 INIT_LIST_HEAD(&atchan->active_list); 1419 INIT_LIST_HEAD(&atchan->queue); 1420 INIT_LIST_HEAD(&atchan->free_list); 1421 1422 tasklet_init(&atchan->tasklet, atc_tasklet, 1423 (unsigned long)atchan); 1424 atc_enable_chan_irq(atdma, i); 1425 } 1426 1427 /* set base routines */ 1428 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; 1429 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; 1430 atdma->dma_common.device_tx_status = atc_tx_status; 1431 atdma->dma_common.device_issue_pending = atc_issue_pending; 1432 atdma->dma_common.dev = &pdev->dev; 1433 1434 /* set prep routines based on capability */ 1435 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1436 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1437 1438 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1439 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1440 /* controller can do slave DMA: can trigger cyclic transfers */ 1441 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); 1442 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1443 atdma->dma_common.device_control = atc_control; 1444 } 1445 1446 dma_writel(atdma, EN, AT_DMA_ENABLE); 1447 1448 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", 1449 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", 1450 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", 1451 plat_dat->nr_channels); 1452 1453 dma_async_device_register(&atdma->dma_common); 1454 1455 /* 1456 * Do not return an error if the dmac node is not present in order to 1457 * not break the existing way of requesting channel with 1458 * dma_request_channel(). 1459 */ 1460 if (pdev->dev.of_node) { 1461 err = of_dma_controller_register(pdev->dev.of_node, 1462 at_dma_xlate, atdma); 1463 if (err) { 1464 dev_err(&pdev->dev, "could not register of_dma_controller\n"); 1465 goto err_of_dma_controller_register; 1466 } 1467 } 1468 1469 return 0; 1470 1471 err_of_dma_controller_register: 1472 dma_async_device_unregister(&atdma->dma_common); 1473 dma_pool_destroy(atdma->dma_desc_pool); 1474 err_pool_create: 1475 platform_set_drvdata(pdev, NULL); 1476 free_irq(platform_get_irq(pdev, 0), atdma); 1477 err_irq: 1478 clk_disable(atdma->clk); 1479 clk_put(atdma->clk); 1480 err_clk: 1481 iounmap(atdma->regs); 1482 atdma->regs = NULL; 1483 err_release_r: 1484 release_mem_region(io->start, size); 1485 err_kfree: 1486 kfree(atdma); 1487 return err; 1488 } 1489 1490 static int at_dma_remove(struct platform_device *pdev) 1491 { 1492 struct at_dma *atdma = platform_get_drvdata(pdev); 1493 struct dma_chan *chan, *_chan; 1494 struct resource *io; 1495 1496 at_dma_off(atdma); 1497 dma_async_device_unregister(&atdma->dma_common); 1498 1499 dma_pool_destroy(atdma->dma_desc_pool); 1500 platform_set_drvdata(pdev, NULL); 1501 free_irq(platform_get_irq(pdev, 0), atdma); 1502 1503 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1504 device_node) { 1505 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1506 1507 /* Disable interrupts */ 1508 atc_disable_chan_irq(atdma, chan->chan_id); 1509 tasklet_disable(&atchan->tasklet); 1510 1511 tasklet_kill(&atchan->tasklet); 1512 list_del(&chan->device_node); 1513 } 1514 1515 clk_disable(atdma->clk); 1516 clk_put(atdma->clk); 1517 1518 iounmap(atdma->regs); 1519 atdma->regs = NULL; 1520 1521 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1522 release_mem_region(io->start, resource_size(io)); 1523 1524 kfree(atdma); 1525 1526 return 0; 1527 } 1528 1529 static void at_dma_shutdown(struct platform_device *pdev) 1530 { 1531 struct at_dma *atdma = platform_get_drvdata(pdev); 1532 1533 at_dma_off(platform_get_drvdata(pdev)); 1534 clk_disable(atdma->clk); 1535 } 1536 1537 static int at_dma_prepare(struct device *dev) 1538 { 1539 struct platform_device *pdev = to_platform_device(dev); 1540 struct at_dma *atdma = platform_get_drvdata(pdev); 1541 struct dma_chan *chan, *_chan; 1542 1543 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1544 device_node) { 1545 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1546 /* wait for transaction completion (except in cyclic case) */ 1547 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) 1548 return -EAGAIN; 1549 } 1550 return 0; 1551 } 1552 1553 static void atc_suspend_cyclic(struct at_dma_chan *atchan) 1554 { 1555 struct dma_chan *chan = &atchan->chan_common; 1556 1557 /* Channel should be paused by user 1558 * do it anyway even if it is not done already */ 1559 if (!atc_chan_is_paused(atchan)) { 1560 dev_warn(chan2dev(chan), 1561 "cyclic channel not paused, should be done by channel user\n"); 1562 atc_control(chan, DMA_PAUSE, 0); 1563 } 1564 1565 /* now preserve additional data for cyclic operations */ 1566 /* next descriptor address in the cyclic list */ 1567 atchan->save_dscr = channel_readl(atchan, DSCR); 1568 1569 vdbg_dump_regs(atchan); 1570 } 1571 1572 static int at_dma_suspend_noirq(struct device *dev) 1573 { 1574 struct platform_device *pdev = to_platform_device(dev); 1575 struct at_dma *atdma = platform_get_drvdata(pdev); 1576 struct dma_chan *chan, *_chan; 1577 1578 /* preserve data */ 1579 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1580 device_node) { 1581 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1582 1583 if (atc_chan_is_cyclic(atchan)) 1584 atc_suspend_cyclic(atchan); 1585 atchan->save_cfg = channel_readl(atchan, CFG); 1586 } 1587 atdma->save_imr = dma_readl(atdma, EBCIMR); 1588 1589 /* disable DMA controller */ 1590 at_dma_off(atdma); 1591 clk_disable(atdma->clk); 1592 return 0; 1593 } 1594 1595 static void atc_resume_cyclic(struct at_dma_chan *atchan) 1596 { 1597 struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 1598 1599 /* restore channel status for cyclic descriptors list: 1600 * next descriptor in the cyclic list at the time of suspend */ 1601 channel_writel(atchan, SADDR, 0); 1602 channel_writel(atchan, DADDR, 0); 1603 channel_writel(atchan, CTRLA, 0); 1604 channel_writel(atchan, CTRLB, 0); 1605 channel_writel(atchan, DSCR, atchan->save_dscr); 1606 dma_writel(atdma, CHER, atchan->mask); 1607 1608 /* channel pause status should be removed by channel user 1609 * We cannot take the initiative to do it here */ 1610 1611 vdbg_dump_regs(atchan); 1612 } 1613 1614 static int at_dma_resume_noirq(struct device *dev) 1615 { 1616 struct platform_device *pdev = to_platform_device(dev); 1617 struct at_dma *atdma = platform_get_drvdata(pdev); 1618 struct dma_chan *chan, *_chan; 1619 1620 /* bring back DMA controller */ 1621 clk_enable(atdma->clk); 1622 dma_writel(atdma, EN, AT_DMA_ENABLE); 1623 1624 /* clear any pending interrupt */ 1625 while (dma_readl(atdma, EBCISR)) 1626 cpu_relax(); 1627 1628 /* restore saved data */ 1629 dma_writel(atdma, EBCIER, atdma->save_imr); 1630 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1631 device_node) { 1632 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1633 1634 channel_writel(atchan, CFG, atchan->save_cfg); 1635 if (atc_chan_is_cyclic(atchan)) 1636 atc_resume_cyclic(atchan); 1637 } 1638 return 0; 1639 } 1640 1641 static const struct dev_pm_ops at_dma_dev_pm_ops = { 1642 .prepare = at_dma_prepare, 1643 .suspend_noirq = at_dma_suspend_noirq, 1644 .resume_noirq = at_dma_resume_noirq, 1645 }; 1646 1647 static struct platform_driver at_dma_driver = { 1648 .remove = at_dma_remove, 1649 .shutdown = at_dma_shutdown, 1650 .id_table = atdma_devtypes, 1651 .driver = { 1652 .name = "at_hdmac", 1653 .pm = &at_dma_dev_pm_ops, 1654 .of_match_table = of_match_ptr(atmel_dma_dt_ids), 1655 }, 1656 }; 1657 1658 static int __init at_dma_init(void) 1659 { 1660 return platform_driver_probe(&at_dma_driver, at_dma_probe); 1661 } 1662 subsys_initcall(at_dma_init); 1663 1664 static void __exit at_dma_exit(void) 1665 { 1666 platform_driver_unregister(&at_dma_driver); 1667 } 1668 module_exit(at_dma_exit); 1669 1670 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); 1671 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); 1672 MODULE_LICENSE("GPL"); 1673 MODULE_ALIAS("platform:at_hdmac"); 1674