1 /* 2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) 3 * 4 * Copyright (C) 2008 Atmel Corporation 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * 12 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. 13 * The only Atmel DMA Controller that is not covered by this driver is the one 14 * found on AT91SAM9263. 15 */ 16 17 #include <dt-bindings/dma/at91.h> 18 #include <linux/clk.h> 19 #include <linux/dmaengine.h> 20 #include <linux/dma-mapping.h> 21 #include <linux/dmapool.h> 22 #include <linux/interrupt.h> 23 #include <linux/module.h> 24 #include <linux/platform_device.h> 25 #include <linux/slab.h> 26 #include <linux/of.h> 27 #include <linux/of_device.h> 28 #include <linux/of_dma.h> 29 30 #include "at_hdmac_regs.h" 31 #include "dmaengine.h" 32 33 /* 34 * Glossary 35 * -------- 36 * 37 * at_hdmac : Name of the ATmel AHB DMA Controller 38 * at_dma_ / atdma : ATmel DMA controller entity related 39 * atc_ / atchan : ATmel DMA Channel entity related 40 */ 41 42 #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) 43 #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ 44 |ATC_DIF(AT_DMA_MEM_IF)) 45 #define ATC_DMA_BUSWIDTHS\ 46 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ 47 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ 48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ 49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 50 51 /* 52 * Initial number of descriptors to allocate for each channel. This could 53 * be increased during dma usage. 54 */ 55 static unsigned int init_nr_desc_per_channel = 64; 56 module_param(init_nr_desc_per_channel, uint, 0644); 57 MODULE_PARM_DESC(init_nr_desc_per_channel, 58 "initial descriptors per channel (default: 64)"); 59 60 61 /* prototypes */ 62 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); 63 static void atc_issue_pending(struct dma_chan *chan); 64 65 66 /*----------------------------------------------------------------------*/ 67 68 static struct at_desc *atc_first_active(struct at_dma_chan *atchan) 69 { 70 return list_first_entry(&atchan->active_list, 71 struct at_desc, desc_node); 72 } 73 74 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan) 75 { 76 return list_first_entry(&atchan->queue, 77 struct at_desc, desc_node); 78 } 79 80 /** 81 * atc_alloc_descriptor - allocate and return an initialized descriptor 82 * @chan: the channel to allocate descriptors for 83 * @gfp_flags: GFP allocation flags 84 * 85 * Note: The ack-bit is positioned in the descriptor flag at creation time 86 * to make initial allocation more convenient. This bit will be cleared 87 * and control will be given to client at usage time (during 88 * preparation functions). 89 */ 90 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, 91 gfp_t gfp_flags) 92 { 93 struct at_desc *desc = NULL; 94 struct at_dma *atdma = to_at_dma(chan->device); 95 dma_addr_t phys; 96 97 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); 98 if (desc) { 99 memset(desc, 0, sizeof(struct at_desc)); 100 INIT_LIST_HEAD(&desc->tx_list); 101 dma_async_tx_descriptor_init(&desc->txd, chan); 102 /* txd.flags will be overwritten in prep functions */ 103 desc->txd.flags = DMA_CTRL_ACK; 104 desc->txd.tx_submit = atc_tx_submit; 105 desc->txd.phys = phys; 106 } 107 108 return desc; 109 } 110 111 /** 112 * atc_desc_get - get an unused descriptor from free_list 113 * @atchan: channel we want a new descriptor for 114 */ 115 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) 116 { 117 struct at_desc *desc, *_desc; 118 struct at_desc *ret = NULL; 119 unsigned long flags; 120 unsigned int i = 0; 121 LIST_HEAD(tmp_list); 122 123 spin_lock_irqsave(&atchan->lock, flags); 124 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 125 i++; 126 if (async_tx_test_ack(&desc->txd)) { 127 list_del(&desc->desc_node); 128 ret = desc; 129 break; 130 } 131 dev_dbg(chan2dev(&atchan->chan_common), 132 "desc %p not ACKed\n", desc); 133 } 134 spin_unlock_irqrestore(&atchan->lock, flags); 135 dev_vdbg(chan2dev(&atchan->chan_common), 136 "scanned %u descriptors on freelist\n", i); 137 138 /* no more descriptor available in initial pool: create one more */ 139 if (!ret) { 140 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); 141 if (ret) { 142 spin_lock_irqsave(&atchan->lock, flags); 143 atchan->descs_allocated++; 144 spin_unlock_irqrestore(&atchan->lock, flags); 145 } else { 146 dev_err(chan2dev(&atchan->chan_common), 147 "not enough descriptors available\n"); 148 } 149 } 150 151 return ret; 152 } 153 154 /** 155 * atc_desc_put - move a descriptor, including any children, to the free list 156 * @atchan: channel we work on 157 * @desc: descriptor, at the head of a chain, to move to free list 158 */ 159 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) 160 { 161 if (desc) { 162 struct at_desc *child; 163 unsigned long flags; 164 165 spin_lock_irqsave(&atchan->lock, flags); 166 list_for_each_entry(child, &desc->tx_list, desc_node) 167 dev_vdbg(chan2dev(&atchan->chan_common), 168 "moving child desc %p to freelist\n", 169 child); 170 list_splice_init(&desc->tx_list, &atchan->free_list); 171 dev_vdbg(chan2dev(&atchan->chan_common), 172 "moving desc %p to freelist\n", desc); 173 list_add(&desc->desc_node, &atchan->free_list); 174 spin_unlock_irqrestore(&atchan->lock, flags); 175 } 176 } 177 178 /** 179 * atc_desc_chain - build chain adding a descriptor 180 * @first: address of first descriptor of the chain 181 * @prev: address of previous descriptor of the chain 182 * @desc: descriptor to queue 183 * 184 * Called from prep_* functions 185 */ 186 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, 187 struct at_desc *desc) 188 { 189 if (!(*first)) { 190 *first = desc; 191 } else { 192 /* inform the HW lli about chaining */ 193 (*prev)->lli.dscr = desc->txd.phys; 194 /* insert the link descriptor to the LD ring */ 195 list_add_tail(&desc->desc_node, 196 &(*first)->tx_list); 197 } 198 *prev = desc; 199 } 200 201 /** 202 * atc_dostart - starts the DMA engine for real 203 * @atchan: the channel we want to start 204 * @first: first descriptor in the list we want to begin with 205 * 206 * Called with atchan->lock held and bh disabled 207 */ 208 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) 209 { 210 struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 211 212 /* ASSERT: channel is idle */ 213 if (atc_chan_is_enabled(atchan)) { 214 dev_err(chan2dev(&atchan->chan_common), 215 "BUG: Attempted to start non-idle channel\n"); 216 dev_err(chan2dev(&atchan->chan_common), 217 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", 218 channel_readl(atchan, SADDR), 219 channel_readl(atchan, DADDR), 220 channel_readl(atchan, CTRLA), 221 channel_readl(atchan, CTRLB), 222 channel_readl(atchan, DSCR)); 223 224 /* The tasklet will hopefully advance the queue... */ 225 return; 226 } 227 228 vdbg_dump_regs(atchan); 229 230 channel_writel(atchan, SADDR, 0); 231 channel_writel(atchan, DADDR, 0); 232 channel_writel(atchan, CTRLA, 0); 233 channel_writel(atchan, CTRLB, 0); 234 channel_writel(atchan, DSCR, first->txd.phys); 235 dma_writel(atdma, CHER, atchan->mask); 236 237 vdbg_dump_regs(atchan); 238 } 239 240 /* 241 * atc_get_current_descriptors - 242 * locate the descriptor which equal to physical address in DSCR 243 * @atchan: the channel we want to start 244 * @dscr_addr: physical descriptor address in DSCR 245 */ 246 static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, 247 u32 dscr_addr) 248 { 249 struct at_desc *desc, *_desc, *child, *desc_cur = NULL; 250 251 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { 252 if (desc->lli.dscr == dscr_addr) { 253 desc_cur = desc; 254 break; 255 } 256 257 list_for_each_entry(child, &desc->tx_list, desc_node) { 258 if (child->lli.dscr == dscr_addr) { 259 desc_cur = child; 260 break; 261 } 262 } 263 } 264 265 return desc_cur; 266 } 267 268 /* 269 * atc_get_bytes_left - 270 * Get the number of bytes residue in dma buffer, 271 * @chan: the channel we want to start 272 */ 273 static int atc_get_bytes_left(struct dma_chan *chan) 274 { 275 struct at_dma_chan *atchan = to_at_dma_chan(chan); 276 struct at_dma *atdma = to_at_dma(chan->device); 277 int chan_id = atchan->chan_common.chan_id; 278 struct at_desc *desc_first = atc_first_active(atchan); 279 struct at_desc *desc_cur; 280 int ret = 0, count = 0; 281 282 /* 283 * Initialize necessary values in the first time. 284 * remain_desc record remain desc length. 285 */ 286 if (atchan->remain_desc == 0) 287 /* First descriptor embedds the transaction length */ 288 atchan->remain_desc = desc_first->len; 289 290 /* 291 * This happens when current descriptor transfer complete. 292 * The residual buffer size should reduce current descriptor length. 293 */ 294 if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) { 295 clear_bit(ATC_IS_BTC, &atchan->status); 296 desc_cur = atc_get_current_descriptors(atchan, 297 channel_readl(atchan, DSCR)); 298 if (!desc_cur) { 299 ret = -EINVAL; 300 goto out; 301 } 302 303 count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) 304 << desc_first->tx_width; 305 if (atchan->remain_desc < count) { 306 ret = -EINVAL; 307 goto out; 308 } 309 310 atchan->remain_desc -= count; 311 ret = atchan->remain_desc; 312 } else { 313 /* 314 * Get residual bytes when current 315 * descriptor transfer in progress. 316 */ 317 count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) 318 << (desc_first->tx_width); 319 ret = atchan->remain_desc - count; 320 } 321 /* 322 * Check fifo empty. 323 */ 324 if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) 325 atc_issue_pending(chan); 326 327 out: 328 return ret; 329 } 330 331 /** 332 * atc_chain_complete - finish work for one transaction chain 333 * @atchan: channel we work on 334 * @desc: descriptor at the head of the chain we want do complete 335 * 336 * Called with atchan->lock held and bh disabled */ 337 static void 338 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) 339 { 340 struct dma_async_tx_descriptor *txd = &desc->txd; 341 342 dev_vdbg(chan2dev(&atchan->chan_common), 343 "descriptor %u complete\n", txd->cookie); 344 345 /* mark the descriptor as complete for non cyclic cases only */ 346 if (!atc_chan_is_cyclic(atchan)) 347 dma_cookie_complete(txd); 348 349 /* move children to free_list */ 350 list_splice_init(&desc->tx_list, &atchan->free_list); 351 /* move myself to free_list */ 352 list_move(&desc->desc_node, &atchan->free_list); 353 354 dma_descriptor_unmap(txd); 355 /* for cyclic transfers, 356 * no need to replay callback function while stopping */ 357 if (!atc_chan_is_cyclic(atchan)) { 358 dma_async_tx_callback callback = txd->callback; 359 void *param = txd->callback_param; 360 361 /* 362 * The API requires that no submissions are done from a 363 * callback, so we don't need to drop the lock here 364 */ 365 if (callback) 366 callback(param); 367 } 368 369 dma_run_dependencies(txd); 370 } 371 372 /** 373 * atc_complete_all - finish work for all transactions 374 * @atchan: channel to complete transactions for 375 * 376 * Eventually submit queued descriptors if any 377 * 378 * Assume channel is idle while calling this function 379 * Called with atchan->lock held and bh disabled 380 */ 381 static void atc_complete_all(struct at_dma_chan *atchan) 382 { 383 struct at_desc *desc, *_desc; 384 LIST_HEAD(list); 385 386 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); 387 388 /* 389 * Submit queued descriptors ASAP, i.e. before we go through 390 * the completed ones. 391 */ 392 if (!list_empty(&atchan->queue)) 393 atc_dostart(atchan, atc_first_queued(atchan)); 394 /* empty active_list now it is completed */ 395 list_splice_init(&atchan->active_list, &list); 396 /* empty queue list by moving descriptors (if any) to active_list */ 397 list_splice_init(&atchan->queue, &atchan->active_list); 398 399 list_for_each_entry_safe(desc, _desc, &list, desc_node) 400 atc_chain_complete(atchan, desc); 401 } 402 403 /** 404 * atc_advance_work - at the end of a transaction, move forward 405 * @atchan: channel where the transaction ended 406 * 407 * Called with atchan->lock held and bh disabled 408 */ 409 static void atc_advance_work(struct at_dma_chan *atchan) 410 { 411 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); 412 413 if (atc_chan_is_enabled(atchan)) 414 return; 415 416 if (list_empty(&atchan->active_list) || 417 list_is_singular(&atchan->active_list)) { 418 atc_complete_all(atchan); 419 } else { 420 atc_chain_complete(atchan, atc_first_active(atchan)); 421 /* advance work */ 422 atc_dostart(atchan, atc_first_active(atchan)); 423 } 424 } 425 426 427 /** 428 * atc_handle_error - handle errors reported by DMA controller 429 * @atchan: channel where error occurs 430 * 431 * Called with atchan->lock held and bh disabled 432 */ 433 static void atc_handle_error(struct at_dma_chan *atchan) 434 { 435 struct at_desc *bad_desc; 436 struct at_desc *child; 437 438 /* 439 * The descriptor currently at the head of the active list is 440 * broked. Since we don't have any way to report errors, we'll 441 * just have to scream loudly and try to carry on. 442 */ 443 bad_desc = atc_first_active(atchan); 444 list_del_init(&bad_desc->desc_node); 445 446 /* As we are stopped, take advantage to push queued descriptors 447 * in active_list */ 448 list_splice_init(&atchan->queue, atchan->active_list.prev); 449 450 /* Try to restart the controller */ 451 if (!list_empty(&atchan->active_list)) 452 atc_dostart(atchan, atc_first_active(atchan)); 453 454 /* 455 * KERN_CRITICAL may seem harsh, but since this only happens 456 * when someone submits a bad physical address in a 457 * descriptor, we should consider ourselves lucky that the 458 * controller flagged an error instead of scribbling over 459 * random memory locations. 460 */ 461 dev_crit(chan2dev(&atchan->chan_common), 462 "Bad descriptor submitted for DMA!\n"); 463 dev_crit(chan2dev(&atchan->chan_common), 464 " cookie: %d\n", bad_desc->txd.cookie); 465 atc_dump_lli(atchan, &bad_desc->lli); 466 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 467 atc_dump_lli(atchan, &child->lli); 468 469 /* Pretend the descriptor completed successfully */ 470 atc_chain_complete(atchan, bad_desc); 471 } 472 473 /** 474 * atc_handle_cyclic - at the end of a period, run callback function 475 * @atchan: channel used for cyclic operations 476 * 477 * Called with atchan->lock held and bh disabled 478 */ 479 static void atc_handle_cyclic(struct at_dma_chan *atchan) 480 { 481 struct at_desc *first = atc_first_active(atchan); 482 struct dma_async_tx_descriptor *txd = &first->txd; 483 dma_async_tx_callback callback = txd->callback; 484 void *param = txd->callback_param; 485 486 dev_vdbg(chan2dev(&atchan->chan_common), 487 "new cyclic period llp 0x%08x\n", 488 channel_readl(atchan, DSCR)); 489 490 if (callback) 491 callback(param); 492 } 493 494 /*-- IRQ & Tasklet ---------------------------------------------------*/ 495 496 static void atc_tasklet(unsigned long data) 497 { 498 struct at_dma_chan *atchan = (struct at_dma_chan *)data; 499 unsigned long flags; 500 501 spin_lock_irqsave(&atchan->lock, flags); 502 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) 503 atc_handle_error(atchan); 504 else if (atc_chan_is_cyclic(atchan)) 505 atc_handle_cyclic(atchan); 506 else 507 atc_advance_work(atchan); 508 509 spin_unlock_irqrestore(&atchan->lock, flags); 510 } 511 512 static irqreturn_t at_dma_interrupt(int irq, void *dev_id) 513 { 514 struct at_dma *atdma = (struct at_dma *)dev_id; 515 struct at_dma_chan *atchan; 516 int i; 517 u32 status, pending, imr; 518 int ret = IRQ_NONE; 519 520 do { 521 imr = dma_readl(atdma, EBCIMR); 522 status = dma_readl(atdma, EBCISR); 523 pending = status & imr; 524 525 if (!pending) 526 break; 527 528 dev_vdbg(atdma->dma_common.dev, 529 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", 530 status, imr, pending); 531 532 for (i = 0; i < atdma->dma_common.chancnt; i++) { 533 atchan = &atdma->chan[i]; 534 if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { 535 if (pending & AT_DMA_ERR(i)) { 536 /* Disable channel on AHB error */ 537 dma_writel(atdma, CHDR, 538 AT_DMA_RES(i) | atchan->mask); 539 /* Give information to tasklet */ 540 set_bit(ATC_IS_ERROR, &atchan->status); 541 } 542 if (pending & AT_DMA_BTC(i)) 543 set_bit(ATC_IS_BTC, &atchan->status); 544 tasklet_schedule(&atchan->tasklet); 545 ret = IRQ_HANDLED; 546 } 547 } 548 549 } while (pending); 550 551 return ret; 552 } 553 554 555 /*-- DMA Engine API --------------------------------------------------*/ 556 557 /** 558 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine 559 * @desc: descriptor at the head of the transaction chain 560 * 561 * Queue chain if DMA engine is working already 562 * 563 * Cookie increment and adding to active_list or queue must be atomic 564 */ 565 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) 566 { 567 struct at_desc *desc = txd_to_at_desc(tx); 568 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); 569 dma_cookie_t cookie; 570 unsigned long flags; 571 572 spin_lock_irqsave(&atchan->lock, flags); 573 cookie = dma_cookie_assign(tx); 574 575 if (list_empty(&atchan->active_list)) { 576 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 577 desc->txd.cookie); 578 atc_dostart(atchan, desc); 579 list_add_tail(&desc->desc_node, &atchan->active_list); 580 } else { 581 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 582 desc->txd.cookie); 583 list_add_tail(&desc->desc_node, &atchan->queue); 584 } 585 586 spin_unlock_irqrestore(&atchan->lock, flags); 587 588 return cookie; 589 } 590 591 /** 592 * atc_prep_dma_memcpy - prepare a memcpy operation 593 * @chan: the channel to prepare operation on 594 * @dest: operation virtual destination address 595 * @src: operation virtual source address 596 * @len: operation length 597 * @flags: tx descriptor status flags 598 */ 599 static struct dma_async_tx_descriptor * 600 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 601 size_t len, unsigned long flags) 602 { 603 struct at_dma_chan *atchan = to_at_dma_chan(chan); 604 struct at_desc *desc = NULL; 605 struct at_desc *first = NULL; 606 struct at_desc *prev = NULL; 607 size_t xfer_count; 608 size_t offset; 609 unsigned int src_width; 610 unsigned int dst_width; 611 u32 ctrla; 612 u32 ctrlb; 613 614 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 615 dest, src, len, flags); 616 617 if (unlikely(!len)) { 618 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 619 return NULL; 620 } 621 622 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN 623 | ATC_SRC_ADDR_MODE_INCR 624 | ATC_DST_ADDR_MODE_INCR 625 | ATC_FC_MEM2MEM; 626 627 /* 628 * We can be a lot more clever here, but this should take care 629 * of the most common optimization. 630 */ 631 if (!((src | dest | len) & 3)) { 632 ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; 633 src_width = dst_width = 2; 634 } else if (!((src | dest | len) & 1)) { 635 ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; 636 src_width = dst_width = 1; 637 } else { 638 ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; 639 src_width = dst_width = 0; 640 } 641 642 for (offset = 0; offset < len; offset += xfer_count << src_width) { 643 xfer_count = min_t(size_t, (len - offset) >> src_width, 644 ATC_BTSIZE_MAX); 645 646 desc = atc_desc_get(atchan); 647 if (!desc) 648 goto err_desc_get; 649 650 desc->lli.saddr = src + offset; 651 desc->lli.daddr = dest + offset; 652 desc->lli.ctrla = ctrla | xfer_count; 653 desc->lli.ctrlb = ctrlb; 654 655 desc->txd.cookie = 0; 656 657 atc_desc_chain(&first, &prev, desc); 658 } 659 660 /* First descriptor of the chain embedds additional information */ 661 first->txd.cookie = -EBUSY; 662 first->len = len; 663 first->tx_width = src_width; 664 665 /* set end-of-link to the last link descriptor of list*/ 666 set_desc_eol(desc); 667 668 first->txd.flags = flags; /* client is in control of this ack */ 669 670 return &first->txd; 671 672 err_desc_get: 673 atc_desc_put(atchan, first); 674 return NULL; 675 } 676 677 678 /** 679 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 680 * @chan: DMA channel 681 * @sgl: scatterlist to transfer to/from 682 * @sg_len: number of entries in @scatterlist 683 * @direction: DMA direction 684 * @flags: tx descriptor status flags 685 * @context: transaction context (ignored) 686 */ 687 static struct dma_async_tx_descriptor * 688 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 689 unsigned int sg_len, enum dma_transfer_direction direction, 690 unsigned long flags, void *context) 691 { 692 struct at_dma_chan *atchan = to_at_dma_chan(chan); 693 struct at_dma_slave *atslave = chan->private; 694 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 695 struct at_desc *first = NULL; 696 struct at_desc *prev = NULL; 697 u32 ctrla; 698 u32 ctrlb; 699 dma_addr_t reg; 700 unsigned int reg_width; 701 unsigned int mem_width; 702 unsigned int i; 703 struct scatterlist *sg; 704 size_t total_len = 0; 705 706 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", 707 sg_len, 708 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 709 flags); 710 711 if (unlikely(!atslave || !sg_len)) { 712 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); 713 return NULL; 714 } 715 716 ctrla = ATC_SCSIZE(sconfig->src_maxburst) 717 | ATC_DCSIZE(sconfig->dst_maxburst); 718 ctrlb = ATC_IEN; 719 720 switch (direction) { 721 case DMA_MEM_TO_DEV: 722 reg_width = convert_buswidth(sconfig->dst_addr_width); 723 ctrla |= ATC_DST_WIDTH(reg_width); 724 ctrlb |= ATC_DST_ADDR_MODE_FIXED 725 | ATC_SRC_ADDR_MODE_INCR 726 | ATC_FC_MEM2PER 727 | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); 728 reg = sconfig->dst_addr; 729 for_each_sg(sgl, sg, sg_len, i) { 730 struct at_desc *desc; 731 u32 len; 732 u32 mem; 733 734 desc = atc_desc_get(atchan); 735 if (!desc) 736 goto err_desc_get; 737 738 mem = sg_dma_address(sg); 739 len = sg_dma_len(sg); 740 if (unlikely(!len)) { 741 dev_dbg(chan2dev(chan), 742 "prep_slave_sg: sg(%d) data length is zero\n", i); 743 goto err; 744 } 745 mem_width = 2; 746 if (unlikely(mem & 3 || len & 3)) 747 mem_width = 0; 748 749 desc->lli.saddr = mem; 750 desc->lli.daddr = reg; 751 desc->lli.ctrla = ctrla 752 | ATC_SRC_WIDTH(mem_width) 753 | len >> mem_width; 754 desc->lli.ctrlb = ctrlb; 755 756 atc_desc_chain(&first, &prev, desc); 757 total_len += len; 758 } 759 break; 760 case DMA_DEV_TO_MEM: 761 reg_width = convert_buswidth(sconfig->src_addr_width); 762 ctrla |= ATC_SRC_WIDTH(reg_width); 763 ctrlb |= ATC_DST_ADDR_MODE_INCR 764 | ATC_SRC_ADDR_MODE_FIXED 765 | ATC_FC_PER2MEM 766 | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); 767 768 reg = sconfig->src_addr; 769 for_each_sg(sgl, sg, sg_len, i) { 770 struct at_desc *desc; 771 u32 len; 772 u32 mem; 773 774 desc = atc_desc_get(atchan); 775 if (!desc) 776 goto err_desc_get; 777 778 mem = sg_dma_address(sg); 779 len = sg_dma_len(sg); 780 if (unlikely(!len)) { 781 dev_dbg(chan2dev(chan), 782 "prep_slave_sg: sg(%d) data length is zero\n", i); 783 goto err; 784 } 785 mem_width = 2; 786 if (unlikely(mem & 3 || len & 3)) 787 mem_width = 0; 788 789 desc->lli.saddr = reg; 790 desc->lli.daddr = mem; 791 desc->lli.ctrla = ctrla 792 | ATC_DST_WIDTH(mem_width) 793 | len >> reg_width; 794 desc->lli.ctrlb = ctrlb; 795 796 atc_desc_chain(&first, &prev, desc); 797 total_len += len; 798 } 799 break; 800 default: 801 return NULL; 802 } 803 804 /* set end-of-link to the last link descriptor of list*/ 805 set_desc_eol(prev); 806 807 /* First descriptor of the chain embedds additional information */ 808 first->txd.cookie = -EBUSY; 809 first->len = total_len; 810 first->tx_width = reg_width; 811 812 /* first link descriptor of list is responsible of flags */ 813 first->txd.flags = flags; /* client is in control of this ack */ 814 815 return &first->txd; 816 817 err_desc_get: 818 dev_err(chan2dev(chan), "not enough descriptors available\n"); 819 err: 820 atc_desc_put(atchan, first); 821 return NULL; 822 } 823 824 /** 825 * atc_dma_cyclic_check_values 826 * Check for too big/unaligned periods and unaligned DMA buffer 827 */ 828 static int 829 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, 830 size_t period_len) 831 { 832 if (period_len > (ATC_BTSIZE_MAX << reg_width)) 833 goto err_out; 834 if (unlikely(period_len & ((1 << reg_width) - 1))) 835 goto err_out; 836 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 837 goto err_out; 838 839 return 0; 840 841 err_out: 842 return -EINVAL; 843 } 844 845 /** 846 * atc_dma_cyclic_fill_desc - Fill one period descriptor 847 */ 848 static int 849 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, 850 unsigned int period_index, dma_addr_t buf_addr, 851 unsigned int reg_width, size_t period_len, 852 enum dma_transfer_direction direction) 853 { 854 struct at_dma_chan *atchan = to_at_dma_chan(chan); 855 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 856 u32 ctrla; 857 858 /* prepare common CRTLA value */ 859 ctrla = ATC_SCSIZE(sconfig->src_maxburst) 860 | ATC_DCSIZE(sconfig->dst_maxburst) 861 | ATC_DST_WIDTH(reg_width) 862 | ATC_SRC_WIDTH(reg_width) 863 | period_len >> reg_width; 864 865 switch (direction) { 866 case DMA_MEM_TO_DEV: 867 desc->lli.saddr = buf_addr + (period_len * period_index); 868 desc->lli.daddr = sconfig->dst_addr; 869 desc->lli.ctrla = ctrla; 870 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED 871 | ATC_SRC_ADDR_MODE_INCR 872 | ATC_FC_MEM2PER 873 | ATC_SIF(atchan->mem_if) 874 | ATC_DIF(atchan->per_if); 875 break; 876 877 case DMA_DEV_TO_MEM: 878 desc->lli.saddr = sconfig->src_addr; 879 desc->lli.daddr = buf_addr + (period_len * period_index); 880 desc->lli.ctrla = ctrla; 881 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR 882 | ATC_SRC_ADDR_MODE_FIXED 883 | ATC_FC_PER2MEM 884 | ATC_SIF(atchan->per_if) 885 | ATC_DIF(atchan->mem_if); 886 break; 887 888 default: 889 return -EINVAL; 890 } 891 892 return 0; 893 } 894 895 /** 896 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer 897 * @chan: the DMA channel to prepare 898 * @buf_addr: physical DMA address where the buffer starts 899 * @buf_len: total number of bytes for the entire buffer 900 * @period_len: number of bytes for each period 901 * @direction: transfer direction, to or from device 902 * @flags: tx descriptor status flags 903 */ 904 static struct dma_async_tx_descriptor * 905 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 906 size_t period_len, enum dma_transfer_direction direction, 907 unsigned long flags) 908 { 909 struct at_dma_chan *atchan = to_at_dma_chan(chan); 910 struct at_dma_slave *atslave = chan->private; 911 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 912 struct at_desc *first = NULL; 913 struct at_desc *prev = NULL; 914 unsigned long was_cyclic; 915 unsigned int reg_width; 916 unsigned int periods = buf_len / period_len; 917 unsigned int i; 918 919 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 920 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 921 buf_addr, 922 periods, buf_len, period_len); 923 924 if (unlikely(!atslave || !buf_len || !period_len)) { 925 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); 926 return NULL; 927 } 928 929 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); 930 if (was_cyclic) { 931 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); 932 return NULL; 933 } 934 935 if (unlikely(!is_slave_direction(direction))) 936 goto err_out; 937 938 if (sconfig->direction == DMA_MEM_TO_DEV) 939 reg_width = convert_buswidth(sconfig->dst_addr_width); 940 else 941 reg_width = convert_buswidth(sconfig->src_addr_width); 942 943 /* Check for too big/unaligned periods and unaligned DMA buffer */ 944 if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) 945 goto err_out; 946 947 /* build cyclic linked list */ 948 for (i = 0; i < periods; i++) { 949 struct at_desc *desc; 950 951 desc = atc_desc_get(atchan); 952 if (!desc) 953 goto err_desc_get; 954 955 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, 956 reg_width, period_len, direction)) 957 goto err_desc_get; 958 959 atc_desc_chain(&first, &prev, desc); 960 } 961 962 /* lets make a cyclic list */ 963 prev->lli.dscr = first->txd.phys; 964 965 /* First descriptor of the chain embedds additional information */ 966 first->txd.cookie = -EBUSY; 967 first->len = buf_len; 968 first->tx_width = reg_width; 969 970 return &first->txd; 971 972 err_desc_get: 973 dev_err(chan2dev(chan), "not enough descriptors available\n"); 974 atc_desc_put(atchan, first); 975 err_out: 976 clear_bit(ATC_IS_CYCLIC, &atchan->status); 977 return NULL; 978 } 979 980 static int atc_config(struct dma_chan *chan, 981 struct dma_slave_config *sconfig) 982 { 983 struct at_dma_chan *atchan = to_at_dma_chan(chan); 984 985 dev_vdbg(chan2dev(chan), "%s\n", __func__); 986 987 /* Check if it is chan is configured for slave transfers */ 988 if (!chan->private) 989 return -EINVAL; 990 991 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); 992 993 convert_burst(&atchan->dma_sconfig.src_maxburst); 994 convert_burst(&atchan->dma_sconfig.dst_maxburst); 995 996 return 0; 997 } 998 999 static int atc_pause(struct dma_chan *chan) 1000 { 1001 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1002 struct at_dma *atdma = to_at_dma(chan->device); 1003 int chan_id = atchan->chan_common.chan_id; 1004 unsigned long flags; 1005 1006 LIST_HEAD(list); 1007 1008 dev_vdbg(chan2dev(chan), "%s\n", __func__); 1009 1010 spin_lock_irqsave(&atchan->lock, flags); 1011 1012 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 1013 set_bit(ATC_IS_PAUSED, &atchan->status); 1014 1015 spin_unlock_irqrestore(&atchan->lock, flags); 1016 1017 return 0; 1018 } 1019 1020 static int atc_resume(struct dma_chan *chan) 1021 { 1022 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1023 struct at_dma *atdma = to_at_dma(chan->device); 1024 int chan_id = atchan->chan_common.chan_id; 1025 unsigned long flags; 1026 1027 LIST_HEAD(list); 1028 1029 dev_vdbg(chan2dev(chan), "%s\n", __func__); 1030 1031 if (!atc_chan_is_paused(atchan)) 1032 return 0; 1033 1034 spin_lock_irqsave(&atchan->lock, flags); 1035 1036 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 1037 clear_bit(ATC_IS_PAUSED, &atchan->status); 1038 1039 spin_unlock_irqrestore(&atchan->lock, flags); 1040 1041 return 0; 1042 } 1043 1044 static int atc_terminate_all(struct dma_chan *chan) 1045 { 1046 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1047 struct at_dma *atdma = to_at_dma(chan->device); 1048 int chan_id = atchan->chan_common.chan_id; 1049 struct at_desc *desc, *_desc; 1050 unsigned long flags; 1051 1052 LIST_HEAD(list); 1053 1054 dev_vdbg(chan2dev(chan), "%s\n", __func__); 1055 1056 /* 1057 * This is only called when something went wrong elsewhere, so 1058 * we don't really care about the data. Just disable the 1059 * channel. We still have to poll the channel enable bit due 1060 * to AHB/HSB limitations. 1061 */ 1062 spin_lock_irqsave(&atchan->lock, flags); 1063 1064 /* disabling channel: must also remove suspend state */ 1065 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); 1066 1067 /* confirm that this channel is disabled */ 1068 while (dma_readl(atdma, CHSR) & atchan->mask) 1069 cpu_relax(); 1070 1071 /* active_list entries will end up before queued entries */ 1072 list_splice_init(&atchan->queue, &list); 1073 list_splice_init(&atchan->active_list, &list); 1074 1075 /* Flush all pending and queued descriptors */ 1076 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1077 atc_chain_complete(atchan, desc); 1078 1079 clear_bit(ATC_IS_PAUSED, &atchan->status); 1080 /* if channel dedicated to cyclic operations, free it */ 1081 clear_bit(ATC_IS_CYCLIC, &atchan->status); 1082 1083 spin_unlock_irqrestore(&atchan->lock, flags); 1084 1085 return 0; 1086 } 1087 1088 /** 1089 * atc_tx_status - poll for transaction completion 1090 * @chan: DMA channel 1091 * @cookie: transaction identifier to check status of 1092 * @txstate: if not %NULL updated with transaction state 1093 * 1094 * If @txstate is passed in, upon return it reflect the driver 1095 * internal state and can be used with dma_async_is_complete() to check 1096 * the status of multiple cookies without re-checking hardware state. 1097 */ 1098 static enum dma_status 1099 atc_tx_status(struct dma_chan *chan, 1100 dma_cookie_t cookie, 1101 struct dma_tx_state *txstate) 1102 { 1103 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1104 unsigned long flags; 1105 enum dma_status ret; 1106 int bytes = 0; 1107 1108 ret = dma_cookie_status(chan, cookie, txstate); 1109 if (ret == DMA_COMPLETE) 1110 return ret; 1111 /* 1112 * There's no point calculating the residue if there's 1113 * no txstate to store the value. 1114 */ 1115 if (!txstate) 1116 return DMA_ERROR; 1117 1118 spin_lock_irqsave(&atchan->lock, flags); 1119 1120 /* Get number of bytes left in the active transactions */ 1121 bytes = atc_get_bytes_left(chan); 1122 1123 spin_unlock_irqrestore(&atchan->lock, flags); 1124 1125 if (unlikely(bytes < 0)) { 1126 dev_vdbg(chan2dev(chan), "get residual bytes error\n"); 1127 return DMA_ERROR; 1128 } else { 1129 dma_set_residue(txstate, bytes); 1130 } 1131 1132 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", 1133 ret, cookie, bytes); 1134 1135 return ret; 1136 } 1137 1138 /** 1139 * atc_issue_pending - try to finish work 1140 * @chan: target DMA channel 1141 */ 1142 static void atc_issue_pending(struct dma_chan *chan) 1143 { 1144 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1145 unsigned long flags; 1146 1147 dev_vdbg(chan2dev(chan), "issue_pending\n"); 1148 1149 /* Not needed for cyclic transfers */ 1150 if (atc_chan_is_cyclic(atchan)) 1151 return; 1152 1153 spin_lock_irqsave(&atchan->lock, flags); 1154 atc_advance_work(atchan); 1155 spin_unlock_irqrestore(&atchan->lock, flags); 1156 } 1157 1158 /** 1159 * atc_alloc_chan_resources - allocate resources for DMA channel 1160 * @chan: allocate descriptor resources for this channel 1161 * @client: current client requesting the channel be ready for requests 1162 * 1163 * return - the number of allocated descriptors 1164 */ 1165 static int atc_alloc_chan_resources(struct dma_chan *chan) 1166 { 1167 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1168 struct at_dma *atdma = to_at_dma(chan->device); 1169 struct at_desc *desc; 1170 struct at_dma_slave *atslave; 1171 unsigned long flags; 1172 int i; 1173 u32 cfg; 1174 LIST_HEAD(tmp_list); 1175 1176 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 1177 1178 /* ASSERT: channel is idle */ 1179 if (atc_chan_is_enabled(atchan)) { 1180 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); 1181 return -EIO; 1182 } 1183 1184 cfg = ATC_DEFAULT_CFG; 1185 1186 atslave = chan->private; 1187 if (atslave) { 1188 /* 1189 * We need controller-specific data to set up slave 1190 * transfers. 1191 */ 1192 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); 1193 1194 /* if cfg configuration specified take it instead of default */ 1195 if (atslave->cfg) 1196 cfg = atslave->cfg; 1197 } 1198 1199 /* have we already been set up? 1200 * reconfigure channel but no need to reallocate descriptors */ 1201 if (!list_empty(&atchan->free_list)) 1202 return atchan->descs_allocated; 1203 1204 /* Allocate initial pool of descriptors */ 1205 for (i = 0; i < init_nr_desc_per_channel; i++) { 1206 desc = atc_alloc_descriptor(chan, GFP_KERNEL); 1207 if (!desc) { 1208 dev_err(atdma->dma_common.dev, 1209 "Only %d initial descriptors\n", i); 1210 break; 1211 } 1212 list_add_tail(&desc->desc_node, &tmp_list); 1213 } 1214 1215 spin_lock_irqsave(&atchan->lock, flags); 1216 atchan->descs_allocated = i; 1217 atchan->remain_desc = 0; 1218 list_splice(&tmp_list, &atchan->free_list); 1219 dma_cookie_init(chan); 1220 spin_unlock_irqrestore(&atchan->lock, flags); 1221 1222 /* channel parameters */ 1223 channel_writel(atchan, CFG, cfg); 1224 1225 dev_dbg(chan2dev(chan), 1226 "alloc_chan_resources: allocated %d descriptors\n", 1227 atchan->descs_allocated); 1228 1229 return atchan->descs_allocated; 1230 } 1231 1232 /** 1233 * atc_free_chan_resources - free all channel resources 1234 * @chan: DMA channel 1235 */ 1236 static void atc_free_chan_resources(struct dma_chan *chan) 1237 { 1238 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1239 struct at_dma *atdma = to_at_dma(chan->device); 1240 struct at_desc *desc, *_desc; 1241 LIST_HEAD(list); 1242 1243 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", 1244 atchan->descs_allocated); 1245 1246 /* ASSERT: channel is idle */ 1247 BUG_ON(!list_empty(&atchan->active_list)); 1248 BUG_ON(!list_empty(&atchan->queue)); 1249 BUG_ON(atc_chan_is_enabled(atchan)); 1250 1251 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 1252 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1253 list_del(&desc->desc_node); 1254 /* free link descriptor */ 1255 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); 1256 } 1257 list_splice_init(&atchan->free_list, &list); 1258 atchan->descs_allocated = 0; 1259 atchan->status = 0; 1260 atchan->remain_desc = 0; 1261 1262 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1263 } 1264 1265 #ifdef CONFIG_OF 1266 static bool at_dma_filter(struct dma_chan *chan, void *slave) 1267 { 1268 struct at_dma_slave *atslave = slave; 1269 1270 if (atslave->dma_dev == chan->device->dev) { 1271 chan->private = atslave; 1272 return true; 1273 } else { 1274 return false; 1275 } 1276 } 1277 1278 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1279 struct of_dma *of_dma) 1280 { 1281 struct dma_chan *chan; 1282 struct at_dma_chan *atchan; 1283 struct at_dma_slave *atslave; 1284 dma_cap_mask_t mask; 1285 unsigned int per_id; 1286 struct platform_device *dmac_pdev; 1287 1288 if (dma_spec->args_count != 2) 1289 return NULL; 1290 1291 dmac_pdev = of_find_device_by_node(dma_spec->np); 1292 1293 dma_cap_zero(mask); 1294 dma_cap_set(DMA_SLAVE, mask); 1295 1296 atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); 1297 if (!atslave) 1298 return NULL; 1299 1300 atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; 1301 /* 1302 * We can fill both SRC_PER and DST_PER, one of these fields will be 1303 * ignored depending on DMA transfer direction. 1304 */ 1305 per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK; 1306 atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) 1307 | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); 1308 /* 1309 * We have to translate the value we get from the device tree since 1310 * the half FIFO configuration value had to be 0 to keep backward 1311 * compatibility. 1312 */ 1313 switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) { 1314 case AT91_DMA_CFG_FIFOCFG_ALAP: 1315 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST; 1316 break; 1317 case AT91_DMA_CFG_FIFOCFG_ASAP: 1318 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE; 1319 break; 1320 case AT91_DMA_CFG_FIFOCFG_HALF: 1321 default: 1322 atslave->cfg |= ATC_FIFOCFG_HALFFIFO; 1323 } 1324 atslave->dma_dev = &dmac_pdev->dev; 1325 1326 chan = dma_request_channel(mask, at_dma_filter, atslave); 1327 if (!chan) 1328 return NULL; 1329 1330 atchan = to_at_dma_chan(chan); 1331 atchan->per_if = dma_spec->args[0] & 0xff; 1332 atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; 1333 1334 return chan; 1335 } 1336 #else 1337 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1338 struct of_dma *of_dma) 1339 { 1340 return NULL; 1341 } 1342 #endif 1343 1344 /*-- Module Management -----------------------------------------------*/ 1345 1346 /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */ 1347 static struct at_dma_platform_data at91sam9rl_config = { 1348 .nr_channels = 2, 1349 }; 1350 static struct at_dma_platform_data at91sam9g45_config = { 1351 .nr_channels = 8, 1352 }; 1353 1354 #if defined(CONFIG_OF) 1355 static const struct of_device_id atmel_dma_dt_ids[] = { 1356 { 1357 .compatible = "atmel,at91sam9rl-dma", 1358 .data = &at91sam9rl_config, 1359 }, { 1360 .compatible = "atmel,at91sam9g45-dma", 1361 .data = &at91sam9g45_config, 1362 }, { 1363 /* sentinel */ 1364 } 1365 }; 1366 1367 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids); 1368 #endif 1369 1370 static const struct platform_device_id atdma_devtypes[] = { 1371 { 1372 .name = "at91sam9rl_dma", 1373 .driver_data = (unsigned long) &at91sam9rl_config, 1374 }, { 1375 .name = "at91sam9g45_dma", 1376 .driver_data = (unsigned long) &at91sam9g45_config, 1377 }, { 1378 /* sentinel */ 1379 } 1380 }; 1381 1382 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data( 1383 struct platform_device *pdev) 1384 { 1385 if (pdev->dev.of_node) { 1386 const struct of_device_id *match; 1387 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node); 1388 if (match == NULL) 1389 return NULL; 1390 return match->data; 1391 } 1392 return (struct at_dma_platform_data *) 1393 platform_get_device_id(pdev)->driver_data; 1394 } 1395 1396 /** 1397 * at_dma_off - disable DMA controller 1398 * @atdma: the Atmel HDAMC device 1399 */ 1400 static void at_dma_off(struct at_dma *atdma) 1401 { 1402 dma_writel(atdma, EN, 0); 1403 1404 /* disable all interrupts */ 1405 dma_writel(atdma, EBCIDR, -1L); 1406 1407 /* confirm that all channels are disabled */ 1408 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) 1409 cpu_relax(); 1410 } 1411 1412 static int __init at_dma_probe(struct platform_device *pdev) 1413 { 1414 struct resource *io; 1415 struct at_dma *atdma; 1416 size_t size; 1417 int irq; 1418 int err; 1419 int i; 1420 const struct at_dma_platform_data *plat_dat; 1421 1422 /* setup platform data for each SoC */ 1423 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); 1424 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); 1425 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); 1426 1427 /* get DMA parameters from controller type */ 1428 plat_dat = at_dma_get_driver_data(pdev); 1429 if (!plat_dat) 1430 return -ENODEV; 1431 1432 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1433 if (!io) 1434 return -EINVAL; 1435 1436 irq = platform_get_irq(pdev, 0); 1437 if (irq < 0) 1438 return irq; 1439 1440 size = sizeof(struct at_dma); 1441 size += plat_dat->nr_channels * sizeof(struct at_dma_chan); 1442 atdma = kzalloc(size, GFP_KERNEL); 1443 if (!atdma) 1444 return -ENOMEM; 1445 1446 /* discover transaction capabilities */ 1447 atdma->dma_common.cap_mask = plat_dat->cap_mask; 1448 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; 1449 1450 size = resource_size(io); 1451 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { 1452 err = -EBUSY; 1453 goto err_kfree; 1454 } 1455 1456 atdma->regs = ioremap(io->start, size); 1457 if (!atdma->regs) { 1458 err = -ENOMEM; 1459 goto err_release_r; 1460 } 1461 1462 atdma->clk = clk_get(&pdev->dev, "dma_clk"); 1463 if (IS_ERR(atdma->clk)) { 1464 err = PTR_ERR(atdma->clk); 1465 goto err_clk; 1466 } 1467 err = clk_prepare_enable(atdma->clk); 1468 if (err) 1469 goto err_clk_prepare; 1470 1471 /* force dma off, just in case */ 1472 at_dma_off(atdma); 1473 1474 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); 1475 if (err) 1476 goto err_irq; 1477 1478 platform_set_drvdata(pdev, atdma); 1479 1480 /* create a pool of consistent memory blocks for hardware descriptors */ 1481 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", 1482 &pdev->dev, sizeof(struct at_desc), 1483 4 /* word alignment */, 0); 1484 if (!atdma->dma_desc_pool) { 1485 dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); 1486 err = -ENOMEM; 1487 goto err_pool_create; 1488 } 1489 1490 /* clear any pending interrupt */ 1491 while (dma_readl(atdma, EBCISR)) 1492 cpu_relax(); 1493 1494 /* initialize channels related values */ 1495 INIT_LIST_HEAD(&atdma->dma_common.channels); 1496 for (i = 0; i < plat_dat->nr_channels; i++) { 1497 struct at_dma_chan *atchan = &atdma->chan[i]; 1498 1499 atchan->mem_if = AT_DMA_MEM_IF; 1500 atchan->per_if = AT_DMA_PER_IF; 1501 atchan->chan_common.device = &atdma->dma_common; 1502 dma_cookie_init(&atchan->chan_common); 1503 list_add_tail(&atchan->chan_common.device_node, 1504 &atdma->dma_common.channels); 1505 1506 atchan->ch_regs = atdma->regs + ch_regs(i); 1507 spin_lock_init(&atchan->lock); 1508 atchan->mask = 1 << i; 1509 1510 INIT_LIST_HEAD(&atchan->active_list); 1511 INIT_LIST_HEAD(&atchan->queue); 1512 INIT_LIST_HEAD(&atchan->free_list); 1513 1514 tasklet_init(&atchan->tasklet, atc_tasklet, 1515 (unsigned long)atchan); 1516 atc_enable_chan_irq(atdma, i); 1517 } 1518 1519 /* set base routines */ 1520 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; 1521 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; 1522 atdma->dma_common.device_tx_status = atc_tx_status; 1523 atdma->dma_common.device_issue_pending = atc_issue_pending; 1524 atdma->dma_common.dev = &pdev->dev; 1525 1526 /* set prep routines based on capability */ 1527 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1528 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1529 1530 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1531 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1532 /* controller can do slave DMA: can trigger cyclic transfers */ 1533 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); 1534 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1535 atdma->dma_common.device_config = atc_config; 1536 atdma->dma_common.device_pause = atc_pause; 1537 atdma->dma_common.device_resume = atc_resume; 1538 atdma->dma_common.device_terminate_all = atc_terminate_all; 1539 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS; 1540 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS; 1541 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 1542 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1543 } 1544 1545 dma_writel(atdma, EN, AT_DMA_ENABLE); 1546 1547 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", 1548 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", 1549 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", 1550 plat_dat->nr_channels); 1551 1552 dma_async_device_register(&atdma->dma_common); 1553 1554 /* 1555 * Do not return an error if the dmac node is not present in order to 1556 * not break the existing way of requesting channel with 1557 * dma_request_channel(). 1558 */ 1559 if (pdev->dev.of_node) { 1560 err = of_dma_controller_register(pdev->dev.of_node, 1561 at_dma_xlate, atdma); 1562 if (err) { 1563 dev_err(&pdev->dev, "could not register of_dma_controller\n"); 1564 goto err_of_dma_controller_register; 1565 } 1566 } 1567 1568 return 0; 1569 1570 err_of_dma_controller_register: 1571 dma_async_device_unregister(&atdma->dma_common); 1572 dma_pool_destroy(atdma->dma_desc_pool); 1573 err_pool_create: 1574 free_irq(platform_get_irq(pdev, 0), atdma); 1575 err_irq: 1576 clk_disable_unprepare(atdma->clk); 1577 err_clk_prepare: 1578 clk_put(atdma->clk); 1579 err_clk: 1580 iounmap(atdma->regs); 1581 atdma->regs = NULL; 1582 err_release_r: 1583 release_mem_region(io->start, size); 1584 err_kfree: 1585 kfree(atdma); 1586 return err; 1587 } 1588 1589 static int at_dma_remove(struct platform_device *pdev) 1590 { 1591 struct at_dma *atdma = platform_get_drvdata(pdev); 1592 struct dma_chan *chan, *_chan; 1593 struct resource *io; 1594 1595 at_dma_off(atdma); 1596 dma_async_device_unregister(&atdma->dma_common); 1597 1598 dma_pool_destroy(atdma->dma_desc_pool); 1599 free_irq(platform_get_irq(pdev, 0), atdma); 1600 1601 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1602 device_node) { 1603 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1604 1605 /* Disable interrupts */ 1606 atc_disable_chan_irq(atdma, chan->chan_id); 1607 1608 tasklet_kill(&atchan->tasklet); 1609 list_del(&chan->device_node); 1610 } 1611 1612 clk_disable_unprepare(atdma->clk); 1613 clk_put(atdma->clk); 1614 1615 iounmap(atdma->regs); 1616 atdma->regs = NULL; 1617 1618 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1619 release_mem_region(io->start, resource_size(io)); 1620 1621 kfree(atdma); 1622 1623 return 0; 1624 } 1625 1626 static void at_dma_shutdown(struct platform_device *pdev) 1627 { 1628 struct at_dma *atdma = platform_get_drvdata(pdev); 1629 1630 at_dma_off(platform_get_drvdata(pdev)); 1631 clk_disable_unprepare(atdma->clk); 1632 } 1633 1634 static int at_dma_prepare(struct device *dev) 1635 { 1636 struct platform_device *pdev = to_platform_device(dev); 1637 struct at_dma *atdma = platform_get_drvdata(pdev); 1638 struct dma_chan *chan, *_chan; 1639 1640 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1641 device_node) { 1642 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1643 /* wait for transaction completion (except in cyclic case) */ 1644 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) 1645 return -EAGAIN; 1646 } 1647 return 0; 1648 } 1649 1650 static void atc_suspend_cyclic(struct at_dma_chan *atchan) 1651 { 1652 struct dma_chan *chan = &atchan->chan_common; 1653 1654 /* Channel should be paused by user 1655 * do it anyway even if it is not done already */ 1656 if (!atc_chan_is_paused(atchan)) { 1657 dev_warn(chan2dev(chan), 1658 "cyclic channel not paused, should be done by channel user\n"); 1659 atc_pause(chan); 1660 } 1661 1662 /* now preserve additional data for cyclic operations */ 1663 /* next descriptor address in the cyclic list */ 1664 atchan->save_dscr = channel_readl(atchan, DSCR); 1665 1666 vdbg_dump_regs(atchan); 1667 } 1668 1669 static int at_dma_suspend_noirq(struct device *dev) 1670 { 1671 struct platform_device *pdev = to_platform_device(dev); 1672 struct at_dma *atdma = platform_get_drvdata(pdev); 1673 struct dma_chan *chan, *_chan; 1674 1675 /* preserve data */ 1676 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1677 device_node) { 1678 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1679 1680 if (atc_chan_is_cyclic(atchan)) 1681 atc_suspend_cyclic(atchan); 1682 atchan->save_cfg = channel_readl(atchan, CFG); 1683 } 1684 atdma->save_imr = dma_readl(atdma, EBCIMR); 1685 1686 /* disable DMA controller */ 1687 at_dma_off(atdma); 1688 clk_disable_unprepare(atdma->clk); 1689 return 0; 1690 } 1691 1692 static void atc_resume_cyclic(struct at_dma_chan *atchan) 1693 { 1694 struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 1695 1696 /* restore channel status for cyclic descriptors list: 1697 * next descriptor in the cyclic list at the time of suspend */ 1698 channel_writel(atchan, SADDR, 0); 1699 channel_writel(atchan, DADDR, 0); 1700 channel_writel(atchan, CTRLA, 0); 1701 channel_writel(atchan, CTRLB, 0); 1702 channel_writel(atchan, DSCR, atchan->save_dscr); 1703 dma_writel(atdma, CHER, atchan->mask); 1704 1705 /* channel pause status should be removed by channel user 1706 * We cannot take the initiative to do it here */ 1707 1708 vdbg_dump_regs(atchan); 1709 } 1710 1711 static int at_dma_resume_noirq(struct device *dev) 1712 { 1713 struct platform_device *pdev = to_platform_device(dev); 1714 struct at_dma *atdma = platform_get_drvdata(pdev); 1715 struct dma_chan *chan, *_chan; 1716 1717 /* bring back DMA controller */ 1718 clk_prepare_enable(atdma->clk); 1719 dma_writel(atdma, EN, AT_DMA_ENABLE); 1720 1721 /* clear any pending interrupt */ 1722 while (dma_readl(atdma, EBCISR)) 1723 cpu_relax(); 1724 1725 /* restore saved data */ 1726 dma_writel(atdma, EBCIER, atdma->save_imr); 1727 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1728 device_node) { 1729 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1730 1731 channel_writel(atchan, CFG, atchan->save_cfg); 1732 if (atc_chan_is_cyclic(atchan)) 1733 atc_resume_cyclic(atchan); 1734 } 1735 return 0; 1736 } 1737 1738 static const struct dev_pm_ops at_dma_dev_pm_ops = { 1739 .prepare = at_dma_prepare, 1740 .suspend_noirq = at_dma_suspend_noirq, 1741 .resume_noirq = at_dma_resume_noirq, 1742 }; 1743 1744 static struct platform_driver at_dma_driver = { 1745 .remove = at_dma_remove, 1746 .shutdown = at_dma_shutdown, 1747 .id_table = atdma_devtypes, 1748 .driver = { 1749 .name = "at_hdmac", 1750 .pm = &at_dma_dev_pm_ops, 1751 .of_match_table = of_match_ptr(atmel_dma_dt_ids), 1752 }, 1753 }; 1754 1755 static int __init at_dma_init(void) 1756 { 1757 return platform_driver_probe(&at_dma_driver, at_dma_probe); 1758 } 1759 subsys_initcall(at_dma_init); 1760 1761 static void __exit at_dma_exit(void) 1762 { 1763 platform_driver_unregister(&at_dma_driver); 1764 } 1765 module_exit(at_dma_exit); 1766 1767 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); 1768 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); 1769 MODULE_LICENSE("GPL"); 1770 MODULE_ALIAS("platform:at_hdmac"); 1771