1 /* 2 * Core driver for the Synopsys DesignWare DMA Controller 3 * 4 * Copyright (C) 2007-2008 Atmel Corporation 5 * Copyright (C) 2010-2011 ST Microelectronics 6 * Copyright (C) 2013 Intel Corporation 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 13 #include <linux/bitops.h> 14 #include <linux/delay.h> 15 #include <linux/dmaengine.h> 16 #include <linux/dma-mapping.h> 17 #include <linux/dmapool.h> 18 #include <linux/err.h> 19 #include <linux/init.h> 20 #include <linux/interrupt.h> 21 #include <linux/io.h> 22 #include <linux/mm.h> 23 #include <linux/module.h> 24 #include <linux/slab.h> 25 #include <linux/pm_runtime.h> 26 27 #include "../dmaengine.h" 28 #include "internal.h" 29 30 /* 31 * This supports the Synopsys "DesignWare AHB Central DMA Controller", 32 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all 33 * of which use ARM any more). See the "Databook" from Synopsys for 34 * information beyond what licensees probably provide. 35 * 36 * The driver has been tested with the Atmel AT32AP7000, which does not 37 * support descriptor writeback. 38 */ 39 40 #define DWC_DEFAULT_CTLLO(_chan) ({ \ 41 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ 42 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ 43 bool _is_slave = is_slave_direction(_dwc->direction); \ 44 u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ 45 DW_DMA_MSIZE_16; \ 46 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ 47 DW_DMA_MSIZE_16; \ 48 \ 49 (DWC_CTLL_DST_MSIZE(_dmsize) \ 50 | DWC_CTLL_SRC_MSIZE(_smsize) \ 51 | DWC_CTLL_LLP_D_EN \ 52 | DWC_CTLL_LLP_S_EN \ 53 | DWC_CTLL_DMS(_dwc->dst_master) \ 54 | DWC_CTLL_SMS(_dwc->src_master)); \ 55 }) 56 57 /* 58 * Number of descriptors to allocate for each channel. This should be 59 * made configurable somehow; preferably, the clients (at least the 60 * ones using slave transfers) should be able to give us a hint. 61 */ 62 #define NR_DESCS_PER_CHANNEL 64 63 64 /* The set of bus widths supported by the DMA controller */ 65 #define DW_DMA_BUSWIDTHS \ 66 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 67 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 68 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 69 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) 70 71 /*----------------------------------------------------------------------*/ 72 73 static struct device *chan2dev(struct dma_chan *chan) 74 { 75 return &chan->dev->device; 76 } 77 78 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 79 { 80 return to_dw_desc(dwc->active_list.next); 81 } 82 83 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) 84 { 85 struct dw_desc *desc, *_desc; 86 struct dw_desc *ret = NULL; 87 unsigned int i = 0; 88 unsigned long flags; 89 90 spin_lock_irqsave(&dwc->lock, flags); 91 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { 92 i++; 93 if (async_tx_test_ack(&desc->txd)) { 94 list_del(&desc->desc_node); 95 ret = desc; 96 break; 97 } 98 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); 99 } 100 spin_unlock_irqrestore(&dwc->lock, flags); 101 102 dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); 103 104 return ret; 105 } 106 107 /* 108 * Move a descriptor, including any children, to the free list. 109 * `desc' must not be on any lists. 110 */ 111 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) 112 { 113 unsigned long flags; 114 115 if (desc) { 116 struct dw_desc *child; 117 118 spin_lock_irqsave(&dwc->lock, flags); 119 list_for_each_entry(child, &desc->tx_list, desc_node) 120 dev_vdbg(chan2dev(&dwc->chan), 121 "moving child desc %p to freelist\n", 122 child); 123 list_splice_init(&desc->tx_list, &dwc->free_list); 124 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); 125 list_add(&desc->desc_node, &dwc->free_list); 126 spin_unlock_irqrestore(&dwc->lock, flags); 127 } 128 } 129 130 static void dwc_initialize(struct dw_dma_chan *dwc) 131 { 132 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 133 struct dw_dma_slave *dws = dwc->chan.private; 134 u32 cfghi = DWC_CFGH_FIFO_MODE; 135 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); 136 137 if (dwc->initialized == true) 138 return; 139 140 if (dws) { 141 /* 142 * We need controller-specific data to set up slave 143 * transfers. 144 */ 145 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); 146 147 cfghi |= DWC_CFGH_DST_PER(dws->dst_id); 148 cfghi |= DWC_CFGH_SRC_PER(dws->src_id); 149 } else { 150 cfghi |= DWC_CFGH_DST_PER(dwc->dst_id); 151 cfghi |= DWC_CFGH_SRC_PER(dwc->src_id); 152 } 153 154 channel_writel(dwc, CFG_LO, cfglo); 155 channel_writel(dwc, CFG_HI, cfghi); 156 157 /* Enable interrupts */ 158 channel_set_bit(dw, MASK.XFER, dwc->mask); 159 channel_set_bit(dw, MASK.BLOCK, dwc->mask); 160 channel_set_bit(dw, MASK.ERROR, dwc->mask); 161 162 dwc->initialized = true; 163 } 164 165 /*----------------------------------------------------------------------*/ 166 167 static inline unsigned int dwc_fast_ffs(unsigned long long v) 168 { 169 /* 170 * We can be a lot more clever here, but this should take care 171 * of the most common optimization. 172 */ 173 if (!(v & 7)) 174 return 3; 175 else if (!(v & 3)) 176 return 2; 177 else if (!(v & 1)) 178 return 1; 179 return 0; 180 } 181 182 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) 183 { 184 dev_err(chan2dev(&dwc->chan), 185 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 186 channel_readl(dwc, SAR), 187 channel_readl(dwc, DAR), 188 channel_readl(dwc, LLP), 189 channel_readl(dwc, CTL_HI), 190 channel_readl(dwc, CTL_LO)); 191 } 192 193 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) 194 { 195 channel_clear_bit(dw, CH_EN, dwc->mask); 196 while (dma_readl(dw, CH_EN) & dwc->mask) 197 cpu_relax(); 198 } 199 200 /*----------------------------------------------------------------------*/ 201 202 /* Perform single block transfer */ 203 static inline void dwc_do_single_block(struct dw_dma_chan *dwc, 204 struct dw_desc *desc) 205 { 206 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 207 u32 ctllo; 208 209 /* 210 * Software emulation of LLP mode relies on interrupts to continue 211 * multi block transfer. 212 */ 213 ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; 214 215 channel_writel(dwc, SAR, desc->lli.sar); 216 channel_writel(dwc, DAR, desc->lli.dar); 217 channel_writel(dwc, CTL_LO, ctllo); 218 channel_writel(dwc, CTL_HI, desc->lli.ctlhi); 219 channel_set_bit(dw, CH_EN, dwc->mask); 220 221 /* Move pointer to next descriptor */ 222 dwc->tx_node_active = dwc->tx_node_active->next; 223 } 224 225 /* Called with dwc->lock held and bh disabled */ 226 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) 227 { 228 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 229 unsigned long was_soft_llp; 230 231 /* ASSERT: channel is idle */ 232 if (dma_readl(dw, CH_EN) & dwc->mask) { 233 dev_err(chan2dev(&dwc->chan), 234 "%s: BUG: Attempted to start non-idle channel\n", 235 __func__); 236 dwc_dump_chan_regs(dwc); 237 238 /* The tasklet will hopefully advance the queue... */ 239 return; 240 } 241 242 if (dwc->nollp) { 243 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, 244 &dwc->flags); 245 if (was_soft_llp) { 246 dev_err(chan2dev(&dwc->chan), 247 "BUG: Attempted to start new LLP transfer inside ongoing one\n"); 248 return; 249 } 250 251 dwc_initialize(dwc); 252 253 dwc->residue = first->total_len; 254 dwc->tx_node_active = &first->tx_list; 255 256 /* Submit first block */ 257 dwc_do_single_block(dwc, first); 258 259 return; 260 } 261 262 dwc_initialize(dwc); 263 264 channel_writel(dwc, LLP, first->txd.phys); 265 channel_writel(dwc, CTL_LO, 266 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 267 channel_writel(dwc, CTL_HI, 0); 268 channel_set_bit(dw, CH_EN, dwc->mask); 269 } 270 271 static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) 272 { 273 struct dw_desc *desc; 274 275 if (list_empty(&dwc->queue)) 276 return; 277 278 list_move(dwc->queue.next, &dwc->active_list); 279 desc = dwc_first_active(dwc); 280 dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); 281 dwc_dostart(dwc, desc); 282 } 283 284 /*----------------------------------------------------------------------*/ 285 286 static void 287 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, 288 bool callback_required) 289 { 290 dma_async_tx_callback callback = NULL; 291 void *param = NULL; 292 struct dma_async_tx_descriptor *txd = &desc->txd; 293 struct dw_desc *child; 294 unsigned long flags; 295 296 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 297 298 spin_lock_irqsave(&dwc->lock, flags); 299 dma_cookie_complete(txd); 300 if (callback_required) { 301 callback = txd->callback; 302 param = txd->callback_param; 303 } 304 305 /* async_tx_ack */ 306 list_for_each_entry(child, &desc->tx_list, desc_node) 307 async_tx_ack(&child->txd); 308 async_tx_ack(&desc->txd); 309 310 list_splice_init(&desc->tx_list, &dwc->free_list); 311 list_move(&desc->desc_node, &dwc->free_list); 312 313 dma_descriptor_unmap(txd); 314 spin_unlock_irqrestore(&dwc->lock, flags); 315 316 if (callback) 317 callback(param); 318 } 319 320 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) 321 { 322 struct dw_desc *desc, *_desc; 323 LIST_HEAD(list); 324 unsigned long flags; 325 326 spin_lock_irqsave(&dwc->lock, flags); 327 if (dma_readl(dw, CH_EN) & dwc->mask) { 328 dev_err(chan2dev(&dwc->chan), 329 "BUG: XFER bit set, but channel not idle!\n"); 330 331 /* Try to continue after resetting the channel... */ 332 dwc_chan_disable(dw, dwc); 333 } 334 335 /* 336 * Submit queued descriptors ASAP, i.e. before we go through 337 * the completed ones. 338 */ 339 list_splice_init(&dwc->active_list, &list); 340 dwc_dostart_first_queued(dwc); 341 342 spin_unlock_irqrestore(&dwc->lock, flags); 343 344 list_for_each_entry_safe(desc, _desc, &list, desc_node) 345 dwc_descriptor_complete(dwc, desc, true); 346 } 347 348 /* Returns how many bytes were already received from source */ 349 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) 350 { 351 u32 ctlhi = channel_readl(dwc, CTL_HI); 352 u32 ctllo = channel_readl(dwc, CTL_LO); 353 354 return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); 355 } 356 357 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) 358 { 359 dma_addr_t llp; 360 struct dw_desc *desc, *_desc; 361 struct dw_desc *child; 362 u32 status_xfer; 363 unsigned long flags; 364 365 spin_lock_irqsave(&dwc->lock, flags); 366 llp = channel_readl(dwc, LLP); 367 status_xfer = dma_readl(dw, RAW.XFER); 368 369 if (status_xfer & dwc->mask) { 370 /* Everything we've submitted is done */ 371 dma_writel(dw, CLEAR.XFER, dwc->mask); 372 373 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { 374 struct list_head *head, *active = dwc->tx_node_active; 375 376 /* 377 * We are inside first active descriptor. 378 * Otherwise something is really wrong. 379 */ 380 desc = dwc_first_active(dwc); 381 382 head = &desc->tx_list; 383 if (active != head) { 384 /* Update desc to reflect last sent one */ 385 if (active != head->next) 386 desc = to_dw_desc(active->prev); 387 388 dwc->residue -= desc->len; 389 390 child = to_dw_desc(active); 391 392 /* Submit next block */ 393 dwc_do_single_block(dwc, child); 394 395 spin_unlock_irqrestore(&dwc->lock, flags); 396 return; 397 } 398 399 /* We are done here */ 400 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 401 } 402 403 dwc->residue = 0; 404 405 spin_unlock_irqrestore(&dwc->lock, flags); 406 407 dwc_complete_all(dw, dwc); 408 return; 409 } 410 411 if (list_empty(&dwc->active_list)) { 412 dwc->residue = 0; 413 spin_unlock_irqrestore(&dwc->lock, flags); 414 return; 415 } 416 417 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { 418 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); 419 spin_unlock_irqrestore(&dwc->lock, flags); 420 return; 421 } 422 423 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp); 424 425 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 426 /* Initial residue value */ 427 dwc->residue = desc->total_len; 428 429 /* Check first descriptors addr */ 430 if (desc->txd.phys == llp) { 431 spin_unlock_irqrestore(&dwc->lock, flags); 432 return; 433 } 434 435 /* Check first descriptors llp */ 436 if (desc->lli.llp == llp) { 437 /* This one is currently in progress */ 438 dwc->residue -= dwc_get_sent(dwc); 439 spin_unlock_irqrestore(&dwc->lock, flags); 440 return; 441 } 442 443 dwc->residue -= desc->len; 444 list_for_each_entry(child, &desc->tx_list, desc_node) { 445 if (child->lli.llp == llp) { 446 /* Currently in progress */ 447 dwc->residue -= dwc_get_sent(dwc); 448 spin_unlock_irqrestore(&dwc->lock, flags); 449 return; 450 } 451 dwc->residue -= child->len; 452 } 453 454 /* 455 * No descriptors so far seem to be in progress, i.e. 456 * this one must be done. 457 */ 458 spin_unlock_irqrestore(&dwc->lock, flags); 459 dwc_descriptor_complete(dwc, desc, true); 460 spin_lock_irqsave(&dwc->lock, flags); 461 } 462 463 dev_err(chan2dev(&dwc->chan), 464 "BUG: All descriptors done, but channel not idle!\n"); 465 466 /* Try to continue after resetting the channel... */ 467 dwc_chan_disable(dw, dwc); 468 469 dwc_dostart_first_queued(dwc); 470 spin_unlock_irqrestore(&dwc->lock, flags); 471 } 472 473 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) 474 { 475 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", 476 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); 477 } 478 479 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) 480 { 481 struct dw_desc *bad_desc; 482 struct dw_desc *child; 483 unsigned long flags; 484 485 dwc_scan_descriptors(dw, dwc); 486 487 spin_lock_irqsave(&dwc->lock, flags); 488 489 /* 490 * The descriptor currently at the head of the active list is 491 * borked. Since we don't have any way to report errors, we'll 492 * just have to scream loudly and try to carry on. 493 */ 494 bad_desc = dwc_first_active(dwc); 495 list_del_init(&bad_desc->desc_node); 496 list_move(dwc->queue.next, dwc->active_list.prev); 497 498 /* Clear the error flag and try to restart the controller */ 499 dma_writel(dw, CLEAR.ERROR, dwc->mask); 500 if (!list_empty(&dwc->active_list)) 501 dwc_dostart(dwc, dwc_first_active(dwc)); 502 503 /* 504 * WARN may seem harsh, but since this only happens 505 * when someone submits a bad physical address in a 506 * descriptor, we should consider ourselves lucky that the 507 * controller flagged an error instead of scribbling over 508 * random memory locations. 509 */ 510 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" 511 " cookie: %d\n", bad_desc->txd.cookie); 512 dwc_dump_lli(dwc, &bad_desc->lli); 513 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 514 dwc_dump_lli(dwc, &child->lli); 515 516 spin_unlock_irqrestore(&dwc->lock, flags); 517 518 /* Pretend the descriptor completed successfully */ 519 dwc_descriptor_complete(dwc, bad_desc, true); 520 } 521 522 /* --------------------- Cyclic DMA API extensions -------------------- */ 523 524 dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) 525 { 526 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 527 return channel_readl(dwc, SAR); 528 } 529 EXPORT_SYMBOL(dw_dma_get_src_addr); 530 531 dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) 532 { 533 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 534 return channel_readl(dwc, DAR); 535 } 536 EXPORT_SYMBOL(dw_dma_get_dst_addr); 537 538 /* Called with dwc->lock held and all DMAC interrupts disabled */ 539 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, 540 u32 status_block, u32 status_err, u32 status_xfer) 541 { 542 unsigned long flags; 543 544 if (status_block & dwc->mask) { 545 void (*callback)(void *param); 546 void *callback_param; 547 548 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", 549 channel_readl(dwc, LLP)); 550 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 551 552 callback = dwc->cdesc->period_callback; 553 callback_param = dwc->cdesc->period_callback_param; 554 555 if (callback) 556 callback(callback_param); 557 } 558 559 /* 560 * Error and transfer complete are highly unlikely, and will most 561 * likely be due to a configuration error by the user. 562 */ 563 if (unlikely(status_err & dwc->mask) || 564 unlikely(status_xfer & dwc->mask)) { 565 int i; 566 567 dev_err(chan2dev(&dwc->chan), 568 "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", 569 status_xfer ? "xfer" : "error"); 570 571 spin_lock_irqsave(&dwc->lock, flags); 572 573 dwc_dump_chan_regs(dwc); 574 575 dwc_chan_disable(dw, dwc); 576 577 /* Make sure DMA does not restart by loading a new list */ 578 channel_writel(dwc, LLP, 0); 579 channel_writel(dwc, CTL_LO, 0); 580 channel_writel(dwc, CTL_HI, 0); 581 582 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 583 dma_writel(dw, CLEAR.ERROR, dwc->mask); 584 dma_writel(dw, CLEAR.XFER, dwc->mask); 585 586 for (i = 0; i < dwc->cdesc->periods; i++) 587 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); 588 589 spin_unlock_irqrestore(&dwc->lock, flags); 590 } 591 } 592 593 /* ------------------------------------------------------------------------- */ 594 595 static void dw_dma_tasklet(unsigned long data) 596 { 597 struct dw_dma *dw = (struct dw_dma *)data; 598 struct dw_dma_chan *dwc; 599 u32 status_block; 600 u32 status_xfer; 601 u32 status_err; 602 int i; 603 604 status_block = dma_readl(dw, RAW.BLOCK); 605 status_xfer = dma_readl(dw, RAW.XFER); 606 status_err = dma_readl(dw, RAW.ERROR); 607 608 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); 609 610 for (i = 0; i < dw->dma.chancnt; i++) { 611 dwc = &dw->chan[i]; 612 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) 613 dwc_handle_cyclic(dw, dwc, status_block, status_err, 614 status_xfer); 615 else if (status_err & (1 << i)) 616 dwc_handle_error(dw, dwc); 617 else if (status_xfer & (1 << i)) 618 dwc_scan_descriptors(dw, dwc); 619 } 620 621 /* 622 * Re-enable interrupts. 623 */ 624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 625 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); 626 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 627 } 628 629 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) 630 { 631 struct dw_dma *dw = dev_id; 632 u32 status; 633 634 /* Check if we have any interrupt from the DMAC which is not in use */ 635 if (!dw->in_use) 636 return IRQ_NONE; 637 638 status = dma_readl(dw, STATUS_INT); 639 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); 640 641 /* Check if we have any interrupt from the DMAC */ 642 if (!status) 643 return IRQ_NONE; 644 645 /* 646 * Just disable the interrupts. We'll turn them back on in the 647 * softirq handler. 648 */ 649 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 650 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 651 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 652 653 status = dma_readl(dw, STATUS_INT); 654 if (status) { 655 dev_err(dw->dma.dev, 656 "BUG: Unexpected interrupts pending: 0x%x\n", 657 status); 658 659 /* Try to recover */ 660 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); 661 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); 662 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); 663 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); 664 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); 665 } 666 667 tasklet_schedule(&dw->tasklet); 668 669 return IRQ_HANDLED; 670 } 671 672 /*----------------------------------------------------------------------*/ 673 674 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) 675 { 676 struct dw_desc *desc = txd_to_dw_desc(tx); 677 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); 678 dma_cookie_t cookie; 679 unsigned long flags; 680 681 spin_lock_irqsave(&dwc->lock, flags); 682 cookie = dma_cookie_assign(tx); 683 684 /* 685 * REVISIT: We should attempt to chain as many descriptors as 686 * possible, perhaps even appending to those already submitted 687 * for DMA. But this is hard to do in a race-free manner. 688 */ 689 690 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); 691 list_add_tail(&desc->desc_node, &dwc->queue); 692 693 spin_unlock_irqrestore(&dwc->lock, flags); 694 695 return cookie; 696 } 697 698 static struct dma_async_tx_descriptor * 699 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 700 size_t len, unsigned long flags) 701 { 702 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 703 struct dw_dma *dw = to_dw_dma(chan->device); 704 struct dw_desc *desc; 705 struct dw_desc *first; 706 struct dw_desc *prev; 707 size_t xfer_count; 708 size_t offset; 709 unsigned int src_width; 710 unsigned int dst_width; 711 unsigned int data_width; 712 u32 ctllo; 713 714 dev_vdbg(chan2dev(chan), 715 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, 716 &dest, &src, len, flags); 717 718 if (unlikely(!len)) { 719 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); 720 return NULL; 721 } 722 723 dwc->direction = DMA_MEM_TO_MEM; 724 725 data_width = min_t(unsigned int, dw->data_width[dwc->src_master], 726 dw->data_width[dwc->dst_master]); 727 728 src_width = dst_width = min_t(unsigned int, data_width, 729 dwc_fast_ffs(src | dest | len)); 730 731 ctllo = DWC_DEFAULT_CTLLO(chan) 732 | DWC_CTLL_DST_WIDTH(dst_width) 733 | DWC_CTLL_SRC_WIDTH(src_width) 734 | DWC_CTLL_DST_INC 735 | DWC_CTLL_SRC_INC 736 | DWC_CTLL_FC_M2M; 737 prev = first = NULL; 738 739 for (offset = 0; offset < len; offset += xfer_count << src_width) { 740 xfer_count = min_t(size_t, (len - offset) >> src_width, 741 dwc->block_size); 742 743 desc = dwc_desc_get(dwc); 744 if (!desc) 745 goto err_desc_get; 746 747 desc->lli.sar = src + offset; 748 desc->lli.dar = dest + offset; 749 desc->lli.ctllo = ctllo; 750 desc->lli.ctlhi = xfer_count; 751 desc->len = xfer_count << src_width; 752 753 if (!first) { 754 first = desc; 755 } else { 756 prev->lli.llp = desc->txd.phys; 757 list_add_tail(&desc->desc_node, 758 &first->tx_list); 759 } 760 prev = desc; 761 } 762 763 if (flags & DMA_PREP_INTERRUPT) 764 /* Trigger interrupt after last block */ 765 prev->lli.ctllo |= DWC_CTLL_INT_EN; 766 767 prev->lli.llp = 0; 768 first->txd.flags = flags; 769 first->total_len = len; 770 771 return &first->txd; 772 773 err_desc_get: 774 dwc_desc_put(dwc, first); 775 return NULL; 776 } 777 778 static struct dma_async_tx_descriptor * 779 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 780 unsigned int sg_len, enum dma_transfer_direction direction, 781 unsigned long flags, void *context) 782 { 783 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 784 struct dw_dma *dw = to_dw_dma(chan->device); 785 struct dma_slave_config *sconfig = &dwc->dma_sconfig; 786 struct dw_desc *prev; 787 struct dw_desc *first; 788 u32 ctllo; 789 dma_addr_t reg; 790 unsigned int reg_width; 791 unsigned int mem_width; 792 unsigned int data_width; 793 unsigned int i; 794 struct scatterlist *sg; 795 size_t total_len = 0; 796 797 dev_vdbg(chan2dev(chan), "%s\n", __func__); 798 799 if (unlikely(!is_slave_direction(direction) || !sg_len)) 800 return NULL; 801 802 dwc->direction = direction; 803 804 prev = first = NULL; 805 806 switch (direction) { 807 case DMA_MEM_TO_DEV: 808 reg_width = __ffs(sconfig->dst_addr_width); 809 reg = sconfig->dst_addr; 810 ctllo = (DWC_DEFAULT_CTLLO(chan) 811 | DWC_CTLL_DST_WIDTH(reg_width) 812 | DWC_CTLL_DST_FIX 813 | DWC_CTLL_SRC_INC); 814 815 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 816 DWC_CTLL_FC(DW_DMA_FC_D_M2P); 817 818 data_width = dw->data_width[dwc->src_master]; 819 820 for_each_sg(sgl, sg, sg_len, i) { 821 struct dw_desc *desc; 822 u32 len, dlen, mem; 823 824 mem = sg_dma_address(sg); 825 len = sg_dma_len(sg); 826 827 mem_width = min_t(unsigned int, 828 data_width, dwc_fast_ffs(mem | len)); 829 830 slave_sg_todev_fill_desc: 831 desc = dwc_desc_get(dwc); 832 if (!desc) 833 goto err_desc_get; 834 835 desc->lli.sar = mem; 836 desc->lli.dar = reg; 837 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); 838 if ((len >> mem_width) > dwc->block_size) { 839 dlen = dwc->block_size << mem_width; 840 mem += dlen; 841 len -= dlen; 842 } else { 843 dlen = len; 844 len = 0; 845 } 846 847 desc->lli.ctlhi = dlen >> mem_width; 848 desc->len = dlen; 849 850 if (!first) { 851 first = desc; 852 } else { 853 prev->lli.llp = desc->txd.phys; 854 list_add_tail(&desc->desc_node, 855 &first->tx_list); 856 } 857 prev = desc; 858 total_len += dlen; 859 860 if (len) 861 goto slave_sg_todev_fill_desc; 862 } 863 break; 864 case DMA_DEV_TO_MEM: 865 reg_width = __ffs(sconfig->src_addr_width); 866 reg = sconfig->src_addr; 867 ctllo = (DWC_DEFAULT_CTLLO(chan) 868 | DWC_CTLL_SRC_WIDTH(reg_width) 869 | DWC_CTLL_DST_INC 870 | DWC_CTLL_SRC_FIX); 871 872 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 873 DWC_CTLL_FC(DW_DMA_FC_D_P2M); 874 875 data_width = dw->data_width[dwc->dst_master]; 876 877 for_each_sg(sgl, sg, sg_len, i) { 878 struct dw_desc *desc; 879 u32 len, dlen, mem; 880 881 mem = sg_dma_address(sg); 882 len = sg_dma_len(sg); 883 884 mem_width = min_t(unsigned int, 885 data_width, dwc_fast_ffs(mem | len)); 886 887 slave_sg_fromdev_fill_desc: 888 desc = dwc_desc_get(dwc); 889 if (!desc) 890 goto err_desc_get; 891 892 desc->lli.sar = reg; 893 desc->lli.dar = mem; 894 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); 895 if ((len >> reg_width) > dwc->block_size) { 896 dlen = dwc->block_size << reg_width; 897 mem += dlen; 898 len -= dlen; 899 } else { 900 dlen = len; 901 len = 0; 902 } 903 desc->lli.ctlhi = dlen >> reg_width; 904 desc->len = dlen; 905 906 if (!first) { 907 first = desc; 908 } else { 909 prev->lli.llp = desc->txd.phys; 910 list_add_tail(&desc->desc_node, 911 &first->tx_list); 912 } 913 prev = desc; 914 total_len += dlen; 915 916 if (len) 917 goto slave_sg_fromdev_fill_desc; 918 } 919 break; 920 default: 921 return NULL; 922 } 923 924 if (flags & DMA_PREP_INTERRUPT) 925 /* Trigger interrupt after last block */ 926 prev->lli.ctllo |= DWC_CTLL_INT_EN; 927 928 prev->lli.llp = 0; 929 first->total_len = total_len; 930 931 return &first->txd; 932 933 err_desc_get: 934 dev_err(chan2dev(chan), 935 "not enough descriptors available. Direction %d\n", direction); 936 dwc_desc_put(dwc, first); 937 return NULL; 938 } 939 940 bool dw_dma_filter(struct dma_chan *chan, void *param) 941 { 942 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 943 struct dw_dma_slave *dws = param; 944 945 if (!dws || dws->dma_dev != chan->device->dev) 946 return false; 947 948 /* We have to copy data since dws can be temporary storage */ 949 950 dwc->src_id = dws->src_id; 951 dwc->dst_id = dws->dst_id; 952 953 dwc->src_master = dws->src_master; 954 dwc->dst_master = dws->dst_master; 955 956 return true; 957 } 958 EXPORT_SYMBOL_GPL(dw_dma_filter); 959 960 /* 961 * Fix sconfig's burst size according to dw_dmac. We need to convert them as: 962 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. 963 * 964 * NOTE: burst size 2 is not supported by controller. 965 * 966 * This can be done by finding least significant bit set: n & (n - 1) 967 */ 968 static inline void convert_burst(u32 *maxburst) 969 { 970 if (*maxburst > 1) 971 *maxburst = fls(*maxburst) - 2; 972 else 973 *maxburst = 0; 974 } 975 976 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) 977 { 978 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 979 980 /* Check if chan will be configured for slave transfers */ 981 if (!is_slave_direction(sconfig->direction)) 982 return -EINVAL; 983 984 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); 985 dwc->direction = sconfig->direction; 986 987 convert_burst(&dwc->dma_sconfig.src_maxburst); 988 convert_burst(&dwc->dma_sconfig.dst_maxburst); 989 990 return 0; 991 } 992 993 static int dwc_pause(struct dma_chan *chan) 994 { 995 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 996 unsigned long flags; 997 unsigned int count = 20; /* timeout iterations */ 998 u32 cfglo; 999 1000 spin_lock_irqsave(&dwc->lock, flags); 1001 1002 cfglo = channel_readl(dwc, CFG_LO); 1003 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); 1004 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) 1005 udelay(2); 1006 1007 dwc->paused = true; 1008 1009 spin_unlock_irqrestore(&dwc->lock, flags); 1010 1011 return 0; 1012 } 1013 1014 static inline void dwc_chan_resume(struct dw_dma_chan *dwc) 1015 { 1016 u32 cfglo = channel_readl(dwc, CFG_LO); 1017 1018 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); 1019 1020 dwc->paused = false; 1021 } 1022 1023 static int dwc_resume(struct dma_chan *chan) 1024 { 1025 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1026 unsigned long flags; 1027 1028 if (!dwc->paused) 1029 return 0; 1030 1031 spin_lock_irqsave(&dwc->lock, flags); 1032 1033 dwc_chan_resume(dwc); 1034 1035 spin_unlock_irqrestore(&dwc->lock, flags); 1036 1037 return 0; 1038 } 1039 1040 static int dwc_terminate_all(struct dma_chan *chan) 1041 { 1042 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1043 struct dw_dma *dw = to_dw_dma(chan->device); 1044 struct dw_desc *desc, *_desc; 1045 unsigned long flags; 1046 LIST_HEAD(list); 1047 1048 spin_lock_irqsave(&dwc->lock, flags); 1049 1050 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 1051 1052 dwc_chan_disable(dw, dwc); 1053 1054 dwc_chan_resume(dwc); 1055 1056 /* active_list entries will end up before queued entries */ 1057 list_splice_init(&dwc->queue, &list); 1058 list_splice_init(&dwc->active_list, &list); 1059 1060 spin_unlock_irqrestore(&dwc->lock, flags); 1061 1062 /* Flush all pending and queued descriptors */ 1063 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1064 dwc_descriptor_complete(dwc, desc, false); 1065 1066 return 0; 1067 } 1068 1069 static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) 1070 { 1071 unsigned long flags; 1072 u32 residue; 1073 1074 spin_lock_irqsave(&dwc->lock, flags); 1075 1076 residue = dwc->residue; 1077 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) 1078 residue -= dwc_get_sent(dwc); 1079 1080 spin_unlock_irqrestore(&dwc->lock, flags); 1081 return residue; 1082 } 1083 1084 static enum dma_status 1085 dwc_tx_status(struct dma_chan *chan, 1086 dma_cookie_t cookie, 1087 struct dma_tx_state *txstate) 1088 { 1089 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1090 enum dma_status ret; 1091 1092 ret = dma_cookie_status(chan, cookie, txstate); 1093 if (ret == DMA_COMPLETE) 1094 return ret; 1095 1096 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 1097 1098 ret = dma_cookie_status(chan, cookie, txstate); 1099 if (ret != DMA_COMPLETE) 1100 dma_set_residue(txstate, dwc_get_residue(dwc)); 1101 1102 if (dwc->paused && ret == DMA_IN_PROGRESS) 1103 return DMA_PAUSED; 1104 1105 return ret; 1106 } 1107 1108 static void dwc_issue_pending(struct dma_chan *chan) 1109 { 1110 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1111 unsigned long flags; 1112 1113 spin_lock_irqsave(&dwc->lock, flags); 1114 if (list_empty(&dwc->active_list)) 1115 dwc_dostart_first_queued(dwc); 1116 spin_unlock_irqrestore(&dwc->lock, flags); 1117 } 1118 1119 /*----------------------------------------------------------------------*/ 1120 1121 static void dw_dma_off(struct dw_dma *dw) 1122 { 1123 int i; 1124 1125 dma_writel(dw, CFG, 0); 1126 1127 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1128 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1129 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1130 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1131 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1132 1133 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) 1134 cpu_relax(); 1135 1136 for (i = 0; i < dw->dma.chancnt; i++) 1137 dw->chan[i].initialized = false; 1138 } 1139 1140 static void dw_dma_on(struct dw_dma *dw) 1141 { 1142 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1143 } 1144 1145 static int dwc_alloc_chan_resources(struct dma_chan *chan) 1146 { 1147 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1148 struct dw_dma *dw = to_dw_dma(chan->device); 1149 struct dw_desc *desc; 1150 int i; 1151 unsigned long flags; 1152 1153 dev_vdbg(chan2dev(chan), "%s\n", __func__); 1154 1155 /* ASSERT: channel is idle */ 1156 if (dma_readl(dw, CH_EN) & dwc->mask) { 1157 dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); 1158 return -EIO; 1159 } 1160 1161 dma_cookie_init(chan); 1162 1163 /* 1164 * NOTE: some controllers may have additional features that we 1165 * need to initialize here, like "scatter-gather" (which 1166 * doesn't mean what you think it means), and status writeback. 1167 */ 1168 1169 /* Enable controller here if needed */ 1170 if (!dw->in_use) 1171 dw_dma_on(dw); 1172 dw->in_use |= dwc->mask; 1173 1174 spin_lock_irqsave(&dwc->lock, flags); 1175 i = dwc->descs_allocated; 1176 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { 1177 dma_addr_t phys; 1178 1179 spin_unlock_irqrestore(&dwc->lock, flags); 1180 1181 desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); 1182 if (!desc) 1183 goto err_desc_alloc; 1184 1185 memset(desc, 0, sizeof(struct dw_desc)); 1186 1187 INIT_LIST_HEAD(&desc->tx_list); 1188 dma_async_tx_descriptor_init(&desc->txd, chan); 1189 desc->txd.tx_submit = dwc_tx_submit; 1190 desc->txd.flags = DMA_CTRL_ACK; 1191 desc->txd.phys = phys; 1192 1193 dwc_desc_put(dwc, desc); 1194 1195 spin_lock_irqsave(&dwc->lock, flags); 1196 i = ++dwc->descs_allocated; 1197 } 1198 1199 spin_unlock_irqrestore(&dwc->lock, flags); 1200 1201 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); 1202 1203 return i; 1204 1205 err_desc_alloc: 1206 dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); 1207 1208 return i; 1209 } 1210 1211 static void dwc_free_chan_resources(struct dma_chan *chan) 1212 { 1213 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1214 struct dw_dma *dw = to_dw_dma(chan->device); 1215 struct dw_desc *desc, *_desc; 1216 unsigned long flags; 1217 LIST_HEAD(list); 1218 1219 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, 1220 dwc->descs_allocated); 1221 1222 /* ASSERT: channel is idle */ 1223 BUG_ON(!list_empty(&dwc->active_list)); 1224 BUG_ON(!list_empty(&dwc->queue)); 1225 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); 1226 1227 spin_lock_irqsave(&dwc->lock, flags); 1228 list_splice_init(&dwc->free_list, &list); 1229 dwc->descs_allocated = 0; 1230 dwc->initialized = false; 1231 1232 /* Disable interrupts */ 1233 channel_clear_bit(dw, MASK.XFER, dwc->mask); 1234 channel_clear_bit(dw, MASK.BLOCK, dwc->mask); 1235 channel_clear_bit(dw, MASK.ERROR, dwc->mask); 1236 1237 spin_unlock_irqrestore(&dwc->lock, flags); 1238 1239 /* Disable controller in case it was a last user */ 1240 dw->in_use &= ~dwc->mask; 1241 if (!dw->in_use) 1242 dw_dma_off(dw); 1243 1244 list_for_each_entry_safe(desc, _desc, &list, desc_node) { 1245 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1246 dma_pool_free(dw->desc_pool, desc, desc->txd.phys); 1247 } 1248 1249 dev_vdbg(chan2dev(chan), "%s: done\n", __func__); 1250 } 1251 1252 /* --------------------- Cyclic DMA API extensions -------------------- */ 1253 1254 /** 1255 * dw_dma_cyclic_start - start the cyclic DMA transfer 1256 * @chan: the DMA channel to start 1257 * 1258 * Must be called with soft interrupts disabled. Returns zero on success or 1259 * -errno on failure. 1260 */ 1261 int dw_dma_cyclic_start(struct dma_chan *chan) 1262 { 1263 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1264 unsigned long flags; 1265 1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 1267 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); 1268 return -ENODEV; 1269 } 1270 1271 spin_lock_irqsave(&dwc->lock, flags); 1272 dwc_dostart(dwc, dwc->cdesc->desc[0]); 1273 spin_unlock_irqrestore(&dwc->lock, flags); 1274 1275 return 0; 1276 } 1277 EXPORT_SYMBOL(dw_dma_cyclic_start); 1278 1279 /** 1280 * dw_dma_cyclic_stop - stop the cyclic DMA transfer 1281 * @chan: the DMA channel to stop 1282 * 1283 * Must be called with soft interrupts disabled. 1284 */ 1285 void dw_dma_cyclic_stop(struct dma_chan *chan) 1286 { 1287 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1288 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1289 unsigned long flags; 1290 1291 spin_lock_irqsave(&dwc->lock, flags); 1292 1293 dwc_chan_disable(dw, dwc); 1294 1295 spin_unlock_irqrestore(&dwc->lock, flags); 1296 } 1297 EXPORT_SYMBOL(dw_dma_cyclic_stop); 1298 1299 /** 1300 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer 1301 * @chan: the DMA channel to prepare 1302 * @buf_addr: physical DMA address where the buffer starts 1303 * @buf_len: total number of bytes for the entire buffer 1304 * @period_len: number of bytes for each period 1305 * @direction: transfer direction, to or from device 1306 * 1307 * Must be called before trying to start the transfer. Returns a valid struct 1308 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. 1309 */ 1310 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, 1311 dma_addr_t buf_addr, size_t buf_len, size_t period_len, 1312 enum dma_transfer_direction direction) 1313 { 1314 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1315 struct dma_slave_config *sconfig = &dwc->dma_sconfig; 1316 struct dw_cyclic_desc *cdesc; 1317 struct dw_cyclic_desc *retval = NULL; 1318 struct dw_desc *desc; 1319 struct dw_desc *last = NULL; 1320 unsigned long was_cyclic; 1321 unsigned int reg_width; 1322 unsigned int periods; 1323 unsigned int i; 1324 unsigned long flags; 1325 1326 spin_lock_irqsave(&dwc->lock, flags); 1327 if (dwc->nollp) { 1328 spin_unlock_irqrestore(&dwc->lock, flags); 1329 dev_dbg(chan2dev(&dwc->chan), 1330 "channel doesn't support LLP transfers\n"); 1331 return ERR_PTR(-EINVAL); 1332 } 1333 1334 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { 1335 spin_unlock_irqrestore(&dwc->lock, flags); 1336 dev_dbg(chan2dev(&dwc->chan), 1337 "queue and/or active list are not empty\n"); 1338 return ERR_PTR(-EBUSY); 1339 } 1340 1341 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1342 spin_unlock_irqrestore(&dwc->lock, flags); 1343 if (was_cyclic) { 1344 dev_dbg(chan2dev(&dwc->chan), 1345 "channel already prepared for cyclic DMA\n"); 1346 return ERR_PTR(-EBUSY); 1347 } 1348 1349 retval = ERR_PTR(-EINVAL); 1350 1351 if (unlikely(!is_slave_direction(direction))) 1352 goto out_err; 1353 1354 dwc->direction = direction; 1355 1356 if (direction == DMA_MEM_TO_DEV) 1357 reg_width = __ffs(sconfig->dst_addr_width); 1358 else 1359 reg_width = __ffs(sconfig->src_addr_width); 1360 1361 periods = buf_len / period_len; 1362 1363 /* Check for too big/unaligned periods and unaligned DMA buffer. */ 1364 if (period_len > (dwc->block_size << reg_width)) 1365 goto out_err; 1366 if (unlikely(period_len & ((1 << reg_width) - 1))) 1367 goto out_err; 1368 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 1369 goto out_err; 1370 1371 retval = ERR_PTR(-ENOMEM); 1372 1373 if (periods > NR_DESCS_PER_CHANNEL) 1374 goto out_err; 1375 1376 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); 1377 if (!cdesc) 1378 goto out_err; 1379 1380 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); 1381 if (!cdesc->desc) 1382 goto out_err_alloc; 1383 1384 for (i = 0; i < periods; i++) { 1385 desc = dwc_desc_get(dwc); 1386 if (!desc) 1387 goto out_err_desc_get; 1388 1389 switch (direction) { 1390 case DMA_MEM_TO_DEV: 1391 desc->lli.dar = sconfig->dst_addr; 1392 desc->lli.sar = buf_addr + (period_len * i); 1393 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) 1394 | DWC_CTLL_DST_WIDTH(reg_width) 1395 | DWC_CTLL_SRC_WIDTH(reg_width) 1396 | DWC_CTLL_DST_FIX 1397 | DWC_CTLL_SRC_INC 1398 | DWC_CTLL_INT_EN); 1399 1400 desc->lli.ctllo |= sconfig->device_fc ? 1401 DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 1402 DWC_CTLL_FC(DW_DMA_FC_D_M2P); 1403 1404 break; 1405 case DMA_DEV_TO_MEM: 1406 desc->lli.dar = buf_addr + (period_len * i); 1407 desc->lli.sar = sconfig->src_addr; 1408 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) 1409 | DWC_CTLL_SRC_WIDTH(reg_width) 1410 | DWC_CTLL_DST_WIDTH(reg_width) 1411 | DWC_CTLL_DST_INC 1412 | DWC_CTLL_SRC_FIX 1413 | DWC_CTLL_INT_EN); 1414 1415 desc->lli.ctllo |= sconfig->device_fc ? 1416 DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 1417 DWC_CTLL_FC(DW_DMA_FC_D_P2M); 1418 1419 break; 1420 default: 1421 break; 1422 } 1423 1424 desc->lli.ctlhi = (period_len >> reg_width); 1425 cdesc->desc[i] = desc; 1426 1427 if (last) 1428 last->lli.llp = desc->txd.phys; 1429 1430 last = desc; 1431 } 1432 1433 /* Let's make a cyclic list */ 1434 last->lli.llp = cdesc->desc[0]->txd.phys; 1435 1436 dev_dbg(chan2dev(&dwc->chan), 1437 "cyclic prepared buf %pad len %zu period %zu periods %d\n", 1438 &buf_addr, buf_len, period_len, periods); 1439 1440 cdesc->periods = periods; 1441 dwc->cdesc = cdesc; 1442 1443 return cdesc; 1444 1445 out_err_desc_get: 1446 while (i--) 1447 dwc_desc_put(dwc, cdesc->desc[i]); 1448 out_err_alloc: 1449 kfree(cdesc); 1450 out_err: 1451 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1452 return (struct dw_cyclic_desc *)retval; 1453 } 1454 EXPORT_SYMBOL(dw_dma_cyclic_prep); 1455 1456 /** 1457 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer 1458 * @chan: the DMA channel to free 1459 */ 1460 void dw_dma_cyclic_free(struct dma_chan *chan) 1461 { 1462 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1463 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1464 struct dw_cyclic_desc *cdesc = dwc->cdesc; 1465 int i; 1466 unsigned long flags; 1467 1468 dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); 1469 1470 if (!cdesc) 1471 return; 1472 1473 spin_lock_irqsave(&dwc->lock, flags); 1474 1475 dwc_chan_disable(dw, dwc); 1476 1477 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 1478 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1479 dma_writel(dw, CLEAR.XFER, dwc->mask); 1480 1481 spin_unlock_irqrestore(&dwc->lock, flags); 1482 1483 for (i = 0; i < cdesc->periods; i++) 1484 dwc_desc_put(dwc, cdesc->desc[i]); 1485 1486 kfree(cdesc->desc); 1487 kfree(cdesc); 1488 1489 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1490 } 1491 EXPORT_SYMBOL(dw_dma_cyclic_free); 1492 1493 /*----------------------------------------------------------------------*/ 1494 1495 int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) 1496 { 1497 struct dw_dma *dw; 1498 bool autocfg = false; 1499 unsigned int dw_params; 1500 unsigned int max_blk_size = 0; 1501 int err; 1502 int i; 1503 1504 dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); 1505 if (!dw) 1506 return -ENOMEM; 1507 1508 dw->regs = chip->regs; 1509 chip->dw = dw; 1510 1511 pm_runtime_get_sync(chip->dev); 1512 1513 if (!pdata) { 1514 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); 1515 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); 1516 1517 autocfg = dw_params >> DW_PARAMS_EN & 1; 1518 if (!autocfg) { 1519 err = -EINVAL; 1520 goto err_pdata; 1521 } 1522 1523 pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); 1524 if (!pdata) { 1525 err = -ENOMEM; 1526 goto err_pdata; 1527 } 1528 1529 /* Get hardware configuration parameters */ 1530 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; 1531 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; 1532 for (i = 0; i < pdata->nr_masters; i++) { 1533 pdata->data_width[i] = 1534 (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; 1535 } 1536 max_blk_size = dma_readl(dw, MAX_BLK_SIZE); 1537 1538 /* Fill platform data with the default values */ 1539 pdata->is_private = true; 1540 pdata->is_memcpy = true; 1541 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; 1542 pdata->chan_priority = CHAN_PRIORITY_ASCENDING; 1543 } else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { 1544 err = -EINVAL; 1545 goto err_pdata; 1546 } 1547 1548 dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), 1549 GFP_KERNEL); 1550 if (!dw->chan) { 1551 err = -ENOMEM; 1552 goto err_pdata; 1553 } 1554 1555 /* Get hardware configuration parameters */ 1556 dw->nr_masters = pdata->nr_masters; 1557 for (i = 0; i < dw->nr_masters; i++) 1558 dw->data_width[i] = pdata->data_width[i]; 1559 1560 /* Calculate all channel mask before DMA setup */ 1561 dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1562 1563 /* Force dma off, just in case */ 1564 dw_dma_off(dw); 1565 1566 /* Create a pool of consistent memory blocks for hardware descriptors */ 1567 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, 1568 sizeof(struct dw_desc), 4, 0); 1569 if (!dw->desc_pool) { 1570 dev_err(chip->dev, "No memory for descriptors dma pool\n"); 1571 err = -ENOMEM; 1572 goto err_pdata; 1573 } 1574 1575 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1576 1577 err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, 1578 "dw_dmac", dw); 1579 if (err) 1580 goto err_pdata; 1581 1582 INIT_LIST_HEAD(&dw->dma.channels); 1583 for (i = 0; i < pdata->nr_channels; i++) { 1584 struct dw_dma_chan *dwc = &dw->chan[i]; 1585 1586 dwc->chan.device = &dw->dma; 1587 dma_cookie_init(&dwc->chan); 1588 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1589 list_add_tail(&dwc->chan.device_node, 1590 &dw->dma.channels); 1591 else 1592 list_add(&dwc->chan.device_node, &dw->dma.channels); 1593 1594 /* 7 is highest priority & 0 is lowest. */ 1595 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1596 dwc->priority = pdata->nr_channels - i - 1; 1597 else 1598 dwc->priority = i; 1599 1600 dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; 1601 spin_lock_init(&dwc->lock); 1602 dwc->mask = 1 << i; 1603 1604 INIT_LIST_HEAD(&dwc->active_list); 1605 INIT_LIST_HEAD(&dwc->queue); 1606 INIT_LIST_HEAD(&dwc->free_list); 1607 1608 channel_clear_bit(dw, CH_EN, dwc->mask); 1609 1610 dwc->direction = DMA_TRANS_NONE; 1611 1612 /* Hardware configuration */ 1613 if (autocfg) { 1614 unsigned int dwc_params; 1615 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; 1616 void __iomem *addr = chip->regs + r * sizeof(u32); 1617 1618 dwc_params = dma_read_byaddr(addr, DWC_PARAMS); 1619 1620 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, 1621 dwc_params); 1622 1623 /* 1624 * Decode maximum block size for given channel. The 1625 * stored 4 bit value represents blocks from 0x00 for 3 1626 * up to 0x0a for 4095. 1627 */ 1628 dwc->block_size = 1629 (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; 1630 dwc->nollp = 1631 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; 1632 } else { 1633 dwc->block_size = pdata->block_size; 1634 1635 /* Check if channel supports multi block transfer */ 1636 channel_writel(dwc, LLP, 0xfffffffc); 1637 dwc->nollp = 1638 (channel_readl(dwc, LLP) & 0xfffffffc) == 0; 1639 channel_writel(dwc, LLP, 0); 1640 } 1641 } 1642 1643 /* Clear all interrupts on all channels. */ 1644 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); 1645 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); 1646 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); 1647 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); 1648 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); 1649 1650 /* Set capabilities */ 1651 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1652 if (pdata->is_private) 1653 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); 1654 if (pdata->is_memcpy) 1655 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1656 1657 dw->dma.dev = chip->dev; 1658 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; 1659 dw->dma.device_free_chan_resources = dwc_free_chan_resources; 1660 1661 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1662 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1663 1664 dw->dma.device_config = dwc_config; 1665 dw->dma.device_pause = dwc_pause; 1666 dw->dma.device_resume = dwc_resume; 1667 dw->dma.device_terminate_all = dwc_terminate_all; 1668 1669 dw->dma.device_tx_status = dwc_tx_status; 1670 dw->dma.device_issue_pending = dwc_issue_pending; 1671 1672 /* DMA capabilities */ 1673 dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; 1674 dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; 1675 dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | 1676 BIT(DMA_MEM_TO_MEM); 1677 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1678 1679 err = dma_async_device_register(&dw->dma); 1680 if (err) 1681 goto err_dma_register; 1682 1683 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", 1684 pdata->nr_channels); 1685 1686 pm_runtime_put_sync_suspend(chip->dev); 1687 1688 return 0; 1689 1690 err_dma_register: 1691 free_irq(chip->irq, dw); 1692 err_pdata: 1693 pm_runtime_put_sync_suspend(chip->dev); 1694 return err; 1695 } 1696 EXPORT_SYMBOL_GPL(dw_dma_probe); 1697 1698 int dw_dma_remove(struct dw_dma_chip *chip) 1699 { 1700 struct dw_dma *dw = chip->dw; 1701 struct dw_dma_chan *dwc, *_dwc; 1702 1703 pm_runtime_get_sync(chip->dev); 1704 1705 dw_dma_off(dw); 1706 dma_async_device_unregister(&dw->dma); 1707 1708 free_irq(chip->irq, dw); 1709 tasklet_kill(&dw->tasklet); 1710 1711 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, 1712 chan.device_node) { 1713 list_del(&dwc->chan.device_node); 1714 channel_clear_bit(dw, CH_EN, dwc->mask); 1715 } 1716 1717 pm_runtime_put_sync_suspend(chip->dev); 1718 return 0; 1719 } 1720 EXPORT_SYMBOL_GPL(dw_dma_remove); 1721 1722 int dw_dma_disable(struct dw_dma_chip *chip) 1723 { 1724 struct dw_dma *dw = chip->dw; 1725 1726 dw_dma_off(dw); 1727 return 0; 1728 } 1729 EXPORT_SYMBOL_GPL(dw_dma_disable); 1730 1731 int dw_dma_enable(struct dw_dma_chip *chip) 1732 { 1733 struct dw_dma *dw = chip->dw; 1734 1735 dw_dma_on(dw); 1736 return 0; 1737 } 1738 EXPORT_SYMBOL_GPL(dw_dma_enable); 1739 1740 MODULE_LICENSE("GPL v2"); 1741 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); 1742 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1743 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); 1744