1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Core driver for the Synopsys DesignWare DMA Controller 4 * 5 * Copyright (C) 2007-2008 Atmel Corporation 6 * Copyright (C) 2010-2011 ST Microelectronics 7 * Copyright (C) 2013 Intel Corporation 8 */ 9 10 #include <linux/bitops.h> 11 #include <linux/delay.h> 12 #include <linux/dmaengine.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/dmapool.h> 15 #include <linux/err.h> 16 #include <linux/init.h> 17 #include <linux/interrupt.h> 18 #include <linux/io.h> 19 #include <linux/log2.h> 20 #include <linux/mm.h> 21 #include <linux/module.h> 22 #include <linux/slab.h> 23 #include <linux/pm_runtime.h> 24 25 #include "../dmaengine.h" 26 #include "internal.h" 27 28 /* 29 * This supports the Synopsys "DesignWare AHB Central DMA Controller", 30 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all 31 * of which use ARM any more). See the "Databook" from Synopsys for 32 * information beyond what licensees probably provide. 33 */ 34 35 /* The set of bus widths supported by the DMA controller */ 36 #define DW_DMA_BUSWIDTHS \ 37 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 38 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 39 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 40 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) 41 42 /*----------------------------------------------------------------------*/ 43 44 static struct device *chan2dev(struct dma_chan *chan) 45 { 46 return &chan->dev->device; 47 } 48 49 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 50 { 51 return to_dw_desc(dwc->active_list.next); 52 } 53 54 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) 55 { 56 struct dw_desc *desc = txd_to_dw_desc(tx); 57 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); 58 dma_cookie_t cookie; 59 unsigned long flags; 60 61 spin_lock_irqsave(&dwc->lock, flags); 62 cookie = dma_cookie_assign(tx); 63 64 /* 65 * REVISIT: We should attempt to chain as many descriptors as 66 * possible, perhaps even appending to those already submitted 67 * for DMA. But this is hard to do in a race-free manner. 68 */ 69 70 list_add_tail(&desc->desc_node, &dwc->queue); 71 spin_unlock_irqrestore(&dwc->lock, flags); 72 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", 73 __func__, desc->txd.cookie); 74 75 return cookie; 76 } 77 78 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) 79 { 80 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 81 struct dw_desc *desc; 82 dma_addr_t phys; 83 84 desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys); 85 if (!desc) 86 return NULL; 87 88 dwc->descs_allocated++; 89 INIT_LIST_HEAD(&desc->tx_list); 90 dma_async_tx_descriptor_init(&desc->txd, &dwc->chan); 91 desc->txd.tx_submit = dwc_tx_submit; 92 desc->txd.flags = DMA_CTRL_ACK; 93 desc->txd.phys = phys; 94 return desc; 95 } 96 97 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) 98 { 99 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 100 struct dw_desc *child, *_next; 101 102 if (unlikely(!desc)) 103 return; 104 105 list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) { 106 list_del(&child->desc_node); 107 dma_pool_free(dw->desc_pool, child, child->txd.phys); 108 dwc->descs_allocated--; 109 } 110 111 dma_pool_free(dw->desc_pool, desc, desc->txd.phys); 112 dwc->descs_allocated--; 113 } 114 115 static void dwc_initialize(struct dw_dma_chan *dwc) 116 { 117 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 118 119 dw->initialize_chan(dwc); 120 121 /* Enable interrupts */ 122 channel_set_bit(dw, MASK.XFER, dwc->mask); 123 channel_set_bit(dw, MASK.ERROR, dwc->mask); 124 } 125 126 /*----------------------------------------------------------------------*/ 127 128 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) 129 { 130 dev_err(chan2dev(&dwc->chan), 131 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 132 channel_readl(dwc, SAR), 133 channel_readl(dwc, DAR), 134 channel_readl(dwc, LLP), 135 channel_readl(dwc, CTL_HI), 136 channel_readl(dwc, CTL_LO)); 137 } 138 139 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) 140 { 141 channel_clear_bit(dw, CH_EN, dwc->mask); 142 while (dma_readl(dw, CH_EN) & dwc->mask) 143 cpu_relax(); 144 } 145 146 /*----------------------------------------------------------------------*/ 147 148 /* Perform single block transfer */ 149 static inline void dwc_do_single_block(struct dw_dma_chan *dwc, 150 struct dw_desc *desc) 151 { 152 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 153 u32 ctllo; 154 155 /* 156 * Software emulation of LLP mode relies on interrupts to continue 157 * multi block transfer. 158 */ 159 ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN; 160 161 channel_writel(dwc, SAR, lli_read(desc, sar)); 162 channel_writel(dwc, DAR, lli_read(desc, dar)); 163 channel_writel(dwc, CTL_LO, ctllo); 164 channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi)); 165 channel_set_bit(dw, CH_EN, dwc->mask); 166 167 /* Move pointer to next descriptor */ 168 dwc->tx_node_active = dwc->tx_node_active->next; 169 } 170 171 /* Called with dwc->lock held and bh disabled */ 172 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) 173 { 174 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 175 u8 lms = DWC_LLP_LMS(dwc->dws.m_master); 176 unsigned long was_soft_llp; 177 178 /* ASSERT: channel is idle */ 179 if (dma_readl(dw, CH_EN) & dwc->mask) { 180 dev_err(chan2dev(&dwc->chan), 181 "%s: BUG: Attempted to start non-idle channel\n", 182 __func__); 183 dwc_dump_chan_regs(dwc); 184 185 /* The tasklet will hopefully advance the queue... */ 186 return; 187 } 188 189 if (dwc->nollp) { 190 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, 191 &dwc->flags); 192 if (was_soft_llp) { 193 dev_err(chan2dev(&dwc->chan), 194 "BUG: Attempted to start new LLP transfer inside ongoing one\n"); 195 return; 196 } 197 198 dwc_initialize(dwc); 199 200 first->residue = first->total_len; 201 dwc->tx_node_active = &first->tx_list; 202 203 /* Submit first block */ 204 dwc_do_single_block(dwc, first); 205 206 return; 207 } 208 209 dwc_initialize(dwc); 210 211 channel_writel(dwc, LLP, first->txd.phys | lms); 212 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 213 channel_writel(dwc, CTL_HI, 0); 214 channel_set_bit(dw, CH_EN, dwc->mask); 215 } 216 217 static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) 218 { 219 struct dw_desc *desc; 220 221 if (list_empty(&dwc->queue)) 222 return; 223 224 list_move(dwc->queue.next, &dwc->active_list); 225 desc = dwc_first_active(dwc); 226 dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); 227 dwc_dostart(dwc, desc); 228 } 229 230 /*----------------------------------------------------------------------*/ 231 232 static void 233 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, 234 bool callback_required) 235 { 236 struct dma_async_tx_descriptor *txd = &desc->txd; 237 struct dw_desc *child; 238 unsigned long flags; 239 struct dmaengine_desc_callback cb; 240 241 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 242 243 spin_lock_irqsave(&dwc->lock, flags); 244 dma_cookie_complete(txd); 245 if (callback_required) 246 dmaengine_desc_get_callback(txd, &cb); 247 else 248 memset(&cb, 0, sizeof(cb)); 249 250 /* async_tx_ack */ 251 list_for_each_entry(child, &desc->tx_list, desc_node) 252 async_tx_ack(&child->txd); 253 async_tx_ack(&desc->txd); 254 dwc_desc_put(dwc, desc); 255 spin_unlock_irqrestore(&dwc->lock, flags); 256 257 dmaengine_desc_callback_invoke(&cb, NULL); 258 } 259 260 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) 261 { 262 struct dw_desc *desc, *_desc; 263 LIST_HEAD(list); 264 unsigned long flags; 265 266 spin_lock_irqsave(&dwc->lock, flags); 267 if (dma_readl(dw, CH_EN) & dwc->mask) { 268 dev_err(chan2dev(&dwc->chan), 269 "BUG: XFER bit set, but channel not idle!\n"); 270 271 /* Try to continue after resetting the channel... */ 272 dwc_chan_disable(dw, dwc); 273 } 274 275 /* 276 * Submit queued descriptors ASAP, i.e. before we go through 277 * the completed ones. 278 */ 279 list_splice_init(&dwc->active_list, &list); 280 dwc_dostart_first_queued(dwc); 281 282 spin_unlock_irqrestore(&dwc->lock, flags); 283 284 list_for_each_entry_safe(desc, _desc, &list, desc_node) 285 dwc_descriptor_complete(dwc, desc, true); 286 } 287 288 /* Returns how many bytes were already received from source */ 289 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) 290 { 291 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 292 u32 ctlhi = channel_readl(dwc, CTL_HI); 293 u32 ctllo = channel_readl(dwc, CTL_LO); 294 295 return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7); 296 } 297 298 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) 299 { 300 dma_addr_t llp; 301 struct dw_desc *desc, *_desc; 302 struct dw_desc *child; 303 u32 status_xfer; 304 unsigned long flags; 305 306 spin_lock_irqsave(&dwc->lock, flags); 307 llp = channel_readl(dwc, LLP); 308 status_xfer = dma_readl(dw, RAW.XFER); 309 310 if (status_xfer & dwc->mask) { 311 /* Everything we've submitted is done */ 312 dma_writel(dw, CLEAR.XFER, dwc->mask); 313 314 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { 315 struct list_head *head, *active = dwc->tx_node_active; 316 317 /* 318 * We are inside first active descriptor. 319 * Otherwise something is really wrong. 320 */ 321 desc = dwc_first_active(dwc); 322 323 head = &desc->tx_list; 324 if (active != head) { 325 /* Update residue to reflect last sent descriptor */ 326 if (active == head->next) 327 desc->residue -= desc->len; 328 else 329 desc->residue -= to_dw_desc(active->prev)->len; 330 331 child = to_dw_desc(active); 332 333 /* Submit next block */ 334 dwc_do_single_block(dwc, child); 335 336 spin_unlock_irqrestore(&dwc->lock, flags); 337 return; 338 } 339 340 /* We are done here */ 341 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 342 } 343 344 spin_unlock_irqrestore(&dwc->lock, flags); 345 346 dwc_complete_all(dw, dwc); 347 return; 348 } 349 350 if (list_empty(&dwc->active_list)) { 351 spin_unlock_irqrestore(&dwc->lock, flags); 352 return; 353 } 354 355 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { 356 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); 357 spin_unlock_irqrestore(&dwc->lock, flags); 358 return; 359 } 360 361 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp); 362 363 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 364 /* Initial residue value */ 365 desc->residue = desc->total_len; 366 367 /* Check first descriptors addr */ 368 if (desc->txd.phys == DWC_LLP_LOC(llp)) { 369 spin_unlock_irqrestore(&dwc->lock, flags); 370 return; 371 } 372 373 /* Check first descriptors llp */ 374 if (lli_read(desc, llp) == llp) { 375 /* This one is currently in progress */ 376 desc->residue -= dwc_get_sent(dwc); 377 spin_unlock_irqrestore(&dwc->lock, flags); 378 return; 379 } 380 381 desc->residue -= desc->len; 382 list_for_each_entry(child, &desc->tx_list, desc_node) { 383 if (lli_read(child, llp) == llp) { 384 /* Currently in progress */ 385 desc->residue -= dwc_get_sent(dwc); 386 spin_unlock_irqrestore(&dwc->lock, flags); 387 return; 388 } 389 desc->residue -= child->len; 390 } 391 392 /* 393 * No descriptors so far seem to be in progress, i.e. 394 * this one must be done. 395 */ 396 spin_unlock_irqrestore(&dwc->lock, flags); 397 dwc_descriptor_complete(dwc, desc, true); 398 spin_lock_irqsave(&dwc->lock, flags); 399 } 400 401 dev_err(chan2dev(&dwc->chan), 402 "BUG: All descriptors done, but channel not idle!\n"); 403 404 /* Try to continue after resetting the channel... */ 405 dwc_chan_disable(dw, dwc); 406 407 dwc_dostart_first_queued(dwc); 408 spin_unlock_irqrestore(&dwc->lock, flags); 409 } 410 411 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc) 412 { 413 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", 414 lli_read(desc, sar), 415 lli_read(desc, dar), 416 lli_read(desc, llp), 417 lli_read(desc, ctlhi), 418 lli_read(desc, ctllo)); 419 } 420 421 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) 422 { 423 struct dw_desc *bad_desc; 424 struct dw_desc *child; 425 unsigned long flags; 426 427 dwc_scan_descriptors(dw, dwc); 428 429 spin_lock_irqsave(&dwc->lock, flags); 430 431 /* 432 * The descriptor currently at the head of the active list is 433 * borked. Since we don't have any way to report errors, we'll 434 * just have to scream loudly and try to carry on. 435 */ 436 bad_desc = dwc_first_active(dwc); 437 list_del_init(&bad_desc->desc_node); 438 list_move(dwc->queue.next, dwc->active_list.prev); 439 440 /* Clear the error flag and try to restart the controller */ 441 dma_writel(dw, CLEAR.ERROR, dwc->mask); 442 if (!list_empty(&dwc->active_list)) 443 dwc_dostart(dwc, dwc_first_active(dwc)); 444 445 /* 446 * WARN may seem harsh, but since this only happens 447 * when someone submits a bad physical address in a 448 * descriptor, we should consider ourselves lucky that the 449 * controller flagged an error instead of scribbling over 450 * random memory locations. 451 */ 452 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" 453 " cookie: %d\n", bad_desc->txd.cookie); 454 dwc_dump_lli(dwc, bad_desc); 455 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 456 dwc_dump_lli(dwc, child); 457 458 spin_unlock_irqrestore(&dwc->lock, flags); 459 460 /* Pretend the descriptor completed successfully */ 461 dwc_descriptor_complete(dwc, bad_desc, true); 462 } 463 464 static void dw_dma_tasklet(struct tasklet_struct *t) 465 { 466 struct dw_dma *dw = from_tasklet(dw, t, tasklet); 467 struct dw_dma_chan *dwc; 468 u32 status_xfer; 469 u32 status_err; 470 unsigned int i; 471 472 status_xfer = dma_readl(dw, RAW.XFER); 473 status_err = dma_readl(dw, RAW.ERROR); 474 475 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); 476 477 for (i = 0; i < dw->dma.chancnt; i++) { 478 dwc = &dw->chan[i]; 479 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) 480 dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n"); 481 else if (status_err & (1 << i)) 482 dwc_handle_error(dw, dwc); 483 else if (status_xfer & (1 << i)) 484 dwc_scan_descriptors(dw, dwc); 485 } 486 487 /* Re-enable interrupts */ 488 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 489 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 490 } 491 492 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) 493 { 494 struct dw_dma *dw = dev_id; 495 u32 status; 496 497 /* Check if we have any interrupt from the DMAC which is not in use */ 498 if (!dw->in_use) 499 return IRQ_NONE; 500 501 status = dma_readl(dw, STATUS_INT); 502 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); 503 504 /* Check if we have any interrupt from the DMAC */ 505 if (!status) 506 return IRQ_NONE; 507 508 /* 509 * Just disable the interrupts. We'll turn them back on in the 510 * softirq handler. 511 */ 512 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 513 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 514 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 515 516 status = dma_readl(dw, STATUS_INT); 517 if (status) { 518 dev_err(dw->dma.dev, 519 "BUG: Unexpected interrupts pending: 0x%x\n", 520 status); 521 522 /* Try to recover */ 523 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); 524 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); 525 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); 526 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); 527 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); 528 } 529 530 tasklet_schedule(&dw->tasklet); 531 532 return IRQ_HANDLED; 533 } 534 535 /*----------------------------------------------------------------------*/ 536 537 static struct dma_async_tx_descriptor * 538 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 539 size_t len, unsigned long flags) 540 { 541 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 542 struct dw_dma *dw = to_dw_dma(chan->device); 543 struct dw_desc *desc; 544 struct dw_desc *first; 545 struct dw_desc *prev; 546 size_t xfer_count; 547 size_t offset; 548 u8 m_master = dwc->dws.m_master; 549 unsigned int src_width; 550 unsigned int dst_width; 551 unsigned int data_width = dw->pdata->data_width[m_master]; 552 u32 ctllo, ctlhi; 553 u8 lms = DWC_LLP_LMS(m_master); 554 555 dev_vdbg(chan2dev(chan), 556 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, 557 &dest, &src, len, flags); 558 559 if (unlikely(!len)) { 560 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); 561 return NULL; 562 } 563 564 dwc->direction = DMA_MEM_TO_MEM; 565 566 src_width = dst_width = __ffs(data_width | src | dest | len); 567 568 ctllo = dw->prepare_ctllo(dwc) 569 | DWC_CTLL_DST_WIDTH(dst_width) 570 | DWC_CTLL_SRC_WIDTH(src_width) 571 | DWC_CTLL_DST_INC 572 | DWC_CTLL_SRC_INC 573 | DWC_CTLL_FC_M2M; 574 prev = first = NULL; 575 576 for (offset = 0; offset < len; offset += xfer_count) { 577 desc = dwc_desc_get(dwc); 578 if (!desc) 579 goto err_desc_get; 580 581 ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count); 582 583 lli_write(desc, sar, src + offset); 584 lli_write(desc, dar, dest + offset); 585 lli_write(desc, ctllo, ctllo); 586 lli_write(desc, ctlhi, ctlhi); 587 desc->len = xfer_count; 588 589 if (!first) { 590 first = desc; 591 } else { 592 lli_write(prev, llp, desc->txd.phys | lms); 593 list_add_tail(&desc->desc_node, &first->tx_list); 594 } 595 prev = desc; 596 } 597 598 if (flags & DMA_PREP_INTERRUPT) 599 /* Trigger interrupt after last block */ 600 lli_set(prev, ctllo, DWC_CTLL_INT_EN); 601 602 prev->lli.llp = 0; 603 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 604 first->txd.flags = flags; 605 first->total_len = len; 606 607 return &first->txd; 608 609 err_desc_get: 610 dwc_desc_put(dwc, first); 611 return NULL; 612 } 613 614 static struct dma_async_tx_descriptor * 615 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 616 unsigned int sg_len, enum dma_transfer_direction direction, 617 unsigned long flags, void *context) 618 { 619 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 620 struct dw_dma *dw = to_dw_dma(chan->device); 621 struct dma_slave_config *sconfig = &dwc->dma_sconfig; 622 struct dw_desc *prev; 623 struct dw_desc *first; 624 u32 ctllo, ctlhi; 625 u8 lms = DWC_LLP_LMS(dwc->dws.m_master); 626 dma_addr_t reg; 627 unsigned int reg_width; 628 unsigned int mem_width; 629 unsigned int i; 630 struct scatterlist *sg; 631 size_t total_len = 0; 632 633 dev_vdbg(chan2dev(chan), "%s\n", __func__); 634 635 if (unlikely(!is_slave_direction(direction) || !sg_len)) 636 return NULL; 637 638 dwc->direction = direction; 639 640 prev = first = NULL; 641 642 switch (direction) { 643 case DMA_MEM_TO_DEV: 644 reg_width = __ffs(sconfig->dst_addr_width); 645 reg = sconfig->dst_addr; 646 ctllo = dw->prepare_ctllo(dwc) 647 | DWC_CTLL_DST_WIDTH(reg_width) 648 | DWC_CTLL_DST_FIX 649 | DWC_CTLL_SRC_INC; 650 651 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 652 DWC_CTLL_FC(DW_DMA_FC_D_M2P); 653 654 for_each_sg(sgl, sg, sg_len, i) { 655 struct dw_desc *desc; 656 u32 len, mem; 657 size_t dlen; 658 659 mem = sg_dma_address(sg); 660 len = sg_dma_len(sg); 661 662 mem_width = __ffs(sconfig->src_addr_width | mem | len); 663 664 slave_sg_todev_fill_desc: 665 desc = dwc_desc_get(dwc); 666 if (!desc) 667 goto err_desc_get; 668 669 ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen); 670 671 lli_write(desc, sar, mem); 672 lli_write(desc, dar, reg); 673 lli_write(desc, ctlhi, ctlhi); 674 lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); 675 desc->len = dlen; 676 677 if (!first) { 678 first = desc; 679 } else { 680 lli_write(prev, llp, desc->txd.phys | lms); 681 list_add_tail(&desc->desc_node, &first->tx_list); 682 } 683 prev = desc; 684 685 mem += dlen; 686 len -= dlen; 687 total_len += dlen; 688 689 if (len) 690 goto slave_sg_todev_fill_desc; 691 } 692 break; 693 case DMA_DEV_TO_MEM: 694 reg_width = __ffs(sconfig->src_addr_width); 695 reg = sconfig->src_addr; 696 ctllo = dw->prepare_ctllo(dwc) 697 | DWC_CTLL_SRC_WIDTH(reg_width) 698 | DWC_CTLL_DST_INC 699 | DWC_CTLL_SRC_FIX; 700 701 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 702 DWC_CTLL_FC(DW_DMA_FC_D_P2M); 703 704 for_each_sg(sgl, sg, sg_len, i) { 705 struct dw_desc *desc; 706 u32 len, mem; 707 size_t dlen; 708 709 mem = sg_dma_address(sg); 710 len = sg_dma_len(sg); 711 712 slave_sg_fromdev_fill_desc: 713 desc = dwc_desc_get(dwc); 714 if (!desc) 715 goto err_desc_get; 716 717 ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen); 718 719 lli_write(desc, sar, reg); 720 lli_write(desc, dar, mem); 721 lli_write(desc, ctlhi, ctlhi); 722 mem_width = __ffs(sconfig->dst_addr_width | mem); 723 lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); 724 desc->len = dlen; 725 726 if (!first) { 727 first = desc; 728 } else { 729 lli_write(prev, llp, desc->txd.phys | lms); 730 list_add_tail(&desc->desc_node, &first->tx_list); 731 } 732 prev = desc; 733 734 mem += dlen; 735 len -= dlen; 736 total_len += dlen; 737 738 if (len) 739 goto slave_sg_fromdev_fill_desc; 740 } 741 break; 742 default: 743 return NULL; 744 } 745 746 if (flags & DMA_PREP_INTERRUPT) 747 /* Trigger interrupt after last block */ 748 lli_set(prev, ctllo, DWC_CTLL_INT_EN); 749 750 prev->lli.llp = 0; 751 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 752 first->total_len = total_len; 753 754 return &first->txd; 755 756 err_desc_get: 757 dev_err(chan2dev(chan), 758 "not enough descriptors available. Direction %d\n", direction); 759 dwc_desc_put(dwc, first); 760 return NULL; 761 } 762 763 bool dw_dma_filter(struct dma_chan *chan, void *param) 764 { 765 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 766 struct dw_dma_slave *dws = param; 767 768 if (dws->dma_dev != chan->device->dev) 769 return false; 770 771 /* permit channels in accordance with the channels mask */ 772 if (dws->channels && !(dws->channels & dwc->mask)) 773 return false; 774 775 /* We have to copy data since dws can be temporary storage */ 776 memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave)); 777 778 return true; 779 } 780 EXPORT_SYMBOL_GPL(dw_dma_filter); 781 782 static int dwc_verify_p_buswidth(struct dma_chan *chan) 783 { 784 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 785 struct dw_dma *dw = to_dw_dma(chan->device); 786 u32 reg_width, max_width; 787 788 if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) 789 reg_width = dwc->dma_sconfig.dst_addr_width; 790 else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) 791 reg_width = dwc->dma_sconfig.src_addr_width; 792 else /* DMA_MEM_TO_MEM */ 793 return 0; 794 795 max_width = dw->pdata->data_width[dwc->dws.p_master]; 796 797 /* Fall-back to 1-byte transfer width if undefined */ 798 if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 799 reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 800 else if (!is_power_of_2(reg_width) || reg_width > max_width) 801 return -EINVAL; 802 else /* bus width is valid */ 803 return 0; 804 805 /* Update undefined addr width value */ 806 if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) 807 dwc->dma_sconfig.dst_addr_width = reg_width; 808 else /* DMA_DEV_TO_MEM */ 809 dwc->dma_sconfig.src_addr_width = reg_width; 810 811 return 0; 812 } 813 814 static int dwc_verify_m_buswidth(struct dma_chan *chan) 815 { 816 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 817 struct dw_dma *dw = to_dw_dma(chan->device); 818 u32 reg_width, reg_burst, mem_width; 819 820 mem_width = dw->pdata->data_width[dwc->dws.m_master]; 821 822 /* 823 * It's possible to have a data portion locked in the DMA FIFO in case 824 * of the channel suspension. Subsequent channel disabling will cause 825 * that data silent loss. In order to prevent that maintain the src and 826 * dst transfer widths coherency by means of the relation: 827 * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH) 828 * Look for the details in the commit message that brings this change. 829 * 830 * Note the DMA configs utilized in the calculations below must have 831 * been verified to have correct values by this method call. 832 */ 833 if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) { 834 reg_width = dwc->dma_sconfig.dst_addr_width; 835 if (mem_width < reg_width) 836 return -EINVAL; 837 838 dwc->dma_sconfig.src_addr_width = mem_width; 839 } else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) { 840 reg_width = dwc->dma_sconfig.src_addr_width; 841 reg_burst = rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst); 842 843 dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst); 844 } 845 846 return 0; 847 } 848 849 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) 850 { 851 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 852 struct dw_dma *dw = to_dw_dma(chan->device); 853 int ret; 854 855 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); 856 857 dwc->dma_sconfig.src_maxburst = 858 clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst); 859 dwc->dma_sconfig.dst_maxburst = 860 clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst); 861 862 ret = dwc_verify_p_buswidth(chan); 863 if (ret) 864 return ret; 865 866 ret = dwc_verify_m_buswidth(chan); 867 if (ret) 868 return ret; 869 870 dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst); 871 dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst); 872 873 return 0; 874 } 875 876 static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain) 877 { 878 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 879 unsigned int count = 20; /* timeout iterations */ 880 881 dw->suspend_chan(dwc, drain); 882 883 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) 884 udelay(2); 885 886 set_bit(DW_DMA_IS_PAUSED, &dwc->flags); 887 } 888 889 static int dwc_pause(struct dma_chan *chan) 890 { 891 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 892 unsigned long flags; 893 894 spin_lock_irqsave(&dwc->lock, flags); 895 dwc_chan_pause(dwc, false); 896 spin_unlock_irqrestore(&dwc->lock, flags); 897 898 return 0; 899 } 900 901 static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain) 902 { 903 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 904 905 dw->resume_chan(dwc, drain); 906 907 clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); 908 } 909 910 static int dwc_resume(struct dma_chan *chan) 911 { 912 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 913 unsigned long flags; 914 915 spin_lock_irqsave(&dwc->lock, flags); 916 917 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) 918 dwc_chan_resume(dwc, false); 919 920 spin_unlock_irqrestore(&dwc->lock, flags); 921 922 return 0; 923 } 924 925 static int dwc_terminate_all(struct dma_chan *chan) 926 { 927 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 928 struct dw_dma *dw = to_dw_dma(chan->device); 929 struct dw_desc *desc, *_desc; 930 unsigned long flags; 931 LIST_HEAD(list); 932 933 spin_lock_irqsave(&dwc->lock, flags); 934 935 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 936 937 dwc_chan_pause(dwc, true); 938 939 dwc_chan_disable(dw, dwc); 940 941 dwc_chan_resume(dwc, true); 942 943 /* active_list entries will end up before queued entries */ 944 list_splice_init(&dwc->queue, &list); 945 list_splice_init(&dwc->active_list, &list); 946 947 spin_unlock_irqrestore(&dwc->lock, flags); 948 949 /* Flush all pending and queued descriptors */ 950 list_for_each_entry_safe(desc, _desc, &list, desc_node) 951 dwc_descriptor_complete(dwc, desc, false); 952 953 return 0; 954 } 955 956 static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c) 957 { 958 struct dw_desc *desc; 959 960 list_for_each_entry(desc, &dwc->active_list, desc_node) 961 if (desc->txd.cookie == c) 962 return desc; 963 964 return NULL; 965 } 966 967 static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie, 968 enum dma_status *status) 969 { 970 struct dw_desc *desc; 971 unsigned long flags; 972 u32 residue; 973 974 spin_lock_irqsave(&dwc->lock, flags); 975 976 desc = dwc_find_desc(dwc, cookie); 977 if (desc) { 978 if (desc == dwc_first_active(dwc)) { 979 residue = desc->residue; 980 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) 981 residue -= dwc_get_sent(dwc); 982 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) 983 *status = DMA_PAUSED; 984 } else { 985 residue = desc->total_len; 986 } 987 } else { 988 residue = 0; 989 } 990 991 spin_unlock_irqrestore(&dwc->lock, flags); 992 return residue; 993 } 994 995 static enum dma_status 996 dwc_tx_status(struct dma_chan *chan, 997 dma_cookie_t cookie, 998 struct dma_tx_state *txstate) 999 { 1000 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1001 enum dma_status ret; 1002 1003 ret = dma_cookie_status(chan, cookie, txstate); 1004 if (ret == DMA_COMPLETE) 1005 return ret; 1006 1007 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 1008 1009 ret = dma_cookie_status(chan, cookie, txstate); 1010 if (ret == DMA_COMPLETE) 1011 return ret; 1012 1013 dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret)); 1014 return ret; 1015 } 1016 1017 static void dwc_issue_pending(struct dma_chan *chan) 1018 { 1019 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1020 unsigned long flags; 1021 1022 spin_lock_irqsave(&dwc->lock, flags); 1023 if (list_empty(&dwc->active_list)) 1024 dwc_dostart_first_queued(dwc); 1025 spin_unlock_irqrestore(&dwc->lock, flags); 1026 } 1027 1028 /*----------------------------------------------------------------------*/ 1029 1030 void do_dw_dma_off(struct dw_dma *dw) 1031 { 1032 dma_writel(dw, CFG, 0); 1033 1034 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1035 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1036 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1037 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1038 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1039 1040 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) 1041 cpu_relax(); 1042 } 1043 1044 void do_dw_dma_on(struct dw_dma *dw) 1045 { 1046 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1047 } 1048 1049 static int dwc_alloc_chan_resources(struct dma_chan *chan) 1050 { 1051 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1052 struct dw_dma *dw = to_dw_dma(chan->device); 1053 1054 dev_vdbg(chan2dev(chan), "%s\n", __func__); 1055 1056 /* ASSERT: channel is idle */ 1057 if (dma_readl(dw, CH_EN) & dwc->mask) { 1058 dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); 1059 return -EIO; 1060 } 1061 1062 dma_cookie_init(chan); 1063 1064 /* 1065 * NOTE: some controllers may have additional features that we 1066 * need to initialize here, like "scatter-gather" (which 1067 * doesn't mean what you think it means), and status writeback. 1068 */ 1069 1070 /* 1071 * We need controller-specific data to set up slave transfers. 1072 */ 1073 if (chan->private && !dw_dma_filter(chan, chan->private)) { 1074 dev_warn(chan2dev(chan), "Wrong controller-specific data\n"); 1075 return -EINVAL; 1076 } 1077 1078 /* Enable controller here if needed */ 1079 if (!dw->in_use) 1080 do_dw_dma_on(dw); 1081 dw->in_use |= dwc->mask; 1082 1083 return 0; 1084 } 1085 1086 static void dwc_free_chan_resources(struct dma_chan *chan) 1087 { 1088 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1089 struct dw_dma *dw = to_dw_dma(chan->device); 1090 unsigned long flags; 1091 1092 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, 1093 dwc->descs_allocated); 1094 1095 /* ASSERT: channel is idle */ 1096 BUG_ON(!list_empty(&dwc->active_list)); 1097 BUG_ON(!list_empty(&dwc->queue)); 1098 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); 1099 1100 spin_lock_irqsave(&dwc->lock, flags); 1101 1102 /* Clear custom channel configuration */ 1103 memset(&dwc->dws, 0, sizeof(struct dw_dma_slave)); 1104 1105 /* Disable interrupts */ 1106 channel_clear_bit(dw, MASK.XFER, dwc->mask); 1107 channel_clear_bit(dw, MASK.BLOCK, dwc->mask); 1108 channel_clear_bit(dw, MASK.ERROR, dwc->mask); 1109 1110 spin_unlock_irqrestore(&dwc->lock, flags); 1111 1112 /* Disable controller in case it was a last user */ 1113 dw->in_use &= ~dwc->mask; 1114 if (!dw->in_use) 1115 do_dw_dma_off(dw); 1116 1117 dev_vdbg(chan2dev(chan), "%s: done\n", __func__); 1118 } 1119 1120 static void dwc_caps(struct dma_chan *chan, struct dma_slave_caps *caps) 1121 { 1122 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1123 1124 caps->max_burst = dwc->max_burst; 1125 1126 /* 1127 * It might be crucial for some devices to have the hardware 1128 * accelerated multi-block transfers supported, aka LLPs in DW DMAC 1129 * notation. So if LLPs are supported then max_sg_burst is set to 1130 * zero which means unlimited number of SG entries can be handled in a 1131 * single DMA transaction, otherwise it's just one SG entry. 1132 */ 1133 if (dwc->nollp) 1134 caps->max_sg_burst = 1; 1135 else 1136 caps->max_sg_burst = 0; 1137 } 1138 1139 int do_dma_probe(struct dw_dma_chip *chip) 1140 { 1141 struct dw_dma *dw = chip->dw; 1142 struct dw_dma_platform_data *pdata; 1143 bool autocfg = false; 1144 unsigned int dw_params; 1145 unsigned int i; 1146 int err; 1147 1148 dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); 1149 if (!dw->pdata) 1150 return -ENOMEM; 1151 1152 dw->regs = chip->regs; 1153 1154 pm_runtime_get_sync(chip->dev); 1155 1156 if (!chip->pdata) { 1157 dw_params = dma_readl(dw, DW_PARAMS); 1158 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); 1159 1160 autocfg = dw_params >> DW_PARAMS_EN & 1; 1161 if (!autocfg) { 1162 err = -EINVAL; 1163 goto err_pdata; 1164 } 1165 1166 /* Reassign the platform data pointer */ 1167 pdata = dw->pdata; 1168 1169 /* Get hardware configuration parameters */ 1170 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; 1171 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; 1172 for (i = 0; i < pdata->nr_masters; i++) { 1173 pdata->data_width[i] = 1174 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3); 1175 } 1176 pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); 1177 1178 /* Fill platform data with the default values */ 1179 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; 1180 pdata->chan_priority = CHAN_PRIORITY_ASCENDING; 1181 } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { 1182 err = -EINVAL; 1183 goto err_pdata; 1184 } else { 1185 memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata)); 1186 1187 /* Reassign the platform data pointer */ 1188 pdata = dw->pdata; 1189 } 1190 1191 dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), 1192 GFP_KERNEL); 1193 if (!dw->chan) { 1194 err = -ENOMEM; 1195 goto err_pdata; 1196 } 1197 1198 /* Calculate all channel mask before DMA setup */ 1199 dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1200 1201 /* Force dma off, just in case */ 1202 dw->disable(dw); 1203 1204 /* Device and instance ID for IRQ and DMA pool */ 1205 dw->set_device_name(dw, chip->id); 1206 1207 /* Create a pool of consistent memory blocks for hardware descriptors */ 1208 dw->desc_pool = dmam_pool_create(dw->name, chip->dev, 1209 sizeof(struct dw_desc), 4, 0); 1210 if (!dw->desc_pool) { 1211 dev_err(chip->dev, "No memory for descriptors dma pool\n"); 1212 err = -ENOMEM; 1213 goto err_pdata; 1214 } 1215 1216 tasklet_setup(&dw->tasklet, dw_dma_tasklet); 1217 1218 err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, 1219 dw->name, dw); 1220 if (err) 1221 goto err_pdata; 1222 1223 INIT_LIST_HEAD(&dw->dma.channels); 1224 for (i = 0; i < pdata->nr_channels; i++) { 1225 struct dw_dma_chan *dwc = &dw->chan[i]; 1226 1227 dwc->chan.device = &dw->dma; 1228 dma_cookie_init(&dwc->chan); 1229 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1230 list_add_tail(&dwc->chan.device_node, 1231 &dw->dma.channels); 1232 else 1233 list_add(&dwc->chan.device_node, &dw->dma.channels); 1234 1235 /* 7 is highest priority & 0 is lowest. */ 1236 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1237 dwc->priority = pdata->nr_channels - i - 1; 1238 else 1239 dwc->priority = i; 1240 1241 dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; 1242 spin_lock_init(&dwc->lock); 1243 dwc->mask = 1 << i; 1244 1245 INIT_LIST_HEAD(&dwc->active_list); 1246 INIT_LIST_HEAD(&dwc->queue); 1247 1248 channel_clear_bit(dw, CH_EN, dwc->mask); 1249 1250 dwc->direction = DMA_TRANS_NONE; 1251 1252 /* Hardware configuration */ 1253 if (autocfg) { 1254 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; 1255 void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r]; 1256 unsigned int dwc_params = readl(addr); 1257 1258 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, 1259 dwc_params); 1260 1261 /* 1262 * Decode maximum block size for given channel. The 1263 * stored 4 bit value represents blocks from 0x00 for 3 1264 * up to 0x0a for 4095. 1265 */ 1266 dwc->block_size = 1267 (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1; 1268 1269 /* 1270 * According to the DW DMA databook the true scatter- 1271 * gether LLPs aren't available if either multi-block 1272 * config is disabled (CHx_MULTI_BLK_EN == 0) or the 1273 * LLP register is hard-coded to zeros 1274 * (CHx_HC_LLP == 1). 1275 */ 1276 dwc->nollp = 1277 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 || 1278 (dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1; 1279 dwc->max_burst = 1280 (0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7)); 1281 } else { 1282 dwc->block_size = pdata->block_size; 1283 dwc->nollp = !pdata->multi_block[i]; 1284 dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST; 1285 } 1286 } 1287 1288 /* Clear all interrupts on all channels. */ 1289 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); 1290 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); 1291 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); 1292 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); 1293 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); 1294 1295 /* Set capabilities */ 1296 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1297 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); 1298 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1299 1300 dw->dma.dev = chip->dev; 1301 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; 1302 dw->dma.device_free_chan_resources = dwc_free_chan_resources; 1303 1304 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1305 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1306 1307 dw->dma.device_caps = dwc_caps; 1308 dw->dma.device_config = dwc_config; 1309 dw->dma.device_pause = dwc_pause; 1310 dw->dma.device_resume = dwc_resume; 1311 dw->dma.device_terminate_all = dwc_terminate_all; 1312 1313 dw->dma.device_tx_status = dwc_tx_status; 1314 dw->dma.device_issue_pending = dwc_issue_pending; 1315 1316 /* DMA capabilities */ 1317 dw->dma.min_burst = DW_DMA_MIN_BURST; 1318 dw->dma.max_burst = DW_DMA_MAX_BURST; 1319 dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; 1320 dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; 1321 dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | 1322 BIT(DMA_MEM_TO_MEM); 1323 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1324 1325 /* 1326 * For now there is no hardware with non uniform maximum block size 1327 * across all of the device channels, so we set the maximum segment 1328 * size as the block size found for the very first channel. 1329 */ 1330 dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size); 1331 1332 err = dma_async_device_register(&dw->dma); 1333 if (err) 1334 goto err_dma_register; 1335 1336 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", 1337 pdata->nr_channels); 1338 1339 pm_runtime_put_sync_suspend(chip->dev); 1340 1341 return 0; 1342 1343 err_dma_register: 1344 free_irq(chip->irq, dw); 1345 err_pdata: 1346 pm_runtime_put_sync_suspend(chip->dev); 1347 return err; 1348 } 1349 1350 int do_dma_remove(struct dw_dma_chip *chip) 1351 { 1352 struct dw_dma *dw = chip->dw; 1353 struct dw_dma_chan *dwc, *_dwc; 1354 1355 pm_runtime_get_sync(chip->dev); 1356 1357 do_dw_dma_off(dw); 1358 dma_async_device_unregister(&dw->dma); 1359 1360 free_irq(chip->irq, dw); 1361 tasklet_kill(&dw->tasklet); 1362 1363 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, 1364 chan.device_node) { 1365 list_del(&dwc->chan.device_node); 1366 channel_clear_bit(dw, CH_EN, dwc->mask); 1367 } 1368 1369 pm_runtime_put_sync_suspend(chip->dev); 1370 return 0; 1371 } 1372 1373 int do_dw_dma_disable(struct dw_dma_chip *chip) 1374 { 1375 struct dw_dma *dw = chip->dw; 1376 1377 dw->disable(dw); 1378 return 0; 1379 } 1380 EXPORT_SYMBOL_GPL(do_dw_dma_disable); 1381 1382 int do_dw_dma_enable(struct dw_dma_chip *chip) 1383 { 1384 struct dw_dma *dw = chip->dw; 1385 1386 dw->enable(dw); 1387 return 0; 1388 } 1389 EXPORT_SYMBOL_GPL(do_dw_dma_enable); 1390 1391 MODULE_LICENSE("GPL v2"); 1392 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); 1393 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1394 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); 1395