/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR		0x0204
#define DTADR		0x0208
#define DCMD		0x020c

#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + \
			 (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int			dma_channels;
	void __iomem		*base;
	struct device		*dev;
	struct dma_device	device;
	struct mmp_pdma_phy	*phy;
	spinlock_t phy_lock;	/* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx)	\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)	\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)	\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)	\
	container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (phy) {
		reg = (phy->idx << 2) + DCSR;
		writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
	}
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}
	return -EAGAIN;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->phy->vchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

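/*
 * Descriptor life cycle, summarizing the code around this point (added
 * note, not from the original sources):
 *
 *   prep_memcpy()/prep_slave_sg()/prep_dma_cyclic()
 *                        build a desc_sw chain on first->tx_list
 *   tx_submit()          splices tx_list       ==> chain_pending
 *   issue_pending() /
 *   dma_do_tasklet()     splice chain_pending  ==> chain_running via
 *                        start_pending_queue() and kick the phy channel
 *   dma_do_tasklet()     completes cookies, runs callbacks and returns
 *                        descriptors to the dma_pool
 */
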
/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
				 (DCMD_LENGTH & period_len);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	int ret = 0;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->dev_addr = addr;
		/* FIXME: drivers should be ported over to use the filter
		 * function. Once that's done, the following two lines can
		 * be removed.
		 */
		if (cfg->slave_id)
			chan->drcmr = cfg->slave_id;
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run the client callbacks
 * Start the pending list
 */
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	if (chan->cyclic_first) {
		dma_async_tx_callback cb = NULL;
		void *cb_data = NULL;

		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		cb = desc->async_tx.callback;
		cb_data = desc->async_tx.callback_param;
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		if (cb)
			cb(cb_data);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;

			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
			    GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       0, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate;

retry:
	candidate = NULL;

	/* walk the list of channels registered with the current instance and
	 * find one that is currently unused */
	list_for_each_entry(chan, &d->device.channels, device_node)
		if (chan->client_count == 0) {
			candidate = chan;
			break;
		}

	if (!candidate)
		return NULL;

	/* dma_get_slave_channel will return NULL if we lost a race between
	 * the lookup and the reservation */
	chan = dma_get_slave_channel(candidate);

	if (chan) {
		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

		c->drcmr = dma_spec->args[0];
		return chan;
	}

	goto retry;
}

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channels */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kzalloc(pdev->dev,
				 dma_channels * sizeof(struct mmp_pdma_phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       0, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner	= THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");
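
/*
 * Usage sketch (illustrative only, not part of the driver): a hypothetical
 * client on a non-DT platform could take a channel with dma_request_channel()
 * and the filter exported above; the DRCMR request line number (22 below) is
 * a made-up value for the example. On DT platforms the "dmas" phandle plus
 * one cell (the DRCMR line) is translated by mmp_pdma_dma_xlate() instead.
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr = 22;	(hypothetical request line)
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
 */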