/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of descriptors still to be transferred */
	struct list_head work_list;

	/*
	 * Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Token of a hardware channel thread of PL330 DMAC.
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
					&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

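/*
 * Completion pipeline in this driver: dma_pl330_rqcb() marks a
 * descriptor DONE and schedules the channel tasklet below, which moves
 * the DONE descriptors off the work_list, refills the PL330 request
 * queue via fill_queue(), restarts the channel thread, and only then
 * runs the client callbacks (outside the channel lock).
 */
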
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

#ifdef CONFIG_OF
	if (chan->device->dev->of_node) {
		const __be32 *prop_value;
		phandle phandle;
		struct device_node *node;

		prop_value = ((struct property *)param)->value;
		phandle = be32_to_cpup(prop_value++);
		node = of_find_node_by_phandle(phandle);
		return ((chan->private == node) &&
				(chan->chan_id == be32_to_cpup(prop_value)));
	}
#endif

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);

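/*
 * A client typically grabs a channel by passing pl330_filter to
 * dma_request_channel(). A minimal sketch for the non-DT case; the
 * peri_id value comes from platform data and is illustrative only:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 */
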
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Unsupported command\n");
		return -ENXIO;
	}

	return 0;
}

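/*
 * The DMA_SLAVE_CONFIG path above caches the peripheral FIFO address,
 * bus width and maxburst in the channel. A hedged sketch of the client
 * side; the FIFO address and the widths are peripheral-specific
 * assumptions:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */
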
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

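/*
 * Descriptors normally come from the pool built at probe time;
 * pl330_get_desc() below grows the pool one descriptor at a time with
 * GFP_ATOMIC only when it runs dry, since the prep_xxx callbacks may be
 * invoked from atomic context.
 */
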
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

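/*
 * Worked example for get_burst_len(): with a 64-bit data bus (8 bytes)
 * and a data buffer depth of 16 lines, the MC FIFO holds 128 bytes; at
 * brst_size = 2 (4-byte beats) that is 32 beats, clamped to the PL330
 * maximum of 16 and then reduced until it evenly divides len. The
 * figures are illustrative; the real values come from pi->pcfg.
 */
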
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_DEV_TO_MEM:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

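/*
 * End-to-end slave usage from a client, roughly (error handling
 * omitted; the sg list and the callback name are the client's own):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = client_done;	   - invoked from pl330_tasklet()
 *	cookie = dmaengine_submit(desc);   - ends up in pl330_tx_submit()
 *	dma_async_issue_pending(chan);	   - runs pl330_tasklet() directly
 */
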
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifndef CONFIG_PM_RUNTIME
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
			(u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifndef CONFIG_PM_RUNTIME
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");