/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * Since there are fewer channels than signals, it is usually not possible
 * to service all physical signals at once, so a multiplexing scheme with
 * possible denial of use is necessary.
 *
 * The PL080 has two AHB bus masters, the PL081 has only one.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry. Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active. The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero). The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ. The DMAC
 *    will then move to the next LLI entry.
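 *
 * In this driver the flow controller is chosen from the device_fc flag in
 * the channel's dma_slave_config: pl08x_prep_slave_sg() below selects
 * PL080_FLOW_MEM2PER/PL080_FLOW_PER2MEM when the DMAC is the flow
 * controller, and PL080_FLOW_MEM2PER_PER/PL080_FLOW_PER2MEM_PER when the
 * peripheral is.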
68 * 69 * Global TODO: 70 * - Break out common code from arch/arm/mach-s3c64xx and share 71 */ 72 #include <linux/amba/bus.h> 73 #include <linux/amba/pl08x.h> 74 #include <linux/debugfs.h> 75 #include <linux/delay.h> 76 #include <linux/device.h> 77 #include <linux/dmaengine.h> 78 #include <linux/dmapool.h> 79 #include <linux/dma-mapping.h> 80 #include <linux/init.h> 81 #include <linux/interrupt.h> 82 #include <linux/module.h> 83 #include <linux/pm_runtime.h> 84 #include <linux/seq_file.h> 85 #include <linux/slab.h> 86 #include <asm/hardware/pl080.h> 87 88 #include "dmaengine.h" 89 90 #define DRIVER_NAME "pl08xdmac" 91 92 static struct amba_driver pl08x_amba_driver; 93 94 /** 95 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 96 * @channels: the number of channels available in this variant 97 * @dualmaster: whether this version supports dual AHB masters or not. 98 */ 99 struct vendor_data { 100 u8 channels; 101 bool dualmaster; 102 }; 103 104 /* 105 * PL08X private data structures 106 * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit, 107 * start & end do not - their bus bit info is in cctl. Also note that these 108 * are fixed 32-bit quantities. 109 */ 110 struct pl08x_lli { 111 u32 src; 112 u32 dst; 113 u32 lli; 114 u32 cctl; 115 }; 116 117 /** 118 * struct pl08x_driver_data - the local state holder for the PL08x 119 * @slave: slave engine for this instance 120 * @memcpy: memcpy engine for this instance 121 * @base: virtual memory base (remapped) for the PL08x 122 * @adev: the corresponding AMBA (PrimeCell) bus entry 123 * @vd: vendor data for this PL08x variant 124 * @pd: platform data passed in from the platform/machine 125 * @phy_chans: array of data for the physical channels 126 * @pool: a pool for the LLI descriptors 127 * @pool_ctr: counter of LLIs in the pool 128 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI 129 * fetches 130 * @mem_buses: set to indicate memory transfers on AHB2. 131 * @lock: a spinlock for this struct 132 */ 133 struct pl08x_driver_data { 134 struct dma_device slave; 135 struct dma_device memcpy; 136 void __iomem *base; 137 struct amba_device *adev; 138 const struct vendor_data *vd; 139 struct pl08x_platform_data *pd; 140 struct pl08x_phy_chan *phy_chans; 141 struct dma_pool *pool; 142 int pool_ctr; 143 u8 lli_buses; 144 u8 mem_buses; 145 spinlock_t lock; 146 }; 147 148 /* 149 * PL08X specific defines 150 */ 151 152 /* Size (bytes) of each LLI buffer allocated for one transfer */ 153 # define PL08X_LLI_TSFR_SIZE 0x2000 154 155 /* Maximum times we call dma_pool_alloc on this pool without freeing */ 156 #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) 157 #define PL08X_ALIGN 8 158 159 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 160 { 161 return container_of(chan, struct pl08x_dma_chan, chan); 162 } 163 164 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) 165 { 166 return container_of(tx, struct pl08x_txd, tx); 167 } 168 169 /* 170 * Physical channel handling 171 */ 172 173 /* Whether a certain channel is busy or not */ 174 static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) 175 { 176 unsigned int val; 177 178 val = readl(ch->base + PL080_CH_CONFIG); 179 return val & PL080_CONFIG_ACTIVE; 180 } 181 182 /* 183 * Set the initial DMA register values i.e. those for the first LLI 184 * The next LLI pointer and the configuration interrupt bit have 185 * been set when the LLIs were constructed. 
Poke them into the hardware 186 * and start the transfer. 187 */ 188 static void pl08x_start_txd(struct pl08x_dma_chan *plchan, 189 struct pl08x_txd *txd) 190 { 191 struct pl08x_driver_data *pl08x = plchan->host; 192 struct pl08x_phy_chan *phychan = plchan->phychan; 193 struct pl08x_lli *lli = &txd->llis_va[0]; 194 u32 val; 195 196 plchan->at = txd; 197 198 /* Wait for channel inactive */ 199 while (pl08x_phy_channel_busy(phychan)) 200 cpu_relax(); 201 202 dev_vdbg(&pl08x->adev->dev, 203 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 204 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", 205 phychan->id, lli->src, lli->dst, lli->lli, lli->cctl, 206 txd->ccfg); 207 208 writel(lli->src, phychan->base + PL080_CH_SRC_ADDR); 209 writel(lli->dst, phychan->base + PL080_CH_DST_ADDR); 210 writel(lli->lli, phychan->base + PL080_CH_LLI); 211 writel(lli->cctl, phychan->base + PL080_CH_CONTROL); 212 writel(txd->ccfg, phychan->base + PL080_CH_CONFIG); 213 214 /* Enable the DMA channel */ 215 /* Do not access config register until channel shows as disabled */ 216 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) 217 cpu_relax(); 218 219 /* Do not access config register until channel shows as inactive */ 220 val = readl(phychan->base + PL080_CH_CONFIG); 221 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 222 val = readl(phychan->base + PL080_CH_CONFIG); 223 224 writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG); 225 } 226 227 /* 228 * Pause the channel by setting the HALT bit. 229 * 230 * For M->P transfers, pause the DMAC first and then stop the peripheral - 231 * the FIFO can only drain if the peripheral is still requesting data. 232 * (note: this can still timeout if the DMAC FIFO never drains of data.) 233 * 234 * For P->M transfers, disable the peripheral first to stop it filling 235 * the DMAC FIFO, and then pause the DMAC. 236 */ 237 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 238 { 239 u32 val; 240 int timeout; 241 242 /* Set the HALT bit and wait for the FIFO to drain */ 243 val = readl(ch->base + PL080_CH_CONFIG); 244 val |= PL080_CONFIG_HALT; 245 writel(val, ch->base + PL080_CH_CONFIG); 246 247 /* Wait for channel inactive */ 248 for (timeout = 1000; timeout; timeout--) { 249 if (!pl08x_phy_channel_busy(ch)) 250 break; 251 udelay(1); 252 } 253 if (pl08x_phy_channel_busy(ch)) 254 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); 255 } 256 257 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 258 { 259 u32 val; 260 261 /* Clear the HALT bit */ 262 val = readl(ch->base + PL080_CH_CONFIG); 263 val &= ~PL080_CONFIG_HALT; 264 writel(val, ch->base + PL080_CH_CONFIG); 265 } 266 267 /* 268 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 269 * clears any pending interrupt status. This should not be used for 270 * an on-going transfer, but as a method of shutting down a channel 271 * (eg, when it's no longer used) or terminating a transfer. 
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * An LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			struct pl08x_sg *dsg;
			/* Walk the pending descriptor's own sg list */
			list_for_each_entry(dsg, &txdi->dsg_list, node)
				bytes += dsg->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
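 *
 * (A memcpy descriptor that could not get a physical channel here is
 * instead parked at submit time: pl08x_tx_submit() marks the virtual
 * channel PL08X_CHAN_WAITING and the tasklet retries it once a physical
 * channel is released. Slave transfers are NACK:ed with -EBUSY at prepare
 * time instead - see prep_phy_channel().)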
374 */ 375 static struct pl08x_phy_chan * 376 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 377 struct pl08x_dma_chan *virt_chan) 378 { 379 struct pl08x_phy_chan *ch = NULL; 380 unsigned long flags; 381 int i; 382 383 for (i = 0; i < pl08x->vd->channels; i++) { 384 ch = &pl08x->phy_chans[i]; 385 386 spin_lock_irqsave(&ch->lock, flags); 387 388 if (!ch->serving) { 389 ch->serving = virt_chan; 390 ch->signal = -1; 391 spin_unlock_irqrestore(&ch->lock, flags); 392 break; 393 } 394 395 spin_unlock_irqrestore(&ch->lock, flags); 396 } 397 398 if (i == pl08x->vd->channels) { 399 /* No physical channel available, cope with it */ 400 return NULL; 401 } 402 403 pm_runtime_get_sync(&pl08x->adev->dev); 404 return ch; 405 } 406 407 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 408 struct pl08x_phy_chan *ch) 409 { 410 unsigned long flags; 411 412 spin_lock_irqsave(&ch->lock, flags); 413 414 /* Stop the channel and clear its interrupts */ 415 pl08x_terminate_phy_chan(pl08x, ch); 416 417 pm_runtime_put(&pl08x->adev->dev); 418 419 /* Mark it as free */ 420 ch->serving = NULL; 421 spin_unlock_irqrestore(&ch->lock, flags); 422 } 423 424 /* 425 * LLI handling 426 */ 427 428 static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) 429 { 430 switch (coded) { 431 case PL080_WIDTH_8BIT: 432 return 1; 433 case PL080_WIDTH_16BIT: 434 return 2; 435 case PL080_WIDTH_32BIT: 436 return 4; 437 default: 438 break; 439 } 440 BUG(); 441 return 0; 442 } 443 444 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 445 size_t tsize) 446 { 447 u32 retbits = cctl; 448 449 /* Remove all src, dst and transfer size bits */ 450 retbits &= ~PL080_CONTROL_DWIDTH_MASK; 451 retbits &= ~PL080_CONTROL_SWIDTH_MASK; 452 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 453 454 /* Then set the bits according to the parameters */ 455 switch (srcwidth) { 456 case 1: 457 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; 458 break; 459 case 2: 460 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT; 461 break; 462 case 4: 463 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; 464 break; 465 default: 466 BUG(); 467 break; 468 } 469 470 switch (dstwidth) { 471 case 1: 472 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 473 break; 474 case 2: 475 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 476 break; 477 case 4: 478 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 479 break; 480 default: 481 BUG(); 482 break; 483 } 484 485 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 486 return retbits; 487 } 488 489 struct pl08x_lli_build_data { 490 struct pl08x_txd *txd; 491 struct pl08x_bus_data srcbus; 492 struct pl08x_bus_data dstbus; 493 size_t remainder; 494 u32 lli_bus; 495 }; 496 497 /* 498 * Autoselect a master bus to use for the transfer. Slave will be the chosen as 499 * victim in case src & dest are not similarly aligned. i.e. If after aligning 500 * masters address with width requirements of transfer (by sending few byte by 501 * byte data), slave is still not aligned, then its width will be reduced to 502 * BYTE. 503 * - prefers the destination bus if both available 504 * - prefers bus with fixed address (i.e. 
peripheral) 505 */ 506 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 507 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 508 { 509 if (!(cctl & PL080_CONTROL_DST_INCR)) { 510 *mbus = &bd->dstbus; 511 *sbus = &bd->srcbus; 512 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 513 *mbus = &bd->srcbus; 514 *sbus = &bd->dstbus; 515 } else { 516 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { 517 *mbus = &bd->dstbus; 518 *sbus = &bd->srcbus; 519 } else { 520 *mbus = &bd->srcbus; 521 *sbus = &bd->dstbus; 522 } 523 } 524 } 525 526 /* 527 * Fills in one LLI for a certain transfer descriptor and advance the counter 528 */ 529 static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, 530 int num_llis, int len, u32 cctl) 531 { 532 struct pl08x_lli *llis_va = bd->txd->llis_va; 533 dma_addr_t llis_bus = bd->txd->llis_bus; 534 535 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 536 537 llis_va[num_llis].cctl = cctl; 538 llis_va[num_llis].src = bd->srcbus.addr; 539 llis_va[num_llis].dst = bd->dstbus.addr; 540 llis_va[num_llis].lli = llis_bus + (num_llis + 1) * 541 sizeof(struct pl08x_lli); 542 llis_va[num_llis].lli |= bd->lli_bus; 543 544 if (cctl & PL080_CONTROL_SRC_INCR) 545 bd->srcbus.addr += len; 546 if (cctl & PL080_CONTROL_DST_INCR) 547 bd->dstbus.addr += len; 548 549 BUG_ON(bd->remainder < len); 550 551 bd->remainder -= len; 552 } 553 554 static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd, 555 u32 *cctl, u32 len, int num_llis, size_t *total_bytes) 556 { 557 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); 558 pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl); 559 (*total_bytes) += len; 560 } 561 562 /* 563 * This fills in the table of LLIs for the transfer descriptor 564 * Note that we assume we never have to change the burst sizes 565 * Return 0 for error 566 */ 567 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 568 struct pl08x_txd *txd) 569 { 570 struct pl08x_bus_data *mbus, *sbus; 571 struct pl08x_lli_build_data bd; 572 int num_llis = 0; 573 u32 cctl, early_bytes = 0; 574 size_t max_bytes_per_lli, total_bytes; 575 struct pl08x_lli *llis_va; 576 struct pl08x_sg *dsg; 577 578 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); 579 if (!txd->llis_va) { 580 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 581 return 0; 582 } 583 584 pl08x->pool_ctr++; 585 586 bd.txd = txd; 587 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; 588 cctl = txd->cctl; 589 590 /* Find maximum width of the source bus */ 591 bd.srcbus.maxwidth = 592 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 593 PL080_CONTROL_SWIDTH_SHIFT); 594 595 /* Find maximum width of the destination bus */ 596 bd.dstbus.maxwidth = 597 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 598 PL080_CONTROL_DWIDTH_SHIFT); 599 600 list_for_each_entry(dsg, &txd->dsg_list, node) { 601 total_bytes = 0; 602 cctl = txd->cctl; 603 604 bd.srcbus.addr = dsg->src_addr; 605 bd.dstbus.addr = dsg->dst_addr; 606 bd.remainder = dsg->len; 607 bd.srcbus.buswidth = bd.srcbus.maxwidth; 608 bd.dstbus.buswidth = bd.dstbus.maxwidth; 609 610 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 611 612 dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n", 613 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 614 bd.srcbus.buswidth, 615 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? 
"+" : "", 616 bd.dstbus.buswidth, 617 bd.remainder); 618 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 619 mbus == &bd.srcbus ? "src" : "dst", 620 sbus == &bd.srcbus ? "src" : "dst"); 621 622 /* 623 * Zero length is only allowed if all these requirements are 624 * met: 625 * - flow controller is peripheral. 626 * - src.addr is aligned to src.width 627 * - dst.addr is aligned to dst.width 628 * 629 * sg_len == 1 should be true, as there can be two cases here: 630 * 631 * - Memory addresses are contiguous and are not scattered. 632 * Here, Only one sg will be passed by user driver, with 633 * memory address and zero length. We pass this to controller 634 * and after the transfer it will receive the last burst 635 * request from peripheral and so transfer finishes. 636 * 637 * - Memory addresses are scattered and are not contiguous. 638 * Here, Obviously as DMA controller doesn't know when a lli's 639 * transfer gets over, it can't load next lli. So in this 640 * case, there has to be an assumption that only one lli is 641 * supported. Thus, we can't have scattered addresses. 642 */ 643 if (!bd.remainder) { 644 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> 645 PL080_CONFIG_FLOW_CONTROL_SHIFT; 646 if (!((fc >= PL080_FLOW_SRC2DST_DST) && 647 (fc <= PL080_FLOW_SRC2DST_SRC))) { 648 dev_err(&pl08x->adev->dev, "%s sg len can't be zero", 649 __func__); 650 return 0; 651 } 652 653 if ((bd.srcbus.addr % bd.srcbus.buswidth) || 654 (bd.dstbus.addr % bd.dstbus.buswidth)) { 655 dev_err(&pl08x->adev->dev, 656 "%s src & dst address must be aligned to src" 657 " & dst width if peripheral is flow controller", 658 __func__); 659 return 0; 660 } 661 662 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 663 bd.dstbus.buswidth, 0); 664 pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl); 665 break; 666 } 667 668 /* 669 * Send byte by byte for following cases 670 * - Less than a bus width available 671 * - until master bus is aligned 672 */ 673 if (bd.remainder < mbus->buswidth) 674 early_bytes = bd.remainder; 675 else if ((mbus->addr) % (mbus->buswidth)) { 676 early_bytes = mbus->buswidth - (mbus->addr) % 677 (mbus->buswidth); 678 if ((bd.remainder - early_bytes) < mbus->buswidth) 679 early_bytes = bd.remainder; 680 } 681 682 if (early_bytes) { 683 dev_vdbg(&pl08x->adev->dev, 684 "%s byte width LLIs (remain 0x%08x)\n", 685 __func__, bd.remainder); 686 prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++, 687 &total_bytes); 688 } 689 690 if (bd.remainder) { 691 /* 692 * Master now aligned 693 * - if slave is not then we must set its width down 694 */ 695 if (sbus->addr % sbus->buswidth) { 696 dev_dbg(&pl08x->adev->dev, 697 "%s set down bus width to one byte\n", 698 __func__); 699 700 sbus->buswidth = 1; 701 } 702 703 /* 704 * Bytes transferred = tsize * src width, not 705 * MIN(buswidths) 706 */ 707 max_bytes_per_lli = bd.srcbus.buswidth * 708 PL080_CONTROL_TRANSFER_SIZE_MASK; 709 dev_vdbg(&pl08x->adev->dev, 710 "%s max bytes per lli = %zu\n", 711 __func__, max_bytes_per_lli); 712 713 /* 714 * Make largest possible LLIs until less than one bus 715 * width left 716 */ 717 while (bd.remainder > (mbus->buswidth - 1)) { 718 size_t lli_len, tsize, width; 719 720 /* 721 * If enough left try to send max possible, 722 * otherwise try to send the remainder 723 */ 724 lli_len = min(bd.remainder, max_bytes_per_lli); 725 726 /* 727 * Check against maximum bus alignment: 728 * Calculate actual transfer size in relation to 729 * bus width an get a maximum remainder of the 730 * highest bus width - 1 731 */ 732 
width = max(mbus->buswidth, sbus->buswidth); 733 lli_len = (lli_len / width) * width; 734 tsize = lli_len / bd.srcbus.buswidth; 735 736 dev_vdbg(&pl08x->adev->dev, 737 "%s fill lli with single lli chunk of " 738 "size 0x%08zx (remainder 0x%08zx)\n", 739 __func__, lli_len, bd.remainder); 740 741 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 742 bd.dstbus.buswidth, tsize); 743 pl08x_fill_lli_for_desc(&bd, num_llis++, 744 lli_len, cctl); 745 total_bytes += lli_len; 746 } 747 748 /* 749 * Send any odd bytes 750 */ 751 if (bd.remainder) { 752 dev_vdbg(&pl08x->adev->dev, 753 "%s align with boundary, send odd bytes (remain %zu)\n", 754 __func__, bd.remainder); 755 prep_byte_width_lli(&bd, &cctl, bd.remainder, 756 num_llis++, &total_bytes); 757 } 758 } 759 760 if (total_bytes != dsg->len) { 761 dev_err(&pl08x->adev->dev, 762 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 763 __func__, total_bytes, dsg->len); 764 return 0; 765 } 766 767 if (num_llis >= MAX_NUM_TSFR_LLIS) { 768 dev_err(&pl08x->adev->dev, 769 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 770 __func__, (u32) MAX_NUM_TSFR_LLIS); 771 return 0; 772 } 773 } 774 775 llis_va = txd->llis_va; 776 /* The final LLI terminates the LLI. */ 777 llis_va[num_llis - 1].lli = 0; 778 /* The final LLI element shall also fire an interrupt. */ 779 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; 780 781 #ifdef VERBOSE_DEBUG 782 { 783 int i; 784 785 dev_vdbg(&pl08x->adev->dev, 786 "%-3s %-9s %-10s %-10s %-10s %s\n", 787 "lli", "", "csrc", "cdst", "clli", "cctl"); 788 for (i = 0; i < num_llis; i++) { 789 dev_vdbg(&pl08x->adev->dev, 790 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", 791 i, &llis_va[i], llis_va[i].src, 792 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl 793 ); 794 } 795 } 796 #endif 797 798 return num_llis; 799 } 800 801 /* You should call this with the struct pl08x lock held */ 802 static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 803 struct pl08x_txd *txd) 804 { 805 struct pl08x_sg *dsg, *_dsg; 806 807 /* Free the LLI */ 808 if (txd->llis_va) 809 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 810 811 pl08x->pool_ctr--; 812 813 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { 814 list_del(&dsg->node); 815 kfree(dsg); 816 } 817 818 kfree(txd); 819 } 820 821 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 822 struct pl08x_dma_chan *plchan) 823 { 824 struct pl08x_txd *txdi = NULL; 825 struct pl08x_txd *next; 826 827 if (!list_empty(&plchan->pend_list)) { 828 list_for_each_entry_safe(txdi, 829 next, &plchan->pend_list, node) { 830 list_del(&txdi->node); 831 pl08x_free_txd(pl08x, txdi); 832 } 833 } 834 } 835 836 /* 837 * The DMA ENGINE API 838 */ 839 static int pl08x_alloc_chan_resources(struct dma_chan *chan) 840 { 841 return 0; 842 } 843 844 static void pl08x_free_chan_resources(struct dma_chan *chan) 845 { 846 } 847 848 /* 849 * This should be called with the channel plchan->lock held 850 */ 851 static int prep_phy_channel(struct pl08x_dma_chan *plchan, 852 struct pl08x_txd *txd) 853 { 854 struct pl08x_driver_data *pl08x = plchan->host; 855 struct pl08x_phy_chan *ch; 856 int ret; 857 858 /* Check if we already have a channel */ 859 if (plchan->phychan) { 860 ch = plchan->phychan; 861 goto got_channel; 862 } 863 864 ch = pl08x_get_phy_channel(pl08x, plchan); 865 if (!ch) { 866 /* No physical channel available, cope with it */ 867 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); 868 return -EBUSY; 
869 } 870 871 /* 872 * OK we have a physical channel: for memcpy() this is all we 873 * need, but for slaves the physical signals may be muxed! 874 * Can the platform allow us to use this channel? 875 */ 876 if (plchan->slave && pl08x->pd->get_signal) { 877 ret = pl08x->pd->get_signal(plchan); 878 if (ret < 0) { 879 dev_dbg(&pl08x->adev->dev, 880 "unable to use physical channel %d for transfer on %s due to platform restrictions\n", 881 ch->id, plchan->name); 882 /* Release physical channel & return */ 883 pl08x_put_phy_channel(pl08x, ch); 884 return -EBUSY; 885 } 886 ch->signal = ret; 887 } 888 889 plchan->phychan = ch; 890 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", 891 ch->id, 892 ch->signal, 893 plchan->name); 894 895 got_channel: 896 /* Assign the flow control signal to this channel */ 897 if (txd->direction == DMA_MEM_TO_DEV) 898 txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; 899 else if (txd->direction == DMA_DEV_TO_MEM) 900 txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; 901 902 plchan->phychan_hold++; 903 904 return 0; 905 } 906 907 static void release_phy_channel(struct pl08x_dma_chan *plchan) 908 { 909 struct pl08x_driver_data *pl08x = plchan->host; 910 911 if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { 912 pl08x->pd->put_signal(plchan); 913 plchan->phychan->signal = -1; 914 } 915 pl08x_put_phy_channel(pl08x, plchan->phychan); 916 plchan->phychan = NULL; 917 } 918 919 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) 920 { 921 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); 922 struct pl08x_txd *txd = to_pl08x_txd(tx); 923 unsigned long flags; 924 dma_cookie_t cookie; 925 926 spin_lock_irqsave(&plchan->lock, flags); 927 cookie = dma_cookie_assign(tx); 928 929 /* Put this onto the pending list */ 930 list_add_tail(&txd->node, &plchan->pend_list); 931 932 /* 933 * If there was no physical channel available for this memcpy, 934 * stack the request up and indicate that the channel is waiting 935 * for a free physical channel. 936 */ 937 if (!plchan->slave && !plchan->phychan) { 938 /* Do this memcpy whenever there is a channel ready */ 939 plchan->state = PL08X_CHAN_WAITING; 940 plchan->waiting = txd; 941 } else { 942 plchan->phychan_hold--; 943 } 944 945 spin_unlock_irqrestore(&plchan->lock, flags); 946 947 return cookie; 948 } 949 950 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 951 struct dma_chan *chan, unsigned long flags) 952 { 953 struct dma_async_tx_descriptor *retval = NULL; 954 955 return retval; 956 } 957 958 /* 959 * Code accessing dma_async_is_complete() in a tight loop may give problems. 960 * If slaves are relying on interrupts to signal completion this function 961 * must not be called with interrupts disabled. 
962 */ 963 static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, 964 dma_cookie_t cookie, struct dma_tx_state *txstate) 965 { 966 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 967 enum dma_status ret; 968 969 ret = dma_cookie_status(chan, cookie, txstate); 970 if (ret == DMA_SUCCESS) 971 return ret; 972 973 /* 974 * This cookie not complete yet 975 * Get number of bytes left in the active transactions and queue 976 */ 977 dma_set_residue(txstate, pl08x_getbytes_chan(plchan)); 978 979 if (plchan->state == PL08X_CHAN_PAUSED) 980 return DMA_PAUSED; 981 982 /* Whether waiting or running, we're in progress */ 983 return DMA_IN_PROGRESS; 984 } 985 986 /* PrimeCell DMA extension */ 987 struct burst_table { 988 u32 burstwords; 989 u32 reg; 990 }; 991 992 static const struct burst_table burst_sizes[] = { 993 { 994 .burstwords = 256, 995 .reg = PL080_BSIZE_256, 996 }, 997 { 998 .burstwords = 128, 999 .reg = PL080_BSIZE_128, 1000 }, 1001 { 1002 .burstwords = 64, 1003 .reg = PL080_BSIZE_64, 1004 }, 1005 { 1006 .burstwords = 32, 1007 .reg = PL080_BSIZE_32, 1008 }, 1009 { 1010 .burstwords = 16, 1011 .reg = PL080_BSIZE_16, 1012 }, 1013 { 1014 .burstwords = 8, 1015 .reg = PL080_BSIZE_8, 1016 }, 1017 { 1018 .burstwords = 4, 1019 .reg = PL080_BSIZE_4, 1020 }, 1021 { 1022 .burstwords = 0, 1023 .reg = PL080_BSIZE_1, 1024 }, 1025 }; 1026 1027 /* 1028 * Given the source and destination available bus masks, select which 1029 * will be routed to each port. We try to have source and destination 1030 * on separate ports, but always respect the allowable settings. 1031 */ 1032 static u32 pl08x_select_bus(u8 src, u8 dst) 1033 { 1034 u32 cctl = 0; 1035 1036 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) 1037 cctl |= PL080_CONTROL_DST_AHB2; 1038 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) 1039 cctl |= PL080_CONTROL_SRC_AHB2; 1040 1041 return cctl; 1042 } 1043 1044 static u32 pl08x_cctl(u32 cctl) 1045 { 1046 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | 1047 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | 1048 PL080_CONTROL_PROT_MASK); 1049 1050 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1051 return cctl | PL080_CONTROL_PROT_SYS; 1052 } 1053 1054 static u32 pl08x_width(enum dma_slave_buswidth width) 1055 { 1056 switch (width) { 1057 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1058 return PL080_WIDTH_8BIT; 1059 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1060 return PL080_WIDTH_16BIT; 1061 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1062 return PL080_WIDTH_32BIT; 1063 default: 1064 return ~0; 1065 } 1066 } 1067 1068 static u32 pl08x_burst(u32 maxburst) 1069 { 1070 int i; 1071 1072 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) 1073 if (burst_sizes[i].burstwords <= maxburst) 1074 break; 1075 1076 return burst_sizes[i].reg; 1077 } 1078 1079 static int dma_set_runtime_config(struct dma_chan *chan, 1080 struct dma_slave_config *config) 1081 { 1082 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1083 struct pl08x_driver_data *pl08x = plchan->host; 1084 enum dma_slave_buswidth addr_width; 1085 u32 width, burst, maxburst; 1086 u32 cctl = 0; 1087 1088 if (!plchan->slave) 1089 return -EINVAL; 1090 1091 /* Transfer direction */ 1092 plchan->runtime_direction = config->direction; 1093 if (config->direction == DMA_MEM_TO_DEV) { 1094 addr_width = config->dst_addr_width; 1095 maxburst = config->dst_maxburst; 1096 } else if (config->direction == DMA_DEV_TO_MEM) { 1097 addr_width = config->src_addr_width; 1098 maxburst = config->src_maxburst; 1099 } 
else { 1100 dev_err(&pl08x->adev->dev, 1101 "bad runtime_config: alien transfer direction\n"); 1102 return -EINVAL; 1103 } 1104 1105 width = pl08x_width(addr_width); 1106 if (width == ~0) { 1107 dev_err(&pl08x->adev->dev, 1108 "bad runtime_config: alien address width\n"); 1109 return -EINVAL; 1110 } 1111 1112 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; 1113 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; 1114 1115 /* 1116 * If this channel will only request single transfers, set this 1117 * down to ONE element. Also select one element if no maxburst 1118 * is specified. 1119 */ 1120 if (plchan->cd->single) 1121 maxburst = 1; 1122 1123 burst = pl08x_burst(maxburst); 1124 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1125 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1126 1127 plchan->device_fc = config->device_fc; 1128 1129 if (plchan->runtime_direction == DMA_DEV_TO_MEM) { 1130 plchan->src_addr = config->src_addr; 1131 plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | 1132 pl08x_select_bus(plchan->cd->periph_buses, 1133 pl08x->mem_buses); 1134 } else { 1135 plchan->dst_addr = config->dst_addr; 1136 plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | 1137 pl08x_select_bus(pl08x->mem_buses, 1138 plchan->cd->periph_buses); 1139 } 1140 1141 dev_dbg(&pl08x->adev->dev, 1142 "configured channel %s (%s) for %s, data width %d, " 1143 "maxburst %d words, LE, CCTL=0x%08x\n", 1144 dma_chan_name(chan), plchan->name, 1145 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", 1146 addr_width, 1147 maxburst, 1148 cctl); 1149 1150 return 0; 1151 } 1152 1153 /* 1154 * Slave transactions callback to the slave device to allow 1155 * synchronization of slave DMA signals with the DMAC enable 1156 */ 1157 static void pl08x_issue_pending(struct dma_chan *chan) 1158 { 1159 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1160 unsigned long flags; 1161 1162 spin_lock_irqsave(&plchan->lock, flags); 1163 /* Something is already active, or we're waiting for a channel... */ 1164 if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { 1165 spin_unlock_irqrestore(&plchan->lock, flags); 1166 return; 1167 } 1168 1169 /* Take the first element in the queue and execute it */ 1170 if (!list_empty(&plchan->pend_list)) { 1171 struct pl08x_txd *next; 1172 1173 next = list_first_entry(&plchan->pend_list, 1174 struct pl08x_txd, 1175 node); 1176 list_del(&next->node); 1177 plchan->state = PL08X_CHAN_RUNNING; 1178 1179 pl08x_start_txd(plchan, next); 1180 } 1181 1182 spin_unlock_irqrestore(&plchan->lock, flags); 1183 } 1184 1185 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, 1186 struct pl08x_txd *txd) 1187 { 1188 struct pl08x_driver_data *pl08x = plchan->host; 1189 unsigned long flags; 1190 int num_llis, ret; 1191 1192 num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1193 if (!num_llis) { 1194 spin_lock_irqsave(&plchan->lock, flags); 1195 pl08x_free_txd(pl08x, txd); 1196 spin_unlock_irqrestore(&plchan->lock, flags); 1197 return -EINVAL; 1198 } 1199 1200 spin_lock_irqsave(&plchan->lock, flags); 1201 1202 /* 1203 * See if we already have a physical channel allocated, 1204 * else this is the time to try to get one. 1205 */ 1206 ret = prep_phy_channel(plchan, txd); 1207 if (ret) { 1208 /* 1209 * No physical channel was available. 1210 * 1211 * memcpy transfers can be sorted out at submission time. 1212 * 1213 * Slave transfers may have been denied due to platform 1214 * channel muxing restrictions. 
Since there is no guarantee 1215 * that this will ever be resolved, and the signal must be 1216 * acquired AFTER acquiring the physical channel, we will let 1217 * them be NACK:ed with -EBUSY here. The drivers can retry 1218 * the prep() call if they are eager on doing this using DMA. 1219 */ 1220 if (plchan->slave) { 1221 pl08x_free_txd_list(pl08x, plchan); 1222 pl08x_free_txd(pl08x, txd); 1223 spin_unlock_irqrestore(&plchan->lock, flags); 1224 return -EBUSY; 1225 } 1226 } else 1227 /* 1228 * Else we're all set, paused and ready to roll, status 1229 * will switch to PL08X_CHAN_RUNNING when we call 1230 * issue_pending(). If there is something running on the 1231 * channel already we don't change its state. 1232 */ 1233 if (plchan->state == PL08X_CHAN_IDLE) 1234 plchan->state = PL08X_CHAN_PAUSED; 1235 1236 spin_unlock_irqrestore(&plchan->lock, flags); 1237 1238 return 0; 1239 } 1240 1241 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, 1242 unsigned long flags) 1243 { 1244 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1245 1246 if (txd) { 1247 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); 1248 txd->tx.flags = flags; 1249 txd->tx.tx_submit = pl08x_tx_submit; 1250 INIT_LIST_HEAD(&txd->node); 1251 INIT_LIST_HEAD(&txd->dsg_list); 1252 1253 /* Always enable error and terminal interrupts */ 1254 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 1255 PL080_CONFIG_TC_IRQ_MASK; 1256 } 1257 return txd; 1258 } 1259 1260 /* 1261 * Initialize a descriptor to be used by memcpy submit 1262 */ 1263 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1264 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 1265 size_t len, unsigned long flags) 1266 { 1267 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1268 struct pl08x_driver_data *pl08x = plchan->host; 1269 struct pl08x_txd *txd; 1270 struct pl08x_sg *dsg; 1271 int ret; 1272 1273 txd = pl08x_get_txd(plchan, flags); 1274 if (!txd) { 1275 dev_err(&pl08x->adev->dev, 1276 "%s no memory for descriptor\n", __func__); 1277 return NULL; 1278 } 1279 1280 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1281 if (!dsg) { 1282 pl08x_free_txd(pl08x, txd); 1283 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n", 1284 __func__); 1285 return NULL; 1286 } 1287 list_add_tail(&dsg->node, &txd->dsg_list); 1288 1289 txd->direction = DMA_NONE; 1290 dsg->src_addr = src; 1291 dsg->dst_addr = dest; 1292 dsg->len = len; 1293 1294 /* Set platform data for m2m */ 1295 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1296 txd->cctl = pl08x->pd->memcpy_channel.cctl & 1297 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 1298 1299 /* Both to be incremented or the code will break */ 1300 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1301 1302 if (pl08x->vd->dualmaster) 1303 txd->cctl |= pl08x_select_bus(pl08x->mem_buses, 1304 pl08x->mem_buses); 1305 1306 ret = pl08x_prep_channel_resources(plchan, txd); 1307 if (ret) 1308 return NULL; 1309 1310 return &txd->tx; 1311 } 1312 1313 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1314 struct dma_chan *chan, struct scatterlist *sgl, 1315 unsigned int sg_len, enum dma_transfer_direction direction, 1316 unsigned long flags, void *context) 1317 { 1318 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1319 struct pl08x_driver_data *pl08x = plchan->host; 1320 struct pl08x_txd *txd; 1321 struct pl08x_sg *dsg; 1322 struct scatterlist *sg; 1323 dma_addr_t slave_addr; 1324 int ret, tmp; 1325 1326 dev_dbg(&pl08x->adev->dev, "%s prepare 
transaction of %d bytes from %s\n", 1327 __func__, sgl->length, plchan->name); 1328 1329 txd = pl08x_get_txd(plchan, flags); 1330 if (!txd) { 1331 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1332 return NULL; 1333 } 1334 1335 if (direction != plchan->runtime_direction) 1336 dev_err(&pl08x->adev->dev, "%s DMA setup does not match " 1337 "the direction configured for the PrimeCell\n", 1338 __func__); 1339 1340 /* 1341 * Set up addresses, the PrimeCell configured address 1342 * will take precedence since this may configure the 1343 * channel target address dynamically at runtime. 1344 */ 1345 txd->direction = direction; 1346 1347 if (direction == DMA_MEM_TO_DEV) { 1348 txd->cctl = plchan->dst_cctl; 1349 slave_addr = plchan->dst_addr; 1350 } else if (direction == DMA_DEV_TO_MEM) { 1351 txd->cctl = plchan->src_cctl; 1352 slave_addr = plchan->src_addr; 1353 } else { 1354 pl08x_free_txd(pl08x, txd); 1355 dev_err(&pl08x->adev->dev, 1356 "%s direction unsupported\n", __func__); 1357 return NULL; 1358 } 1359 1360 if (plchan->device_fc) 1361 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 1362 PL080_FLOW_PER2MEM_PER; 1363 else 1364 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER : 1365 PL080_FLOW_PER2MEM; 1366 1367 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1368 1369 for_each_sg(sgl, sg, sg_len, tmp) { 1370 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1371 if (!dsg) { 1372 pl08x_free_txd(pl08x, txd); 1373 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 1374 __func__); 1375 return NULL; 1376 } 1377 list_add_tail(&dsg->node, &txd->dsg_list); 1378 1379 dsg->len = sg_dma_len(sg); 1380 if (direction == DMA_MEM_TO_DEV) { 1381 dsg->src_addr = sg_phys(sg); 1382 dsg->dst_addr = slave_addr; 1383 } else { 1384 dsg->src_addr = slave_addr; 1385 dsg->dst_addr = sg_phys(sg); 1386 } 1387 } 1388 1389 ret = pl08x_prep_channel_resources(plchan, txd); 1390 if (ret) 1391 return NULL; 1392 1393 return &txd->tx; 1394 } 1395 1396 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1397 unsigned long arg) 1398 { 1399 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1400 struct pl08x_driver_data *pl08x = plchan->host; 1401 unsigned long flags; 1402 int ret = 0; 1403 1404 /* Controls applicable to inactive channels */ 1405 if (cmd == DMA_SLAVE_CONFIG) { 1406 return dma_set_runtime_config(chan, 1407 (struct dma_slave_config *)arg); 1408 } 1409 1410 /* 1411 * Anything succeeds on channels with no physical allocation and 1412 * no queued transfers. 
1413 */ 1414 spin_lock_irqsave(&plchan->lock, flags); 1415 if (!plchan->phychan && !plchan->at) { 1416 spin_unlock_irqrestore(&plchan->lock, flags); 1417 return 0; 1418 } 1419 1420 switch (cmd) { 1421 case DMA_TERMINATE_ALL: 1422 plchan->state = PL08X_CHAN_IDLE; 1423 1424 if (plchan->phychan) { 1425 pl08x_terminate_phy_chan(pl08x, plchan->phychan); 1426 1427 /* 1428 * Mark physical channel as free and free any slave 1429 * signal 1430 */ 1431 release_phy_channel(plchan); 1432 plchan->phychan_hold = 0; 1433 } 1434 /* Dequeue jobs and free LLIs */ 1435 if (plchan->at) { 1436 pl08x_free_txd(pl08x, plchan->at); 1437 plchan->at = NULL; 1438 } 1439 /* Dequeue jobs not yet fired as well */ 1440 pl08x_free_txd_list(pl08x, plchan); 1441 break; 1442 case DMA_PAUSE: 1443 pl08x_pause_phy_chan(plchan->phychan); 1444 plchan->state = PL08X_CHAN_PAUSED; 1445 break; 1446 case DMA_RESUME: 1447 pl08x_resume_phy_chan(plchan->phychan); 1448 plchan->state = PL08X_CHAN_RUNNING; 1449 break; 1450 default: 1451 /* Unknown command */ 1452 ret = -ENXIO; 1453 break; 1454 } 1455 1456 spin_unlock_irqrestore(&plchan->lock, flags); 1457 1458 return ret; 1459 } 1460 1461 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1462 { 1463 struct pl08x_dma_chan *plchan; 1464 char *name = chan_id; 1465 1466 /* Reject channels for devices not bound to this driver */ 1467 if (chan->device->dev->driver != &pl08x_amba_driver.drv) 1468 return false; 1469 1470 plchan = to_pl08x_chan(chan); 1471 1472 /* Check that the channel is not taken! */ 1473 if (!strcmp(plchan->name, name)) 1474 return true; 1475 1476 return false; 1477 } 1478 1479 /* 1480 * Just check that the device is there and active 1481 * TODO: turn this bit on/off depending on the number of physical channels 1482 * actually used, if it is zero... well shut it off. That will save some 1483 * power. Cut the clock at the same time. 
1484 */ 1485 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1486 { 1487 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1488 } 1489 1490 static void pl08x_unmap_buffers(struct pl08x_txd *txd) 1491 { 1492 struct device *dev = txd->tx.chan->device->dev; 1493 struct pl08x_sg *dsg; 1494 1495 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 1496 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) 1497 list_for_each_entry(dsg, &txd->dsg_list, node) 1498 dma_unmap_single(dev, dsg->src_addr, dsg->len, 1499 DMA_TO_DEVICE); 1500 else { 1501 list_for_each_entry(dsg, &txd->dsg_list, node) 1502 dma_unmap_page(dev, dsg->src_addr, dsg->len, 1503 DMA_TO_DEVICE); 1504 } 1505 } 1506 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 1507 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) 1508 list_for_each_entry(dsg, &txd->dsg_list, node) 1509 dma_unmap_single(dev, dsg->dst_addr, dsg->len, 1510 DMA_FROM_DEVICE); 1511 else 1512 list_for_each_entry(dsg, &txd->dsg_list, node) 1513 dma_unmap_page(dev, dsg->dst_addr, dsg->len, 1514 DMA_FROM_DEVICE); 1515 } 1516 } 1517 1518 static void pl08x_tasklet(unsigned long data) 1519 { 1520 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; 1521 struct pl08x_driver_data *pl08x = plchan->host; 1522 struct pl08x_txd *txd; 1523 unsigned long flags; 1524 1525 spin_lock_irqsave(&plchan->lock, flags); 1526 1527 txd = plchan->at; 1528 plchan->at = NULL; 1529 1530 if (txd) { 1531 /* Update last completed */ 1532 dma_cookie_complete(&txd->tx); 1533 } 1534 1535 /* If a new descriptor is queued, set it up plchan->at is NULL here */ 1536 if (!list_empty(&plchan->pend_list)) { 1537 struct pl08x_txd *next; 1538 1539 next = list_first_entry(&plchan->pend_list, 1540 struct pl08x_txd, 1541 node); 1542 list_del(&next->node); 1543 1544 pl08x_start_txd(plchan, next); 1545 } else if (plchan->phychan_hold) { 1546 /* 1547 * This channel is still in use - we have a new txd being 1548 * prepared and will soon be queued. Don't give up the 1549 * physical channel. 1550 */ 1551 } else { 1552 struct pl08x_dma_chan *waiting = NULL; 1553 1554 /* 1555 * No more jobs, so free up the physical channel 1556 * Free any allocated signal on slave transfers too 1557 */ 1558 release_phy_channel(plchan); 1559 plchan->state = PL08X_CHAN_IDLE; 1560 1561 /* 1562 * And NOW before anyone else can grab that free:d up 1563 * physical channel, see if there is some memcpy pending 1564 * that seriously needs to start because of being stacked 1565 * up while we were choking the physical channels with data. 
1566 */ 1567 list_for_each_entry(waiting, &pl08x->memcpy.channels, 1568 chan.device_node) { 1569 if (waiting->state == PL08X_CHAN_WAITING && 1570 waiting->waiting != NULL) { 1571 int ret; 1572 1573 /* This should REALLY not fail now */ 1574 ret = prep_phy_channel(waiting, 1575 waiting->waiting); 1576 BUG_ON(ret); 1577 waiting->phychan_hold--; 1578 waiting->state = PL08X_CHAN_RUNNING; 1579 waiting->waiting = NULL; 1580 pl08x_issue_pending(&waiting->chan); 1581 break; 1582 } 1583 } 1584 } 1585 1586 spin_unlock_irqrestore(&plchan->lock, flags); 1587 1588 if (txd) { 1589 dma_async_tx_callback callback = txd->tx.callback; 1590 void *callback_param = txd->tx.callback_param; 1591 1592 /* Don't try to unmap buffers on slave channels */ 1593 if (!plchan->slave) 1594 pl08x_unmap_buffers(txd); 1595 1596 /* Free the descriptor */ 1597 spin_lock_irqsave(&plchan->lock, flags); 1598 pl08x_free_txd(pl08x, txd); 1599 spin_unlock_irqrestore(&plchan->lock, flags); 1600 1601 /* Callback to signal completion */ 1602 if (callback) 1603 callback(callback_param); 1604 } 1605 } 1606 1607 static irqreturn_t pl08x_irq(int irq, void *dev) 1608 { 1609 struct pl08x_driver_data *pl08x = dev; 1610 u32 mask = 0, err, tc, i; 1611 1612 /* check & clear - ERR & TC interrupts */ 1613 err = readl(pl08x->base + PL080_ERR_STATUS); 1614 if (err) { 1615 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", 1616 __func__, err); 1617 writel(err, pl08x->base + PL080_ERR_CLEAR); 1618 } 1619 tc = readl(pl08x->base + PL080_INT_STATUS); 1620 if (tc) 1621 writel(tc, pl08x->base + PL080_TC_CLEAR); 1622 1623 if (!err && !tc) 1624 return IRQ_NONE; 1625 1626 for (i = 0; i < pl08x->vd->channels; i++) { 1627 if (((1 << i) & err) || ((1 << i) & tc)) { 1628 /* Locate physical channel */ 1629 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1630 struct pl08x_dma_chan *plchan = phychan->serving; 1631 1632 if (!plchan) { 1633 dev_err(&pl08x->adev->dev, 1634 "%s Error TC interrupt on unused channel: 0x%08x\n", 1635 __func__, i); 1636 continue; 1637 } 1638 1639 /* Schedule tasklet on this channel */ 1640 tasklet_schedule(&plchan->tasklet); 1641 mask |= (1 << i); 1642 } 1643 } 1644 1645 return mask ? IRQ_HANDLED : IRQ_NONE; 1646 } 1647 1648 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) 1649 { 1650 u32 cctl = pl08x_cctl(chan->cd->cctl); 1651 1652 chan->slave = true; 1653 chan->name = chan->cd->bus_id; 1654 chan->src_addr = chan->cd->addr; 1655 chan->dst_addr = chan->cd->addr; 1656 chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | 1657 pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); 1658 chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | 1659 pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); 1660 } 1661 1662 /* 1663 * Initialise the DMAC memcpy/slave channels. 1664 * Make a local wrapper to hold required data 1665 */ 1666 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1667 struct dma_device *dmadev, unsigned int channels, bool slave) 1668 { 1669 struct pl08x_dma_chan *chan; 1670 int i; 1671 1672 INIT_LIST_HEAD(&dmadev->channels); 1673 1674 /* 1675 * Register as many many memcpy as we have physical channels, 1676 * we won't always be able to use all but the code will have 1677 * to cope with that situation. 
1678 */ 1679 for (i = 0; i < channels; i++) { 1680 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1681 if (!chan) { 1682 dev_err(&pl08x->adev->dev, 1683 "%s no memory for channel\n", __func__); 1684 return -ENOMEM; 1685 } 1686 1687 chan->host = pl08x; 1688 chan->state = PL08X_CHAN_IDLE; 1689 1690 if (slave) { 1691 chan->cd = &pl08x->pd->slave_channels[i]; 1692 pl08x_dma_slave_init(chan); 1693 } else { 1694 chan->cd = &pl08x->pd->memcpy_channel; 1695 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1696 if (!chan->name) { 1697 kfree(chan); 1698 return -ENOMEM; 1699 } 1700 } 1701 if (chan->cd->circular_buffer) { 1702 dev_err(&pl08x->adev->dev, 1703 "channel %s: circular buffers not supported\n", 1704 chan->name); 1705 kfree(chan); 1706 continue; 1707 } 1708 dev_dbg(&pl08x->adev->dev, 1709 "initialize virtual channel \"%s\"\n", 1710 chan->name); 1711 1712 chan->chan.device = dmadev; 1713 dma_cookie_init(&chan->chan); 1714 1715 spin_lock_init(&chan->lock); 1716 INIT_LIST_HEAD(&chan->pend_list); 1717 tasklet_init(&chan->tasklet, pl08x_tasklet, 1718 (unsigned long) chan); 1719 1720 list_add_tail(&chan->chan.device_node, &dmadev->channels); 1721 } 1722 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1723 i, slave ? "slave" : "memcpy"); 1724 return i; 1725 } 1726 1727 static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1728 { 1729 struct pl08x_dma_chan *chan = NULL; 1730 struct pl08x_dma_chan *next; 1731 1732 list_for_each_entry_safe(chan, 1733 next, &dmadev->channels, chan.device_node) { 1734 list_del(&chan->chan.device_node); 1735 kfree(chan); 1736 } 1737 } 1738 1739 #ifdef CONFIG_DEBUG_FS 1740 static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1741 { 1742 switch (state) { 1743 case PL08X_CHAN_IDLE: 1744 return "idle"; 1745 case PL08X_CHAN_RUNNING: 1746 return "running"; 1747 case PL08X_CHAN_PAUSED: 1748 return "paused"; 1749 case PL08X_CHAN_WAITING: 1750 return "waiting"; 1751 default: 1752 break; 1753 } 1754 return "UNKNOWN STATE"; 1755 } 1756 1757 static int pl08x_debugfs_show(struct seq_file *s, void *data) 1758 { 1759 struct pl08x_driver_data *pl08x = s->private; 1760 struct pl08x_dma_chan *chan; 1761 struct pl08x_phy_chan *ch; 1762 unsigned long flags; 1763 int i; 1764 1765 seq_printf(s, "PL08x physical channels:\n"); 1766 seq_printf(s, "CHANNEL:\tUSER:\n"); 1767 seq_printf(s, "--------\t-----\n"); 1768 for (i = 0; i < pl08x->vd->channels; i++) { 1769 struct pl08x_dma_chan *virt_chan; 1770 1771 ch = &pl08x->phy_chans[i]; 1772 1773 spin_lock_irqsave(&ch->lock, flags); 1774 virt_chan = ch->serving; 1775 1776 seq_printf(s, "%d\t\t%s\n", 1777 ch->id, virt_chan ? 
virt_chan->name : "(none)"); 1778 1779 spin_unlock_irqrestore(&ch->lock, flags); 1780 } 1781 1782 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 1783 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1784 seq_printf(s, "--------\t------\n"); 1785 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { 1786 seq_printf(s, "%s\t\t%s\n", chan->name, 1787 pl08x_state_str(chan->state)); 1788 } 1789 1790 seq_printf(s, "\nPL08x virtual slave channels:\n"); 1791 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1792 seq_printf(s, "--------\t------\n"); 1793 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { 1794 seq_printf(s, "%s\t\t%s\n", chan->name, 1795 pl08x_state_str(chan->state)); 1796 } 1797 1798 return 0; 1799 } 1800 1801 static int pl08x_debugfs_open(struct inode *inode, struct file *file) 1802 { 1803 return single_open(file, pl08x_debugfs_show, inode->i_private); 1804 } 1805 1806 static const struct file_operations pl08x_debugfs_operations = { 1807 .open = pl08x_debugfs_open, 1808 .read = seq_read, 1809 .llseek = seq_lseek, 1810 .release = single_release, 1811 }; 1812 1813 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1814 { 1815 /* Expose a simple debugfs interface to view all clocks */ 1816 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 1817 S_IFREG | S_IRUGO, NULL, pl08x, 1818 &pl08x_debugfs_operations); 1819 } 1820 1821 #else 1822 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1823 { 1824 } 1825 #endif 1826 1827 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) 1828 { 1829 struct pl08x_driver_data *pl08x; 1830 const struct vendor_data *vd = id->data; 1831 int ret = 0; 1832 int i; 1833 1834 ret = amba_request_regions(adev, NULL); 1835 if (ret) 1836 return ret; 1837 1838 /* Create the driver state holder */ 1839 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); 1840 if (!pl08x) { 1841 ret = -ENOMEM; 1842 goto out_no_pl08x; 1843 } 1844 1845 pm_runtime_set_active(&adev->dev); 1846 pm_runtime_enable(&adev->dev); 1847 1848 /* Initialize memcpy engine */ 1849 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 1850 pl08x->memcpy.dev = &adev->dev; 1851 pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; 1852 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; 1853 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; 1854 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 1855 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 1856 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 1857 pl08x->memcpy.device_control = pl08x_control; 1858 1859 /* Initialize slave engine */ 1860 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 1861 pl08x->slave.dev = &adev->dev; 1862 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; 1863 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; 1864 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 1865 pl08x->slave.device_tx_status = pl08x_dma_tx_status; 1866 pl08x->slave.device_issue_pending = pl08x_issue_pending; 1867 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 1868 pl08x->slave.device_control = pl08x_control; 1869 1870 /* Get the platform data */ 1871 pl08x->pd = dev_get_platdata(&adev->dev); 1872 if (!pl08x->pd) { 1873 dev_err(&adev->dev, "no platform data supplied\n"); 1874 goto out_no_platdata; 1875 } 1876 1877 /* Assign useful pointers to the driver state */ 1878 pl08x->adev = adev; 1879 pl08x->vd = vd; 1880 1881 /* By default, AHB1 only. 
If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, aligned on an 8-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Clear any pending interrupts before attaching the handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->serving = NULL;
		ch->signal = -1;
		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					pl08x->pd->num_slave_channels, true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);

	pm_runtime_put(&adev->dev);
	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);

	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}

/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280880,
		.mask	= 0x00ffffff,
		.data	= &vendor_pl080,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;
	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       ": failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);