/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
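 *
 * As a worked instance of the FIFO rule above (example depth only, not
 * taken from any particular peripheral): for a peripheral with a 16-entry
 * FIFO this works out as a source burst size of 8 transfers and a
 * destination burst size of 16 transfers.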
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/hardware/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers are
 *	missing
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
	bool nomadik;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: memory base address for this physical channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released.
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	struct pl08x_lli *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: slave configuration, including the RX/TX runtime addresses
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
};

/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
 */
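/*
 * Illustrative sketch only - the board_* names below are hypothetical and
 * not part of this driver.  A platform exposed to the "denial of use"
 * situation described in the header comment would typically implement the
 * get_signal and put_signal hooks in its struct pl08x_platform_data along
 * these lines, handing out one of the 16 request lines or failing with
 * -EBUSY:
 *
 *	static DECLARE_BITMAP(board_dma_signals, 16);
 *
 *	static int board_get_signal(const struct pl08x_channel_data *cd)
 *	{
 *		int signal = find_first_zero_bit(board_dma_signals, 16);
 *
 *		if (signal == 16)
 *			return -EBUSY;
 *		set_bit(signal, board_dma_signals);
 *		return signal;
 *	}
 *
 *	static void board_put_signal(const struct pl08x_channel_data *cd,
 *				     int signal)
 *	{
 *		clear_bit(signal, board_dma_signals);
 *	}
 */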
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_signal) {
		ret = pd->get_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_signal) {
			pd->put_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_lli *lli;
	u32 val;

	list_del(&txd->vd.node);

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	lli = &txd->llis_va[0];

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch ((cctl & PL080_CONTROL_SWIDTH_MASK) >>
	        PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	size_t bytes = 0;

	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
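 *
 * Virtual channels that fail to obtain a physical channel here are left in
 * the PL08X_CHAN_WAITING state and are restarted from pl08x_phy_free()
 * once another transfer releases its physical channel.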
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

/* Mark the physical channel as free.  Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	ch->serving = NULL;
}

/*
 * Try to allocate a physical channel.  When successful, assign it to
 * this virtual channel, and initiate the next descriptor.  The
 * virtual channel lock must be held at this point.
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		return;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;

 retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * Eww.  We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
	size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}

struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  The slave bus will be
 * chosen as the victim: i.e. if, after aligning the master's address to the
 * width requirements of the transfer (by sending a few bytes byte by byte),
 * the slave is still not aligned, then its width will be reduced to one byte.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advance the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
		sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
	u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
	(*total_bytes) += len;
}

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
	struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	struct pl08x_lli *llis_va;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the client driver,
		 *   with the memory address and zero length.  We pass this
		 *   to the controller and after the transfer it will receive
		 *   the last burst request from the peripheral and so the
		 *   transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, as the DMA controller doesn't know when one LLI's
		 *   transfer is over, it can't load the next LLI.  So in this
		 *   case there has to be an assumption that only one LLI is
		 *   supported.  Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
					(bd.dstbus.addr % bd.dstbus.buswidth)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
			break;
		}

		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if ((mbus->addr) % (mbus->buswidth)) {
			early_bytes = mbus->buswidth - (mbus->addr) %
				(mbus->buswidth);
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08x)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
				&total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (sbus->addr % sbus->buswidth) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
				PL080_CONTROL_TRANSFER_SIZE_MASK;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate actual transfer size in relation to
				 * bus width and get a maximum remainder of the
				 * highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;
				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
						lli_len, cctl);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(&bd, &cctl, bd.remainder,
						num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, (u32) MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI list. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		dev_vdbg(&pl08x->adev->dev,
			 "%-3s %-9s  %-10s %-10s %-10s %s\n",
			 "lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
				);
		}
	}
#endif

	return num_llis;
}

static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
	struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->vd.tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		}
	}
	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
	}
}

static void pl08x_desc_free(struct virt_dma_desc *vd)
{
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

	if (!plchan->slave)
		pl08x_unmap_buffers(txd);

	if (!txd->done)
		pl08x_release_mux(plchan);

	pl08x_free_txd(plchan->host, txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	LIST_HEAD(head);
	struct pl08x_txd *txd;

	vchan_get_all_descriptors(&plchan->vc, &head);

	while (!list_empty(&head)) {
		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
		list_del(&txd->vd.node);
		pl08x_desc_free(&txd->vd);
	}
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return ret;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
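 *
 * For example (assuming a dual-master PL080): with src = PL08X_AHB1 and
 * dst = PL08X_AHB1 | PL08X_AHB2, the source is left on AHB1 and the
 * destination is moved to AHB2, so the two ends of the transfer do not
 * compete for the same master.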
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}

static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}

static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
	enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}

static int dma_set_runtime_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	plchan->cfg = *config;

	return 0;
}

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (vchan_issue_pending(&plchan->vc)) {
		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
			pl08x_phy_alloc_and_start(plchan);
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
				__func__);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	enum dma_slave_buswidth addr_width;
	dma_addr_t slave_addr;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
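	 *
	 * A minimal sketch of the client side for a MEM_TO_DEV transfer
	 * (fifo_phys_addr and the burst/width figures are placeholders,
	 * not something defined by this driver):
	 *
	 *	struct dma_slave_config cfg = {
	 *		.direction = DMA_MEM_TO_DEV,
	 *		.dst_addr = fifo_phys_addr,
	 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	 *		.dst_maxburst = 16,
	 *	};
	 *
	 *	dmaengine_slave_config(chan, &cfg);
	 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
	 *				       DMA_MEM_TO_DEV,
	 *				       DMA_PREP_INTERRUPT);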
	 */
	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		 plchan->signal, plchan->name);

	/* Assign the flow control signal to this channel */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else {
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
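	 *
	 * (Clients normally reach this through the generic dmaengine
	 * wrappers, e.g. dmaengine_terminate_all(), dmaengine_pause() and
	 * dmaengine_resume(), which issue the commands handled below.)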
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			pl08x_phy_free(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_desc_free(&plchan->at->vd);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power.  Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			spin_lock(&plchan->vc.lock);
			tx = plchan->at;
			if (tx) {
				plchan->at = NULL;
				/*
				 * This descriptor is done, release its mux
				 * reservation.
				 */
				pl08x_release_mux(plchan);
				tx->done = true;
				vchan_cookie_complete(&tx->vd);

				/*
				 * And start the next descriptor (if any),
				 * otherwise free this channel.
				 */
				if (vchan_next_desc(&plchan->vc))
					pl08x_start_next_txd(plchan);
				else
					pl08x_phy_free(plchan);
			}
			spin_unlock(&plchan->vc.lock);

			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->vc.desc_free = pl08x_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		kfree(chan);
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}

static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
			S_IFREG | S_IRUGO, NULL, pl08x,
			&pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif

static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only.  If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, aligned to PL08X_ALIGN (8) bytes */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only.  Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->base + PL080_CH_CONFIG);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					pl08x->pd->num_slave_channels, true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to enumerate slave channels - %d\n",
			__func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register memcpy as an async device - %d\n",
			__func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register slave as an async device - %d\n",
			__func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);

	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}

/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_nomadik = {
	.channels = 8,
	.dualmaster = true,
	.nomadik = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_nomadik,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;
	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);