/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */

#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers
 *	are missing
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
	bool nomadik;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

/**
 * struct pl08x_bus_data - information about source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: memory base address for this physical channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released.
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	struct pl08x_lli *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
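	 * ccfg carries the flow control, IRQ mask and request-signal
	 * selection bits that are built up at prepare time.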
	 */
	u32 ccfg;
	bool done;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: the runtime slave configuration, including the RX/TX addresses
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
};

/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_signal) {
		ret = pd->get_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_signal) {
			pd->put_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_lli *lli;
	u32 val;

	list_del(&txd->vd.node);

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	lli = &txd->llis_va[0];

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
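 *
 * The pause below polls for up to roughly 1 ms (1000 x 1 us) for the
 * channel to go inactive before complaining.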
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	size_t bytes = 0;

	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
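 *
 * Channels are scanned in index order; the first one that is neither
 * locked to the secure world nor already serving a virtual channel is
 * claimed under its own lock.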
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

/* Mark the physical channel as free.  Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	ch->serving = NULL;
}

/*
 * Try to allocate a physical channel.  When successful, assign it to
 * this virtual channel, and initiate the next descriptor.  The
 * virtual channel lock must be held at this point.
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		return;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;

 retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * Eww.  We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}

struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  The slave bus will be
 * chosen as the victim in case src & dest are not similarly aligned, i.e.
 * if, after aligning the master's address to the width requirements of the
 * transfer (by sending a few bytes one at a time), the slave is still not
 * aligned, then its width will be reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
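 * - for mem-to-mem (both addresses incrementing) the wider of the two
 *   busses is chosen as master, the destination winning a tie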
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advance the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
		sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
	u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
	(*total_bytes) += len;
}

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	struct pl08x_lli *llis_va;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev,
			"src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the user driver, with
		 *   the memory address and zero length.  We pass this to the
		 *   controller and after the transfer it will receive the last
		 *   burst request from the peripheral and so the transfer
		 *   finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, obviously, as the DMA controller doesn't know when a
		 *   lli's transfer gets over, it can't load the next lli.  So
		 *   in this case there has to be an assumption that only one
		 *   lli is supported.  Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
					(bd.dstbus.addr % bd.dstbus.buswidth)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
			break;
		}

		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if ((mbus->addr) % (mbus->buswidth)) {
			early_bytes = mbus->buswidth - (mbus->addr) %
				(mbus->buswidth);
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08x)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
				&total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (sbus->addr % sbus->buswidth) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
				PL080_CONTROL_TRANSFER_SIZE_MASK;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate actual transfer size in relation to
				 * bus width and get a maximum remainder of the
				 * highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
						lli_len, cctl);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(&bd, &cctl, bd.remainder,
						num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, (u32) MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI chain. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		dev_vdbg(&pl08x->adev->dev,
			 "%-3s %-9s %-10s %-10s %-10s %s\n",
			 "lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
				);
		}
	}
#endif

	return num_llis;
}

static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->vd.tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		}
	}
	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
	}
}

static void pl08x_desc_free(struct virt_dma_desc *vd)
{
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

	if (!plchan->slave)
		pl08x_unmap_buffers(txd);

	if (!txd->done)
		pl08x_release_mux(plchan);

	pl08x_free_txd(plchan->host, txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&plchan->vc, &head);
	vchan_dma_desc_free_list(&plchan->vc, &head);
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return ret;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
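 *
 * Concretely: the destination is routed to AHB2 when it cannot use AHB1,
 * or when it may use AHB2 while the source can take AHB1; the source is
 * routed to AHB2 when it cannot use AHB1, or when it may use AHB2 and the
 * destination has not already claimed AHB2.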
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}

static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}

static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
	enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}

static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	plchan->cfg = *config;

	return 0;
}

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (vchan_issue_pending(&plchan->vc)) {
		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
			pl08x_phy_alloc_and_start(plchan);
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
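 * (the transfer increments both the source and destination addresses and
 * uses memory-to-memory flow control, with the DMAC as flow controller)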
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
				__func__);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	enum dma_slave_buswidth addr_width;
	dma_addr_t slave_addr;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		 plchan->signal, plchan->name);

	/* Assign the flow control signal to this channel */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else {
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			pl08x_phy_free(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_desc_free(&plchan->at->vd);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off.  That will save some
 * power.  Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			spin_lock(&plchan->vc.lock);
			tx = plchan->at;
			if (tx) {
				plchan->at = NULL;
				/*
				 * This descriptor is done, release its mux
				 * reservation.
				 */
				pl08x_release_mux(plchan);
				tx->done = true;
				vchan_cookie_complete(&tx->vd);

				/*
				 * And start the next descriptor (if any),
				 * otherwise free this channel.
				 */
				if (vchan_next_desc(&plchan->vc))
					pl08x_start_next_txd(plchan);
				else
					pl08x_phy_free(plchan);
			}
			spin_unlock(&plchan->vc.lock);

			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->vc.desc_free = pl08x_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		kfree(chan);
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}

static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
			S_IFREG | S_IRUGO, NULL, pl08x,
			&pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif

static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only.  If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, aligned on an 8-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only.  Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->base + PL080_CH_CONFIG);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
			pl08x->pd->num_slave_channels, true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to enumerate slave channels - %d\n",
			__func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register memcpy as an async device - %d\n",
			__func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register slave as an async device - %d\n",
			__func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);

	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}

/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_nomadik = {
	.channels = 8,
	.dualmaster = true,
	.nomadik = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_nomadik,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl08x_ids);

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;
	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);