1 /* 2 * Copyright (c) 2006 ARM Ltd. 3 * Copyright (c) 2010 ST-Ericsson SA 4 * 5 * Author: Peter Pearse <peter.pearse@arm.com> 6 * Author: Linus Walleij <linus.walleij@stericsson.com> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License as published by the Free 10 * Software Foundation; either version 2 of the License, or (at your option) 11 * any later version. 12 * 13 * This program is distributed in the hope that it will be useful, but WITHOUT 14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 * more details. 17 * 18 * The full GNU General Public License is in this distribution in the file 19 * called COPYING. 20 * 21 * Documentation: ARM DDI 0196G == PL080 22 * Documentation: ARM DDI 0218E == PL081 23 * Documentation: S3C6410 User's Manual == PL080S 24 * 25 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any 26 * channel. 27 * 28 * The PL080 has 8 channels available for simultaneous use, and the PL081 29 * has only two channels. So on these DMA controllers the number of channels 30 * and the number of incoming DMA signals are two totally different things. 31 * It is usually not possible to theoretically handle all physical signals, 32 * so a multiplexing scheme with possible denial of use is necessary. 33 * 34 * The PL080 has a dual bus master, PL081 has a single master. 35 * 36 * PL080S is a version modified by Samsung and used in S3C64xx SoCs. 37 * It differs in following aspects: 38 * - CH_CONFIG register at different offset, 39 * - separate CH_CONTROL2 register for transfer size, 40 * - bigger maximum transfer size, 41 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word, 42 * - no support for peripheral flow control. 43 * 44 * Memory to peripheral transfer may be visualized as 45 * Get data from memory to DMAC 46 * Until no data left 47 * On burst request from peripheral 48 * Destination burst from DMAC to peripheral 49 * Clear burst request 50 * Raise terminal count interrupt 51 * 52 * For peripherals with a FIFO: 53 * Source burst size == half the depth of the peripheral FIFO 54 * Destination burst size == the depth of the peripheral FIFO 55 * 56 * (Bursts are irrelevant for mem to mem transfers - there are no burst 57 * signals, the DMA controller will simply facilitate its AHB master.) 58 * 59 * ASSUMES default (little) endianness for DMA transfers 60 * 61 * The PL08x has two flow control settings: 62 * - DMAC flow control: the transfer size defines the number of transfers 63 * which occur for the current LLI entry, and the DMAC raises TC at the 64 * end of every LLI entry. Observed behaviour shows the DMAC listening 65 * to both the BREQ and SREQ signals (contrary to documented), 66 * transferring data if either is active. The LBREQ and LSREQ signals 67 * are ignored. 68 * 69 * - Peripheral flow control: the transfer size is ignored (and should be 70 * zero). The data is transferred from the current LLI entry, until 71 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 72 * will then move to the next LLI entry. Unsupported by PL080S. 73 */ 74 #include <linux/amba/bus.h> 75 #include <linux/amba/pl08x.h> 76 #include <linux/debugfs.h> 77 #include <linux/delay.h> 78 #include <linux/device.h> 79 #include <linux/dmaengine.h> 80 #include <linux/dmapool.h> 81 #include <linux/dma-mapping.h> 82 #include <linux/export.h> 83 #include <linux/init.h> 84 #include <linux/interrupt.h> 85 #include <linux/module.h> 86 #include <linux/of.h> 87 #include <linux/of_dma.h> 88 #include <linux/pm_runtime.h> 89 #include <linux/seq_file.h> 90 #include <linux/slab.h> 91 #include <linux/amba/pl080.h> 92 93 #include "dmaengine.h" 94 #include "virt-dma.h" 95 96 #define DRIVER_NAME "pl08xdmac" 97 98 #define PL80X_DMA_BUSWIDTHS \ 99 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 100 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 101 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 102 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) 103 104 static struct amba_driver pl08x_amba_driver; 105 struct pl08x_driver_data; 106 107 /** 108 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 109 * @channels: the number of channels available in this variant 110 * @signals: the number of request signals available from the hardware 111 * @dualmaster: whether this version supports dual AHB masters or not. 112 * @nomadik: whether the channels have Nomadik security extension bits 113 * that need to be checked for permission before use and some registers are 114 * missing 115 * @pl080s: whether this version is a PL080S, which has separate register and 116 * LLI word for transfer size. 117 * @max_transfer_size: the maximum single element transfer size for this 118 * PL08x variant. 119 */ 120 struct vendor_data { 121 u8 config_offset; 122 u8 channels; 123 u8 signals; 124 bool dualmaster; 125 bool nomadik; 126 bool pl080s; 127 u32 max_transfer_size; 128 }; 129 130 /** 131 * struct pl08x_bus_data - information of source or destination 132 * busses for a transfer 133 * @addr: current address 134 * @maxwidth: the maximum width of a transfer on this bus 135 * @buswidth: the width of this bus in bytes: 1, 2 or 4 136 */ 137 struct pl08x_bus_data { 138 dma_addr_t addr; 139 u8 maxwidth; 140 u8 buswidth; 141 }; 142 143 #define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth) 144 145 /** 146 * struct pl08x_phy_chan - holder for the physical channels 147 * @id: physical index to this channel 148 * @lock: a lock to use when altering an instance of this struct 149 * @serving: the virtual channel currently being served by this physical 150 * channel 151 * @locked: channel unavailable for the system, e.g. dedicated to secure 152 * world 153 */ 154 struct pl08x_phy_chan { 155 unsigned int id; 156 void __iomem *base; 157 void __iomem *reg_config; 158 spinlock_t lock; 159 struct pl08x_dma_chan *serving; 160 bool locked; 161 }; 162 163 /** 164 * struct pl08x_sg - structure containing data per sg 165 * @src_addr: src address of sg 166 * @dst_addr: dst address of sg 167 * @len: transfer len in bytes 168 * @node: node for txd's dsg_list 169 */ 170 struct pl08x_sg { 171 dma_addr_t src_addr; 172 dma_addr_t dst_addr; 173 size_t len; 174 struct list_head node; 175 }; 176 177 /** 178 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor 179 * @vd: virtual DMA descriptor 180 * @dsg_list: list of children sg's 181 * @llis_bus: DMA memory address (physical) start for the LLIs 182 * @llis_va: virtual memory address start for the LLIs 183 * @cctl: control reg values for current txd 184 * @ccfg: config reg values for current txd 185 * @done: this marks completed descriptors, which should not have their 186 * mux released. 187 * @cyclic: indicate cyclic transfers 188 */ 189 struct pl08x_txd { 190 struct virt_dma_desc vd; 191 struct list_head dsg_list; 192 dma_addr_t llis_bus; 193 u32 *llis_va; 194 /* Default cctl value for LLIs */ 195 u32 cctl; 196 /* 197 * Settings to be put into the physical channel when we 198 * trigger this txd. Other registers are in llis_va[0]. 199 */ 200 u32 ccfg; 201 bool done; 202 bool cyclic; 203 }; 204 205 /** 206 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel 207 * states 208 * @PL08X_CHAN_IDLE: the channel is idle 209 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport 210 * channel and is running a transfer on it 211 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport 212 * channel, but the transfer is currently paused 213 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport 214 * channel to become available (only pertains to memcpy channels) 215 */ 216 enum pl08x_dma_chan_state { 217 PL08X_CHAN_IDLE, 218 PL08X_CHAN_RUNNING, 219 PL08X_CHAN_PAUSED, 220 PL08X_CHAN_WAITING, 221 }; 222 223 /** 224 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel 225 * @vc: wrappped virtual channel 226 * @phychan: the physical channel utilized by this channel, if there is one 227 * @name: name of channel 228 * @cd: channel platform data 229 * @runtime_addr: address for RX/TX according to the runtime config 230 * @at: active transaction on this channel 231 * @lock: a lock for this channel data 232 * @host: a pointer to the host (internal use) 233 * @state: whether the channel is idle, paused, running etc 234 * @slave: whether this channel is a device (slave) or for memcpy 235 * @signal: the physical DMA request signal which this channel is using 236 * @mux_use: count of descriptors using this DMA request signal setting 237 */ 238 struct pl08x_dma_chan { 239 struct virt_dma_chan vc; 240 struct pl08x_phy_chan *phychan; 241 const char *name; 242 struct pl08x_channel_data *cd; 243 struct dma_slave_config cfg; 244 struct pl08x_txd *at; 245 struct pl08x_driver_data *host; 246 enum pl08x_dma_chan_state state; 247 bool slave; 248 int signal; 249 unsigned mux_use; 250 }; 251 252 /** 253 * struct pl08x_driver_data - the local state holder for the PL08x 254 * @slave: slave engine for this instance 255 * @memcpy: memcpy engine for this instance 256 * @base: virtual memory base (remapped) for the PL08x 257 * @adev: the corresponding AMBA (PrimeCell) bus entry 258 * @vd: vendor data for this PL08x variant 259 * @pd: platform data passed in from the platform/machine 260 * @phy_chans: array of data for the physical channels 261 * @pool: a pool for the LLI descriptors 262 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI 263 * fetches 264 * @mem_buses: set to indicate memory transfers on AHB2. 265 * @lock: a spinlock for this struct 266 */ 267 struct pl08x_driver_data { 268 struct dma_device slave; 269 struct dma_device memcpy; 270 void __iomem *base; 271 struct amba_device *adev; 272 const struct vendor_data *vd; 273 struct pl08x_platform_data *pd; 274 struct pl08x_phy_chan *phy_chans; 275 struct dma_pool *pool; 276 u8 lli_buses; 277 u8 mem_buses; 278 u8 lli_words; 279 }; 280 281 /* 282 * PL08X specific defines 283 */ 284 285 /* The order of words in an LLI. */ 286 #define PL080_LLI_SRC 0 287 #define PL080_LLI_DST 1 288 #define PL080_LLI_LLI 2 289 #define PL080_LLI_CCTL 3 290 #define PL080S_LLI_CCTL2 4 291 292 /* Total words in an LLI. */ 293 #define PL080_LLI_WORDS 4 294 #define PL080S_LLI_WORDS 8 295 296 /* 297 * Number of LLIs in each LLI buffer allocated for one transfer 298 * (maximum times we call dma_pool_alloc on this pool without freeing) 299 */ 300 #define MAX_NUM_TSFR_LLIS 512 301 #define PL08X_ALIGN 8 302 303 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 304 { 305 return container_of(chan, struct pl08x_dma_chan, vc.chan); 306 } 307 308 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) 309 { 310 return container_of(tx, struct pl08x_txd, vd.tx); 311 } 312 313 /* 314 * Mux handling. 315 * 316 * This gives us the DMA request input to the PL08x primecell which the 317 * peripheral described by the channel data will be routed to, possibly 318 * via a board/SoC specific external MUX. One important point to note 319 * here is that this does not depend on the physical channel. 320 */ 321 static int pl08x_request_mux(struct pl08x_dma_chan *plchan) 322 { 323 const struct pl08x_platform_data *pd = plchan->host->pd; 324 int ret; 325 326 if (plchan->mux_use++ == 0 && pd->get_xfer_signal) { 327 ret = pd->get_xfer_signal(plchan->cd); 328 if (ret < 0) { 329 plchan->mux_use = 0; 330 return ret; 331 } 332 333 plchan->signal = ret; 334 } 335 return 0; 336 } 337 338 static void pl08x_release_mux(struct pl08x_dma_chan *plchan) 339 { 340 const struct pl08x_platform_data *pd = plchan->host->pd; 341 342 if (plchan->signal >= 0) { 343 WARN_ON(plchan->mux_use == 0); 344 345 if (--plchan->mux_use == 0 && pd->put_xfer_signal) { 346 pd->put_xfer_signal(plchan->cd, plchan->signal); 347 plchan->signal = -1; 348 } 349 } 350 } 351 352 /* 353 * Physical channel handling 354 */ 355 356 /* Whether a certain channel is busy or not */ 357 static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) 358 { 359 unsigned int val; 360 361 val = readl(ch->reg_config); 362 return val & PL080_CONFIG_ACTIVE; 363 } 364 365 static void pl08x_write_lli(struct pl08x_driver_data *pl08x, 366 struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg) 367 { 368 if (pl08x->vd->pl080s) 369 dev_vdbg(&pl08x->adev->dev, 370 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 371 "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n", 372 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], 373 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], 374 lli[PL080S_LLI_CCTL2], ccfg); 375 else 376 dev_vdbg(&pl08x->adev->dev, 377 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 378 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", 379 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], 380 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg); 381 382 writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR); 383 writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR); 384 writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI); 385 writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL); 386 387 if (pl08x->vd->pl080s) 388 writel_relaxed(lli[PL080S_LLI_CCTL2], 389 phychan->base + PL080S_CH_CONTROL2); 390 391 writel(ccfg, phychan->reg_config); 392 } 393 394 /* 395 * Set the initial DMA register values i.e. those for the first LLI 396 * The next LLI pointer and the configuration interrupt bit have 397 * been set when the LLIs were constructed. Poke them into the hardware 398 * and start the transfer. 399 */ 400 static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) 401 { 402 struct pl08x_driver_data *pl08x = plchan->host; 403 struct pl08x_phy_chan *phychan = plchan->phychan; 404 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); 405 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 406 u32 val; 407 408 list_del(&txd->vd.node); 409 410 plchan->at = txd; 411 412 /* Wait for channel inactive */ 413 while (pl08x_phy_channel_busy(phychan)) 414 cpu_relax(); 415 416 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg); 417 418 /* Enable the DMA channel */ 419 /* Do not access config register until channel shows as disabled */ 420 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) 421 cpu_relax(); 422 423 /* Do not access config register until channel shows as inactive */ 424 val = readl(phychan->reg_config); 425 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 426 val = readl(phychan->reg_config); 427 428 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config); 429 } 430 431 /* 432 * Pause the channel by setting the HALT bit. 433 * 434 * For M->P transfers, pause the DMAC first and then stop the peripheral - 435 * the FIFO can only drain if the peripheral is still requesting data. 436 * (note: this can still timeout if the DMAC FIFO never drains of data.) 437 * 438 * For P->M transfers, disable the peripheral first to stop it filling 439 * the DMAC FIFO, and then pause the DMAC. 440 */ 441 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 442 { 443 u32 val; 444 int timeout; 445 446 /* Set the HALT bit and wait for the FIFO to drain */ 447 val = readl(ch->reg_config); 448 val |= PL080_CONFIG_HALT; 449 writel(val, ch->reg_config); 450 451 /* Wait for channel inactive */ 452 for (timeout = 1000; timeout; timeout--) { 453 if (!pl08x_phy_channel_busy(ch)) 454 break; 455 udelay(1); 456 } 457 if (pl08x_phy_channel_busy(ch)) 458 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); 459 } 460 461 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 462 { 463 u32 val; 464 465 /* Clear the HALT bit */ 466 val = readl(ch->reg_config); 467 val &= ~PL080_CONFIG_HALT; 468 writel(val, ch->reg_config); 469 } 470 471 /* 472 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 473 * clears any pending interrupt status. This should not be used for 474 * an on-going transfer, but as a method of shutting down a channel 475 * (eg, when it's no longer used) or terminating a transfer. 476 */ 477 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, 478 struct pl08x_phy_chan *ch) 479 { 480 u32 val = readl(ch->reg_config); 481 482 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | 483 PL080_CONFIG_TC_IRQ_MASK); 484 485 writel(val, ch->reg_config); 486 487 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); 488 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); 489 } 490 491 static inline u32 get_bytes_in_cctl(u32 cctl) 492 { 493 /* The source width defines the number of bytes */ 494 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; 495 496 cctl &= PL080_CONTROL_SWIDTH_MASK; 497 498 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 499 case PL080_WIDTH_8BIT: 500 break; 501 case PL080_WIDTH_16BIT: 502 bytes *= 2; 503 break; 504 case PL080_WIDTH_32BIT: 505 bytes *= 4; 506 break; 507 } 508 return bytes; 509 } 510 511 static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1) 512 { 513 /* The source width defines the number of bytes */ 514 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK; 515 516 cctl &= PL080_CONTROL_SWIDTH_MASK; 517 518 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 519 case PL080_WIDTH_8BIT: 520 break; 521 case PL080_WIDTH_16BIT: 522 bytes *= 2; 523 break; 524 case PL080_WIDTH_32BIT: 525 bytes *= 4; 526 break; 527 } 528 return bytes; 529 } 530 531 /* The channel should be paused when calling this */ 532 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 533 { 534 struct pl08x_driver_data *pl08x = plchan->host; 535 const u32 *llis_va, *llis_va_limit; 536 struct pl08x_phy_chan *ch; 537 dma_addr_t llis_bus; 538 struct pl08x_txd *txd; 539 u32 llis_max_words; 540 size_t bytes; 541 u32 clli; 542 543 ch = plchan->phychan; 544 txd = plchan->at; 545 546 if (!ch || !txd) 547 return 0; 548 549 /* 550 * Follow the LLIs to get the number of remaining 551 * bytes in the currently active transaction. 552 */ 553 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; 554 555 /* First get the remaining bytes in the active transfer */ 556 if (pl08x->vd->pl080s) 557 bytes = get_bytes_in_cctl_pl080s( 558 readl(ch->base + PL080_CH_CONTROL), 559 readl(ch->base + PL080S_CH_CONTROL2)); 560 else 561 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 562 563 if (!clli) 564 return bytes; 565 566 llis_va = txd->llis_va; 567 llis_bus = txd->llis_bus; 568 569 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS; 570 BUG_ON(clli < llis_bus || clli >= llis_bus + 571 sizeof(u32) * llis_max_words); 572 573 /* 574 * Locate the next LLI - as this is an array, 575 * it's simple maths to find. 576 */ 577 llis_va += (clli - llis_bus) / sizeof(u32); 578 579 llis_va_limit = llis_va + llis_max_words; 580 581 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) { 582 if (pl08x->vd->pl080s) 583 bytes += get_bytes_in_cctl_pl080s( 584 llis_va[PL080_LLI_CCTL], 585 llis_va[PL080S_LLI_CCTL2]); 586 else 587 bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]); 588 589 /* 590 * A LLI pointer going backward terminates the LLI list 591 */ 592 if (llis_va[PL080_LLI_LLI] <= clli) 593 break; 594 } 595 596 return bytes; 597 } 598 599 /* 600 * Allocate a physical channel for a virtual channel 601 * 602 * Try to locate a physical channel to be used for this transfer. If all 603 * are taken return NULL and the requester will have to cope by using 604 * some fallback PIO mode or retrying later. 605 */ 606 static struct pl08x_phy_chan * 607 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 608 struct pl08x_dma_chan *virt_chan) 609 { 610 struct pl08x_phy_chan *ch = NULL; 611 unsigned long flags; 612 int i; 613 614 for (i = 0; i < pl08x->vd->channels; i++) { 615 ch = &pl08x->phy_chans[i]; 616 617 spin_lock_irqsave(&ch->lock, flags); 618 619 if (!ch->locked && !ch->serving) { 620 ch->serving = virt_chan; 621 spin_unlock_irqrestore(&ch->lock, flags); 622 break; 623 } 624 625 spin_unlock_irqrestore(&ch->lock, flags); 626 } 627 628 if (i == pl08x->vd->channels) { 629 /* No physical channel available, cope with it */ 630 return NULL; 631 } 632 633 return ch; 634 } 635 636 /* Mark the physical channel as free. Note, this write is atomic. */ 637 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 638 struct pl08x_phy_chan *ch) 639 { 640 ch->serving = NULL; 641 } 642 643 /* 644 * Try to allocate a physical channel. When successful, assign it to 645 * this virtual channel, and initiate the next descriptor. The 646 * virtual channel lock must be held at this point. 647 */ 648 static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) 649 { 650 struct pl08x_driver_data *pl08x = plchan->host; 651 struct pl08x_phy_chan *ch; 652 653 ch = pl08x_get_phy_channel(pl08x, plchan); 654 if (!ch) { 655 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); 656 plchan->state = PL08X_CHAN_WAITING; 657 return; 658 } 659 660 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", 661 ch->id, plchan->name); 662 663 plchan->phychan = ch; 664 plchan->state = PL08X_CHAN_RUNNING; 665 pl08x_start_next_txd(plchan); 666 } 667 668 static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, 669 struct pl08x_dma_chan *plchan) 670 { 671 struct pl08x_driver_data *pl08x = plchan->host; 672 673 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", 674 ch->id, plchan->name); 675 676 /* 677 * We do this without taking the lock; we're really only concerned 678 * about whether this pointer is NULL or not, and we're guaranteed 679 * that this will only be called when it _already_ is non-NULL. 680 */ 681 ch->serving = plchan; 682 plchan->phychan = ch; 683 plchan->state = PL08X_CHAN_RUNNING; 684 pl08x_start_next_txd(plchan); 685 } 686 687 /* 688 * Free a physical DMA channel, potentially reallocating it to another 689 * virtual channel if we have any pending. 690 */ 691 static void pl08x_phy_free(struct pl08x_dma_chan *plchan) 692 { 693 struct pl08x_driver_data *pl08x = plchan->host; 694 struct pl08x_dma_chan *p, *next; 695 696 retry: 697 next = NULL; 698 699 /* Find a waiting virtual channel for the next transfer. */ 700 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) 701 if (p->state == PL08X_CHAN_WAITING) { 702 next = p; 703 break; 704 } 705 706 if (!next) { 707 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) 708 if (p->state == PL08X_CHAN_WAITING) { 709 next = p; 710 break; 711 } 712 } 713 714 /* Ensure that the physical channel is stopped */ 715 pl08x_terminate_phy_chan(pl08x, plchan->phychan); 716 717 if (next) { 718 bool success; 719 720 /* 721 * Eww. We know this isn't going to deadlock 722 * but lockdep probably doesn't. 723 */ 724 spin_lock(&next->vc.lock); 725 /* Re-check the state now that we have the lock */ 726 success = next->state == PL08X_CHAN_WAITING; 727 if (success) 728 pl08x_phy_reassign_start(plchan->phychan, next); 729 spin_unlock(&next->vc.lock); 730 731 /* If the state changed, try to find another channel */ 732 if (!success) 733 goto retry; 734 } else { 735 /* No more jobs, so free up the physical channel */ 736 pl08x_put_phy_channel(pl08x, plchan->phychan); 737 } 738 739 plchan->phychan = NULL; 740 plchan->state = PL08X_CHAN_IDLE; 741 } 742 743 /* 744 * LLI handling 745 */ 746 747 static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) 748 { 749 switch (coded) { 750 case PL080_WIDTH_8BIT: 751 return 1; 752 case PL080_WIDTH_16BIT: 753 return 2; 754 case PL080_WIDTH_32BIT: 755 return 4; 756 default: 757 break; 758 } 759 BUG(); 760 return 0; 761 } 762 763 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 764 size_t tsize) 765 { 766 u32 retbits = cctl; 767 768 /* Remove all src, dst and transfer size bits */ 769 retbits &= ~PL080_CONTROL_DWIDTH_MASK; 770 retbits &= ~PL080_CONTROL_SWIDTH_MASK; 771 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 772 773 /* Then set the bits according to the parameters */ 774 switch (srcwidth) { 775 case 1: 776 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; 777 break; 778 case 2: 779 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT; 780 break; 781 case 4: 782 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; 783 break; 784 default: 785 BUG(); 786 break; 787 } 788 789 switch (dstwidth) { 790 case 1: 791 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 792 break; 793 case 2: 794 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 795 break; 796 case 4: 797 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 798 break; 799 default: 800 BUG(); 801 break; 802 } 803 804 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK; 805 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 806 return retbits; 807 } 808 809 struct pl08x_lli_build_data { 810 struct pl08x_txd *txd; 811 struct pl08x_bus_data srcbus; 812 struct pl08x_bus_data dstbus; 813 size_t remainder; 814 u32 lli_bus; 815 }; 816 817 /* 818 * Autoselect a master bus to use for the transfer. Slave will be the chosen as 819 * victim in case src & dest are not similarly aligned. i.e. If after aligning 820 * masters address with width requirements of transfer (by sending few byte by 821 * byte data), slave is still not aligned, then its width will be reduced to 822 * BYTE. 823 * - prefers the destination bus if both available 824 * - prefers bus with fixed address (i.e. peripheral) 825 */ 826 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 827 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 828 { 829 if (!(cctl & PL080_CONTROL_DST_INCR)) { 830 *mbus = &bd->dstbus; 831 *sbus = &bd->srcbus; 832 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 833 *mbus = &bd->srcbus; 834 *sbus = &bd->dstbus; 835 } else { 836 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { 837 *mbus = &bd->dstbus; 838 *sbus = &bd->srcbus; 839 } else { 840 *mbus = &bd->srcbus; 841 *sbus = &bd->dstbus; 842 } 843 } 844 } 845 846 /* 847 * Fills in one LLI for a certain transfer descriptor and advance the counter 848 */ 849 static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, 850 struct pl08x_lli_build_data *bd, 851 int num_llis, int len, u32 cctl, u32 cctl2) 852 { 853 u32 offset = num_llis * pl08x->lli_words; 854 u32 *llis_va = bd->txd->llis_va + offset; 855 dma_addr_t llis_bus = bd->txd->llis_bus; 856 857 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 858 859 /* Advance the offset to next LLI. */ 860 offset += pl08x->lli_words; 861 862 llis_va[PL080_LLI_SRC] = bd->srcbus.addr; 863 llis_va[PL080_LLI_DST] = bd->dstbus.addr; 864 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset); 865 llis_va[PL080_LLI_LLI] |= bd->lli_bus; 866 llis_va[PL080_LLI_CCTL] = cctl; 867 if (pl08x->vd->pl080s) 868 llis_va[PL080S_LLI_CCTL2] = cctl2; 869 870 if (cctl & PL080_CONTROL_SRC_INCR) 871 bd->srcbus.addr += len; 872 if (cctl & PL080_CONTROL_DST_INCR) 873 bd->dstbus.addr += len; 874 875 BUG_ON(bd->remainder < len); 876 877 bd->remainder -= len; 878 } 879 880 static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x, 881 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len, 882 int num_llis, size_t *total_bytes) 883 { 884 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); 885 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len); 886 (*total_bytes) += len; 887 } 888 889 #ifdef VERBOSE_DEBUG 890 static void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 891 const u32 *llis_va, int num_llis) 892 { 893 int i; 894 895 if (pl08x->vd->pl080s) { 896 dev_vdbg(&pl08x->adev->dev, 897 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n", 898 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2"); 899 for (i = 0; i < num_llis; i++) { 900 dev_vdbg(&pl08x->adev->dev, 901 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 902 i, llis_va, llis_va[PL080_LLI_SRC], 903 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 904 llis_va[PL080_LLI_CCTL], 905 llis_va[PL080S_LLI_CCTL2]); 906 llis_va += pl08x->lli_words; 907 } 908 } else { 909 dev_vdbg(&pl08x->adev->dev, 910 "%-3s %-9s %-10s %-10s %-10s %s\n", 911 "lli", "", "csrc", "cdst", "clli", "cctl"); 912 for (i = 0; i < num_llis; i++) { 913 dev_vdbg(&pl08x->adev->dev, 914 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", 915 i, llis_va, llis_va[PL080_LLI_SRC], 916 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 917 llis_va[PL080_LLI_CCTL]); 918 llis_va += pl08x->lli_words; 919 } 920 } 921 } 922 #else 923 static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 924 const u32 *llis_va, int num_llis) {} 925 #endif 926 927 /* 928 * This fills in the table of LLIs for the transfer descriptor 929 * Note that we assume we never have to change the burst sizes 930 * Return 0 for error 931 */ 932 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 933 struct pl08x_txd *txd) 934 { 935 struct pl08x_bus_data *mbus, *sbus; 936 struct pl08x_lli_build_data bd; 937 int num_llis = 0; 938 u32 cctl, early_bytes = 0; 939 size_t max_bytes_per_lli, total_bytes; 940 u32 *llis_va, *last_lli; 941 struct pl08x_sg *dsg; 942 943 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); 944 if (!txd->llis_va) { 945 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 946 return 0; 947 } 948 949 bd.txd = txd; 950 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; 951 cctl = txd->cctl; 952 953 /* Find maximum width of the source bus */ 954 bd.srcbus.maxwidth = 955 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 956 PL080_CONTROL_SWIDTH_SHIFT); 957 958 /* Find maximum width of the destination bus */ 959 bd.dstbus.maxwidth = 960 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 961 PL080_CONTROL_DWIDTH_SHIFT); 962 963 list_for_each_entry(dsg, &txd->dsg_list, node) { 964 total_bytes = 0; 965 cctl = txd->cctl; 966 967 bd.srcbus.addr = dsg->src_addr; 968 bd.dstbus.addr = dsg->dst_addr; 969 bd.remainder = dsg->len; 970 bd.srcbus.buswidth = bd.srcbus.maxwidth; 971 bd.dstbus.buswidth = bd.dstbus.maxwidth; 972 973 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 974 975 dev_vdbg(&pl08x->adev->dev, 976 "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n", 977 (u64)bd.srcbus.addr, 978 cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 979 bd.srcbus.buswidth, 980 (u64)bd.dstbus.addr, 981 cctl & PL080_CONTROL_DST_INCR ? "+" : "", 982 bd.dstbus.buswidth, 983 bd.remainder); 984 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 985 mbus == &bd.srcbus ? "src" : "dst", 986 sbus == &bd.srcbus ? "src" : "dst"); 987 988 /* 989 * Zero length is only allowed if all these requirements are 990 * met: 991 * - flow controller is peripheral. 992 * - src.addr is aligned to src.width 993 * - dst.addr is aligned to dst.width 994 * 995 * sg_len == 1 should be true, as there can be two cases here: 996 * 997 * - Memory addresses are contiguous and are not scattered. 998 * Here, Only one sg will be passed by user driver, with 999 * memory address and zero length. We pass this to controller 1000 * and after the transfer it will receive the last burst 1001 * request from peripheral and so transfer finishes. 1002 * 1003 * - Memory addresses are scattered and are not contiguous. 1004 * Here, Obviously as DMA controller doesn't know when a lli's 1005 * transfer gets over, it can't load next lli. So in this 1006 * case, there has to be an assumption that only one lli is 1007 * supported. Thus, we can't have scattered addresses. 1008 */ 1009 if (!bd.remainder) { 1010 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> 1011 PL080_CONFIG_FLOW_CONTROL_SHIFT; 1012 if (!((fc >= PL080_FLOW_SRC2DST_DST) && 1013 (fc <= PL080_FLOW_SRC2DST_SRC))) { 1014 dev_err(&pl08x->adev->dev, "%s sg len can't be zero", 1015 __func__); 1016 return 0; 1017 } 1018 1019 if (!IS_BUS_ALIGNED(&bd.srcbus) || 1020 !IS_BUS_ALIGNED(&bd.dstbus)) { 1021 dev_err(&pl08x->adev->dev, 1022 "%s src & dst address must be aligned to src" 1023 " & dst width if peripheral is flow controller", 1024 __func__); 1025 return 0; 1026 } 1027 1028 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1029 bd.dstbus.buswidth, 0); 1030 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1031 0, cctl, 0); 1032 break; 1033 } 1034 1035 /* 1036 * Send byte by byte for following cases 1037 * - Less than a bus width available 1038 * - until master bus is aligned 1039 */ 1040 if (bd.remainder < mbus->buswidth) 1041 early_bytes = bd.remainder; 1042 else if (!IS_BUS_ALIGNED(mbus)) { 1043 early_bytes = mbus->buswidth - 1044 (mbus->addr & (mbus->buswidth - 1)); 1045 if ((bd.remainder - early_bytes) < mbus->buswidth) 1046 early_bytes = bd.remainder; 1047 } 1048 1049 if (early_bytes) { 1050 dev_vdbg(&pl08x->adev->dev, 1051 "%s byte width LLIs (remain 0x%08zx)\n", 1052 __func__, bd.remainder); 1053 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes, 1054 num_llis++, &total_bytes); 1055 } 1056 1057 if (bd.remainder) { 1058 /* 1059 * Master now aligned 1060 * - if slave is not then we must set its width down 1061 */ 1062 if (!IS_BUS_ALIGNED(sbus)) { 1063 dev_dbg(&pl08x->adev->dev, 1064 "%s set down bus width to one byte\n", 1065 __func__); 1066 1067 sbus->buswidth = 1; 1068 } 1069 1070 /* 1071 * Bytes transferred = tsize * src width, not 1072 * MIN(buswidths) 1073 */ 1074 max_bytes_per_lli = bd.srcbus.buswidth * 1075 pl08x->vd->max_transfer_size; 1076 dev_vdbg(&pl08x->adev->dev, 1077 "%s max bytes per lli = %zu\n", 1078 __func__, max_bytes_per_lli); 1079 1080 /* 1081 * Make largest possible LLIs until less than one bus 1082 * width left 1083 */ 1084 while (bd.remainder > (mbus->buswidth - 1)) { 1085 size_t lli_len, tsize, width; 1086 1087 /* 1088 * If enough left try to send max possible, 1089 * otherwise try to send the remainder 1090 */ 1091 lli_len = min(bd.remainder, max_bytes_per_lli); 1092 1093 /* 1094 * Check against maximum bus alignment: 1095 * Calculate actual transfer size in relation to 1096 * bus width an get a maximum remainder of the 1097 * highest bus width - 1 1098 */ 1099 width = max(mbus->buswidth, sbus->buswidth); 1100 lli_len = (lli_len / width) * width; 1101 tsize = lli_len / bd.srcbus.buswidth; 1102 1103 dev_vdbg(&pl08x->adev->dev, 1104 "%s fill lli with single lli chunk of " 1105 "size 0x%08zx (remainder 0x%08zx)\n", 1106 __func__, lli_len, bd.remainder); 1107 1108 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1109 bd.dstbus.buswidth, tsize); 1110 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1111 lli_len, cctl, tsize); 1112 total_bytes += lli_len; 1113 } 1114 1115 /* 1116 * Send any odd bytes 1117 */ 1118 if (bd.remainder) { 1119 dev_vdbg(&pl08x->adev->dev, 1120 "%s align with boundary, send odd bytes (remain %zu)\n", 1121 __func__, bd.remainder); 1122 prep_byte_width_lli(pl08x, &bd, &cctl, 1123 bd.remainder, num_llis++, &total_bytes); 1124 } 1125 } 1126 1127 if (total_bytes != dsg->len) { 1128 dev_err(&pl08x->adev->dev, 1129 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 1130 __func__, total_bytes, dsg->len); 1131 return 0; 1132 } 1133 1134 if (num_llis >= MAX_NUM_TSFR_LLIS) { 1135 dev_err(&pl08x->adev->dev, 1136 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 1137 __func__, MAX_NUM_TSFR_LLIS); 1138 return 0; 1139 } 1140 } 1141 1142 llis_va = txd->llis_va; 1143 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words; 1144 1145 if (txd->cyclic) { 1146 /* Link back to the first LLI. */ 1147 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus; 1148 } else { 1149 /* The final LLI terminates the LLI. */ 1150 last_lli[PL080_LLI_LLI] = 0; 1151 /* The final LLI element shall also fire an interrupt. */ 1152 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN; 1153 } 1154 1155 pl08x_dump_lli(pl08x, llis_va, num_llis); 1156 1157 return num_llis; 1158 } 1159 1160 static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 1161 struct pl08x_txd *txd) 1162 { 1163 struct pl08x_sg *dsg, *_dsg; 1164 1165 if (txd->llis_va) 1166 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 1167 1168 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { 1169 list_del(&dsg->node); 1170 kfree(dsg); 1171 } 1172 1173 kfree(txd); 1174 } 1175 1176 static void pl08x_desc_free(struct virt_dma_desc *vd) 1177 { 1178 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1179 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); 1180 1181 dma_descriptor_unmap(&vd->tx); 1182 if (!txd->done) 1183 pl08x_release_mux(plchan); 1184 1185 pl08x_free_txd(plchan->host, txd); 1186 } 1187 1188 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 1189 struct pl08x_dma_chan *plchan) 1190 { 1191 LIST_HEAD(head); 1192 1193 vchan_get_all_descriptors(&plchan->vc, &head); 1194 vchan_dma_desc_free_list(&plchan->vc, &head); 1195 } 1196 1197 /* 1198 * The DMA ENGINE API 1199 */ 1200 static void pl08x_free_chan_resources(struct dma_chan *chan) 1201 { 1202 /* Ensure all queued descriptors are freed */ 1203 vchan_free_chan_resources(to_virt_chan(chan)); 1204 } 1205 1206 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 1207 struct dma_chan *chan, unsigned long flags) 1208 { 1209 struct dma_async_tx_descriptor *retval = NULL; 1210 1211 return retval; 1212 } 1213 1214 /* 1215 * Code accessing dma_async_is_complete() in a tight loop may give problems. 1216 * If slaves are relying on interrupts to signal completion this function 1217 * must not be called with interrupts disabled. 1218 */ 1219 static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, 1220 dma_cookie_t cookie, struct dma_tx_state *txstate) 1221 { 1222 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1223 struct virt_dma_desc *vd; 1224 unsigned long flags; 1225 enum dma_status ret; 1226 size_t bytes = 0; 1227 1228 ret = dma_cookie_status(chan, cookie, txstate); 1229 if (ret == DMA_COMPLETE) 1230 return ret; 1231 1232 /* 1233 * There's no point calculating the residue if there's 1234 * no txstate to store the value. 1235 */ 1236 if (!txstate) { 1237 if (plchan->state == PL08X_CHAN_PAUSED) 1238 ret = DMA_PAUSED; 1239 return ret; 1240 } 1241 1242 spin_lock_irqsave(&plchan->vc.lock, flags); 1243 ret = dma_cookie_status(chan, cookie, txstate); 1244 if (ret != DMA_COMPLETE) { 1245 vd = vchan_find_desc(&plchan->vc, cookie); 1246 if (vd) { 1247 /* On the issued list, so hasn't been processed yet */ 1248 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1249 struct pl08x_sg *dsg; 1250 1251 list_for_each_entry(dsg, &txd->dsg_list, node) 1252 bytes += dsg->len; 1253 } else { 1254 bytes = pl08x_getbytes_chan(plchan); 1255 } 1256 } 1257 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1258 1259 /* 1260 * This cookie not complete yet 1261 * Get number of bytes left in the active transactions and queue 1262 */ 1263 dma_set_residue(txstate, bytes); 1264 1265 if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) 1266 ret = DMA_PAUSED; 1267 1268 /* Whether waiting or running, we're in progress */ 1269 return ret; 1270 } 1271 1272 /* PrimeCell DMA extension */ 1273 struct burst_table { 1274 u32 burstwords; 1275 u32 reg; 1276 }; 1277 1278 static const struct burst_table burst_sizes[] = { 1279 { 1280 .burstwords = 256, 1281 .reg = PL080_BSIZE_256, 1282 }, 1283 { 1284 .burstwords = 128, 1285 .reg = PL080_BSIZE_128, 1286 }, 1287 { 1288 .burstwords = 64, 1289 .reg = PL080_BSIZE_64, 1290 }, 1291 { 1292 .burstwords = 32, 1293 .reg = PL080_BSIZE_32, 1294 }, 1295 { 1296 .burstwords = 16, 1297 .reg = PL080_BSIZE_16, 1298 }, 1299 { 1300 .burstwords = 8, 1301 .reg = PL080_BSIZE_8, 1302 }, 1303 { 1304 .burstwords = 4, 1305 .reg = PL080_BSIZE_4, 1306 }, 1307 { 1308 .burstwords = 0, 1309 .reg = PL080_BSIZE_1, 1310 }, 1311 }; 1312 1313 /* 1314 * Given the source and destination available bus masks, select which 1315 * will be routed to each port. We try to have source and destination 1316 * on separate ports, but always respect the allowable settings. 1317 */ 1318 static u32 pl08x_select_bus(u8 src, u8 dst) 1319 { 1320 u32 cctl = 0; 1321 1322 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) 1323 cctl |= PL080_CONTROL_DST_AHB2; 1324 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) 1325 cctl |= PL080_CONTROL_SRC_AHB2; 1326 1327 return cctl; 1328 } 1329 1330 static u32 pl08x_cctl(u32 cctl) 1331 { 1332 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | 1333 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | 1334 PL080_CONTROL_PROT_MASK); 1335 1336 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1337 return cctl | PL080_CONTROL_PROT_SYS; 1338 } 1339 1340 static u32 pl08x_width(enum dma_slave_buswidth width) 1341 { 1342 switch (width) { 1343 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1344 return PL080_WIDTH_8BIT; 1345 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1346 return PL080_WIDTH_16BIT; 1347 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1348 return PL080_WIDTH_32BIT; 1349 default: 1350 return ~0; 1351 } 1352 } 1353 1354 static u32 pl08x_burst(u32 maxburst) 1355 { 1356 int i; 1357 1358 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) 1359 if (burst_sizes[i].burstwords <= maxburst) 1360 break; 1361 1362 return burst_sizes[i].reg; 1363 } 1364 1365 static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, 1366 enum dma_slave_buswidth addr_width, u32 maxburst) 1367 { 1368 u32 width, burst, cctl = 0; 1369 1370 width = pl08x_width(addr_width); 1371 if (width == ~0) 1372 return ~0; 1373 1374 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; 1375 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; 1376 1377 /* 1378 * If this channel will only request single transfers, set this 1379 * down to ONE element. Also select one element if no maxburst 1380 * is specified. 1381 */ 1382 if (plchan->cd->single) 1383 maxburst = 1; 1384 1385 burst = pl08x_burst(maxburst); 1386 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1387 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1388 1389 return pl08x_cctl(cctl); 1390 } 1391 1392 /* 1393 * Slave transactions callback to the slave device to allow 1394 * synchronization of slave DMA signals with the DMAC enable 1395 */ 1396 static void pl08x_issue_pending(struct dma_chan *chan) 1397 { 1398 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1399 unsigned long flags; 1400 1401 spin_lock_irqsave(&plchan->vc.lock, flags); 1402 if (vchan_issue_pending(&plchan->vc)) { 1403 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) 1404 pl08x_phy_alloc_and_start(plchan); 1405 } 1406 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1407 } 1408 1409 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) 1410 { 1411 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1412 1413 if (txd) { 1414 INIT_LIST_HEAD(&txd->dsg_list); 1415 1416 /* Always enable error and terminal interrupts */ 1417 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 1418 PL080_CONFIG_TC_IRQ_MASK; 1419 } 1420 return txd; 1421 } 1422 1423 /* 1424 * Initialize a descriptor to be used by memcpy submit 1425 */ 1426 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1427 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 1428 size_t len, unsigned long flags) 1429 { 1430 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1431 struct pl08x_driver_data *pl08x = plchan->host; 1432 struct pl08x_txd *txd; 1433 struct pl08x_sg *dsg; 1434 int ret; 1435 1436 txd = pl08x_get_txd(plchan); 1437 if (!txd) { 1438 dev_err(&pl08x->adev->dev, 1439 "%s no memory for descriptor\n", __func__); 1440 return NULL; 1441 } 1442 1443 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1444 if (!dsg) { 1445 pl08x_free_txd(pl08x, txd); 1446 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n", 1447 __func__); 1448 return NULL; 1449 } 1450 list_add_tail(&dsg->node, &txd->dsg_list); 1451 1452 dsg->src_addr = src; 1453 dsg->dst_addr = dest; 1454 dsg->len = len; 1455 1456 /* Set platform data for m2m */ 1457 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1458 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & 1459 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 1460 1461 /* Both to be incremented or the code will break */ 1462 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1463 1464 if (pl08x->vd->dualmaster) 1465 txd->cctl |= pl08x_select_bus(pl08x->mem_buses, 1466 pl08x->mem_buses); 1467 1468 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1469 if (!ret) { 1470 pl08x_free_txd(pl08x, txd); 1471 return NULL; 1472 } 1473 1474 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1475 } 1476 1477 static struct pl08x_txd *pl08x_init_txd( 1478 struct dma_chan *chan, 1479 enum dma_transfer_direction direction, 1480 dma_addr_t *slave_addr) 1481 { 1482 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1483 struct pl08x_driver_data *pl08x = plchan->host; 1484 struct pl08x_txd *txd; 1485 enum dma_slave_buswidth addr_width; 1486 int ret, tmp; 1487 u8 src_buses, dst_buses; 1488 u32 maxburst, cctl; 1489 1490 txd = pl08x_get_txd(plchan); 1491 if (!txd) { 1492 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1493 return NULL; 1494 } 1495 1496 /* 1497 * Set up addresses, the PrimeCell configured address 1498 * will take precedence since this may configure the 1499 * channel target address dynamically at runtime. 1500 */ 1501 if (direction == DMA_MEM_TO_DEV) { 1502 cctl = PL080_CONTROL_SRC_INCR; 1503 *slave_addr = plchan->cfg.dst_addr; 1504 addr_width = plchan->cfg.dst_addr_width; 1505 maxburst = plchan->cfg.dst_maxburst; 1506 src_buses = pl08x->mem_buses; 1507 dst_buses = plchan->cd->periph_buses; 1508 } else if (direction == DMA_DEV_TO_MEM) { 1509 cctl = PL080_CONTROL_DST_INCR; 1510 *slave_addr = plchan->cfg.src_addr; 1511 addr_width = plchan->cfg.src_addr_width; 1512 maxburst = plchan->cfg.src_maxburst; 1513 src_buses = plchan->cd->periph_buses; 1514 dst_buses = pl08x->mem_buses; 1515 } else { 1516 pl08x_free_txd(pl08x, txd); 1517 dev_err(&pl08x->adev->dev, 1518 "%s direction unsupported\n", __func__); 1519 return NULL; 1520 } 1521 1522 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); 1523 if (cctl == ~0) { 1524 pl08x_free_txd(pl08x, txd); 1525 dev_err(&pl08x->adev->dev, 1526 "DMA slave configuration botched?\n"); 1527 return NULL; 1528 } 1529 1530 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); 1531 1532 if (plchan->cfg.device_fc) 1533 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 1534 PL080_FLOW_PER2MEM_PER; 1535 else 1536 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER : 1537 PL080_FLOW_PER2MEM; 1538 1539 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1540 1541 ret = pl08x_request_mux(plchan); 1542 if (ret < 0) { 1543 pl08x_free_txd(pl08x, txd); 1544 dev_dbg(&pl08x->adev->dev, 1545 "unable to mux for transfer on %s due to platform restrictions\n", 1546 plchan->name); 1547 return NULL; 1548 } 1549 1550 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", 1551 plchan->signal, plchan->name); 1552 1553 /* Assign the flow control signal to this channel */ 1554 if (direction == DMA_MEM_TO_DEV) 1555 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; 1556 else 1557 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; 1558 1559 return txd; 1560 } 1561 1562 static int pl08x_tx_add_sg(struct pl08x_txd *txd, 1563 enum dma_transfer_direction direction, 1564 dma_addr_t slave_addr, 1565 dma_addr_t buf_addr, 1566 unsigned int len) 1567 { 1568 struct pl08x_sg *dsg; 1569 1570 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1571 if (!dsg) 1572 return -ENOMEM; 1573 1574 list_add_tail(&dsg->node, &txd->dsg_list); 1575 1576 dsg->len = len; 1577 if (direction == DMA_MEM_TO_DEV) { 1578 dsg->src_addr = buf_addr; 1579 dsg->dst_addr = slave_addr; 1580 } else { 1581 dsg->src_addr = slave_addr; 1582 dsg->dst_addr = buf_addr; 1583 } 1584 1585 return 0; 1586 } 1587 1588 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1589 struct dma_chan *chan, struct scatterlist *sgl, 1590 unsigned int sg_len, enum dma_transfer_direction direction, 1591 unsigned long flags, void *context) 1592 { 1593 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1594 struct pl08x_driver_data *pl08x = plchan->host; 1595 struct pl08x_txd *txd; 1596 struct scatterlist *sg; 1597 int ret, tmp; 1598 dma_addr_t slave_addr; 1599 1600 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1601 __func__, sg_dma_len(sgl), plchan->name); 1602 1603 txd = pl08x_init_txd(chan, direction, &slave_addr); 1604 if (!txd) 1605 return NULL; 1606 1607 for_each_sg(sgl, sg, sg_len, tmp) { 1608 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1609 sg_dma_address(sg), 1610 sg_dma_len(sg)); 1611 if (ret) { 1612 pl08x_release_mux(plchan); 1613 pl08x_free_txd(pl08x, txd); 1614 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 1615 __func__); 1616 return NULL; 1617 } 1618 } 1619 1620 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1621 if (!ret) { 1622 pl08x_release_mux(plchan); 1623 pl08x_free_txd(pl08x, txd); 1624 return NULL; 1625 } 1626 1627 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1628 } 1629 1630 static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( 1631 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 1632 size_t period_len, enum dma_transfer_direction direction, 1633 unsigned long flags) 1634 { 1635 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1636 struct pl08x_driver_data *pl08x = plchan->host; 1637 struct pl08x_txd *txd; 1638 int ret, tmp; 1639 dma_addr_t slave_addr; 1640 1641 dev_dbg(&pl08x->adev->dev, 1642 "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n", 1643 __func__, period_len, buf_len, 1644 direction == DMA_MEM_TO_DEV ? "to" : "from", 1645 plchan->name); 1646 1647 txd = pl08x_init_txd(chan, direction, &slave_addr); 1648 if (!txd) 1649 return NULL; 1650 1651 txd->cyclic = true; 1652 txd->cctl |= PL080_CONTROL_TC_IRQ_EN; 1653 for (tmp = 0; tmp < buf_len; tmp += period_len) { 1654 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1655 buf_addr + tmp, period_len); 1656 if (ret) { 1657 pl08x_release_mux(plchan); 1658 pl08x_free_txd(pl08x, txd); 1659 return NULL; 1660 } 1661 } 1662 1663 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1664 if (!ret) { 1665 pl08x_release_mux(plchan); 1666 pl08x_free_txd(pl08x, txd); 1667 return NULL; 1668 } 1669 1670 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1671 } 1672 1673 static int pl08x_config(struct dma_chan *chan, 1674 struct dma_slave_config *config) 1675 { 1676 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1677 struct pl08x_driver_data *pl08x = plchan->host; 1678 1679 if (!plchan->slave) 1680 return -EINVAL; 1681 1682 /* Reject definitely invalid configurations */ 1683 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 1684 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 1685 return -EINVAL; 1686 1687 if (config->device_fc && pl08x->vd->pl080s) { 1688 dev_err(&pl08x->adev->dev, 1689 "%s: PL080S does not support peripheral flow control\n", 1690 __func__); 1691 return -EINVAL; 1692 } 1693 1694 plchan->cfg = *config; 1695 1696 return 0; 1697 } 1698 1699 static int pl08x_terminate_all(struct dma_chan *chan) 1700 { 1701 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1702 struct pl08x_driver_data *pl08x = plchan->host; 1703 unsigned long flags; 1704 1705 spin_lock_irqsave(&plchan->vc.lock, flags); 1706 if (!plchan->phychan && !plchan->at) { 1707 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1708 return 0; 1709 } 1710 1711 plchan->state = PL08X_CHAN_IDLE; 1712 1713 if (plchan->phychan) { 1714 /* 1715 * Mark physical channel as free and free any slave 1716 * signal 1717 */ 1718 pl08x_phy_free(plchan); 1719 } 1720 /* Dequeue jobs and free LLIs */ 1721 if (plchan->at) { 1722 pl08x_desc_free(&plchan->at->vd); 1723 plchan->at = NULL; 1724 } 1725 /* Dequeue jobs not yet fired as well */ 1726 pl08x_free_txd_list(pl08x, plchan); 1727 1728 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1729 1730 return 0; 1731 } 1732 1733 static int pl08x_pause(struct dma_chan *chan) 1734 { 1735 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1736 unsigned long flags; 1737 1738 /* 1739 * Anything succeeds on channels with no physical allocation and 1740 * no queued transfers. 1741 */ 1742 spin_lock_irqsave(&plchan->vc.lock, flags); 1743 if (!plchan->phychan && !plchan->at) { 1744 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1745 return 0; 1746 } 1747 1748 pl08x_pause_phy_chan(plchan->phychan); 1749 plchan->state = PL08X_CHAN_PAUSED; 1750 1751 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1752 1753 return 0; 1754 } 1755 1756 static int pl08x_resume(struct dma_chan *chan) 1757 { 1758 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1759 unsigned long flags; 1760 1761 /* 1762 * Anything succeeds on channels with no physical allocation and 1763 * no queued transfers. 1764 */ 1765 spin_lock_irqsave(&plchan->vc.lock, flags); 1766 if (!plchan->phychan && !plchan->at) { 1767 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1768 return 0; 1769 } 1770 1771 pl08x_resume_phy_chan(plchan->phychan); 1772 plchan->state = PL08X_CHAN_RUNNING; 1773 1774 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1775 1776 return 0; 1777 } 1778 1779 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1780 { 1781 struct pl08x_dma_chan *plchan; 1782 char *name = chan_id; 1783 1784 /* Reject channels for devices not bound to this driver */ 1785 if (chan->device->dev->driver != &pl08x_amba_driver.drv) 1786 return false; 1787 1788 plchan = to_pl08x_chan(chan); 1789 1790 /* Check that the channel is not taken! */ 1791 if (!strcmp(plchan->name, name)) 1792 return true; 1793 1794 return false; 1795 } 1796 EXPORT_SYMBOL_GPL(pl08x_filter_id); 1797 1798 /* 1799 * Just check that the device is there and active 1800 * TODO: turn this bit on/off depending on the number of physical channels 1801 * actually used, if it is zero... well shut it off. That will save some 1802 * power. Cut the clock at the same time. 1803 */ 1804 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1805 { 1806 /* The Nomadik variant does not have the config register */ 1807 if (pl08x->vd->nomadik) 1808 return; 1809 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1810 } 1811 1812 static irqreturn_t pl08x_irq(int irq, void *dev) 1813 { 1814 struct pl08x_driver_data *pl08x = dev; 1815 u32 mask = 0, err, tc, i; 1816 1817 /* check & clear - ERR & TC interrupts */ 1818 err = readl(pl08x->base + PL080_ERR_STATUS); 1819 if (err) { 1820 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", 1821 __func__, err); 1822 writel(err, pl08x->base + PL080_ERR_CLEAR); 1823 } 1824 tc = readl(pl08x->base + PL080_TC_STATUS); 1825 if (tc) 1826 writel(tc, pl08x->base + PL080_TC_CLEAR); 1827 1828 if (!err && !tc) 1829 return IRQ_NONE; 1830 1831 for (i = 0; i < pl08x->vd->channels; i++) { 1832 if (((1 << i) & err) || ((1 << i) & tc)) { 1833 /* Locate physical channel */ 1834 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1835 struct pl08x_dma_chan *plchan = phychan->serving; 1836 struct pl08x_txd *tx; 1837 1838 if (!plchan) { 1839 dev_err(&pl08x->adev->dev, 1840 "%s Error TC interrupt on unused channel: 0x%08x\n", 1841 __func__, i); 1842 continue; 1843 } 1844 1845 spin_lock(&plchan->vc.lock); 1846 tx = plchan->at; 1847 if (tx && tx->cyclic) { 1848 vchan_cyclic_callback(&tx->vd); 1849 } else if (tx) { 1850 plchan->at = NULL; 1851 /* 1852 * This descriptor is done, release its mux 1853 * reservation. 1854 */ 1855 pl08x_release_mux(plchan); 1856 tx->done = true; 1857 vchan_cookie_complete(&tx->vd); 1858 1859 /* 1860 * And start the next descriptor (if any), 1861 * otherwise free this channel. 1862 */ 1863 if (vchan_next_desc(&plchan->vc)) 1864 pl08x_start_next_txd(plchan); 1865 else 1866 pl08x_phy_free(plchan); 1867 } 1868 spin_unlock(&plchan->vc.lock); 1869 1870 mask |= (1 << i); 1871 } 1872 } 1873 1874 return mask ? IRQ_HANDLED : IRQ_NONE; 1875 } 1876 1877 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) 1878 { 1879 chan->slave = true; 1880 chan->name = chan->cd->bus_id; 1881 chan->cfg.src_addr = chan->cd->addr; 1882 chan->cfg.dst_addr = chan->cd->addr; 1883 } 1884 1885 /* 1886 * Initialise the DMAC memcpy/slave channels. 1887 * Make a local wrapper to hold required data 1888 */ 1889 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1890 struct dma_device *dmadev, unsigned int channels, bool slave) 1891 { 1892 struct pl08x_dma_chan *chan; 1893 int i; 1894 1895 INIT_LIST_HEAD(&dmadev->channels); 1896 1897 /* 1898 * Register as many many memcpy as we have physical channels, 1899 * we won't always be able to use all but the code will have 1900 * to cope with that situation. 1901 */ 1902 for (i = 0; i < channels; i++) { 1903 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1904 if (!chan) { 1905 dev_err(&pl08x->adev->dev, 1906 "%s no memory for channel\n", __func__); 1907 return -ENOMEM; 1908 } 1909 1910 chan->host = pl08x; 1911 chan->state = PL08X_CHAN_IDLE; 1912 chan->signal = -1; 1913 1914 if (slave) { 1915 chan->cd = &pl08x->pd->slave_channels[i]; 1916 /* 1917 * Some implementations have muxed signals, whereas some 1918 * use a mux in front of the signals and need dynamic 1919 * assignment of signals. 1920 */ 1921 chan->signal = i; 1922 pl08x_dma_slave_init(chan); 1923 } else { 1924 chan->cd = &pl08x->pd->memcpy_channel; 1925 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1926 if (!chan->name) { 1927 kfree(chan); 1928 return -ENOMEM; 1929 } 1930 } 1931 dev_dbg(&pl08x->adev->dev, 1932 "initialize virtual channel \"%s\"\n", 1933 chan->name); 1934 1935 chan->vc.desc_free = pl08x_desc_free; 1936 vchan_init(&chan->vc, dmadev); 1937 } 1938 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1939 i, slave ? "slave" : "memcpy"); 1940 return i; 1941 } 1942 1943 static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1944 { 1945 struct pl08x_dma_chan *chan = NULL; 1946 struct pl08x_dma_chan *next; 1947 1948 list_for_each_entry_safe(chan, 1949 next, &dmadev->channels, vc.chan.device_node) { 1950 list_del(&chan->vc.chan.device_node); 1951 kfree(chan); 1952 } 1953 } 1954 1955 #ifdef CONFIG_DEBUG_FS 1956 static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1957 { 1958 switch (state) { 1959 case PL08X_CHAN_IDLE: 1960 return "idle"; 1961 case PL08X_CHAN_RUNNING: 1962 return "running"; 1963 case PL08X_CHAN_PAUSED: 1964 return "paused"; 1965 case PL08X_CHAN_WAITING: 1966 return "waiting"; 1967 default: 1968 break; 1969 } 1970 return "UNKNOWN STATE"; 1971 } 1972 1973 static int pl08x_debugfs_show(struct seq_file *s, void *data) 1974 { 1975 struct pl08x_driver_data *pl08x = s->private; 1976 struct pl08x_dma_chan *chan; 1977 struct pl08x_phy_chan *ch; 1978 unsigned long flags; 1979 int i; 1980 1981 seq_printf(s, "PL08x physical channels:\n"); 1982 seq_printf(s, "CHANNEL:\tUSER:\n"); 1983 seq_printf(s, "--------\t-----\n"); 1984 for (i = 0; i < pl08x->vd->channels; i++) { 1985 struct pl08x_dma_chan *virt_chan; 1986 1987 ch = &pl08x->phy_chans[i]; 1988 1989 spin_lock_irqsave(&ch->lock, flags); 1990 virt_chan = ch->serving; 1991 1992 seq_printf(s, "%d\t\t%s%s\n", 1993 ch->id, 1994 virt_chan ? virt_chan->name : "(none)", 1995 ch->locked ? " LOCKED" : ""); 1996 1997 spin_unlock_irqrestore(&ch->lock, flags); 1998 } 1999 2000 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 2001 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2002 seq_printf(s, "--------\t------\n"); 2003 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { 2004 seq_printf(s, "%s\t\t%s\n", chan->name, 2005 pl08x_state_str(chan->state)); 2006 } 2007 2008 seq_printf(s, "\nPL08x virtual slave channels:\n"); 2009 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2010 seq_printf(s, "--------\t------\n"); 2011 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2012 seq_printf(s, "%s\t\t%s\n", chan->name, 2013 pl08x_state_str(chan->state)); 2014 } 2015 2016 return 0; 2017 } 2018 2019 static int pl08x_debugfs_open(struct inode *inode, struct file *file) 2020 { 2021 return single_open(file, pl08x_debugfs_show, inode->i_private); 2022 } 2023 2024 static const struct file_operations pl08x_debugfs_operations = { 2025 .open = pl08x_debugfs_open, 2026 .read = seq_read, 2027 .llseek = seq_lseek, 2028 .release = single_release, 2029 }; 2030 2031 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2032 { 2033 /* Expose a simple debugfs interface to view all clocks */ 2034 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 2035 S_IFREG | S_IRUGO, NULL, pl08x, 2036 &pl08x_debugfs_operations); 2037 } 2038 2039 #else 2040 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2041 { 2042 } 2043 #endif 2044 2045 #ifdef CONFIG_OF 2046 static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x, 2047 u32 id) 2048 { 2049 struct pl08x_dma_chan *chan; 2050 2051 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2052 if (chan->signal == id) 2053 return &chan->vc.chan; 2054 } 2055 2056 return NULL; 2057 } 2058 2059 static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec, 2060 struct of_dma *ofdma) 2061 { 2062 struct pl08x_driver_data *pl08x = ofdma->of_dma_data; 2063 struct dma_chan *dma_chan; 2064 struct pl08x_dma_chan *plchan; 2065 2066 if (!pl08x) 2067 return NULL; 2068 2069 if (dma_spec->args_count != 2) { 2070 dev_err(&pl08x->adev->dev, 2071 "DMA channel translation requires two cells\n"); 2072 return NULL; 2073 } 2074 2075 dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); 2076 if (!dma_chan) { 2077 dev_err(&pl08x->adev->dev, 2078 "DMA slave channel not found\n"); 2079 return NULL; 2080 } 2081 2082 plchan = to_pl08x_chan(dma_chan); 2083 dev_dbg(&pl08x->adev->dev, 2084 "translated channel for signal %d\n", 2085 dma_spec->args[0]); 2086 2087 /* Augment channel data for applicable AHB buses */ 2088 plchan->cd->periph_buses = dma_spec->args[1]; 2089 return dma_get_slave_channel(dma_chan); 2090 } 2091 2092 static int pl08x_of_probe(struct amba_device *adev, 2093 struct pl08x_driver_data *pl08x, 2094 struct device_node *np) 2095 { 2096 struct pl08x_platform_data *pd; 2097 struct pl08x_channel_data *chanp = NULL; 2098 u32 cctl_memcpy = 0; 2099 u32 val; 2100 int ret; 2101 int i; 2102 2103 pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); 2104 if (!pd) 2105 return -ENOMEM; 2106 2107 /* Eligible bus masters for fetching LLIs */ 2108 if (of_property_read_bool(np, "lli-bus-interface-ahb1")) 2109 pd->lli_buses |= PL08X_AHB1; 2110 if (of_property_read_bool(np, "lli-bus-interface-ahb2")) 2111 pd->lli_buses |= PL08X_AHB2; 2112 if (!pd->lli_buses) { 2113 dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n"); 2114 pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2; 2115 } 2116 2117 /* Eligible bus masters for memory access */ 2118 if (of_property_read_bool(np, "mem-bus-interface-ahb1")) 2119 pd->mem_buses |= PL08X_AHB1; 2120 if (of_property_read_bool(np, "mem-bus-interface-ahb2")) 2121 pd->mem_buses |= PL08X_AHB2; 2122 if (!pd->mem_buses) { 2123 dev_info(&adev->dev, "no bus masters for memory stated, assume all\n"); 2124 pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2; 2125 } 2126 2127 /* Parse the memcpy channel properties */ 2128 ret = of_property_read_u32(np, "memcpy-burst-size", &val); 2129 if (ret) { 2130 dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n"); 2131 val = 1; 2132 } 2133 switch (val) { 2134 default: 2135 dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n"); 2136 /* Fall through */ 2137 case 1: 2138 cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT | 2139 PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT; 2140 break; 2141 case 4: 2142 cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT | 2143 PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT; 2144 break; 2145 case 8: 2146 cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT | 2147 PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT; 2148 break; 2149 case 16: 2150 cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | 2151 PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT; 2152 break; 2153 case 32: 2154 cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT | 2155 PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT; 2156 break; 2157 case 64: 2158 cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT | 2159 PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT; 2160 break; 2161 case 128: 2162 cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT | 2163 PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT; 2164 break; 2165 case 256: 2166 cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT | 2167 PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT; 2168 break; 2169 } 2170 2171 ret = of_property_read_u32(np, "memcpy-bus-width", &val); 2172 if (ret) { 2173 dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n"); 2174 val = 8; 2175 } 2176 switch (val) { 2177 default: 2178 dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); 2179 /* Fall through */ 2180 case 8: 2181 cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT | 2182 PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 2183 break; 2184 case 16: 2185 cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT | 2186 PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 2187 break; 2188 case 32: 2189 cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | 2190 PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 2191 break; 2192 } 2193 2194 /* This is currently the only thing making sense */ 2195 cctl_memcpy |= PL080_CONTROL_PROT_SYS; 2196 2197 /* Set up memcpy channel */ 2198 pd->memcpy_channel.bus_id = "memcpy"; 2199 pd->memcpy_channel.cctl_memcpy = cctl_memcpy; 2200 /* Use the buses that can access memory, obviously */ 2201 pd->memcpy_channel.periph_buses = pd->mem_buses; 2202 2203 /* 2204 * Allocate channel data for all possible slave channels (one 2205 * for each possible signal), channels will then be allocated 2206 * for a device and have it's AHB interfaces set up at 2207 * translation time. 2208 */ 2209 chanp = devm_kcalloc(&adev->dev, 2210 pl08x->vd->signals, 2211 sizeof(struct pl08x_channel_data), 2212 GFP_KERNEL); 2213 if (!chanp) 2214 return -ENOMEM; 2215 2216 pd->slave_channels = chanp; 2217 for (i = 0; i < pl08x->vd->signals; i++) { 2218 /* chanp->periph_buses will be assigned at translation */ 2219 chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i); 2220 chanp++; 2221 } 2222 pd->num_slave_channels = pl08x->vd->signals; 2223 2224 pl08x->pd = pd; 2225 2226 return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate, 2227 pl08x); 2228 } 2229 #else 2230 static inline int pl08x_of_probe(struct amba_device *adev, 2231 struct pl08x_driver_data *pl08x, 2232 struct device_node *np) 2233 { 2234 return -EINVAL; 2235 } 2236 #endif 2237 2238 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) 2239 { 2240 struct pl08x_driver_data *pl08x; 2241 const struct vendor_data *vd = id->data; 2242 struct device_node *np = adev->dev.of_node; 2243 u32 tsfr_size; 2244 int ret = 0; 2245 int i; 2246 2247 ret = amba_request_regions(adev, NULL); 2248 if (ret) 2249 return ret; 2250 2251 /* Ensure that we can do DMA */ 2252 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32)); 2253 if (ret) 2254 goto out_no_pl08x; 2255 2256 /* Create the driver state holder */ 2257 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); 2258 if (!pl08x) { 2259 ret = -ENOMEM; 2260 goto out_no_pl08x; 2261 } 2262 2263 /* Assign useful pointers to the driver state */ 2264 pl08x->adev = adev; 2265 pl08x->vd = vd; 2266 2267 /* Initialize memcpy engine */ 2268 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 2269 pl08x->memcpy.dev = &adev->dev; 2270 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; 2271 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; 2272 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2273 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 2274 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 2275 pl08x->memcpy.device_config = pl08x_config; 2276 pl08x->memcpy.device_pause = pl08x_pause; 2277 pl08x->memcpy.device_resume = pl08x_resume; 2278 pl08x->memcpy.device_terminate_all = pl08x_terminate_all; 2279 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS; 2280 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS; 2281 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM); 2282 pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 2283 2284 /* Initialize slave engine */ 2285 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 2286 dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask); 2287 pl08x->slave.dev = &adev->dev; 2288 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; 2289 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2290 pl08x->slave.device_tx_status = pl08x_dma_tx_status; 2291 pl08x->slave.device_issue_pending = pl08x_issue_pending; 2292 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 2293 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; 2294 pl08x->slave.device_config = pl08x_config; 2295 pl08x->slave.device_pause = pl08x_pause; 2296 pl08x->slave.device_resume = pl08x_resume; 2297 pl08x->slave.device_terminate_all = pl08x_terminate_all; 2298 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; 2299 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; 2300 pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 2301 pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 2302 2303 /* Get the platform data */ 2304 pl08x->pd = dev_get_platdata(&adev->dev); 2305 if (!pl08x->pd) { 2306 if (np) { 2307 ret = pl08x_of_probe(adev, pl08x, np); 2308 if (ret) 2309 goto out_no_platdata; 2310 } else { 2311 dev_err(&adev->dev, "no platform data supplied\n"); 2312 ret = -EINVAL; 2313 goto out_no_platdata; 2314 } 2315 } 2316 2317 /* By default, AHB1 only. If dualmaster, from platform */ 2318 pl08x->lli_buses = PL08X_AHB1; 2319 pl08x->mem_buses = PL08X_AHB1; 2320 if (pl08x->vd->dualmaster) { 2321 pl08x->lli_buses = pl08x->pd->lli_buses; 2322 pl08x->mem_buses = pl08x->pd->mem_buses; 2323 } 2324 2325 if (vd->pl080s) 2326 pl08x->lli_words = PL080S_LLI_WORDS; 2327 else 2328 pl08x->lli_words = PL080_LLI_WORDS; 2329 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32); 2330 2331 /* A DMA memory pool for LLIs, align on 1-byte boundary */ 2332 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 2333 tsfr_size, PL08X_ALIGN, 0); 2334 if (!pl08x->pool) { 2335 ret = -ENOMEM; 2336 goto out_no_lli_pool; 2337 } 2338 2339 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); 2340 if (!pl08x->base) { 2341 ret = -ENOMEM; 2342 goto out_no_ioremap; 2343 } 2344 2345 /* Turn on the PL08x */ 2346 pl08x_ensure_on(pl08x); 2347 2348 /* Attach the interrupt handler */ 2349 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 2350 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 2351 2352 ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); 2353 if (ret) { 2354 dev_err(&adev->dev, "%s failed to request interrupt %d\n", 2355 __func__, adev->irq[0]); 2356 goto out_no_irq; 2357 } 2358 2359 /* Initialize physical channels */ 2360 pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), 2361 GFP_KERNEL); 2362 if (!pl08x->phy_chans) { 2363 dev_err(&adev->dev, "%s failed to allocate " 2364 "physical channel holders\n", 2365 __func__); 2366 ret = -ENOMEM; 2367 goto out_no_phychans; 2368 } 2369 2370 for (i = 0; i < vd->channels; i++) { 2371 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; 2372 2373 ch->id = i; 2374 ch->base = pl08x->base + PL080_Cx_BASE(i); 2375 ch->reg_config = ch->base + vd->config_offset; 2376 spin_lock_init(&ch->lock); 2377 2378 /* 2379 * Nomadik variants can have channels that are locked 2380 * down for the secure world only. Lock up these channels 2381 * by perpetually serving a dummy virtual channel. 2382 */ 2383 if (vd->nomadik) { 2384 u32 val; 2385 2386 val = readl(ch->reg_config); 2387 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) { 2388 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i); 2389 ch->locked = true; 2390 } 2391 } 2392 2393 dev_dbg(&adev->dev, "physical channel %d is %s\n", 2394 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 2395 } 2396 2397 /* Register as many memcpy channels as there are physical channels */ 2398 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 2399 pl08x->vd->channels, false); 2400 if (ret <= 0) { 2401 dev_warn(&pl08x->adev->dev, 2402 "%s failed to enumerate memcpy channels - %d\n", 2403 __func__, ret); 2404 goto out_no_memcpy; 2405 } 2406 2407 /* Register slave channels */ 2408 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2409 pl08x->pd->num_slave_channels, true); 2410 if (ret < 0) { 2411 dev_warn(&pl08x->adev->dev, 2412 "%s failed to enumerate slave channels - %d\n", 2413 __func__, ret); 2414 goto out_no_slave; 2415 } 2416 2417 ret = dma_async_device_register(&pl08x->memcpy); 2418 if (ret) { 2419 dev_warn(&pl08x->adev->dev, 2420 "%s failed to register memcpy as an async device - %d\n", 2421 __func__, ret); 2422 goto out_no_memcpy_reg; 2423 } 2424 2425 ret = dma_async_device_register(&pl08x->slave); 2426 if (ret) { 2427 dev_warn(&pl08x->adev->dev, 2428 "%s failed to register slave as an async device - %d\n", 2429 __func__, ret); 2430 goto out_no_slave_reg; 2431 } 2432 2433 amba_set_drvdata(adev, pl08x); 2434 init_pl08x_debugfs(pl08x); 2435 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n", 2436 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev), 2437 (unsigned long long)adev->res.start, adev->irq[0]); 2438 2439 return 0; 2440 2441 out_no_slave_reg: 2442 dma_async_device_unregister(&pl08x->memcpy); 2443 out_no_memcpy_reg: 2444 pl08x_free_virtual_channels(&pl08x->slave); 2445 out_no_slave: 2446 pl08x_free_virtual_channels(&pl08x->memcpy); 2447 out_no_memcpy: 2448 kfree(pl08x->phy_chans); 2449 out_no_phychans: 2450 free_irq(adev->irq[0], pl08x); 2451 out_no_irq: 2452 iounmap(pl08x->base); 2453 out_no_ioremap: 2454 dma_pool_destroy(pl08x->pool); 2455 out_no_lli_pool: 2456 out_no_platdata: 2457 kfree(pl08x); 2458 out_no_pl08x: 2459 amba_release_regions(adev); 2460 return ret; 2461 } 2462 2463 /* PL080 has 8 channels and the PL080 have just 2 */ 2464 static struct vendor_data vendor_pl080 = { 2465 .config_offset = PL080_CH_CONFIG, 2466 .channels = 8, 2467 .signals = 16, 2468 .dualmaster = true, 2469 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2470 }; 2471 2472 static struct vendor_data vendor_nomadik = { 2473 .config_offset = PL080_CH_CONFIG, 2474 .channels = 8, 2475 .signals = 32, 2476 .dualmaster = true, 2477 .nomadik = true, 2478 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2479 }; 2480 2481 static struct vendor_data vendor_pl080s = { 2482 .config_offset = PL080S_CH_CONFIG, 2483 .channels = 8, 2484 .signals = 32, 2485 .pl080s = true, 2486 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, 2487 }; 2488 2489 static struct vendor_data vendor_pl081 = { 2490 .config_offset = PL080_CH_CONFIG, 2491 .channels = 2, 2492 .signals = 16, 2493 .dualmaster = false, 2494 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2495 }; 2496 2497 static struct amba_id pl08x_ids[] = { 2498 /* Samsung PL080S variant */ 2499 { 2500 .id = 0x0a141080, 2501 .mask = 0xffffffff, 2502 .data = &vendor_pl080s, 2503 }, 2504 /* PL080 */ 2505 { 2506 .id = 0x00041080, 2507 .mask = 0x000fffff, 2508 .data = &vendor_pl080, 2509 }, 2510 /* PL081 */ 2511 { 2512 .id = 0x00041081, 2513 .mask = 0x000fffff, 2514 .data = &vendor_pl081, 2515 }, 2516 /* Nomadik 8815 PL080 variant */ 2517 { 2518 .id = 0x00280080, 2519 .mask = 0x00ffffff, 2520 .data = &vendor_nomadik, 2521 }, 2522 { 0, 0 }, 2523 }; 2524 2525 MODULE_DEVICE_TABLE(amba, pl08x_ids); 2526 2527 static struct amba_driver pl08x_amba_driver = { 2528 .drv.name = DRIVER_NAME, 2529 .id_table = pl08x_ids, 2530 .probe = pl08x_probe, 2531 }; 2532 2533 static int __init pl08x_init(void) 2534 { 2535 int retval; 2536 retval = amba_driver_register(&pl08x_amba_driver); 2537 if (retval) 2538 printk(KERN_WARNING DRIVER_NAME 2539 "failed to register as an AMBA device (%d)\n", 2540 retval); 2541 return retval; 2542 } 2543 subsys_initcall(pl08x_init); 2544