/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 * Documentation: S3C6410 User's Manual == PL080S
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, PL081 has a single master.
 *
 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
 * It differs in following aspects:
 * - CH_CONFIG register at different offset,
 * - separate CH_CONTROL2 register for transfer size,
 * - bigger maximum transfer size,
 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
 * - no support for peripheral flow control.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry. Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active. The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero). The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ. The DMAC
 *    will then move to the next LLI entry. Unsupported by PL080S.
 */
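
/*
 * Illustrative sketch (not part of this driver): following the FIFO burst
 * guidance above, a client driver for a peripheral with a hypothetical
 * 16-word FIFO might configure its slave channel roughly like this, using
 * the generic dmaengine API:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,	// hypothetical FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,	// half the peripheral FIFO depth
 *		.dst_maxburst = 16,	// the peripheral FIFO depth
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */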
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"

#define PL80X_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @config_offset: offset to the channel configuration register
 * @channels: the number of channels available in this variant
 * @signals: the number of request signals available from the hardware
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers are
 *	missing
 * @pl080s: whether this version is a PL080S, which has separate register and
 *	LLI word for transfer size.
 * @max_transfer_size: the maximum single element transfer size for this
 *	PL08x variant.
 */
struct vendor_data {
	u8 config_offset;
	u8 channels;
	u8 signals;
	bool dualmaster;
	bool nomadik;
	bool pl080s;
	u32 max_transfer_size;
};
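
/*
 * For orientation, a plain PL080 could be described by an initializer
 * along these lines (a sketch based on the header comment and the PL080
 * TRM, not necessarily this file's actual vendor tables):
 *
 *	static struct vendor_data vendor_pl080 = {
 *		.config_offset = PL080_CH_CONFIG,
 *		.channels = 8,
 *		.signals = 16,
 *		.dualmaster = true,
 *		.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
 *	};
 */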
/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: memory base address for this physical channel
 * @reg_config: configuration register address for this physical channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 *	channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 *	world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	void __iomem *reg_config;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released.
 * @cyclic: indicate cyclic transfers
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	u32 *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd. Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
	bool cyclic;
};

/**
 * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: slave configuration, including the RX/TX runtime addresses
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
};
/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 *	fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lli_words: how many words are used in each LLI item for this variant
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
	u8 lli_words;
};

/*
 * PL08X specific defines
 */

/* The order of words in an LLI. */
#define PL080_LLI_SRC		0
#define PL080_LLI_DST		1
#define PL080_LLI_LLI		2
#define PL080_LLI_CCTL		3
#define PL080S_LLI_CCTL2	4

/* Total words in an LLI. */
#define PL080_LLI_WORDS		4
#define PL080S_LLI_WORDS	8

/*
 * Number of LLIs in each LLI buffer allocated for one transfer
 * (maximum times we call dma_pool_alloc on this pool without freeing)
 */
#define MAX_NUM_TSFR_LLIS	512
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX. One important point to note
 * here is that this does not depend on the physical channel.
 */
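
/*
 * As a sketch of the platform side of this contract (not part of this
 * driver): a board with a simple first-free external mux could back the
 * pl08x_platform_data callbacks roughly like this, where board_mux_bitmap
 * is a hypothetical per-board allocation bitmap:
 *
 *	static int board_get_xfer_signal(const struct pl08x_channel_data *cd)
 *	{
 *		int signal = find_first_zero_bit(board_mux_bitmap, 16);
 *
 *		if (signal >= 16)
 *			return -EBUSY;
 *		set_bit(signal, board_mux_bitmap);
 *		// route cd's peripheral to "signal" in the external mux here
 *		return signal;
 *	}
 *
 *	static void board_put_xfer_signal(const struct pl08x_channel_data *cd,
 *					  int signal)
 *	{
 *		clear_bit(signal, board_mux_bitmap);
 *	}
 */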
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
		ret = pd->get_xfer_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
			pd->put_xfer_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->reg_config);
	return val & PL080_CONFIG_ACTIVE;
}

static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
		struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
{
	if (pl08x->vd->pl080s)
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
			lli[PL080S_LLI_CCTL2], ccfg);
	else
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);

	writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
	writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
	writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
	writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);

	if (pl08x->vd->pl080s)
		writel_relaxed(lli[PL080S_LLI_CCTL2],
				phychan->base + PL080S_CH_CONTROL2);

	writel(ccfg, phychan->reg_config);
}

/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed. Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	u32 val;

	list_del(&txd->vd.node);

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->reg_config);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->reg_config);

	writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
}
433 * 434 * For M->P transfers, pause the DMAC first and then stop the peripheral - 435 * the FIFO can only drain if the peripheral is still requesting data. 436 * (note: this can still timeout if the DMAC FIFO never drains of data.) 437 * 438 * For P->M transfers, disable the peripheral first to stop it filling 439 * the DMAC FIFO, and then pause the DMAC. 440 */ 441 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 442 { 443 u32 val; 444 int timeout; 445 446 /* Set the HALT bit and wait for the FIFO to drain */ 447 val = readl(ch->reg_config); 448 val |= PL080_CONFIG_HALT; 449 writel(val, ch->reg_config); 450 451 /* Wait for channel inactive */ 452 for (timeout = 1000; timeout; timeout--) { 453 if (!pl08x_phy_channel_busy(ch)) 454 break; 455 udelay(1); 456 } 457 if (pl08x_phy_channel_busy(ch)) 458 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); 459 } 460 461 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 462 { 463 u32 val; 464 465 /* Clear the HALT bit */ 466 val = readl(ch->reg_config); 467 val &= ~PL080_CONFIG_HALT; 468 writel(val, ch->reg_config); 469 } 470 471 /* 472 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 473 * clears any pending interrupt status. This should not be used for 474 * an on-going transfer, but as a method of shutting down a channel 475 * (eg, when it's no longer used) or terminating a transfer. 476 */ 477 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, 478 struct pl08x_phy_chan *ch) 479 { 480 u32 val = readl(ch->reg_config); 481 482 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | 483 PL080_CONFIG_TC_IRQ_MASK); 484 485 writel(val, ch->reg_config); 486 487 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); 488 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); 489 } 490 491 static inline u32 get_bytes_in_cctl(u32 cctl) 492 { 493 /* The source width defines the number of bytes */ 494 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; 495 496 cctl &= PL080_CONTROL_SWIDTH_MASK; 497 498 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 499 case PL080_WIDTH_8BIT: 500 break; 501 case PL080_WIDTH_16BIT: 502 bytes *= 2; 503 break; 504 case PL080_WIDTH_32BIT: 505 bytes *= 4; 506 break; 507 } 508 return bytes; 509 } 510 511 static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1) 512 { 513 /* The source width defines the number of bytes */ 514 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK; 515 516 cctl &= PL080_CONTROL_SWIDTH_MASK; 517 518 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 519 case PL080_WIDTH_8BIT: 520 break; 521 case PL080_WIDTH_16BIT: 522 bytes *= 2; 523 break; 524 case PL080_WIDTH_32BIT: 525 bytes *= 4; 526 break; 527 } 528 return bytes; 529 } 530 531 /* The channel should be paused when calling this */ 532 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 533 { 534 struct pl08x_driver_data *pl08x = plchan->host; 535 const u32 *llis_va, *llis_va_limit; 536 struct pl08x_phy_chan *ch; 537 dma_addr_t llis_bus; 538 struct pl08x_txd *txd; 539 u32 llis_max_words; 540 size_t bytes; 541 u32 clli; 542 543 ch = plchan->phychan; 544 txd = plchan->at; 545 546 if (!ch || !txd) 547 return 0; 548 549 /* 550 * Follow the LLIs to get the number of remaining 551 * bytes in the currently active transaction. 
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	const u32 *llis_va, *llis_va_limit;
	struct pl08x_phy_chan *ch;
	dma_addr_t llis_bus;
	struct pl08x_txd *txd;
	u32 llis_max_words;
	size_t bytes;
	u32 clli;

	ch = plchan->phychan;
	txd = plchan->at;

	if (!ch || !txd)
		return 0;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

	/* First get the remaining bytes in the active transfer */
	if (pl08x->vd->pl080s)
		bytes = get_bytes_in_cctl_pl080s(
				readl(ch->base + PL080_CH_CONTROL),
				readl(ch->base + PL080S_CH_CONTROL2));
	else
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

	if (!clli)
		return bytes;

	llis_va = txd->llis_va;
	llis_bus = txd->llis_bus;

	llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
	BUG_ON(clli < llis_bus || clli >= llis_bus +
			sizeof(u32) * llis_max_words);

	/*
	 * Locate the next LLI - as this is an array,
	 * it's simple maths to find.
	 */
	llis_va += (clli - llis_bus) / sizeof(u32);

	llis_va_limit = llis_va + llis_max_words;

	for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
		if (pl08x->vd->pl080s)
			bytes += get_bytes_in_cctl_pl080s(
					llis_va[PL080_LLI_CCTL],
					llis_va[PL080S_LLI_CCTL2]);
		else
			bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);

		/*
		 * An LLI pointer going backward terminates the LLI list
		 */
		if (llis_va[PL080_LLI_LLI] <= clli)
			break;
	}

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

/* Mark the physical channel as free. Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	ch->serving = NULL;
}
/*
 * Try to allocate a physical channel. When successful, assign it to
 * this virtual channel, and initiate the next descriptor. The
 * virtual channel lock must be held at this point.
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		return;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;

 retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * Eww. We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
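
/*
 * For illustration (not driver code): re-encoding a cctl for a 32-bit
 * source, a 16-bit destination and a transfer size of 8 sets SWIDTH to
 * 32BIT and DWIDTH to 16BIT; per the "tsize * src width" note further
 * down, that single LLI then moves 8 * 4 = 32 bytes:
 *
 *	cctl = pl08x_cctl_bits(cctl, 4, 2, 8);
 */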
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer. The slave bus is
 * chosen as the victim in case src & dst are not similarly aligned, i.e.
 * if, after aligning the master's address to the width requirements of
 * the transfer (by sending a few bytes byte by byte), the slave is still
 * not aligned, then its width will be reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_lli_build_data *bd,
				    int num_llis, int len, u32 cctl, u32 cctl2)
{
	u32 offset = num_llis * pl08x->lli_words;
	u32 *llis_va = bd->txd->llis_va + offset;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	/* Advance the offset to next LLI. */
	offset += pl08x->lli_words;

	llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
	llis_va[PL080_LLI_DST] = bd->dstbus.addr;
	llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
	llis_va[PL080_LLI_LLI] |= bd->lli_bus;
	llis_va[PL080_LLI_CCTL] = cctl;
	if (pl08x->vd->pl080s)
		llis_va[PL080S_LLI_CCTL2] = cctl2;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
			struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
			int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
	(*total_bytes) += len;
}

#ifdef VERBOSE_DEBUG
static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
			   const u32 *llis_va, int num_llis)
{
	int i;

	if (pl08x->vd->pl080s) {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL],
				llis_va[PL080S_LLI_CCTL2]);
			llis_va += pl08x->lli_words;
		}
	} else {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL]);
			llis_va += pl08x->lli_words;
		}
	}
}
#else
static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
				  const u32 *llis_va, int num_llis) {}
#endif
/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	u32 *llis_va, *last_lli;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev,
			"src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
			(u64)bd.srcbus.addr,
			cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			(u64)bd.dstbus.addr,
			cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the user driver, with
		 *   the memory address and zero length. We pass this to the
		 *   controller, and after the transfer it will receive the
		 *   last burst request from the peripheral and so the
		 *   transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, since the DMA controller doesn't know when an LLI's
		 *   transfer is over, it can't load the next LLI. So in this
		 *   case, there has to be an assumption that only one LLI is
		 *   supported. Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if (!IS_BUS_ALIGNED(&bd.srcbus) ||
				!IS_BUS_ALIGNED(&bd.dstbus)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
					0, cctl, 0);
			break;
		}

		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if (!IS_BUS_ALIGNED(mbus)) {
			early_bytes = mbus->buswidth -
				(mbus->addr & (mbus->buswidth - 1));
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08zx)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
				num_llis++, &total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (!IS_BUS_ALIGNED(sbus)) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
						pl08x->vd->max_transfer_size;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * calculate the actual transfer size in
				 * relation to bus width and get a maximum
				 * remainder of the highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
						lli_len, cctl, tsize);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(pl08x, &bd, &cctl,
					bd.remainder, num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded LLIs doesn't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;

	if (txd->cyclic) {
		/* Link back to the first LLI. */
		last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
	} else {
		/* The final LLI terminates the LLI list. */
		last_lli[PL080_LLI_LLI] = 0;
		/* The final LLI element shall also fire an interrupt. */
		last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
	}

	pl08x_dump_lli(pl08x, llis_va, num_llis);

	return num_llis;
}

static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_desc_free(struct virt_dma_desc *vd)
{
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

	dma_descriptor_unmap(&vd->tx);
	if (!txd->done)
		pl08x_release_mux(plchan);

	pl08x_free_txd(plchan->host, txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&plchan->vc, &head);
	vchan_dma_desc_free_list(&plchan->vc, &head);
}

/*
 * The DMA ENGINE API
 */
static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie is not complete yet.
	 * Get the number of bytes left in the active transactions and queue.
	 */
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return ret;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port. We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}
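
/*
 * For example (illustrative only): with both masters allowed on both
 * ports, src = dst = PL08X_AHB1 | PL08X_AHB2, the expression above
 * leaves the source on AHB1 and sets PL080_CONTROL_DST_AHB2, so source
 * and destination end up on separate ports as intended.
 */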
static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}

static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
			  enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element. Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}
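
/*
 * A quick sketch of how the helpers above compose (not driver code):
 * for a 4-byte bus width and a maxburst of 10, pl08x_width() yields
 * PL080_WIDTH_32BIT and pl08x_burst() walks the descending table to the
 * first entry not exceeding 10, i.e. PL080_BSIZE_8, so
 *
 *	cctl = pl08x_get_cctl(plchan, DMA_SLAVE_BUSWIDTH_4_BYTES, 10);
 *
 * encodes 32-bit source/destination widths with 8-beat bursts on both
 * sides (assuming the channel is not marked "single").
 */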
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (vchan_issue_pending(&plchan->vc)) {
		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
			pl08x_phy_alloc_and_start(plchan);
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
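
/*
 * For completeness, a client could grab one of the memcpy channels above
 * with the filter function exported further down in this file
 * (illustrative usage; "memcpy0" matches the naming used at channel init):
 *
 *	struct dma_chan *chan;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "memcpy0");
 */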
static struct pl08x_txd *pl08x_init_txd(
		struct dma_chan *chan,
		enum dma_transfer_direction direction,
		dma_addr_t *slave_addr)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	enum dma_slave_buswidth addr_width;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		*slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		*slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		plchan->signal, plchan->name);

	/* Assign the flow control signal to this channel */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	return txd;
}

static int pl08x_tx_add_sg(struct pl08x_txd *txd,
			   enum dma_transfer_direction direction,
			   dma_addr_t slave_addr,
			   dma_addr_t buf_addr,
			   unsigned int len)
{
	struct pl08x_sg *dsg;

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg)
		return -ENOMEM;

	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->len = len;
	if (direction == DMA_MEM_TO_DEV) {
		dsg->src_addr = buf_addr;
		dsg->dst_addr = slave_addr;
	} else {
		dsg->src_addr = slave_addr;
		dsg->dst_addr = buf_addr;
	}

	return 0;
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct scatterlist *sg;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, tmp) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      sg_dma_address(sg),
				      sg_dma_len(sg));
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret, tmp;
	dma_addr_t slave_addr;

	dev_dbg(&pl08x->adev->dev,
		"%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
		__func__, period_len, buf_len,
		direction == DMA_MEM_TO_DEV ? "to" : "from",
		plchan->name);

	txd = pl08x_init_txd(chan, direction, &slave_addr);
	if (!txd)
		return NULL;

	txd->cyclic = true;
	txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
	for (tmp = 0; tmp < buf_len; tmp += period_len) {
		ret = pl08x_tx_add_sg(txd, direction, slave_addr,
				      buf_addr + tmp, period_len);
		if (ret) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			return NULL;
		}
	}

	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}

static int pl08x_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (config->device_fc && pl08x->vd->pl080s) {
		dev_err(&pl08x->adev->dev,
			"%s: PL080S does not support peripheral flow control\n",
			__func__);
		return -EINVAL;
	}

	plchan->cfg = *config;

	return 0;
}

static int pl08x_terminate_all(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;

	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	plchan->state = PL08X_CHAN_IDLE;

	if (plchan->phychan) {
		/*
		 * Mark physical channel as free and free any slave
		 * signal
		 */
		pl08x_phy_free(plchan);
	}
	/* Dequeue jobs and free LLIs */
	if (plchan->at) {
		pl08x_desc_free(&plchan->at->vd);
		plchan->at = NULL;
	}
	/* Dequeue jobs not yet fired as well */
	pl08x_free_txd_list(pl08x, plchan);

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}

static int pl08x_pause(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_pause_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}

static int pl08x_resume(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->vc.lock, flags);
		return 0;
	}

	pl08x_resume_phy_chan(plchan->phychan);
	plchan->state = PL08X_CHAN_RUNNING;

	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	return 0;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			spin_lock(&plchan->vc.lock);
			tx = plchan->at;
			if (tx && tx->cyclic) {
				vchan_cyclic_callback(&tx->vd);
			} else if (tx) {
				plchan->at = NULL;
				/*
				 * This descriptor is done, release its mux
				 * reservation.
				 */
				pl08x_release_mux(plchan);
				tx->done = true;
				vchan_cookie_complete(&tx->vd);

				/*
				 * And start the next descriptor (if any),
				 * otherwise free this channel.
				 */
				if (vchan_next_desc(&plchan->vc))
					pl08x_start_next_txd(plchan);
				else
					pl08x_phy_free(plchan);
			}
			spin_unlock(&plchan->vc.lock);

			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}
1885 * Make a local wrapper to hold required data 1886 */ 1887 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1888 struct dma_device *dmadev, unsigned int channels, bool slave) 1889 { 1890 struct pl08x_dma_chan *chan; 1891 int i; 1892 1893 INIT_LIST_HEAD(&dmadev->channels); 1894 1895 /* 1896 * Register as many many memcpy as we have physical channels, 1897 * we won't always be able to use all but the code will have 1898 * to cope with that situation. 1899 */ 1900 for (i = 0; i < channels; i++) { 1901 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1902 if (!chan) 1903 return -ENOMEM; 1904 1905 chan->host = pl08x; 1906 chan->state = PL08X_CHAN_IDLE; 1907 chan->signal = -1; 1908 1909 if (slave) { 1910 chan->cd = &pl08x->pd->slave_channels[i]; 1911 /* 1912 * Some implementations have muxed signals, whereas some 1913 * use a mux in front of the signals and need dynamic 1914 * assignment of signals. 1915 */ 1916 chan->signal = i; 1917 pl08x_dma_slave_init(chan); 1918 } else { 1919 chan->cd = &pl08x->pd->memcpy_channel; 1920 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1921 if (!chan->name) { 1922 kfree(chan); 1923 return -ENOMEM; 1924 } 1925 } 1926 dev_dbg(&pl08x->adev->dev, 1927 "initialize virtual channel \"%s\"\n", 1928 chan->name); 1929 1930 chan->vc.desc_free = pl08x_desc_free; 1931 vchan_init(&chan->vc, dmadev); 1932 } 1933 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1934 i, slave ? "slave" : "memcpy"); 1935 return i; 1936 } 1937 1938 static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1939 { 1940 struct pl08x_dma_chan *chan = NULL; 1941 struct pl08x_dma_chan *next; 1942 1943 list_for_each_entry_safe(chan, 1944 next, &dmadev->channels, vc.chan.device_node) { 1945 list_del(&chan->vc.chan.device_node); 1946 kfree(chan); 1947 } 1948 } 1949 1950 #ifdef CONFIG_DEBUG_FS 1951 static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1952 { 1953 switch (state) { 1954 case PL08X_CHAN_IDLE: 1955 return "idle"; 1956 case PL08X_CHAN_RUNNING: 1957 return "running"; 1958 case PL08X_CHAN_PAUSED: 1959 return "paused"; 1960 case PL08X_CHAN_WAITING: 1961 return "waiting"; 1962 default: 1963 break; 1964 } 1965 return "UNKNOWN STATE"; 1966 } 1967 1968 static int pl08x_debugfs_show(struct seq_file *s, void *data) 1969 { 1970 struct pl08x_driver_data *pl08x = s->private; 1971 struct pl08x_dma_chan *chan; 1972 struct pl08x_phy_chan *ch; 1973 unsigned long flags; 1974 int i; 1975 1976 seq_printf(s, "PL08x physical channels:\n"); 1977 seq_printf(s, "CHANNEL:\tUSER:\n"); 1978 seq_printf(s, "--------\t-----\n"); 1979 for (i = 0; i < pl08x->vd->channels; i++) { 1980 struct pl08x_dma_chan *virt_chan; 1981 1982 ch = &pl08x->phy_chans[i]; 1983 1984 spin_lock_irqsave(&ch->lock, flags); 1985 virt_chan = ch->serving; 1986 1987 seq_printf(s, "%d\t\t%s%s\n", 1988 ch->id, 1989 virt_chan ? virt_chan->name : "(none)", 1990 ch->locked ? 

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}

static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open = pl08x_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all channels */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
			S_IFREG | S_IRUGO, NULL, pl08x,
			&pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif

#ifdef CONFIG_OF
static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
					   u32 id)
{
	struct pl08x_dma_chan *chan;

	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		if (chan->signal == id)
			return &chan->vc.chan;
	}

	return NULL;
}

static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
	struct dma_chan *dma_chan;
	struct pl08x_dma_chan *plchan;

	if (!pl08x)
		return NULL;

	if (dma_spec->args_count != 2) {
		dev_err(&pl08x->adev->dev,
			"DMA channel translation requires two cells\n");
		return NULL;
	}

	dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
	if (!dma_chan) {
		dev_err(&pl08x->adev->dev,
			"DMA slave channel not found\n");
		return NULL;
	}

	plchan = to_pl08x_chan(dma_chan);
	dev_dbg(&pl08x->adev->dev,
		"translated channel for signal %d\n",
		dma_spec->args[0]);

	/* Augment channel data for applicable AHB buses */
	plchan->cd->periph_buses = dma_spec->args[1];
	return dma_get_slave_channel(dma_chan);
}
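
/*
 * An illustrative device tree fragment matching the two-cell xlate
 * above (node names and signal numbers are invented for the example):
 * the first cell selects the DMA request signal, the second is the
 * AHB bus mask used for the peripheral side (PL08X_AHB1 and/or
 * PL08X_AHB2).
 *
 *	uart0: serial@80120000 {
 *		...
 *		dmas = <&dmac 12 0x1>, <&dmac 13 0x1>;
 *		dma-names = "rx", "tx";
 *	};
 */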

static int pl08x_of_probe(struct amba_device *adev,
			  struct pl08x_driver_data *pl08x,
			  struct device_node *np)
{
	struct pl08x_platform_data *pd;
	struct pl08x_channel_data *chanp = NULL;
	u32 cctl_memcpy = 0;
	u32 val;
	int ret;
	int i;

	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	/* Eligible bus masters for fetching LLIs */
	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
		pd->lli_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
		pd->lli_buses |= PL08X_AHB2;
	if (!pd->lli_buses) {
		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Eligible bus masters for memory access */
	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
		pd->mem_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
		pd->mem_buses |= PL08X_AHB2;
	if (!pd->mem_buses) {
		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Parse the memcpy channel properties */
	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
		val = 1;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
		/* Fall through */
	case 1:
		cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 4:
		cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 8:
		cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 16:
		cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 32:
		cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 64:
		cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 128:
		cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 256:
		cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	}

	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
		val = 8;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
		/* Fall through */
	case 8:
		cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 16:
		cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 32:
		cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	}

	/* This is currently the only thing making sense */
	cctl_memcpy |= PL080_CONTROL_PROT_SYS;

	/* Set up memcpy channel */
	pd->memcpy_channel.bus_id = "memcpy";
	pd->memcpy_channel.cctl_memcpy = cctl_memcpy;
	/* Use the buses that can access memory, obviously */
	pd->memcpy_channel.periph_buses = pd->mem_buses;
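
	/*
	 * As a worked example (assuming the usual PL080 CCTL layout of
	 * source/destination burst size in bits 12-14/15-17 and
	 * source/destination width in bits 18-20/21-23):
	 * "memcpy-burst-size = <16>" and "memcpy-bus-width = <32>" give
	 *
	 *	cctl_memcpy = (PL080_BSIZE_16 << 12) | (PL080_BSIZE_16 << 15) |
	 *		      (PL080_WIDTH_32BIT << 18) | (PL080_WIDTH_32BIT << 21) |
	 *		      PL080_CONTROL_PROT_SYS;
	 *
	 * i.e. 0x1049b000, given PL080_BSIZE_16 == 3 and PL080_WIDTH_32BIT == 2.
	 */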

	/*
	 * Allocate channel data for all possible slave channels (one
	 * for each possible signal); channels will then be allocated
	 * for a device and have their AHB interfaces set up at
	 * translation time.
	 */
	chanp = devm_kcalloc(&adev->dev,
			pl08x->vd->signals,
			sizeof(struct pl08x_channel_data),
			GFP_KERNEL);
	if (!chanp)
		return -ENOMEM;

	pd->slave_channels = chanp;
	for (i = 0; i < pl08x->vd->signals; i++) {
		/* chanp->periph_buses will be assigned at translation */
		chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
		chanp++;
	}
	pd->num_slave_channels = pl08x->vd->signals;

	pl08x->pd = pd;

	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
					  pl08x);
}
#else
static inline int pl08x_of_probe(struct amba_device *adev,
				 struct pl08x_driver_data *pl08x,
				 struct device_node *np)
{
	return -EINVAL;
}
#endif

static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	struct device_node *np = adev->dev.of_node;
	u32 tsfr_size;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Ensure that we can do DMA */
	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto out_no_pl08x;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_config = pl08x_config;
	pl08x->memcpy.device_pause = pl08x_pause;
	pl08x->memcpy.device_resume = pl08x_resume;
	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
	pl08x->slave.device_config = pl08x_config;
	pl08x->slave.device_pause = pl08x_pause;
	pl08x->slave.device_resume = pl08x_resume;
	pl08x->slave.device_terminate_all = pl08x_terminate_all;
	pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
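
	/*
	 * Note that the memcpy and slave dma_device instances set up above
	 * are two front ends to the same pool of physical channels: a
	 * physical channel is grabbed on demand whenever a virtual channel
	 * from either engine has work to do, and released again when the
	 * descriptor chain completes (see the TC handling in pl08x_irq()).
	 */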
	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		if (np) {
			ret = pl08x_of_probe(adev, pl08x, np);
			if (ret)
				goto out_no_platdata;
		} else {
			dev_err(&adev->dev, "no platform data supplied\n");
			ret = -EINVAL;
			goto out_no_platdata;
		}
	}

	/* By default, AHB1 only. If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	if (vd->pl080s)
		pl08x->lli_words = PL080S_LLI_WORDS;
	else
		pl08x->lli_words = PL080_LLI_WORDS;
	tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);

	/* A DMA memory pool for LLIs, aligned on a PL08X_ALIGN boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
				      tsfr_size, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Clear any pending interrupts before attaching the handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		ch->reg_config = ch->base + vd->config_offset;
		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->reg_config);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}
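
	/*
	 * For orientation: each physical channel has its own register
	 * bank, so the ch->base computed in the loop above amounts to
	 * (assuming the usual PL080_Cx_BASE() definition of 0x100 plus
	 * 0x20 per channel):
	 *
	 *	ch->base = pl08x->base + 0x100 + i * 0x20;
	 */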
"BUSY" : "FREE"); 2387 } 2388 2389 /* Register as many memcpy channels as there are physical channels */ 2390 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 2391 pl08x->vd->channels, false); 2392 if (ret <= 0) { 2393 dev_warn(&pl08x->adev->dev, 2394 "%s failed to enumerate memcpy channels - %d\n", 2395 __func__, ret); 2396 goto out_no_memcpy; 2397 } 2398 2399 /* Register slave channels */ 2400 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2401 pl08x->pd->num_slave_channels, true); 2402 if (ret < 0) { 2403 dev_warn(&pl08x->adev->dev, 2404 "%s failed to enumerate slave channels - %d\n", 2405 __func__, ret); 2406 goto out_no_slave; 2407 } 2408 2409 ret = dma_async_device_register(&pl08x->memcpy); 2410 if (ret) { 2411 dev_warn(&pl08x->adev->dev, 2412 "%s failed to register memcpy as an async device - %d\n", 2413 __func__, ret); 2414 goto out_no_memcpy_reg; 2415 } 2416 2417 ret = dma_async_device_register(&pl08x->slave); 2418 if (ret) { 2419 dev_warn(&pl08x->adev->dev, 2420 "%s failed to register slave as an async device - %d\n", 2421 __func__, ret); 2422 goto out_no_slave_reg; 2423 } 2424 2425 amba_set_drvdata(adev, pl08x); 2426 init_pl08x_debugfs(pl08x); 2427 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n", 2428 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev), 2429 (unsigned long long)adev->res.start, adev->irq[0]); 2430 2431 return 0; 2432 2433 out_no_slave_reg: 2434 dma_async_device_unregister(&pl08x->memcpy); 2435 out_no_memcpy_reg: 2436 pl08x_free_virtual_channels(&pl08x->slave); 2437 out_no_slave: 2438 pl08x_free_virtual_channels(&pl08x->memcpy); 2439 out_no_memcpy: 2440 kfree(pl08x->phy_chans); 2441 out_no_phychans: 2442 free_irq(adev->irq[0], pl08x); 2443 out_no_irq: 2444 iounmap(pl08x->base); 2445 out_no_ioremap: 2446 dma_pool_destroy(pl08x->pool); 2447 out_no_lli_pool: 2448 out_no_platdata: 2449 kfree(pl08x); 2450 out_no_pl08x: 2451 amba_release_regions(adev); 2452 return ret; 2453 } 2454 2455 /* PL080 has 8 channels and the PL080 have just 2 */ 2456 static struct vendor_data vendor_pl080 = { 2457 .config_offset = PL080_CH_CONFIG, 2458 .channels = 8, 2459 .signals = 16, 2460 .dualmaster = true, 2461 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2462 }; 2463 2464 static struct vendor_data vendor_nomadik = { 2465 .config_offset = PL080_CH_CONFIG, 2466 .channels = 8, 2467 .signals = 32, 2468 .dualmaster = true, 2469 .nomadik = true, 2470 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2471 }; 2472 2473 static struct vendor_data vendor_pl080s = { 2474 .config_offset = PL080S_CH_CONFIG, 2475 .channels = 8, 2476 .signals = 32, 2477 .pl080s = true, 2478 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, 2479 }; 2480 2481 static struct vendor_data vendor_pl081 = { 2482 .config_offset = PL080_CH_CONFIG, 2483 .channels = 2, 2484 .signals = 16, 2485 .dualmaster = false, 2486 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2487 }; 2488 2489 static struct amba_id pl08x_ids[] = { 2490 /* Samsung PL080S variant */ 2491 { 2492 .id = 0x0a141080, 2493 .mask = 0xffffffff, 2494 .data = &vendor_pl080s, 2495 }, 2496 /* PL080 */ 2497 { 2498 .id = 0x00041080, 2499 .mask = 0x000fffff, 2500 .data = &vendor_pl080, 2501 }, 2502 /* PL081 */ 2503 { 2504 .id = 0x00041081, 2505 .mask = 0x000fffff, 2506 .data = &vendor_pl081, 2507 }, 2508 /* Nomadik 8815 PL080 variant */ 2509 { 2510 .id = 0x00280080, 2511 .mask = 0x00ffffff, 2512 .data = &vendor_nomadik, 2513 }, 2514 { 0, 0 }, 2515 }; 2516 2517 MODULE_DEVICE_TABLE(amba, 
MODULE_DEVICE_TABLE(amba, pl08x_ids);

static struct amba_driver pl08x_amba_driver = {
	.drv.name = DRIVER_NAME,
	.id_table = pl08x_ids,
	.probe = pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);