1 /* 2 * Copyright (c) 2006 ARM Ltd. 3 * Copyright (c) 2010 ST-Ericsson SA 4 * 5 * Author: Peter Pearse <peter.pearse@arm.com> 6 * Author: Linus Walleij <linus.walleij@stericsson.com> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License as published by the Free 10 * Software Foundation; either version 2 of the License, or (at your option) 11 * any later version. 12 * 13 * This program is distributed in the hope that it will be useful, but WITHOUT 14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 * more details. 17 * 18 * The full GNU General Public License is in this distribution in the file 19 * called COPYING. 20 * 21 * Documentation: ARM DDI 0196G == PL080 22 * Documentation: ARM DDI 0218E == PL081 23 * Documentation: S3C6410 User's Manual == PL080S 24 * 25 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any 26 * channel. 27 * 28 * The PL080 has 8 channels available for simultaneous use, and the PL081 29 * has only two channels. So on these DMA controllers the number of channels 30 * and the number of incoming DMA signals are two totally different things. 31 * It is usually not possible to theoretically handle all physical signals, 32 * so a multiplexing scheme with possible denial of use is necessary. 33 * 34 * The PL080 has a dual bus master, PL081 has a single master. 35 * 36 * PL080S is a version modified by Samsung and used in S3C64xx SoCs. 37 * It differs in following aspects: 38 * - CH_CONFIG register at different offset, 39 * - separate CH_CONTROL2 register for transfer size, 40 * - bigger maximum transfer size, 41 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word, 42 * - no support for peripheral flow control. 43 * 44 * Memory to peripheral transfer may be visualized as 45 * Get data from memory to DMAC 46 * Until no data left 47 * On burst request from peripheral 48 * Destination burst from DMAC to peripheral 49 * Clear burst request 50 * Raise terminal count interrupt 51 * 52 * For peripherals with a FIFO: 53 * Source burst size == half the depth of the peripheral FIFO 54 * Destination burst size == the depth of the peripheral FIFO 55 * 56 * (Bursts are irrelevant for mem to mem transfers - there are no burst 57 * signals, the DMA controller will simply facilitate its AHB master.) 58 * 59 * ASSUMES default (little) endianness for DMA transfers 60 * 61 * The PL08x has two flow control settings: 62 * - DMAC flow control: the transfer size defines the number of transfers 63 * which occur for the current LLI entry, and the DMAC raises TC at the 64 * end of every LLI entry. Observed behaviour shows the DMAC listening 65 * to both the BREQ and SREQ signals (contrary to documented), 66 * transferring data if either is active. The LBREQ and LSREQ signals 67 * are ignored. 68 * 69 * - Peripheral flow control: the transfer size is ignored (and should be 70 * zero). The data is transferred from the current LLI entry, until 71 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 72 * will then move to the next LLI entry. Unsupported by PL080S. 73 */ 74 #include <linux/amba/bus.h> 75 #include <linux/amba/pl08x.h> 76 #include <linux/debugfs.h> 77 #include <linux/delay.h> 78 #include <linux/device.h> 79 #include <linux/dmaengine.h> 80 #include <linux/dmapool.h> 81 #include <linux/dma-mapping.h> 82 #include <linux/export.h> 83 #include <linux/init.h> 84 #include <linux/interrupt.h> 85 #include <linux/module.h> 86 #include <linux/of.h> 87 #include <linux/of_dma.h> 88 #include <linux/pm_runtime.h> 89 #include <linux/seq_file.h> 90 #include <linux/slab.h> 91 #include <linux/amba/pl080.h> 92 93 #include "dmaengine.h" 94 #include "virt-dma.h" 95 96 #define DRIVER_NAME "pl08xdmac" 97 98 #define PL80X_DMA_BUSWIDTHS \ 99 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 100 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 101 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 102 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) 103 104 static struct amba_driver pl08x_amba_driver; 105 struct pl08x_driver_data; 106 107 /** 108 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 109 * @config_offset: offset to the configuration register 110 * @channels: the number of channels available in this variant 111 * @signals: the number of request signals available from the hardware 112 * @dualmaster: whether this version supports dual AHB masters or not. 113 * @nomadik: whether the channels have Nomadik security extension bits 114 * that need to be checked for permission before use and some registers are 115 * missing 116 * @pl080s: whether this version is a PL080S, which has separate register and 117 * LLI word for transfer size. 118 * @max_transfer_size: the maximum single element transfer size for this 119 * PL08x variant. 120 */ 121 struct vendor_data { 122 u8 config_offset; 123 u8 channels; 124 u8 signals; 125 bool dualmaster; 126 bool nomadik; 127 bool pl080s; 128 u32 max_transfer_size; 129 }; 130 131 /** 132 * struct pl08x_bus_data - information of source or destination 133 * busses for a transfer 134 * @addr: current address 135 * @maxwidth: the maximum width of a transfer on this bus 136 * @buswidth: the width of this bus in bytes: 1, 2 or 4 137 */ 138 struct pl08x_bus_data { 139 dma_addr_t addr; 140 u8 maxwidth; 141 u8 buswidth; 142 }; 143 144 #define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth) 145 146 /** 147 * struct pl08x_phy_chan - holder for the physical channels 148 * @id: physical index to this channel 149 * @base: memory base address for this physical channel 150 * @reg_config: configuration address for this physical channel 151 * @lock: a lock to use when altering an instance of this struct 152 * @serving: the virtual channel currently being served by this physical 153 * channel 154 * @locked: channel unavailable for the system, e.g. dedicated to secure 155 * world 156 */ 157 struct pl08x_phy_chan { 158 unsigned int id; 159 void __iomem *base; 160 void __iomem *reg_config; 161 spinlock_t lock; 162 struct pl08x_dma_chan *serving; 163 bool locked; 164 }; 165 166 /** 167 * struct pl08x_sg - structure containing data per sg 168 * @src_addr: src address of sg 169 * @dst_addr: dst address of sg 170 * @len: transfer len in bytes 171 * @node: node for txd's dsg_list 172 */ 173 struct pl08x_sg { 174 dma_addr_t src_addr; 175 dma_addr_t dst_addr; 176 size_t len; 177 struct list_head node; 178 }; 179 180 /** 181 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor 182 * @vd: virtual DMA descriptor 183 * @dsg_list: list of children sg's 184 * @llis_bus: DMA memory address (physical) start for the LLIs 185 * @llis_va: virtual memory address start for the LLIs 186 * @cctl: control reg values for current txd 187 * @ccfg: config reg values for current txd 188 * @done: this marks completed descriptors, which should not have their 189 * mux released. 190 * @cyclic: indicate cyclic transfers 191 */ 192 struct pl08x_txd { 193 struct virt_dma_desc vd; 194 struct list_head dsg_list; 195 dma_addr_t llis_bus; 196 u32 *llis_va; 197 /* Default cctl value for LLIs */ 198 u32 cctl; 199 /* 200 * Settings to be put into the physical channel when we 201 * trigger this txd. Other registers are in llis_va[0]. 202 */ 203 u32 ccfg; 204 bool done; 205 bool cyclic; 206 }; 207 208 /** 209 * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel 210 * states 211 * @PL08X_CHAN_IDLE: the channel is idle 212 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport 213 * channel and is running a transfer on it 214 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport 215 * channel, but the transfer is currently paused 216 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport 217 * channel to become available (only pertains to memcpy channels) 218 */ 219 enum pl08x_dma_chan_state { 220 PL08X_CHAN_IDLE, 221 PL08X_CHAN_RUNNING, 222 PL08X_CHAN_PAUSED, 223 PL08X_CHAN_WAITING, 224 }; 225 226 /** 227 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel 228 * @vc: wrappped virtual channel 229 * @phychan: the physical channel utilized by this channel, if there is one 230 * @name: name of channel 231 * @cd: channel platform data 232 * @cfg: slave configuration 233 * @at: active transaction on this channel 234 * @host: a pointer to the host (internal use) 235 * @state: whether the channel is idle, paused, running etc 236 * @slave: whether this channel is a device (slave) or for memcpy 237 * @signal: the physical DMA request signal which this channel is using 238 * @mux_use: count of descriptors using this DMA request signal setting 239 */ 240 struct pl08x_dma_chan { 241 struct virt_dma_chan vc; 242 struct pl08x_phy_chan *phychan; 243 const char *name; 244 struct pl08x_channel_data *cd; 245 struct dma_slave_config cfg; 246 struct pl08x_txd *at; 247 struct pl08x_driver_data *host; 248 enum pl08x_dma_chan_state state; 249 bool slave; 250 int signal; 251 unsigned mux_use; 252 }; 253 254 /** 255 * struct pl08x_driver_data - the local state holder for the PL08x 256 * @slave: slave engine for this instance 257 * @memcpy: memcpy engine for this instance 258 * @base: virtual memory base (remapped) for the PL08x 259 * @adev: the corresponding AMBA (PrimeCell) bus entry 260 * @vd: vendor data for this PL08x variant 261 * @pd: platform data passed in from the platform/machine 262 * @phy_chans: array of data for the physical channels 263 * @pool: a pool for the LLI descriptors 264 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI 265 * fetches 266 * @mem_buses: set to indicate memory transfers on AHB2. 267 * @lli_words: how many words are used in each LLI item for this variant 268 */ 269 struct pl08x_driver_data { 270 struct dma_device slave; 271 struct dma_device memcpy; 272 void __iomem *base; 273 struct amba_device *adev; 274 const struct vendor_data *vd; 275 struct pl08x_platform_data *pd; 276 struct pl08x_phy_chan *phy_chans; 277 struct dma_pool *pool; 278 u8 lli_buses; 279 u8 mem_buses; 280 u8 lli_words; 281 }; 282 283 /* 284 * PL08X specific defines 285 */ 286 287 /* The order of words in an LLI. */ 288 #define PL080_LLI_SRC 0 289 #define PL080_LLI_DST 1 290 #define PL080_LLI_LLI 2 291 #define PL080_LLI_CCTL 3 292 #define PL080S_LLI_CCTL2 4 293 294 /* Total words in an LLI. */ 295 #define PL080_LLI_WORDS 4 296 #define PL080S_LLI_WORDS 8 297 298 /* 299 * Number of LLIs in each LLI buffer allocated for one transfer 300 * (maximum times we call dma_pool_alloc on this pool without freeing) 301 */ 302 #define MAX_NUM_TSFR_LLIS 512 303 #define PL08X_ALIGN 8 304 305 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 306 { 307 return container_of(chan, struct pl08x_dma_chan, vc.chan); 308 } 309 310 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) 311 { 312 return container_of(tx, struct pl08x_txd, vd.tx); 313 } 314 315 /* 316 * Mux handling. 317 * 318 * This gives us the DMA request input to the PL08x primecell which the 319 * peripheral described by the channel data will be routed to, possibly 320 * via a board/SoC specific external MUX. One important point to note 321 * here is that this does not depend on the physical channel. 322 */ 323 static int pl08x_request_mux(struct pl08x_dma_chan *plchan) 324 { 325 const struct pl08x_platform_data *pd = plchan->host->pd; 326 int ret; 327 328 if (plchan->mux_use++ == 0 && pd->get_xfer_signal) { 329 ret = pd->get_xfer_signal(plchan->cd); 330 if (ret < 0) { 331 plchan->mux_use = 0; 332 return ret; 333 } 334 335 plchan->signal = ret; 336 } 337 return 0; 338 } 339 340 static void pl08x_release_mux(struct pl08x_dma_chan *plchan) 341 { 342 const struct pl08x_platform_data *pd = plchan->host->pd; 343 344 if (plchan->signal >= 0) { 345 WARN_ON(plchan->mux_use == 0); 346 347 if (--plchan->mux_use == 0 && pd->put_xfer_signal) { 348 pd->put_xfer_signal(plchan->cd, plchan->signal); 349 plchan->signal = -1; 350 } 351 } 352 } 353 354 /* 355 * Physical channel handling 356 */ 357 358 /* Whether a certain channel is busy or not */ 359 static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) 360 { 361 unsigned int val; 362 363 val = readl(ch->reg_config); 364 return val & PL080_CONFIG_ACTIVE; 365 } 366 367 static void pl08x_write_lli(struct pl08x_driver_data *pl08x, 368 struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg) 369 { 370 if (pl08x->vd->pl080s) 371 dev_vdbg(&pl08x->adev->dev, 372 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 373 "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n", 374 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], 375 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], 376 lli[PL080S_LLI_CCTL2], ccfg); 377 else 378 dev_vdbg(&pl08x->adev->dev, 379 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 380 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", 381 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], 382 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg); 383 384 writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR); 385 writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR); 386 writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI); 387 writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL); 388 389 if (pl08x->vd->pl080s) 390 writel_relaxed(lli[PL080S_LLI_CCTL2], 391 phychan->base + PL080S_CH_CONTROL2); 392 393 writel(ccfg, phychan->reg_config); 394 } 395 396 /* 397 * Set the initial DMA register values i.e. those for the first LLI 398 * The next LLI pointer and the configuration interrupt bit have 399 * been set when the LLIs were constructed. Poke them into the hardware 400 * and start the transfer. 401 */ 402 static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) 403 { 404 struct pl08x_driver_data *pl08x = plchan->host; 405 struct pl08x_phy_chan *phychan = plchan->phychan; 406 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); 407 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 408 u32 val; 409 410 list_del(&txd->vd.node); 411 412 plchan->at = txd; 413 414 /* Wait for channel inactive */ 415 while (pl08x_phy_channel_busy(phychan)) 416 cpu_relax(); 417 418 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg); 419 420 /* Enable the DMA channel */ 421 /* Do not access config register until channel shows as disabled */ 422 while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id)) 423 cpu_relax(); 424 425 /* Do not access config register until channel shows as inactive */ 426 val = readl(phychan->reg_config); 427 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 428 val = readl(phychan->reg_config); 429 430 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config); 431 } 432 433 /* 434 * Pause the channel by setting the HALT bit. 435 * 436 * For M->P transfers, pause the DMAC first and then stop the peripheral - 437 * the FIFO can only drain if the peripheral is still requesting data. 438 * (note: this can still timeout if the DMAC FIFO never drains of data.) 439 * 440 * For P->M transfers, disable the peripheral first to stop it filling 441 * the DMAC FIFO, and then pause the DMAC. 442 */ 443 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 444 { 445 u32 val; 446 int timeout; 447 448 /* Set the HALT bit and wait for the FIFO to drain */ 449 val = readl(ch->reg_config); 450 val |= PL080_CONFIG_HALT; 451 writel(val, ch->reg_config); 452 453 /* Wait for channel inactive */ 454 for (timeout = 1000; timeout; timeout--) { 455 if (!pl08x_phy_channel_busy(ch)) 456 break; 457 udelay(1); 458 } 459 if (pl08x_phy_channel_busy(ch)) 460 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); 461 } 462 463 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 464 { 465 u32 val; 466 467 /* Clear the HALT bit */ 468 val = readl(ch->reg_config); 469 val &= ~PL080_CONFIG_HALT; 470 writel(val, ch->reg_config); 471 } 472 473 /* 474 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 475 * clears any pending interrupt status. This should not be used for 476 * an on-going transfer, but as a method of shutting down a channel 477 * (eg, when it's no longer used) or terminating a transfer. 478 */ 479 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, 480 struct pl08x_phy_chan *ch) 481 { 482 u32 val = readl(ch->reg_config); 483 484 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | 485 PL080_CONFIG_TC_IRQ_MASK); 486 487 writel(val, ch->reg_config); 488 489 writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR); 490 writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR); 491 } 492 493 static inline u32 get_bytes_in_cctl(u32 cctl) 494 { 495 /* The source width defines the number of bytes */ 496 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; 497 498 cctl &= PL080_CONTROL_SWIDTH_MASK; 499 500 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 501 case PL080_WIDTH_8BIT: 502 break; 503 case PL080_WIDTH_16BIT: 504 bytes *= 2; 505 break; 506 case PL080_WIDTH_32BIT: 507 bytes *= 4; 508 break; 509 } 510 return bytes; 511 } 512 513 static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1) 514 { 515 /* The source width defines the number of bytes */ 516 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK; 517 518 cctl &= PL080_CONTROL_SWIDTH_MASK; 519 520 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 521 case PL080_WIDTH_8BIT: 522 break; 523 case PL080_WIDTH_16BIT: 524 bytes *= 2; 525 break; 526 case PL080_WIDTH_32BIT: 527 bytes *= 4; 528 break; 529 } 530 return bytes; 531 } 532 533 /* The channel should be paused when calling this */ 534 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 535 { 536 struct pl08x_driver_data *pl08x = plchan->host; 537 const u32 *llis_va, *llis_va_limit; 538 struct pl08x_phy_chan *ch; 539 dma_addr_t llis_bus; 540 struct pl08x_txd *txd; 541 u32 llis_max_words; 542 size_t bytes; 543 u32 clli; 544 545 ch = plchan->phychan; 546 txd = plchan->at; 547 548 if (!ch || !txd) 549 return 0; 550 551 /* 552 * Follow the LLIs to get the number of remaining 553 * bytes in the currently active transaction. 554 */ 555 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; 556 557 /* First get the remaining bytes in the active transfer */ 558 if (pl08x->vd->pl080s) 559 bytes = get_bytes_in_cctl_pl080s( 560 readl(ch->base + PL080_CH_CONTROL), 561 readl(ch->base + PL080S_CH_CONTROL2)); 562 else 563 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 564 565 if (!clli) 566 return bytes; 567 568 llis_va = txd->llis_va; 569 llis_bus = txd->llis_bus; 570 571 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS; 572 BUG_ON(clli < llis_bus || clli >= llis_bus + 573 sizeof(u32) * llis_max_words); 574 575 /* 576 * Locate the next LLI - as this is an array, 577 * it's simple maths to find. 578 */ 579 llis_va += (clli - llis_bus) / sizeof(u32); 580 581 llis_va_limit = llis_va + llis_max_words; 582 583 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) { 584 if (pl08x->vd->pl080s) 585 bytes += get_bytes_in_cctl_pl080s( 586 llis_va[PL080_LLI_CCTL], 587 llis_va[PL080S_LLI_CCTL2]); 588 else 589 bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]); 590 591 /* 592 * A LLI pointer going backward terminates the LLI list 593 */ 594 if (llis_va[PL080_LLI_LLI] <= clli) 595 break; 596 } 597 598 return bytes; 599 } 600 601 /* 602 * Allocate a physical channel for a virtual channel 603 * 604 * Try to locate a physical channel to be used for this transfer. If all 605 * are taken return NULL and the requester will have to cope by using 606 * some fallback PIO mode or retrying later. 607 */ 608 static struct pl08x_phy_chan * 609 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 610 struct pl08x_dma_chan *virt_chan) 611 { 612 struct pl08x_phy_chan *ch = NULL; 613 unsigned long flags; 614 int i; 615 616 for (i = 0; i < pl08x->vd->channels; i++) { 617 ch = &pl08x->phy_chans[i]; 618 619 spin_lock_irqsave(&ch->lock, flags); 620 621 if (!ch->locked && !ch->serving) { 622 ch->serving = virt_chan; 623 spin_unlock_irqrestore(&ch->lock, flags); 624 break; 625 } 626 627 spin_unlock_irqrestore(&ch->lock, flags); 628 } 629 630 if (i == pl08x->vd->channels) { 631 /* No physical channel available, cope with it */ 632 return NULL; 633 } 634 635 return ch; 636 } 637 638 /* Mark the physical channel as free. Note, this write is atomic. */ 639 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 640 struct pl08x_phy_chan *ch) 641 { 642 ch->serving = NULL; 643 } 644 645 /* 646 * Try to allocate a physical channel. When successful, assign it to 647 * this virtual channel, and initiate the next descriptor. The 648 * virtual channel lock must be held at this point. 649 */ 650 static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) 651 { 652 struct pl08x_driver_data *pl08x = plchan->host; 653 struct pl08x_phy_chan *ch; 654 655 ch = pl08x_get_phy_channel(pl08x, plchan); 656 if (!ch) { 657 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); 658 plchan->state = PL08X_CHAN_WAITING; 659 return; 660 } 661 662 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", 663 ch->id, plchan->name); 664 665 plchan->phychan = ch; 666 plchan->state = PL08X_CHAN_RUNNING; 667 pl08x_start_next_txd(plchan); 668 } 669 670 static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, 671 struct pl08x_dma_chan *plchan) 672 { 673 struct pl08x_driver_data *pl08x = plchan->host; 674 675 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", 676 ch->id, plchan->name); 677 678 /* 679 * We do this without taking the lock; we're really only concerned 680 * about whether this pointer is NULL or not, and we're guaranteed 681 * that this will only be called when it _already_ is non-NULL. 682 */ 683 ch->serving = plchan; 684 plchan->phychan = ch; 685 plchan->state = PL08X_CHAN_RUNNING; 686 pl08x_start_next_txd(plchan); 687 } 688 689 /* 690 * Free a physical DMA channel, potentially reallocating it to another 691 * virtual channel if we have any pending. 692 */ 693 static void pl08x_phy_free(struct pl08x_dma_chan *plchan) 694 { 695 struct pl08x_driver_data *pl08x = plchan->host; 696 struct pl08x_dma_chan *p, *next; 697 698 retry: 699 next = NULL; 700 701 /* Find a waiting virtual channel for the next transfer. */ 702 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) 703 if (p->state == PL08X_CHAN_WAITING) { 704 next = p; 705 break; 706 } 707 708 if (!next) { 709 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) 710 if (p->state == PL08X_CHAN_WAITING) { 711 next = p; 712 break; 713 } 714 } 715 716 /* Ensure that the physical channel is stopped */ 717 pl08x_terminate_phy_chan(pl08x, plchan->phychan); 718 719 if (next) { 720 bool success; 721 722 /* 723 * Eww. We know this isn't going to deadlock 724 * but lockdep probably doesn't. 725 */ 726 spin_lock(&next->vc.lock); 727 /* Re-check the state now that we have the lock */ 728 success = next->state == PL08X_CHAN_WAITING; 729 if (success) 730 pl08x_phy_reassign_start(plchan->phychan, next); 731 spin_unlock(&next->vc.lock); 732 733 /* If the state changed, try to find another channel */ 734 if (!success) 735 goto retry; 736 } else { 737 /* No more jobs, so free up the physical channel */ 738 pl08x_put_phy_channel(pl08x, plchan->phychan); 739 } 740 741 plchan->phychan = NULL; 742 plchan->state = PL08X_CHAN_IDLE; 743 } 744 745 /* 746 * LLI handling 747 */ 748 749 static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) 750 { 751 switch (coded) { 752 case PL080_WIDTH_8BIT: 753 return 1; 754 case PL080_WIDTH_16BIT: 755 return 2; 756 case PL080_WIDTH_32BIT: 757 return 4; 758 default: 759 break; 760 } 761 BUG(); 762 return 0; 763 } 764 765 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 766 size_t tsize) 767 { 768 u32 retbits = cctl; 769 770 /* Remove all src, dst and transfer size bits */ 771 retbits &= ~PL080_CONTROL_DWIDTH_MASK; 772 retbits &= ~PL080_CONTROL_SWIDTH_MASK; 773 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 774 775 /* Then set the bits according to the parameters */ 776 switch (srcwidth) { 777 case 1: 778 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; 779 break; 780 case 2: 781 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT; 782 break; 783 case 4: 784 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; 785 break; 786 default: 787 BUG(); 788 break; 789 } 790 791 switch (dstwidth) { 792 case 1: 793 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 794 break; 795 case 2: 796 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 797 break; 798 case 4: 799 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 800 break; 801 default: 802 BUG(); 803 break; 804 } 805 806 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK; 807 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 808 return retbits; 809 } 810 811 struct pl08x_lli_build_data { 812 struct pl08x_txd *txd; 813 struct pl08x_bus_data srcbus; 814 struct pl08x_bus_data dstbus; 815 size_t remainder; 816 u32 lli_bus; 817 }; 818 819 /* 820 * Autoselect a master bus to use for the transfer. Slave will be the chosen as 821 * victim in case src & dest are not similarly aligned. i.e. If after aligning 822 * masters address with width requirements of transfer (by sending few byte by 823 * byte data), slave is still not aligned, then its width will be reduced to 824 * BYTE. 825 * - prefers the destination bus if both available 826 * - prefers bus with fixed address (i.e. peripheral) 827 */ 828 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 829 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 830 { 831 if (!(cctl & PL080_CONTROL_DST_INCR)) { 832 *mbus = &bd->dstbus; 833 *sbus = &bd->srcbus; 834 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 835 *mbus = &bd->srcbus; 836 *sbus = &bd->dstbus; 837 } else { 838 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { 839 *mbus = &bd->dstbus; 840 *sbus = &bd->srcbus; 841 } else { 842 *mbus = &bd->srcbus; 843 *sbus = &bd->dstbus; 844 } 845 } 846 } 847 848 /* 849 * Fills in one LLI for a certain transfer descriptor and advance the counter 850 */ 851 static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, 852 struct pl08x_lli_build_data *bd, 853 int num_llis, int len, u32 cctl, u32 cctl2) 854 { 855 u32 offset = num_llis * pl08x->lli_words; 856 u32 *llis_va = bd->txd->llis_va + offset; 857 dma_addr_t llis_bus = bd->txd->llis_bus; 858 859 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 860 861 /* Advance the offset to next LLI. */ 862 offset += pl08x->lli_words; 863 864 llis_va[PL080_LLI_SRC] = bd->srcbus.addr; 865 llis_va[PL080_LLI_DST] = bd->dstbus.addr; 866 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset); 867 llis_va[PL080_LLI_LLI] |= bd->lli_bus; 868 llis_va[PL080_LLI_CCTL] = cctl; 869 if (pl08x->vd->pl080s) 870 llis_va[PL080S_LLI_CCTL2] = cctl2; 871 872 if (cctl & PL080_CONTROL_SRC_INCR) 873 bd->srcbus.addr += len; 874 if (cctl & PL080_CONTROL_DST_INCR) 875 bd->dstbus.addr += len; 876 877 BUG_ON(bd->remainder < len); 878 879 bd->remainder -= len; 880 } 881 882 static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x, 883 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len, 884 int num_llis, size_t *total_bytes) 885 { 886 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); 887 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len); 888 (*total_bytes) += len; 889 } 890 891 #ifdef VERBOSE_DEBUG 892 static void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 893 const u32 *llis_va, int num_llis) 894 { 895 int i; 896 897 if (pl08x->vd->pl080s) { 898 dev_vdbg(&pl08x->adev->dev, 899 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n", 900 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2"); 901 for (i = 0; i < num_llis; i++) { 902 dev_vdbg(&pl08x->adev->dev, 903 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 904 i, llis_va, llis_va[PL080_LLI_SRC], 905 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 906 llis_va[PL080_LLI_CCTL], 907 llis_va[PL080S_LLI_CCTL2]); 908 llis_va += pl08x->lli_words; 909 } 910 } else { 911 dev_vdbg(&pl08x->adev->dev, 912 "%-3s %-9s %-10s %-10s %-10s %s\n", 913 "lli", "", "csrc", "cdst", "clli", "cctl"); 914 for (i = 0; i < num_llis; i++) { 915 dev_vdbg(&pl08x->adev->dev, 916 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", 917 i, llis_va, llis_va[PL080_LLI_SRC], 918 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 919 llis_va[PL080_LLI_CCTL]); 920 llis_va += pl08x->lli_words; 921 } 922 } 923 } 924 #else 925 static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 926 const u32 *llis_va, int num_llis) {} 927 #endif 928 929 /* 930 * This fills in the table of LLIs for the transfer descriptor 931 * Note that we assume we never have to change the burst sizes 932 * Return 0 for error 933 */ 934 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 935 struct pl08x_txd *txd) 936 { 937 struct pl08x_bus_data *mbus, *sbus; 938 struct pl08x_lli_build_data bd; 939 int num_llis = 0; 940 u32 cctl, early_bytes = 0; 941 size_t max_bytes_per_lli, total_bytes; 942 u32 *llis_va, *last_lli; 943 struct pl08x_sg *dsg; 944 945 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); 946 if (!txd->llis_va) { 947 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 948 return 0; 949 } 950 951 bd.txd = txd; 952 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; 953 cctl = txd->cctl; 954 955 /* Find maximum width of the source bus */ 956 bd.srcbus.maxwidth = 957 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 958 PL080_CONTROL_SWIDTH_SHIFT); 959 960 /* Find maximum width of the destination bus */ 961 bd.dstbus.maxwidth = 962 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 963 PL080_CONTROL_DWIDTH_SHIFT); 964 965 list_for_each_entry(dsg, &txd->dsg_list, node) { 966 total_bytes = 0; 967 cctl = txd->cctl; 968 969 bd.srcbus.addr = dsg->src_addr; 970 bd.dstbus.addr = dsg->dst_addr; 971 bd.remainder = dsg->len; 972 bd.srcbus.buswidth = bd.srcbus.maxwidth; 973 bd.dstbus.buswidth = bd.dstbus.maxwidth; 974 975 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 976 977 dev_vdbg(&pl08x->adev->dev, 978 "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n", 979 (u64)bd.srcbus.addr, 980 cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 981 bd.srcbus.buswidth, 982 (u64)bd.dstbus.addr, 983 cctl & PL080_CONTROL_DST_INCR ? "+" : "", 984 bd.dstbus.buswidth, 985 bd.remainder); 986 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 987 mbus == &bd.srcbus ? "src" : "dst", 988 sbus == &bd.srcbus ? "src" : "dst"); 989 990 /* 991 * Zero length is only allowed if all these requirements are 992 * met: 993 * - flow controller is peripheral. 994 * - src.addr is aligned to src.width 995 * - dst.addr is aligned to dst.width 996 * 997 * sg_len == 1 should be true, as there can be two cases here: 998 * 999 * - Memory addresses are contiguous and are not scattered. 1000 * Here, Only one sg will be passed by user driver, with 1001 * memory address and zero length. We pass this to controller 1002 * and after the transfer it will receive the last burst 1003 * request from peripheral and so transfer finishes. 1004 * 1005 * - Memory addresses are scattered and are not contiguous. 1006 * Here, Obviously as DMA controller doesn't know when a lli's 1007 * transfer gets over, it can't load next lli. So in this 1008 * case, there has to be an assumption that only one lli is 1009 * supported. Thus, we can't have scattered addresses. 1010 */ 1011 if (!bd.remainder) { 1012 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> 1013 PL080_CONFIG_FLOW_CONTROL_SHIFT; 1014 if (!((fc >= PL080_FLOW_SRC2DST_DST) && 1015 (fc <= PL080_FLOW_SRC2DST_SRC))) { 1016 dev_err(&pl08x->adev->dev, "%s sg len can't be zero", 1017 __func__); 1018 return 0; 1019 } 1020 1021 if (!IS_BUS_ALIGNED(&bd.srcbus) || 1022 !IS_BUS_ALIGNED(&bd.dstbus)) { 1023 dev_err(&pl08x->adev->dev, 1024 "%s src & dst address must be aligned to src" 1025 " & dst width if peripheral is flow controller", 1026 __func__); 1027 return 0; 1028 } 1029 1030 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1031 bd.dstbus.buswidth, 0); 1032 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1033 0, cctl, 0); 1034 break; 1035 } 1036 1037 /* 1038 * Send byte by byte for following cases 1039 * - Less than a bus width available 1040 * - until master bus is aligned 1041 */ 1042 if (bd.remainder < mbus->buswidth) 1043 early_bytes = bd.remainder; 1044 else if (!IS_BUS_ALIGNED(mbus)) { 1045 early_bytes = mbus->buswidth - 1046 (mbus->addr & (mbus->buswidth - 1)); 1047 if ((bd.remainder - early_bytes) < mbus->buswidth) 1048 early_bytes = bd.remainder; 1049 } 1050 1051 if (early_bytes) { 1052 dev_vdbg(&pl08x->adev->dev, 1053 "%s byte width LLIs (remain 0x%08zx)\n", 1054 __func__, bd.remainder); 1055 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes, 1056 num_llis++, &total_bytes); 1057 } 1058 1059 if (bd.remainder) { 1060 /* 1061 * Master now aligned 1062 * - if slave is not then we must set its width down 1063 */ 1064 if (!IS_BUS_ALIGNED(sbus)) { 1065 dev_dbg(&pl08x->adev->dev, 1066 "%s set down bus width to one byte\n", 1067 __func__); 1068 1069 sbus->buswidth = 1; 1070 } 1071 1072 /* 1073 * Bytes transferred = tsize * src width, not 1074 * MIN(buswidths) 1075 */ 1076 max_bytes_per_lli = bd.srcbus.buswidth * 1077 pl08x->vd->max_transfer_size; 1078 dev_vdbg(&pl08x->adev->dev, 1079 "%s max bytes per lli = %zu\n", 1080 __func__, max_bytes_per_lli); 1081 1082 /* 1083 * Make largest possible LLIs until less than one bus 1084 * width left 1085 */ 1086 while (bd.remainder > (mbus->buswidth - 1)) { 1087 size_t lli_len, tsize, width; 1088 1089 /* 1090 * If enough left try to send max possible, 1091 * otherwise try to send the remainder 1092 */ 1093 lli_len = min(bd.remainder, max_bytes_per_lli); 1094 1095 /* 1096 * Check against maximum bus alignment: 1097 * Calculate actual transfer size in relation to 1098 * bus width an get a maximum remainder of the 1099 * highest bus width - 1 1100 */ 1101 width = max(mbus->buswidth, sbus->buswidth); 1102 lli_len = (lli_len / width) * width; 1103 tsize = lli_len / bd.srcbus.buswidth; 1104 1105 dev_vdbg(&pl08x->adev->dev, 1106 "%s fill lli with single lli chunk of " 1107 "size 0x%08zx (remainder 0x%08zx)\n", 1108 __func__, lli_len, bd.remainder); 1109 1110 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1111 bd.dstbus.buswidth, tsize); 1112 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1113 lli_len, cctl, tsize); 1114 total_bytes += lli_len; 1115 } 1116 1117 /* 1118 * Send any odd bytes 1119 */ 1120 if (bd.remainder) { 1121 dev_vdbg(&pl08x->adev->dev, 1122 "%s align with boundary, send odd bytes (remain %zu)\n", 1123 __func__, bd.remainder); 1124 prep_byte_width_lli(pl08x, &bd, &cctl, 1125 bd.remainder, num_llis++, &total_bytes); 1126 } 1127 } 1128 1129 if (total_bytes != dsg->len) { 1130 dev_err(&pl08x->adev->dev, 1131 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 1132 __func__, total_bytes, dsg->len); 1133 return 0; 1134 } 1135 1136 if (num_llis >= MAX_NUM_TSFR_LLIS) { 1137 dev_err(&pl08x->adev->dev, 1138 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 1139 __func__, MAX_NUM_TSFR_LLIS); 1140 return 0; 1141 } 1142 } 1143 1144 llis_va = txd->llis_va; 1145 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words; 1146 1147 if (txd->cyclic) { 1148 /* Link back to the first LLI. */ 1149 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus; 1150 } else { 1151 /* The final LLI terminates the LLI. */ 1152 last_lli[PL080_LLI_LLI] = 0; 1153 /* The final LLI element shall also fire an interrupt. */ 1154 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN; 1155 } 1156 1157 pl08x_dump_lli(pl08x, llis_va, num_llis); 1158 1159 return num_llis; 1160 } 1161 1162 static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 1163 struct pl08x_txd *txd) 1164 { 1165 struct pl08x_sg *dsg, *_dsg; 1166 1167 if (txd->llis_va) 1168 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 1169 1170 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { 1171 list_del(&dsg->node); 1172 kfree(dsg); 1173 } 1174 1175 kfree(txd); 1176 } 1177 1178 static void pl08x_desc_free(struct virt_dma_desc *vd) 1179 { 1180 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1181 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); 1182 1183 dma_descriptor_unmap(&vd->tx); 1184 if (!txd->done) 1185 pl08x_release_mux(plchan); 1186 1187 pl08x_free_txd(plchan->host, txd); 1188 } 1189 1190 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 1191 struct pl08x_dma_chan *plchan) 1192 { 1193 LIST_HEAD(head); 1194 1195 vchan_get_all_descriptors(&plchan->vc, &head); 1196 vchan_dma_desc_free_list(&plchan->vc, &head); 1197 } 1198 1199 /* 1200 * The DMA ENGINE API 1201 */ 1202 static void pl08x_free_chan_resources(struct dma_chan *chan) 1203 { 1204 /* Ensure all queued descriptors are freed */ 1205 vchan_free_chan_resources(to_virt_chan(chan)); 1206 } 1207 1208 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 1209 struct dma_chan *chan, unsigned long flags) 1210 { 1211 struct dma_async_tx_descriptor *retval = NULL; 1212 1213 return retval; 1214 } 1215 1216 /* 1217 * Code accessing dma_async_is_complete() in a tight loop may give problems. 1218 * If slaves are relying on interrupts to signal completion this function 1219 * must not be called with interrupts disabled. 1220 */ 1221 static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, 1222 dma_cookie_t cookie, struct dma_tx_state *txstate) 1223 { 1224 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1225 struct virt_dma_desc *vd; 1226 unsigned long flags; 1227 enum dma_status ret; 1228 size_t bytes = 0; 1229 1230 ret = dma_cookie_status(chan, cookie, txstate); 1231 if (ret == DMA_COMPLETE) 1232 return ret; 1233 1234 /* 1235 * There's no point calculating the residue if there's 1236 * no txstate to store the value. 1237 */ 1238 if (!txstate) { 1239 if (plchan->state == PL08X_CHAN_PAUSED) 1240 ret = DMA_PAUSED; 1241 return ret; 1242 } 1243 1244 spin_lock_irqsave(&plchan->vc.lock, flags); 1245 ret = dma_cookie_status(chan, cookie, txstate); 1246 if (ret != DMA_COMPLETE) { 1247 vd = vchan_find_desc(&plchan->vc, cookie); 1248 if (vd) { 1249 /* On the issued list, so hasn't been processed yet */ 1250 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1251 struct pl08x_sg *dsg; 1252 1253 list_for_each_entry(dsg, &txd->dsg_list, node) 1254 bytes += dsg->len; 1255 } else { 1256 bytes = pl08x_getbytes_chan(plchan); 1257 } 1258 } 1259 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1260 1261 /* 1262 * This cookie not complete yet 1263 * Get number of bytes left in the active transactions and queue 1264 */ 1265 dma_set_residue(txstate, bytes); 1266 1267 if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) 1268 ret = DMA_PAUSED; 1269 1270 /* Whether waiting or running, we're in progress */ 1271 return ret; 1272 } 1273 1274 /* PrimeCell DMA extension */ 1275 struct burst_table { 1276 u32 burstwords; 1277 u32 reg; 1278 }; 1279 1280 static const struct burst_table burst_sizes[] = { 1281 { 1282 .burstwords = 256, 1283 .reg = PL080_BSIZE_256, 1284 }, 1285 { 1286 .burstwords = 128, 1287 .reg = PL080_BSIZE_128, 1288 }, 1289 { 1290 .burstwords = 64, 1291 .reg = PL080_BSIZE_64, 1292 }, 1293 { 1294 .burstwords = 32, 1295 .reg = PL080_BSIZE_32, 1296 }, 1297 { 1298 .burstwords = 16, 1299 .reg = PL080_BSIZE_16, 1300 }, 1301 { 1302 .burstwords = 8, 1303 .reg = PL080_BSIZE_8, 1304 }, 1305 { 1306 .burstwords = 4, 1307 .reg = PL080_BSIZE_4, 1308 }, 1309 { 1310 .burstwords = 0, 1311 .reg = PL080_BSIZE_1, 1312 }, 1313 }; 1314 1315 /* 1316 * Given the source and destination available bus masks, select which 1317 * will be routed to each port. We try to have source and destination 1318 * on separate ports, but always respect the allowable settings. 1319 */ 1320 static u32 pl08x_select_bus(u8 src, u8 dst) 1321 { 1322 u32 cctl = 0; 1323 1324 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) 1325 cctl |= PL080_CONTROL_DST_AHB2; 1326 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) 1327 cctl |= PL080_CONTROL_SRC_AHB2; 1328 1329 return cctl; 1330 } 1331 1332 static u32 pl08x_cctl(u32 cctl) 1333 { 1334 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | 1335 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | 1336 PL080_CONTROL_PROT_MASK); 1337 1338 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1339 return cctl | PL080_CONTROL_PROT_SYS; 1340 } 1341 1342 static u32 pl08x_width(enum dma_slave_buswidth width) 1343 { 1344 switch (width) { 1345 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1346 return PL080_WIDTH_8BIT; 1347 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1348 return PL080_WIDTH_16BIT; 1349 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1350 return PL080_WIDTH_32BIT; 1351 default: 1352 return ~0; 1353 } 1354 } 1355 1356 static u32 pl08x_burst(u32 maxburst) 1357 { 1358 int i; 1359 1360 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) 1361 if (burst_sizes[i].burstwords <= maxburst) 1362 break; 1363 1364 return burst_sizes[i].reg; 1365 } 1366 1367 static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, 1368 enum dma_slave_buswidth addr_width, u32 maxburst) 1369 { 1370 u32 width, burst, cctl = 0; 1371 1372 width = pl08x_width(addr_width); 1373 if (width == ~0) 1374 return ~0; 1375 1376 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; 1377 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; 1378 1379 /* 1380 * If this channel will only request single transfers, set this 1381 * down to ONE element. Also select one element if no maxburst 1382 * is specified. 1383 */ 1384 if (plchan->cd->single) 1385 maxburst = 1; 1386 1387 burst = pl08x_burst(maxburst); 1388 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1389 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1390 1391 return pl08x_cctl(cctl); 1392 } 1393 1394 /* 1395 * Slave transactions callback to the slave device to allow 1396 * synchronization of slave DMA signals with the DMAC enable 1397 */ 1398 static void pl08x_issue_pending(struct dma_chan *chan) 1399 { 1400 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1401 unsigned long flags; 1402 1403 spin_lock_irqsave(&plchan->vc.lock, flags); 1404 if (vchan_issue_pending(&plchan->vc)) { 1405 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) 1406 pl08x_phy_alloc_and_start(plchan); 1407 } 1408 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1409 } 1410 1411 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) 1412 { 1413 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1414 1415 if (txd) { 1416 INIT_LIST_HEAD(&txd->dsg_list); 1417 1418 /* Always enable error and terminal interrupts */ 1419 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 1420 PL080_CONFIG_TC_IRQ_MASK; 1421 } 1422 return txd; 1423 } 1424 1425 /* 1426 * Initialize a descriptor to be used by memcpy submit 1427 */ 1428 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1429 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 1430 size_t len, unsigned long flags) 1431 { 1432 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1433 struct pl08x_driver_data *pl08x = plchan->host; 1434 struct pl08x_txd *txd; 1435 struct pl08x_sg *dsg; 1436 int ret; 1437 1438 txd = pl08x_get_txd(plchan); 1439 if (!txd) { 1440 dev_err(&pl08x->adev->dev, 1441 "%s no memory for descriptor\n", __func__); 1442 return NULL; 1443 } 1444 1445 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1446 if (!dsg) { 1447 pl08x_free_txd(pl08x, txd); 1448 return NULL; 1449 } 1450 list_add_tail(&dsg->node, &txd->dsg_list); 1451 1452 dsg->src_addr = src; 1453 dsg->dst_addr = dest; 1454 dsg->len = len; 1455 1456 /* Set platform data for m2m */ 1457 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1458 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & 1459 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 1460 1461 /* Both to be incremented or the code will break */ 1462 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1463 1464 if (pl08x->vd->dualmaster) 1465 txd->cctl |= pl08x_select_bus(pl08x->mem_buses, 1466 pl08x->mem_buses); 1467 1468 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1469 if (!ret) { 1470 pl08x_free_txd(pl08x, txd); 1471 return NULL; 1472 } 1473 1474 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1475 } 1476 1477 static struct pl08x_txd *pl08x_init_txd( 1478 struct dma_chan *chan, 1479 enum dma_transfer_direction direction, 1480 dma_addr_t *slave_addr) 1481 { 1482 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1483 struct pl08x_driver_data *pl08x = plchan->host; 1484 struct pl08x_txd *txd; 1485 enum dma_slave_buswidth addr_width; 1486 int ret, tmp; 1487 u8 src_buses, dst_buses; 1488 u32 maxburst, cctl; 1489 1490 txd = pl08x_get_txd(plchan); 1491 if (!txd) { 1492 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1493 return NULL; 1494 } 1495 1496 /* 1497 * Set up addresses, the PrimeCell configured address 1498 * will take precedence since this may configure the 1499 * channel target address dynamically at runtime. 1500 */ 1501 if (direction == DMA_MEM_TO_DEV) { 1502 cctl = PL080_CONTROL_SRC_INCR; 1503 *slave_addr = plchan->cfg.dst_addr; 1504 addr_width = plchan->cfg.dst_addr_width; 1505 maxburst = plchan->cfg.dst_maxburst; 1506 src_buses = pl08x->mem_buses; 1507 dst_buses = plchan->cd->periph_buses; 1508 } else if (direction == DMA_DEV_TO_MEM) { 1509 cctl = PL080_CONTROL_DST_INCR; 1510 *slave_addr = plchan->cfg.src_addr; 1511 addr_width = plchan->cfg.src_addr_width; 1512 maxburst = plchan->cfg.src_maxburst; 1513 src_buses = plchan->cd->periph_buses; 1514 dst_buses = pl08x->mem_buses; 1515 } else { 1516 pl08x_free_txd(pl08x, txd); 1517 dev_err(&pl08x->adev->dev, 1518 "%s direction unsupported\n", __func__); 1519 return NULL; 1520 } 1521 1522 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); 1523 if (cctl == ~0) { 1524 pl08x_free_txd(pl08x, txd); 1525 dev_err(&pl08x->adev->dev, 1526 "DMA slave configuration botched?\n"); 1527 return NULL; 1528 } 1529 1530 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); 1531 1532 if (plchan->cfg.device_fc) 1533 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 1534 PL080_FLOW_PER2MEM_PER; 1535 else 1536 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER : 1537 PL080_FLOW_PER2MEM; 1538 1539 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1540 1541 ret = pl08x_request_mux(plchan); 1542 if (ret < 0) { 1543 pl08x_free_txd(pl08x, txd); 1544 dev_dbg(&pl08x->adev->dev, 1545 "unable to mux for transfer on %s due to platform restrictions\n", 1546 plchan->name); 1547 return NULL; 1548 } 1549 1550 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", 1551 plchan->signal, plchan->name); 1552 1553 /* Assign the flow control signal to this channel */ 1554 if (direction == DMA_MEM_TO_DEV) 1555 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; 1556 else 1557 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; 1558 1559 return txd; 1560 } 1561 1562 static int pl08x_tx_add_sg(struct pl08x_txd *txd, 1563 enum dma_transfer_direction direction, 1564 dma_addr_t slave_addr, 1565 dma_addr_t buf_addr, 1566 unsigned int len) 1567 { 1568 struct pl08x_sg *dsg; 1569 1570 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1571 if (!dsg) 1572 return -ENOMEM; 1573 1574 list_add_tail(&dsg->node, &txd->dsg_list); 1575 1576 dsg->len = len; 1577 if (direction == DMA_MEM_TO_DEV) { 1578 dsg->src_addr = buf_addr; 1579 dsg->dst_addr = slave_addr; 1580 } else { 1581 dsg->src_addr = slave_addr; 1582 dsg->dst_addr = buf_addr; 1583 } 1584 1585 return 0; 1586 } 1587 1588 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1589 struct dma_chan *chan, struct scatterlist *sgl, 1590 unsigned int sg_len, enum dma_transfer_direction direction, 1591 unsigned long flags, void *context) 1592 { 1593 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1594 struct pl08x_driver_data *pl08x = plchan->host; 1595 struct pl08x_txd *txd; 1596 struct scatterlist *sg; 1597 int ret, tmp; 1598 dma_addr_t slave_addr; 1599 1600 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1601 __func__, sg_dma_len(sgl), plchan->name); 1602 1603 txd = pl08x_init_txd(chan, direction, &slave_addr); 1604 if (!txd) 1605 return NULL; 1606 1607 for_each_sg(sgl, sg, sg_len, tmp) { 1608 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1609 sg_dma_address(sg), 1610 sg_dma_len(sg)); 1611 if (ret) { 1612 pl08x_release_mux(plchan); 1613 pl08x_free_txd(pl08x, txd); 1614 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 1615 __func__); 1616 return NULL; 1617 } 1618 } 1619 1620 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1621 if (!ret) { 1622 pl08x_release_mux(plchan); 1623 pl08x_free_txd(pl08x, txd); 1624 return NULL; 1625 } 1626 1627 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1628 } 1629 1630 static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( 1631 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 1632 size_t period_len, enum dma_transfer_direction direction, 1633 unsigned long flags) 1634 { 1635 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1636 struct pl08x_driver_data *pl08x = plchan->host; 1637 struct pl08x_txd *txd; 1638 int ret, tmp; 1639 dma_addr_t slave_addr; 1640 1641 dev_dbg(&pl08x->adev->dev, 1642 "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n", 1643 __func__, period_len, buf_len, 1644 direction == DMA_MEM_TO_DEV ? "to" : "from", 1645 plchan->name); 1646 1647 txd = pl08x_init_txd(chan, direction, &slave_addr); 1648 if (!txd) 1649 return NULL; 1650 1651 txd->cyclic = true; 1652 txd->cctl |= PL080_CONTROL_TC_IRQ_EN; 1653 for (tmp = 0; tmp < buf_len; tmp += period_len) { 1654 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1655 buf_addr + tmp, period_len); 1656 if (ret) { 1657 pl08x_release_mux(plchan); 1658 pl08x_free_txd(pl08x, txd); 1659 return NULL; 1660 } 1661 } 1662 1663 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1664 if (!ret) { 1665 pl08x_release_mux(plchan); 1666 pl08x_free_txd(pl08x, txd); 1667 return NULL; 1668 } 1669 1670 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1671 } 1672 1673 static int pl08x_config(struct dma_chan *chan, 1674 struct dma_slave_config *config) 1675 { 1676 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1677 struct pl08x_driver_data *pl08x = plchan->host; 1678 1679 if (!plchan->slave) 1680 return -EINVAL; 1681 1682 /* Reject definitely invalid configurations */ 1683 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 1684 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 1685 return -EINVAL; 1686 1687 if (config->device_fc && pl08x->vd->pl080s) { 1688 dev_err(&pl08x->adev->dev, 1689 "%s: PL080S does not support peripheral flow control\n", 1690 __func__); 1691 return -EINVAL; 1692 } 1693 1694 plchan->cfg = *config; 1695 1696 return 0; 1697 } 1698 1699 static int pl08x_terminate_all(struct dma_chan *chan) 1700 { 1701 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1702 struct pl08x_driver_data *pl08x = plchan->host; 1703 unsigned long flags; 1704 1705 spin_lock_irqsave(&plchan->vc.lock, flags); 1706 if (!plchan->phychan && !plchan->at) { 1707 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1708 return 0; 1709 } 1710 1711 plchan->state = PL08X_CHAN_IDLE; 1712 1713 if (plchan->phychan) { 1714 /* 1715 * Mark physical channel as free and free any slave 1716 * signal 1717 */ 1718 pl08x_phy_free(plchan); 1719 } 1720 /* Dequeue jobs and free LLIs */ 1721 if (plchan->at) { 1722 pl08x_desc_free(&plchan->at->vd); 1723 plchan->at = NULL; 1724 } 1725 /* Dequeue jobs not yet fired as well */ 1726 pl08x_free_txd_list(pl08x, plchan); 1727 1728 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1729 1730 return 0; 1731 } 1732 1733 static int pl08x_pause(struct dma_chan *chan) 1734 { 1735 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1736 unsigned long flags; 1737 1738 /* 1739 * Anything succeeds on channels with no physical allocation and 1740 * no queued transfers. 1741 */ 1742 spin_lock_irqsave(&plchan->vc.lock, flags); 1743 if (!plchan->phychan && !plchan->at) { 1744 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1745 return 0; 1746 } 1747 1748 pl08x_pause_phy_chan(plchan->phychan); 1749 plchan->state = PL08X_CHAN_PAUSED; 1750 1751 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1752 1753 return 0; 1754 } 1755 1756 static int pl08x_resume(struct dma_chan *chan) 1757 { 1758 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1759 unsigned long flags; 1760 1761 /* 1762 * Anything succeeds on channels with no physical allocation and 1763 * no queued transfers. 1764 */ 1765 spin_lock_irqsave(&plchan->vc.lock, flags); 1766 if (!plchan->phychan && !plchan->at) { 1767 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1768 return 0; 1769 } 1770 1771 pl08x_resume_phy_chan(plchan->phychan); 1772 plchan->state = PL08X_CHAN_RUNNING; 1773 1774 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1775 1776 return 0; 1777 } 1778 1779 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1780 { 1781 struct pl08x_dma_chan *plchan; 1782 char *name = chan_id; 1783 1784 /* Reject channels for devices not bound to this driver */ 1785 if (chan->device->dev->driver != &pl08x_amba_driver.drv) 1786 return false; 1787 1788 plchan = to_pl08x_chan(chan); 1789 1790 /* Check that the channel is not taken! */ 1791 if (!strcmp(plchan->name, name)) 1792 return true; 1793 1794 return false; 1795 } 1796 EXPORT_SYMBOL_GPL(pl08x_filter_id); 1797 1798 static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id) 1799 { 1800 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1801 1802 return plchan->cd == chan_id; 1803 } 1804 1805 /* 1806 * Just check that the device is there and active 1807 * TODO: turn this bit on/off depending on the number of physical channels 1808 * actually used, if it is zero... well shut it off. That will save some 1809 * power. Cut the clock at the same time. 1810 */ 1811 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1812 { 1813 /* The Nomadik variant does not have the config register */ 1814 if (pl08x->vd->nomadik) 1815 return; 1816 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1817 } 1818 1819 static irqreturn_t pl08x_irq(int irq, void *dev) 1820 { 1821 struct pl08x_driver_data *pl08x = dev; 1822 u32 mask = 0, err, tc, i; 1823 1824 /* check & clear - ERR & TC interrupts */ 1825 err = readl(pl08x->base + PL080_ERR_STATUS); 1826 if (err) { 1827 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", 1828 __func__, err); 1829 writel(err, pl08x->base + PL080_ERR_CLEAR); 1830 } 1831 tc = readl(pl08x->base + PL080_TC_STATUS); 1832 if (tc) 1833 writel(tc, pl08x->base + PL080_TC_CLEAR); 1834 1835 if (!err && !tc) 1836 return IRQ_NONE; 1837 1838 for (i = 0; i < pl08x->vd->channels; i++) { 1839 if ((BIT(i) & err) || (BIT(i) & tc)) { 1840 /* Locate physical channel */ 1841 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1842 struct pl08x_dma_chan *plchan = phychan->serving; 1843 struct pl08x_txd *tx; 1844 1845 if (!plchan) { 1846 dev_err(&pl08x->adev->dev, 1847 "%s Error TC interrupt on unused channel: 0x%08x\n", 1848 __func__, i); 1849 continue; 1850 } 1851 1852 spin_lock(&plchan->vc.lock); 1853 tx = plchan->at; 1854 if (tx && tx->cyclic) { 1855 vchan_cyclic_callback(&tx->vd); 1856 } else if (tx) { 1857 plchan->at = NULL; 1858 /* 1859 * This descriptor is done, release its mux 1860 * reservation. 1861 */ 1862 pl08x_release_mux(plchan); 1863 tx->done = true; 1864 vchan_cookie_complete(&tx->vd); 1865 1866 /* 1867 * And start the next descriptor (if any), 1868 * otherwise free this channel. 1869 */ 1870 if (vchan_next_desc(&plchan->vc)) 1871 pl08x_start_next_txd(plchan); 1872 else 1873 pl08x_phy_free(plchan); 1874 } 1875 spin_unlock(&plchan->vc.lock); 1876 1877 mask |= BIT(i); 1878 } 1879 } 1880 1881 return mask ? IRQ_HANDLED : IRQ_NONE; 1882 } 1883 1884 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) 1885 { 1886 chan->slave = true; 1887 chan->name = chan->cd->bus_id; 1888 chan->cfg.src_addr = chan->cd->addr; 1889 chan->cfg.dst_addr = chan->cd->addr; 1890 } 1891 1892 /* 1893 * Initialise the DMAC memcpy/slave channels. 1894 * Make a local wrapper to hold required data 1895 */ 1896 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1897 struct dma_device *dmadev, unsigned int channels, bool slave) 1898 { 1899 struct pl08x_dma_chan *chan; 1900 int i; 1901 1902 INIT_LIST_HEAD(&dmadev->channels); 1903 1904 /* 1905 * Register as many many memcpy as we have physical channels, 1906 * we won't always be able to use all but the code will have 1907 * to cope with that situation. 1908 */ 1909 for (i = 0; i < channels; i++) { 1910 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1911 if (!chan) 1912 return -ENOMEM; 1913 1914 chan->host = pl08x; 1915 chan->state = PL08X_CHAN_IDLE; 1916 chan->signal = -1; 1917 1918 if (slave) { 1919 chan->cd = &pl08x->pd->slave_channels[i]; 1920 /* 1921 * Some implementations have muxed signals, whereas some 1922 * use a mux in front of the signals and need dynamic 1923 * assignment of signals. 1924 */ 1925 chan->signal = i; 1926 pl08x_dma_slave_init(chan); 1927 } else { 1928 chan->cd = &pl08x->pd->memcpy_channel; 1929 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1930 if (!chan->name) { 1931 kfree(chan); 1932 return -ENOMEM; 1933 } 1934 } 1935 dev_dbg(&pl08x->adev->dev, 1936 "initialize virtual channel \"%s\"\n", 1937 chan->name); 1938 1939 chan->vc.desc_free = pl08x_desc_free; 1940 vchan_init(&chan->vc, dmadev); 1941 } 1942 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1943 i, slave ? "slave" : "memcpy"); 1944 return i; 1945 } 1946 1947 static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1948 { 1949 struct pl08x_dma_chan *chan = NULL; 1950 struct pl08x_dma_chan *next; 1951 1952 list_for_each_entry_safe(chan, 1953 next, &dmadev->channels, vc.chan.device_node) { 1954 list_del(&chan->vc.chan.device_node); 1955 kfree(chan); 1956 } 1957 } 1958 1959 #ifdef CONFIG_DEBUG_FS 1960 static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1961 { 1962 switch (state) { 1963 case PL08X_CHAN_IDLE: 1964 return "idle"; 1965 case PL08X_CHAN_RUNNING: 1966 return "running"; 1967 case PL08X_CHAN_PAUSED: 1968 return "paused"; 1969 case PL08X_CHAN_WAITING: 1970 return "waiting"; 1971 default: 1972 break; 1973 } 1974 return "UNKNOWN STATE"; 1975 } 1976 1977 static int pl08x_debugfs_show(struct seq_file *s, void *data) 1978 { 1979 struct pl08x_driver_data *pl08x = s->private; 1980 struct pl08x_dma_chan *chan; 1981 struct pl08x_phy_chan *ch; 1982 unsigned long flags; 1983 int i; 1984 1985 seq_printf(s, "PL08x physical channels:\n"); 1986 seq_printf(s, "CHANNEL:\tUSER:\n"); 1987 seq_printf(s, "--------\t-----\n"); 1988 for (i = 0; i < pl08x->vd->channels; i++) { 1989 struct pl08x_dma_chan *virt_chan; 1990 1991 ch = &pl08x->phy_chans[i]; 1992 1993 spin_lock_irqsave(&ch->lock, flags); 1994 virt_chan = ch->serving; 1995 1996 seq_printf(s, "%d\t\t%s%s\n", 1997 ch->id, 1998 virt_chan ? virt_chan->name : "(none)", 1999 ch->locked ? " LOCKED" : ""); 2000 2001 spin_unlock_irqrestore(&ch->lock, flags); 2002 } 2003 2004 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 2005 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2006 seq_printf(s, "--------\t------\n"); 2007 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { 2008 seq_printf(s, "%s\t\t%s\n", chan->name, 2009 pl08x_state_str(chan->state)); 2010 } 2011 2012 seq_printf(s, "\nPL08x virtual slave channels:\n"); 2013 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2014 seq_printf(s, "--------\t------\n"); 2015 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2016 seq_printf(s, "%s\t\t%s\n", chan->name, 2017 pl08x_state_str(chan->state)); 2018 } 2019 2020 return 0; 2021 } 2022 2023 static int pl08x_debugfs_open(struct inode *inode, struct file *file) 2024 { 2025 return single_open(file, pl08x_debugfs_show, inode->i_private); 2026 } 2027 2028 static const struct file_operations pl08x_debugfs_operations = { 2029 .open = pl08x_debugfs_open, 2030 .read = seq_read, 2031 .llseek = seq_lseek, 2032 .release = single_release, 2033 }; 2034 2035 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2036 { 2037 /* Expose a simple debugfs interface to view all clocks */ 2038 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 2039 S_IFREG | S_IRUGO, NULL, pl08x, 2040 &pl08x_debugfs_operations); 2041 } 2042 2043 #else 2044 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2045 { 2046 } 2047 #endif 2048 2049 #ifdef CONFIG_OF 2050 static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x, 2051 u32 id) 2052 { 2053 struct pl08x_dma_chan *chan; 2054 2055 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2056 if (chan->signal == id) 2057 return &chan->vc.chan; 2058 } 2059 2060 return NULL; 2061 } 2062 2063 static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec, 2064 struct of_dma *ofdma) 2065 { 2066 struct pl08x_driver_data *pl08x = ofdma->of_dma_data; 2067 struct dma_chan *dma_chan; 2068 struct pl08x_dma_chan *plchan; 2069 2070 if (!pl08x) 2071 return NULL; 2072 2073 if (dma_spec->args_count != 2) { 2074 dev_err(&pl08x->adev->dev, 2075 "DMA channel translation requires two cells\n"); 2076 return NULL; 2077 } 2078 2079 dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); 2080 if (!dma_chan) { 2081 dev_err(&pl08x->adev->dev, 2082 "DMA slave channel not found\n"); 2083 return NULL; 2084 } 2085 2086 plchan = to_pl08x_chan(dma_chan); 2087 dev_dbg(&pl08x->adev->dev, 2088 "translated channel for signal %d\n", 2089 dma_spec->args[0]); 2090 2091 /* Augment channel data for applicable AHB buses */ 2092 plchan->cd->periph_buses = dma_spec->args[1]; 2093 return dma_get_slave_channel(dma_chan); 2094 } 2095 2096 static int pl08x_of_probe(struct amba_device *adev, 2097 struct pl08x_driver_data *pl08x, 2098 struct device_node *np) 2099 { 2100 struct pl08x_platform_data *pd; 2101 struct pl08x_channel_data *chanp = NULL; 2102 u32 cctl_memcpy = 0; 2103 u32 val; 2104 int ret; 2105 int i; 2106 2107 pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); 2108 if (!pd) 2109 return -ENOMEM; 2110 2111 /* Eligible bus masters for fetching LLIs */ 2112 if (of_property_read_bool(np, "lli-bus-interface-ahb1")) 2113 pd->lli_buses |= PL08X_AHB1; 2114 if (of_property_read_bool(np, "lli-bus-interface-ahb2")) 2115 pd->lli_buses |= PL08X_AHB2; 2116 if (!pd->lli_buses) { 2117 dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n"); 2118 pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2; 2119 } 2120 2121 /* Eligible bus masters for memory access */ 2122 if (of_property_read_bool(np, "mem-bus-interface-ahb1")) 2123 pd->mem_buses |= PL08X_AHB1; 2124 if (of_property_read_bool(np, "mem-bus-interface-ahb2")) 2125 pd->mem_buses |= PL08X_AHB2; 2126 if (!pd->mem_buses) { 2127 dev_info(&adev->dev, "no bus masters for memory stated, assume all\n"); 2128 pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2; 2129 } 2130 2131 /* Parse the memcpy channel properties */ 2132 ret = of_property_read_u32(np, "memcpy-burst-size", &val); 2133 if (ret) { 2134 dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n"); 2135 val = 1; 2136 } 2137 switch (val) { 2138 default: 2139 dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n"); 2140 /* Fall through */ 2141 case 1: 2142 cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT | 2143 PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT; 2144 break; 2145 case 4: 2146 cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT | 2147 PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT; 2148 break; 2149 case 8: 2150 cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT | 2151 PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT; 2152 break; 2153 case 16: 2154 cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | 2155 PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT; 2156 break; 2157 case 32: 2158 cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT | 2159 PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT; 2160 break; 2161 case 64: 2162 cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT | 2163 PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT; 2164 break; 2165 case 128: 2166 cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT | 2167 PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT; 2168 break; 2169 case 256: 2170 cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT | 2171 PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT; 2172 break; 2173 } 2174 2175 ret = of_property_read_u32(np, "memcpy-bus-width", &val); 2176 if (ret) { 2177 dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n"); 2178 val = 8; 2179 } 2180 switch (val) { 2181 default: 2182 dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); 2183 /* Fall through */ 2184 case 8: 2185 cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT | 2186 PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 2187 break; 2188 case 16: 2189 cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT | 2190 PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 2191 break; 2192 case 32: 2193 cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | 2194 PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 2195 break; 2196 } 2197 2198 /* This is currently the only thing making sense */ 2199 cctl_memcpy |= PL080_CONTROL_PROT_SYS; 2200 2201 /* Set up memcpy channel */ 2202 pd->memcpy_channel.bus_id = "memcpy"; 2203 pd->memcpy_channel.cctl_memcpy = cctl_memcpy; 2204 /* Use the buses that can access memory, obviously */ 2205 pd->memcpy_channel.periph_buses = pd->mem_buses; 2206 2207 /* 2208 * Allocate channel data for all possible slave channels (one 2209 * for each possible signal), channels will then be allocated 2210 * for a device and have it's AHB interfaces set up at 2211 * translation time. 2212 */ 2213 chanp = devm_kcalloc(&adev->dev, 2214 pl08x->vd->signals, 2215 sizeof(struct pl08x_channel_data), 2216 GFP_KERNEL); 2217 if (!chanp) 2218 return -ENOMEM; 2219 2220 pd->slave_channels = chanp; 2221 for (i = 0; i < pl08x->vd->signals; i++) { 2222 /* chanp->periph_buses will be assigned at translation */ 2223 chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i); 2224 chanp++; 2225 } 2226 pd->num_slave_channels = pl08x->vd->signals; 2227 2228 pl08x->pd = pd; 2229 2230 return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate, 2231 pl08x); 2232 } 2233 #else 2234 static inline int pl08x_of_probe(struct amba_device *adev, 2235 struct pl08x_driver_data *pl08x, 2236 struct device_node *np) 2237 { 2238 return -EINVAL; 2239 } 2240 #endif 2241 2242 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) 2243 { 2244 struct pl08x_driver_data *pl08x; 2245 const struct vendor_data *vd = id->data; 2246 struct device_node *np = adev->dev.of_node; 2247 u32 tsfr_size; 2248 int ret = 0; 2249 int i; 2250 2251 ret = amba_request_regions(adev, NULL); 2252 if (ret) 2253 return ret; 2254 2255 /* Ensure that we can do DMA */ 2256 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32)); 2257 if (ret) 2258 goto out_no_pl08x; 2259 2260 /* Create the driver state holder */ 2261 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); 2262 if (!pl08x) { 2263 ret = -ENOMEM; 2264 goto out_no_pl08x; 2265 } 2266 2267 /* Assign useful pointers to the driver state */ 2268 pl08x->adev = adev; 2269 pl08x->vd = vd; 2270 2271 /* Initialize memcpy engine */ 2272 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 2273 pl08x->memcpy.dev = &adev->dev; 2274 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; 2275 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; 2276 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2277 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 2278 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 2279 pl08x->memcpy.device_config = pl08x_config; 2280 pl08x->memcpy.device_pause = pl08x_pause; 2281 pl08x->memcpy.device_resume = pl08x_resume; 2282 pl08x->memcpy.device_terminate_all = pl08x_terminate_all; 2283 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS; 2284 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS; 2285 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM); 2286 pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 2287 2288 /* Initialize slave engine */ 2289 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 2290 dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask); 2291 pl08x->slave.dev = &adev->dev; 2292 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; 2293 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2294 pl08x->slave.device_tx_status = pl08x_dma_tx_status; 2295 pl08x->slave.device_issue_pending = pl08x_issue_pending; 2296 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 2297 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; 2298 pl08x->slave.device_config = pl08x_config; 2299 pl08x->slave.device_pause = pl08x_pause; 2300 pl08x->slave.device_resume = pl08x_resume; 2301 pl08x->slave.device_terminate_all = pl08x_terminate_all; 2302 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; 2303 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; 2304 pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 2305 pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 2306 2307 /* Get the platform data */ 2308 pl08x->pd = dev_get_platdata(&adev->dev); 2309 if (!pl08x->pd) { 2310 if (np) { 2311 ret = pl08x_of_probe(adev, pl08x, np); 2312 if (ret) 2313 goto out_no_platdata; 2314 } else { 2315 dev_err(&adev->dev, "no platform data supplied\n"); 2316 ret = -EINVAL; 2317 goto out_no_platdata; 2318 } 2319 } else { 2320 pl08x->slave.filter.map = pl08x->pd->slave_map; 2321 pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len; 2322 pl08x->slave.filter.fn = pl08x_filter_fn; 2323 } 2324 2325 /* By default, AHB1 only. If dualmaster, from platform */ 2326 pl08x->lli_buses = PL08X_AHB1; 2327 pl08x->mem_buses = PL08X_AHB1; 2328 if (pl08x->vd->dualmaster) { 2329 pl08x->lli_buses = pl08x->pd->lli_buses; 2330 pl08x->mem_buses = pl08x->pd->mem_buses; 2331 } 2332 2333 if (vd->pl080s) 2334 pl08x->lli_words = PL080S_LLI_WORDS; 2335 else 2336 pl08x->lli_words = PL080_LLI_WORDS; 2337 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32); 2338 2339 /* A DMA memory pool for LLIs, align on 1-byte boundary */ 2340 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 2341 tsfr_size, PL08X_ALIGN, 0); 2342 if (!pl08x->pool) { 2343 ret = -ENOMEM; 2344 goto out_no_lli_pool; 2345 } 2346 2347 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); 2348 if (!pl08x->base) { 2349 ret = -ENOMEM; 2350 goto out_no_ioremap; 2351 } 2352 2353 /* Turn on the PL08x */ 2354 pl08x_ensure_on(pl08x); 2355 2356 /* Attach the interrupt handler */ 2357 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 2358 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 2359 2360 ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); 2361 if (ret) { 2362 dev_err(&adev->dev, "%s failed to request interrupt %d\n", 2363 __func__, adev->irq[0]); 2364 goto out_no_irq; 2365 } 2366 2367 /* Initialize physical channels */ 2368 pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), 2369 GFP_KERNEL); 2370 if (!pl08x->phy_chans) { 2371 ret = -ENOMEM; 2372 goto out_no_phychans; 2373 } 2374 2375 for (i = 0; i < vd->channels; i++) { 2376 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; 2377 2378 ch->id = i; 2379 ch->base = pl08x->base + PL080_Cx_BASE(i); 2380 ch->reg_config = ch->base + vd->config_offset; 2381 spin_lock_init(&ch->lock); 2382 2383 /* 2384 * Nomadik variants can have channels that are locked 2385 * down for the secure world only. Lock up these channels 2386 * by perpetually serving a dummy virtual channel. 2387 */ 2388 if (vd->nomadik) { 2389 u32 val; 2390 2391 val = readl(ch->reg_config); 2392 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) { 2393 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i); 2394 ch->locked = true; 2395 } 2396 } 2397 2398 dev_dbg(&adev->dev, "physical channel %d is %s\n", 2399 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 2400 } 2401 2402 /* Register as many memcpy channels as there are physical channels */ 2403 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 2404 pl08x->vd->channels, false); 2405 if (ret <= 0) { 2406 dev_warn(&pl08x->adev->dev, 2407 "%s failed to enumerate memcpy channels - %d\n", 2408 __func__, ret); 2409 goto out_no_memcpy; 2410 } 2411 2412 /* Register slave channels */ 2413 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2414 pl08x->pd->num_slave_channels, true); 2415 if (ret < 0) { 2416 dev_warn(&pl08x->adev->dev, 2417 "%s failed to enumerate slave channels - %d\n", 2418 __func__, ret); 2419 goto out_no_slave; 2420 } 2421 2422 ret = dma_async_device_register(&pl08x->memcpy); 2423 if (ret) { 2424 dev_warn(&pl08x->adev->dev, 2425 "%s failed to register memcpy as an async device - %d\n", 2426 __func__, ret); 2427 goto out_no_memcpy_reg; 2428 } 2429 2430 ret = dma_async_device_register(&pl08x->slave); 2431 if (ret) { 2432 dev_warn(&pl08x->adev->dev, 2433 "%s failed to register slave as an async device - %d\n", 2434 __func__, ret); 2435 goto out_no_slave_reg; 2436 } 2437 2438 amba_set_drvdata(adev, pl08x); 2439 init_pl08x_debugfs(pl08x); 2440 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n", 2441 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev), 2442 (unsigned long long)adev->res.start, adev->irq[0]); 2443 2444 return 0; 2445 2446 out_no_slave_reg: 2447 dma_async_device_unregister(&pl08x->memcpy); 2448 out_no_memcpy_reg: 2449 pl08x_free_virtual_channels(&pl08x->slave); 2450 out_no_slave: 2451 pl08x_free_virtual_channels(&pl08x->memcpy); 2452 out_no_memcpy: 2453 kfree(pl08x->phy_chans); 2454 out_no_phychans: 2455 free_irq(adev->irq[0], pl08x); 2456 out_no_irq: 2457 iounmap(pl08x->base); 2458 out_no_ioremap: 2459 dma_pool_destroy(pl08x->pool); 2460 out_no_lli_pool: 2461 out_no_platdata: 2462 kfree(pl08x); 2463 out_no_pl08x: 2464 amba_release_regions(adev); 2465 return ret; 2466 } 2467 2468 /* PL080 has 8 channels and the PL080 have just 2 */ 2469 static struct vendor_data vendor_pl080 = { 2470 .config_offset = PL080_CH_CONFIG, 2471 .channels = 8, 2472 .signals = 16, 2473 .dualmaster = true, 2474 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2475 }; 2476 2477 static struct vendor_data vendor_nomadik = { 2478 .config_offset = PL080_CH_CONFIG, 2479 .channels = 8, 2480 .signals = 32, 2481 .dualmaster = true, 2482 .nomadik = true, 2483 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2484 }; 2485 2486 static struct vendor_data vendor_pl080s = { 2487 .config_offset = PL080S_CH_CONFIG, 2488 .channels = 8, 2489 .signals = 32, 2490 .pl080s = true, 2491 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, 2492 }; 2493 2494 static struct vendor_data vendor_pl081 = { 2495 .config_offset = PL080_CH_CONFIG, 2496 .channels = 2, 2497 .signals = 16, 2498 .dualmaster = false, 2499 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2500 }; 2501 2502 static struct amba_id pl08x_ids[] = { 2503 /* Samsung PL080S variant */ 2504 { 2505 .id = 0x0a141080, 2506 .mask = 0xffffffff, 2507 .data = &vendor_pl080s, 2508 }, 2509 /* PL080 */ 2510 { 2511 .id = 0x00041080, 2512 .mask = 0x000fffff, 2513 .data = &vendor_pl080, 2514 }, 2515 /* PL081 */ 2516 { 2517 .id = 0x00041081, 2518 .mask = 0x000fffff, 2519 .data = &vendor_pl081, 2520 }, 2521 /* Nomadik 8815 PL080 variant */ 2522 { 2523 .id = 0x00280080, 2524 .mask = 0x00ffffff, 2525 .data = &vendor_nomadik, 2526 }, 2527 { 0, 0 }, 2528 }; 2529 2530 MODULE_DEVICE_TABLE(amba, pl08x_ids); 2531 2532 static struct amba_driver pl08x_amba_driver = { 2533 .drv.name = DRIVER_NAME, 2534 .id_table = pl08x_ids, 2535 .probe = pl08x_probe, 2536 }; 2537 2538 static int __init pl08x_init(void) 2539 { 2540 int retval; 2541 retval = amba_driver_register(&pl08x_amba_driver); 2542 if (retval) 2543 printk(KERN_WARNING DRIVER_NAME 2544 "failed to register as an AMBA device (%d)\n", 2545 retval); 2546 return retval; 2547 } 2548 subsys_initcall(pl08x_init); 2549