1 /* 2 * Copyright (c) 2006 ARM Ltd. 3 * Copyright (c) 2010 ST-Ericsson SA 4 * Copyirght (c) 2017 Linaro Ltd. 5 * 6 * Author: Peter Pearse <peter.pearse@arm.com> 7 * Author: Linus Walleij <linus.walleij@linaro.org> 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the Free 11 * Software Foundation; either version 2 of the License, or (at your option) 12 * any later version. 13 * 14 * This program is distributed in the hope that it will be useful, but WITHOUT 15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 17 * more details. 18 * 19 * The full GNU General Public License is in this distribution in the file 20 * called COPYING. 21 * 22 * Documentation: ARM DDI 0196G == PL080 23 * Documentation: ARM DDI 0218E == PL081 24 * Documentation: S3C6410 User's Manual == PL080S 25 * 26 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any 27 * channel. 28 * 29 * The PL080 has 8 channels available for simultaneous use, and the PL081 30 * has only two channels. So on these DMA controllers the number of channels 31 * and the number of incoming DMA signals are two totally different things. 32 * It is usually not possible to theoretically handle all physical signals, 33 * so a multiplexing scheme with possible denial of use is necessary. 34 * 35 * The PL080 has a dual bus master, PL081 has a single master. 36 * 37 * PL080S is a version modified by Samsung and used in S3C64xx SoCs. 38 * It differs in following aspects: 39 * - CH_CONFIG register at different offset, 40 * - separate CH_CONTROL2 register for transfer size, 41 * - bigger maximum transfer size, 42 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word, 43 * - no support for peripheral flow control. 44 * 45 * Memory to peripheral transfer may be visualized as 46 * Get data from memory to DMAC 47 * Until no data left 48 * On burst request from peripheral 49 * Destination burst from DMAC to peripheral 50 * Clear burst request 51 * Raise terminal count interrupt 52 * 53 * For peripherals with a FIFO: 54 * Source burst size == half the depth of the peripheral FIFO 55 * Destination burst size == the depth of the peripheral FIFO 56 * 57 * (Bursts are irrelevant for mem to mem transfers - there are no burst 58 * signals, the DMA controller will simply facilitate its AHB master.) 59 * 60 * ASSUMES default (little) endianness for DMA transfers 61 * 62 * The PL08x has two flow control settings: 63 * - DMAC flow control: the transfer size defines the number of transfers 64 * which occur for the current LLI entry, and the DMAC raises TC at the 65 * end of every LLI entry. Observed behaviour shows the DMAC listening 66 * to both the BREQ and SREQ signals (contrary to documented), 67 * transferring data if either is active. The LBREQ and LSREQ signals 68 * are ignored. 69 * 70 * - Peripheral flow control: the transfer size is ignored (and should be 71 * zero). The data is transferred from the current LLI entry, until 72 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 73 * will then move to the next LLI entry. Unsupported by PL080S. 74 */ 75 #include <linux/amba/bus.h> 76 #include <linux/amba/pl08x.h> 77 #include <linux/debugfs.h> 78 #include <linux/delay.h> 79 #include <linux/device.h> 80 #include <linux/dmaengine.h> 81 #include <linux/dmapool.h> 82 #include <linux/dma-mapping.h> 83 #include <linux/export.h> 84 #include <linux/init.h> 85 #include <linux/interrupt.h> 86 #include <linux/module.h> 87 #include <linux/of.h> 88 #include <linux/of_dma.h> 89 #include <linux/pm_runtime.h> 90 #include <linux/seq_file.h> 91 #include <linux/slab.h> 92 #include <linux/amba/pl080.h> 93 94 #include "dmaengine.h" 95 #include "virt-dma.h" 96 97 #define DRIVER_NAME "pl08xdmac" 98 99 #define PL80X_DMA_BUSWIDTHS \ 100 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 101 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 102 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 103 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) 104 105 static struct amba_driver pl08x_amba_driver; 106 struct pl08x_driver_data; 107 108 /** 109 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 110 * @config_offset: offset to the configuration register 111 * @channels: the number of channels available in this variant 112 * @signals: the number of request signals available from the hardware 113 * @dualmaster: whether this version supports dual AHB masters or not. 114 * @nomadik: whether this variant is a ST Microelectronics Nomadik, where the 115 * channels have Nomadik security extension bits that need to be checked 116 * for permission before use and some registers are missing 117 * @pl080s: whether this variant is a Samsung PL080S, which has separate 118 * register and LLI word for transfer size. 119 * @ftdmac020: whether this variant is a Faraday Technology FTDMAC020 120 * @max_transfer_size: the maximum single element transfer size for this 121 * PL08x variant. 122 */ 123 struct vendor_data { 124 u8 config_offset; 125 u8 channels; 126 u8 signals; 127 bool dualmaster; 128 bool nomadik; 129 bool pl080s; 130 bool ftdmac020; 131 u32 max_transfer_size; 132 }; 133 134 /** 135 * struct pl08x_bus_data - information of source or destination 136 * busses for a transfer 137 * @addr: current address 138 * @maxwidth: the maximum width of a transfer on this bus 139 * @buswidth: the width of this bus in bytes: 1, 2 or 4 140 */ 141 struct pl08x_bus_data { 142 dma_addr_t addr; 143 u8 maxwidth; 144 u8 buswidth; 145 }; 146 147 #define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth) 148 149 /** 150 * struct pl08x_phy_chan - holder for the physical channels 151 * @id: physical index to this channel 152 * @base: memory base address for this physical channel 153 * @reg_config: configuration address for this physical channel 154 * @reg_control: control address for this physical channel 155 * @reg_src: transfer source address register 156 * @reg_dst: transfer destination address register 157 * @reg_lli: transfer LLI address register 158 * @reg_busy: if the variant has a special per-channel busy register, 159 * this contains a pointer to it 160 * @lock: a lock to use when altering an instance of this struct 161 * @serving: the virtual channel currently being served by this physical 162 * channel 163 * @locked: channel unavailable for the system, e.g. dedicated to secure 164 * world 165 * @ftdmac020: channel is on a FTDMAC020 166 * @pl080s: channel is on a PL08s 167 */ 168 struct pl08x_phy_chan { 169 unsigned int id; 170 void __iomem *base; 171 void __iomem *reg_config; 172 void __iomem *reg_control; 173 void __iomem *reg_src; 174 void __iomem *reg_dst; 175 void __iomem *reg_lli; 176 void __iomem *reg_busy; 177 spinlock_t lock; 178 struct pl08x_dma_chan *serving; 179 bool locked; 180 bool ftdmac020; 181 bool pl080s; 182 }; 183 184 /** 185 * struct pl08x_sg - structure containing data per sg 186 * @src_addr: src address of sg 187 * @dst_addr: dst address of sg 188 * @len: transfer len in bytes 189 * @node: node for txd's dsg_list 190 */ 191 struct pl08x_sg { 192 dma_addr_t src_addr; 193 dma_addr_t dst_addr; 194 size_t len; 195 struct list_head node; 196 }; 197 198 /** 199 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor 200 * @vd: virtual DMA descriptor 201 * @dsg_list: list of children sg's 202 * @llis_bus: DMA memory address (physical) start for the LLIs 203 * @llis_va: virtual memory address start for the LLIs 204 * @cctl: control reg values for current txd 205 * @ccfg: config reg values for current txd 206 * @done: this marks completed descriptors, which should not have their 207 * mux released. 208 * @cyclic: indicate cyclic transfers 209 */ 210 struct pl08x_txd { 211 struct virt_dma_desc vd; 212 struct list_head dsg_list; 213 dma_addr_t llis_bus; 214 u32 *llis_va; 215 /* Default cctl value for LLIs */ 216 u32 cctl; 217 /* 218 * Settings to be put into the physical channel when we 219 * trigger this txd. Other registers are in llis_va[0]. 220 */ 221 u32 ccfg; 222 bool done; 223 bool cyclic; 224 }; 225 226 /** 227 * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel 228 * states 229 * @PL08X_CHAN_IDLE: the channel is idle 230 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport 231 * channel and is running a transfer on it 232 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport 233 * channel, but the transfer is currently paused 234 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport 235 * channel to become available (only pertains to memcpy channels) 236 */ 237 enum pl08x_dma_chan_state { 238 PL08X_CHAN_IDLE, 239 PL08X_CHAN_RUNNING, 240 PL08X_CHAN_PAUSED, 241 PL08X_CHAN_WAITING, 242 }; 243 244 /** 245 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel 246 * @vc: wrappped virtual channel 247 * @phychan: the physical channel utilized by this channel, if there is one 248 * @name: name of channel 249 * @cd: channel platform data 250 * @cfg: slave configuration 251 * @at: active transaction on this channel 252 * @host: a pointer to the host (internal use) 253 * @state: whether the channel is idle, paused, running etc 254 * @slave: whether this channel is a device (slave) or for memcpy 255 * @signal: the physical DMA request signal which this channel is using 256 * @mux_use: count of descriptors using this DMA request signal setting 257 * @waiting_at: time in jiffies when this channel moved to waiting state 258 */ 259 struct pl08x_dma_chan { 260 struct virt_dma_chan vc; 261 struct pl08x_phy_chan *phychan; 262 const char *name; 263 struct pl08x_channel_data *cd; 264 struct dma_slave_config cfg; 265 struct pl08x_txd *at; 266 struct pl08x_driver_data *host; 267 enum pl08x_dma_chan_state state; 268 bool slave; 269 int signal; 270 unsigned mux_use; 271 unsigned long waiting_at; 272 }; 273 274 /** 275 * struct pl08x_driver_data - the local state holder for the PL08x 276 * @slave: optional slave engine for this instance 277 * @memcpy: memcpy engine for this instance 278 * @has_slave: the PL08x has a slave engine (routed signals) 279 * @base: virtual memory base (remapped) for the PL08x 280 * @adev: the corresponding AMBA (PrimeCell) bus entry 281 * @vd: vendor data for this PL08x variant 282 * @pd: platform data passed in from the platform/machine 283 * @phy_chans: array of data for the physical channels 284 * @pool: a pool for the LLI descriptors 285 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI 286 * fetches 287 * @mem_buses: set to indicate memory transfers on AHB2. 288 * @lli_words: how many words are used in each LLI item for this variant 289 */ 290 struct pl08x_driver_data { 291 struct dma_device slave; 292 struct dma_device memcpy; 293 bool has_slave; 294 void __iomem *base; 295 struct amba_device *adev; 296 const struct vendor_data *vd; 297 struct pl08x_platform_data *pd; 298 struct pl08x_phy_chan *phy_chans; 299 struct dma_pool *pool; 300 u8 lli_buses; 301 u8 mem_buses; 302 u8 lli_words; 303 }; 304 305 /* 306 * PL08X specific defines 307 */ 308 309 /* The order of words in an LLI. */ 310 #define PL080_LLI_SRC 0 311 #define PL080_LLI_DST 1 312 #define PL080_LLI_LLI 2 313 #define PL080_LLI_CCTL 3 314 #define PL080S_LLI_CCTL2 4 315 316 /* Total words in an LLI. */ 317 #define PL080_LLI_WORDS 4 318 #define PL080S_LLI_WORDS 8 319 320 /* 321 * Number of LLIs in each LLI buffer allocated for one transfer 322 * (maximum times we call dma_pool_alloc on this pool without freeing) 323 */ 324 #define MAX_NUM_TSFR_LLIS 512 325 #define PL08X_ALIGN 8 326 327 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 328 { 329 return container_of(chan, struct pl08x_dma_chan, vc.chan); 330 } 331 332 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) 333 { 334 return container_of(tx, struct pl08x_txd, vd.tx); 335 } 336 337 /* 338 * Mux handling. 339 * 340 * This gives us the DMA request input to the PL08x primecell which the 341 * peripheral described by the channel data will be routed to, possibly 342 * via a board/SoC specific external MUX. One important point to note 343 * here is that this does not depend on the physical channel. 344 */ 345 static int pl08x_request_mux(struct pl08x_dma_chan *plchan) 346 { 347 const struct pl08x_platform_data *pd = plchan->host->pd; 348 int ret; 349 350 if (plchan->mux_use++ == 0 && pd->get_xfer_signal) { 351 ret = pd->get_xfer_signal(plchan->cd); 352 if (ret < 0) { 353 plchan->mux_use = 0; 354 return ret; 355 } 356 357 plchan->signal = ret; 358 } 359 return 0; 360 } 361 362 static void pl08x_release_mux(struct pl08x_dma_chan *plchan) 363 { 364 const struct pl08x_platform_data *pd = plchan->host->pd; 365 366 if (plchan->signal >= 0) { 367 WARN_ON(plchan->mux_use == 0); 368 369 if (--plchan->mux_use == 0 && pd->put_xfer_signal) { 370 pd->put_xfer_signal(plchan->cd, plchan->signal); 371 plchan->signal = -1; 372 } 373 } 374 } 375 376 /* 377 * Physical channel handling 378 */ 379 380 /* Whether a certain channel is busy or not */ 381 static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) 382 { 383 unsigned int val; 384 385 /* If we have a special busy register, take a shortcut */ 386 if (ch->reg_busy) { 387 val = readl(ch->reg_busy); 388 return !!(val & BIT(ch->id)); 389 } 390 val = readl(ch->reg_config); 391 return val & PL080_CONFIG_ACTIVE; 392 } 393 394 /* 395 * pl08x_write_lli() - Write an LLI into the DMA controller. 396 * 397 * The PL08x derivatives support linked lists, but the first item of the 398 * list containing the source, destination, control word and next LLI is 399 * ignored. Instead the driver has to write those values directly into the 400 * SRC, DST, LLI and control registers. On FTDMAC020 also the SIZE 401 * register need to be set up for the first transfer. 402 */ 403 static void pl08x_write_lli(struct pl08x_driver_data *pl08x, 404 struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg) 405 { 406 if (pl08x->vd->pl080s) 407 dev_vdbg(&pl08x->adev->dev, 408 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 409 "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n", 410 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], 411 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], 412 lli[PL080S_LLI_CCTL2], ccfg); 413 else 414 dev_vdbg(&pl08x->adev->dev, 415 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 416 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", 417 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], 418 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg); 419 420 writel_relaxed(lli[PL080_LLI_SRC], phychan->reg_src); 421 writel_relaxed(lli[PL080_LLI_DST], phychan->reg_dst); 422 writel_relaxed(lli[PL080_LLI_LLI], phychan->reg_lli); 423 424 /* 425 * The FTMAC020 has a different layout in the CCTL word of the LLI 426 * and the CCTL register which is split in CSR and SIZE registers. 427 * Convert the LLI item CCTL into the proper values to write into 428 * the CSR and SIZE registers. 429 */ 430 if (phychan->ftdmac020) { 431 u32 llictl = lli[PL080_LLI_CCTL]; 432 u32 val = 0; 433 434 /* Write the transfer size (12 bits) to the size register */ 435 writel_relaxed(llictl & FTDMAC020_LLI_TRANSFER_SIZE_MASK, 436 phychan->base + FTDMAC020_CH_SIZE); 437 /* 438 * Then write the control bits 28..16 to the control register 439 * by shuffleing the bits around to where they are in the 440 * main register. The mapping is as follows: 441 * Bit 28: TC_MSK - mask on all except last LLI 442 * Bit 27..25: SRC_WIDTH 443 * Bit 24..22: DST_WIDTH 444 * Bit 21..20: SRCAD_CTRL 445 * Bit 19..17: DSTAD_CTRL 446 * Bit 17: SRC_SEL 447 * Bit 16: DST_SEL 448 */ 449 if (llictl & FTDMAC020_LLI_TC_MSK) 450 val |= FTDMAC020_CH_CSR_TC_MSK; 451 val |= ((llictl & FTDMAC020_LLI_SRC_WIDTH_MSK) >> 452 (FTDMAC020_LLI_SRC_WIDTH_SHIFT - 453 FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT)); 454 val |= ((llictl & FTDMAC020_LLI_DST_WIDTH_MSK) >> 455 (FTDMAC020_LLI_DST_WIDTH_SHIFT - 456 FTDMAC020_CH_CSR_DST_WIDTH_SHIFT)); 457 val |= ((llictl & FTDMAC020_LLI_SRCAD_CTL_MSK) >> 458 (FTDMAC020_LLI_SRCAD_CTL_SHIFT - 459 FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT)); 460 val |= ((llictl & FTDMAC020_LLI_DSTAD_CTL_MSK) >> 461 (FTDMAC020_LLI_DSTAD_CTL_SHIFT - 462 FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT)); 463 if (llictl & FTDMAC020_LLI_SRC_SEL) 464 val |= FTDMAC020_CH_CSR_SRC_SEL; 465 if (llictl & FTDMAC020_LLI_DST_SEL) 466 val |= FTDMAC020_CH_CSR_DST_SEL; 467 468 /* 469 * Set up the bits that exist in the CSR but are not 470 * part the LLI, i.e. only gets written to the control 471 * register right here. 472 * 473 * FIXME: do not just handle memcpy, also handle slave DMA. 474 */ 475 switch (pl08x->pd->memcpy_burst_size) { 476 default: 477 case PL08X_BURST_SZ_1: 478 val |= PL080_BSIZE_1 << 479 FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; 480 break; 481 case PL08X_BURST_SZ_4: 482 val |= PL080_BSIZE_4 << 483 FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; 484 break; 485 case PL08X_BURST_SZ_8: 486 val |= PL080_BSIZE_8 << 487 FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; 488 break; 489 case PL08X_BURST_SZ_16: 490 val |= PL080_BSIZE_16 << 491 FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; 492 break; 493 case PL08X_BURST_SZ_32: 494 val |= PL080_BSIZE_32 << 495 FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; 496 break; 497 case PL08X_BURST_SZ_64: 498 val |= PL080_BSIZE_64 << 499 FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; 500 break; 501 case PL08X_BURST_SZ_128: 502 val |= PL080_BSIZE_128 << 503 FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; 504 break; 505 case PL08X_BURST_SZ_256: 506 val |= PL080_BSIZE_256 << 507 FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; 508 break; 509 } 510 511 /* Protection flags */ 512 if (pl08x->pd->memcpy_prot_buff) 513 val |= FTDMAC020_CH_CSR_PROT2; 514 if (pl08x->pd->memcpy_prot_cache) 515 val |= FTDMAC020_CH_CSR_PROT3; 516 /* We are the kernel, so we are in privileged mode */ 517 val |= FTDMAC020_CH_CSR_PROT1; 518 519 writel_relaxed(val, phychan->reg_control); 520 } else { 521 /* Bits are just identical */ 522 writel_relaxed(lli[PL080_LLI_CCTL], phychan->reg_control); 523 } 524 525 /* Second control word on the PL080s */ 526 if (pl08x->vd->pl080s) 527 writel_relaxed(lli[PL080S_LLI_CCTL2], 528 phychan->base + PL080S_CH_CONTROL2); 529 530 writel(ccfg, phychan->reg_config); 531 } 532 533 /* 534 * Set the initial DMA register values i.e. those for the first LLI 535 * The next LLI pointer and the configuration interrupt bit have 536 * been set when the LLIs were constructed. Poke them into the hardware 537 * and start the transfer. 538 */ 539 static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) 540 { 541 struct pl08x_driver_data *pl08x = plchan->host; 542 struct pl08x_phy_chan *phychan = plchan->phychan; 543 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); 544 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 545 u32 val; 546 547 list_del(&txd->vd.node); 548 549 plchan->at = txd; 550 551 /* Wait for channel inactive */ 552 while (pl08x_phy_channel_busy(phychan)) 553 cpu_relax(); 554 555 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg); 556 557 /* Enable the DMA channel */ 558 /* Do not access config register until channel shows as disabled */ 559 while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id)) 560 cpu_relax(); 561 562 /* Do not access config register until channel shows as inactive */ 563 if (phychan->ftdmac020) { 564 val = readl(phychan->reg_config); 565 while (val & FTDMAC020_CH_CFG_BUSY) 566 val = readl(phychan->reg_config); 567 568 val = readl(phychan->reg_control); 569 while (val & FTDMAC020_CH_CSR_EN) 570 val = readl(phychan->reg_control); 571 572 writel(val | FTDMAC020_CH_CSR_EN, 573 phychan->reg_control); 574 } else { 575 val = readl(phychan->reg_config); 576 while ((val & PL080_CONFIG_ACTIVE) || 577 (val & PL080_CONFIG_ENABLE)) 578 val = readl(phychan->reg_config); 579 580 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config); 581 } 582 } 583 584 /* 585 * Pause the channel by setting the HALT bit. 586 * 587 * For M->P transfers, pause the DMAC first and then stop the peripheral - 588 * the FIFO can only drain if the peripheral is still requesting data. 589 * (note: this can still timeout if the DMAC FIFO never drains of data.) 590 * 591 * For P->M transfers, disable the peripheral first to stop it filling 592 * the DMAC FIFO, and then pause the DMAC. 593 */ 594 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 595 { 596 u32 val; 597 int timeout; 598 599 if (ch->ftdmac020) { 600 /* Use the enable bit on the FTDMAC020 */ 601 val = readl(ch->reg_control); 602 val &= ~FTDMAC020_CH_CSR_EN; 603 writel(val, ch->reg_control); 604 return; 605 } 606 607 /* Set the HALT bit and wait for the FIFO to drain */ 608 val = readl(ch->reg_config); 609 val |= PL080_CONFIG_HALT; 610 writel(val, ch->reg_config); 611 612 /* Wait for channel inactive */ 613 for (timeout = 1000; timeout; timeout--) { 614 if (!pl08x_phy_channel_busy(ch)) 615 break; 616 udelay(1); 617 } 618 if (pl08x_phy_channel_busy(ch)) 619 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); 620 } 621 622 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 623 { 624 u32 val; 625 626 /* Use the enable bit on the FTDMAC020 */ 627 if (ch->ftdmac020) { 628 val = readl(ch->reg_control); 629 val |= FTDMAC020_CH_CSR_EN; 630 writel(val, ch->reg_control); 631 return; 632 } 633 634 /* Clear the HALT bit */ 635 val = readl(ch->reg_config); 636 val &= ~PL080_CONFIG_HALT; 637 writel(val, ch->reg_config); 638 } 639 640 /* 641 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 642 * clears any pending interrupt status. This should not be used for 643 * an on-going transfer, but as a method of shutting down a channel 644 * (eg, when it's no longer used) or terminating a transfer. 645 */ 646 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, 647 struct pl08x_phy_chan *ch) 648 { 649 u32 val; 650 651 /* The layout for the FTDMAC020 is different */ 652 if (ch->ftdmac020) { 653 /* Disable all interrupts */ 654 val = readl(ch->reg_config); 655 val |= (FTDMAC020_CH_CFG_INT_ABT_MASK | 656 FTDMAC020_CH_CFG_INT_ERR_MASK | 657 FTDMAC020_CH_CFG_INT_TC_MASK); 658 writel(val, ch->reg_config); 659 660 /* Abort and disable channel */ 661 val = readl(ch->reg_control); 662 val &= ~FTDMAC020_CH_CSR_EN; 663 val |= FTDMAC020_CH_CSR_ABT; 664 writel(val, ch->reg_control); 665 666 /* Clear ABT and ERR interrupt flags */ 667 writel(BIT(ch->id) | BIT(ch->id + 16), 668 pl08x->base + PL080_ERR_CLEAR); 669 writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR); 670 671 return; 672 } 673 674 val = readl(ch->reg_config); 675 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | 676 PL080_CONFIG_TC_IRQ_MASK); 677 writel(val, ch->reg_config); 678 679 writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR); 680 writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR); 681 } 682 683 static u32 get_bytes_in_phy_channel(struct pl08x_phy_chan *ch) 684 { 685 u32 val; 686 u32 bytes; 687 688 if (ch->ftdmac020) { 689 bytes = readl(ch->base + FTDMAC020_CH_SIZE); 690 691 val = readl(ch->reg_control); 692 val &= FTDMAC020_CH_CSR_SRC_WIDTH_MSK; 693 val >>= FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT; 694 } else if (ch->pl080s) { 695 val = readl(ch->base + PL080S_CH_CONTROL2); 696 bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK; 697 698 val = readl(ch->reg_control); 699 val &= PL080_CONTROL_SWIDTH_MASK; 700 val >>= PL080_CONTROL_SWIDTH_SHIFT; 701 } else { 702 /* Plain PL08x */ 703 val = readl(ch->reg_control); 704 bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK; 705 706 val &= PL080_CONTROL_SWIDTH_MASK; 707 val >>= PL080_CONTROL_SWIDTH_SHIFT; 708 } 709 710 switch (val) { 711 case PL080_WIDTH_8BIT: 712 break; 713 case PL080_WIDTH_16BIT: 714 bytes *= 2; 715 break; 716 case PL080_WIDTH_32BIT: 717 bytes *= 4; 718 break; 719 } 720 return bytes; 721 } 722 723 static u32 get_bytes_in_lli(struct pl08x_phy_chan *ch, const u32 *llis_va) 724 { 725 u32 val; 726 u32 bytes; 727 728 if (ch->ftdmac020) { 729 val = llis_va[PL080_LLI_CCTL]; 730 bytes = val & FTDMAC020_LLI_TRANSFER_SIZE_MASK; 731 732 val = llis_va[PL080_LLI_CCTL]; 733 val &= FTDMAC020_LLI_SRC_WIDTH_MSK; 734 val >>= FTDMAC020_LLI_SRC_WIDTH_SHIFT; 735 } else if (ch->pl080s) { 736 val = llis_va[PL080S_LLI_CCTL2]; 737 bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK; 738 739 val = llis_va[PL080_LLI_CCTL]; 740 val &= PL080_CONTROL_SWIDTH_MASK; 741 val >>= PL080_CONTROL_SWIDTH_SHIFT; 742 } else { 743 /* Plain PL08x */ 744 val = llis_va[PL080_LLI_CCTL]; 745 bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK; 746 747 val &= PL080_CONTROL_SWIDTH_MASK; 748 val >>= PL080_CONTROL_SWIDTH_SHIFT; 749 } 750 751 switch (val) { 752 case PL080_WIDTH_8BIT: 753 break; 754 case PL080_WIDTH_16BIT: 755 bytes *= 2; 756 break; 757 case PL080_WIDTH_32BIT: 758 bytes *= 4; 759 break; 760 } 761 return bytes; 762 } 763 764 /* The channel should be paused when calling this */ 765 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 766 { 767 struct pl08x_driver_data *pl08x = plchan->host; 768 const u32 *llis_va, *llis_va_limit; 769 struct pl08x_phy_chan *ch; 770 dma_addr_t llis_bus; 771 struct pl08x_txd *txd; 772 u32 llis_max_words; 773 size_t bytes; 774 u32 clli; 775 776 ch = plchan->phychan; 777 txd = plchan->at; 778 779 if (!ch || !txd) 780 return 0; 781 782 /* 783 * Follow the LLIs to get the number of remaining 784 * bytes in the currently active transaction. 785 */ 786 clli = readl(ch->reg_lli) & ~PL080_LLI_LM_AHB2; 787 788 /* First get the remaining bytes in the active transfer */ 789 bytes = get_bytes_in_phy_channel(ch); 790 791 if (!clli) 792 return bytes; 793 794 llis_va = txd->llis_va; 795 llis_bus = txd->llis_bus; 796 797 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS; 798 BUG_ON(clli < llis_bus || clli >= llis_bus + 799 sizeof(u32) * llis_max_words); 800 801 /* 802 * Locate the next LLI - as this is an array, 803 * it's simple maths to find. 804 */ 805 llis_va += (clli - llis_bus) / sizeof(u32); 806 807 llis_va_limit = llis_va + llis_max_words; 808 809 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) { 810 bytes += get_bytes_in_lli(ch, llis_va); 811 812 /* 813 * A LLI pointer going backward terminates the LLI list 814 */ 815 if (llis_va[PL080_LLI_LLI] <= clli) 816 break; 817 } 818 819 return bytes; 820 } 821 822 /* 823 * Allocate a physical channel for a virtual channel 824 * 825 * Try to locate a physical channel to be used for this transfer. If all 826 * are taken return NULL and the requester will have to cope by using 827 * some fallback PIO mode or retrying later. 828 */ 829 static struct pl08x_phy_chan * 830 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 831 struct pl08x_dma_chan *virt_chan) 832 { 833 struct pl08x_phy_chan *ch = NULL; 834 unsigned long flags; 835 int i; 836 837 for (i = 0; i < pl08x->vd->channels; i++) { 838 ch = &pl08x->phy_chans[i]; 839 840 spin_lock_irqsave(&ch->lock, flags); 841 842 if (!ch->locked && !ch->serving) { 843 ch->serving = virt_chan; 844 spin_unlock_irqrestore(&ch->lock, flags); 845 break; 846 } 847 848 spin_unlock_irqrestore(&ch->lock, flags); 849 } 850 851 if (i == pl08x->vd->channels) { 852 /* No physical channel available, cope with it */ 853 return NULL; 854 } 855 856 return ch; 857 } 858 859 /* Mark the physical channel as free. Note, this write is atomic. */ 860 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 861 struct pl08x_phy_chan *ch) 862 { 863 ch->serving = NULL; 864 } 865 866 /* 867 * Try to allocate a physical channel. When successful, assign it to 868 * this virtual channel, and initiate the next descriptor. The 869 * virtual channel lock must be held at this point. 870 */ 871 static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) 872 { 873 struct pl08x_driver_data *pl08x = plchan->host; 874 struct pl08x_phy_chan *ch; 875 876 ch = pl08x_get_phy_channel(pl08x, plchan); 877 if (!ch) { 878 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); 879 plchan->state = PL08X_CHAN_WAITING; 880 plchan->waiting_at = jiffies; 881 return; 882 } 883 884 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", 885 ch->id, plchan->name); 886 887 plchan->phychan = ch; 888 plchan->state = PL08X_CHAN_RUNNING; 889 pl08x_start_next_txd(plchan); 890 } 891 892 static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, 893 struct pl08x_dma_chan *plchan) 894 { 895 struct pl08x_driver_data *pl08x = plchan->host; 896 897 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", 898 ch->id, plchan->name); 899 900 /* 901 * We do this without taking the lock; we're really only concerned 902 * about whether this pointer is NULL or not, and we're guaranteed 903 * that this will only be called when it _already_ is non-NULL. 904 */ 905 ch->serving = plchan; 906 plchan->phychan = ch; 907 plchan->state = PL08X_CHAN_RUNNING; 908 pl08x_start_next_txd(plchan); 909 } 910 911 /* 912 * Free a physical DMA channel, potentially reallocating it to another 913 * virtual channel if we have any pending. 914 */ 915 static void pl08x_phy_free(struct pl08x_dma_chan *plchan) 916 { 917 struct pl08x_driver_data *pl08x = plchan->host; 918 struct pl08x_dma_chan *p, *next; 919 unsigned long waiting_at; 920 retry: 921 next = NULL; 922 waiting_at = jiffies; 923 924 /* 925 * Find a waiting virtual channel for the next transfer. 926 * To be fair, time when each channel reached waiting state is compared 927 * to select channel that is waiting for the longest time. 928 */ 929 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) 930 if (p->state == PL08X_CHAN_WAITING && 931 p->waiting_at <= waiting_at) { 932 next = p; 933 waiting_at = p->waiting_at; 934 } 935 936 if (!next && pl08x->has_slave) { 937 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) 938 if (p->state == PL08X_CHAN_WAITING && 939 p->waiting_at <= waiting_at) { 940 next = p; 941 waiting_at = p->waiting_at; 942 } 943 } 944 945 /* Ensure that the physical channel is stopped */ 946 pl08x_terminate_phy_chan(pl08x, plchan->phychan); 947 948 if (next) { 949 bool success; 950 951 /* 952 * Eww. We know this isn't going to deadlock 953 * but lockdep probably doesn't. 954 */ 955 spin_lock(&next->vc.lock); 956 /* Re-check the state now that we have the lock */ 957 success = next->state == PL08X_CHAN_WAITING; 958 if (success) 959 pl08x_phy_reassign_start(plchan->phychan, next); 960 spin_unlock(&next->vc.lock); 961 962 /* If the state changed, try to find another channel */ 963 if (!success) 964 goto retry; 965 } else { 966 /* No more jobs, so free up the physical channel */ 967 pl08x_put_phy_channel(pl08x, plchan->phychan); 968 } 969 970 plchan->phychan = NULL; 971 plchan->state = PL08X_CHAN_IDLE; 972 } 973 974 /* 975 * LLI handling 976 */ 977 978 static inline unsigned int 979 pl08x_get_bytes_for_lli(struct pl08x_driver_data *pl08x, 980 u32 cctl, 981 bool source) 982 { 983 u32 val; 984 985 if (pl08x->vd->ftdmac020) { 986 if (source) 987 val = (cctl & FTDMAC020_LLI_SRC_WIDTH_MSK) >> 988 FTDMAC020_LLI_SRC_WIDTH_SHIFT; 989 else 990 val = (cctl & FTDMAC020_LLI_DST_WIDTH_MSK) >> 991 FTDMAC020_LLI_DST_WIDTH_SHIFT; 992 } else { 993 if (source) 994 val = (cctl & PL080_CONTROL_SWIDTH_MASK) >> 995 PL080_CONTROL_SWIDTH_SHIFT; 996 else 997 val = (cctl & PL080_CONTROL_DWIDTH_MASK) >> 998 PL080_CONTROL_DWIDTH_SHIFT; 999 } 1000 1001 switch (val) { 1002 case PL080_WIDTH_8BIT: 1003 return 1; 1004 case PL080_WIDTH_16BIT: 1005 return 2; 1006 case PL080_WIDTH_32BIT: 1007 return 4; 1008 default: 1009 break; 1010 } 1011 BUG(); 1012 return 0; 1013 } 1014 1015 static inline u32 pl08x_lli_control_bits(struct pl08x_driver_data *pl08x, 1016 u32 cctl, 1017 u8 srcwidth, u8 dstwidth, 1018 size_t tsize) 1019 { 1020 u32 retbits = cctl; 1021 1022 /* 1023 * Remove all src, dst and transfer size bits, then set the 1024 * width and size according to the parameters. The bit offsets 1025 * are different in the FTDMAC020 so we need to accound for this. 1026 */ 1027 if (pl08x->vd->ftdmac020) { 1028 retbits &= ~FTDMAC020_LLI_DST_WIDTH_MSK; 1029 retbits &= ~FTDMAC020_LLI_SRC_WIDTH_MSK; 1030 retbits &= ~FTDMAC020_LLI_TRANSFER_SIZE_MASK; 1031 1032 switch (srcwidth) { 1033 case 1: 1034 retbits |= PL080_WIDTH_8BIT << 1035 FTDMAC020_LLI_SRC_WIDTH_SHIFT; 1036 break; 1037 case 2: 1038 retbits |= PL080_WIDTH_16BIT << 1039 FTDMAC020_LLI_SRC_WIDTH_SHIFT; 1040 break; 1041 case 4: 1042 retbits |= PL080_WIDTH_32BIT << 1043 FTDMAC020_LLI_SRC_WIDTH_SHIFT; 1044 break; 1045 default: 1046 BUG(); 1047 break; 1048 } 1049 1050 switch (dstwidth) { 1051 case 1: 1052 retbits |= PL080_WIDTH_8BIT << 1053 FTDMAC020_LLI_DST_WIDTH_SHIFT; 1054 break; 1055 case 2: 1056 retbits |= PL080_WIDTH_16BIT << 1057 FTDMAC020_LLI_DST_WIDTH_SHIFT; 1058 break; 1059 case 4: 1060 retbits |= PL080_WIDTH_32BIT << 1061 FTDMAC020_LLI_DST_WIDTH_SHIFT; 1062 break; 1063 default: 1064 BUG(); 1065 break; 1066 } 1067 1068 tsize &= FTDMAC020_LLI_TRANSFER_SIZE_MASK; 1069 retbits |= tsize << FTDMAC020_LLI_TRANSFER_SIZE_SHIFT; 1070 } else { 1071 retbits &= ~PL080_CONTROL_DWIDTH_MASK; 1072 retbits &= ~PL080_CONTROL_SWIDTH_MASK; 1073 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 1074 1075 switch (srcwidth) { 1076 case 1: 1077 retbits |= PL080_WIDTH_8BIT << 1078 PL080_CONTROL_SWIDTH_SHIFT; 1079 break; 1080 case 2: 1081 retbits |= PL080_WIDTH_16BIT << 1082 PL080_CONTROL_SWIDTH_SHIFT; 1083 break; 1084 case 4: 1085 retbits |= PL080_WIDTH_32BIT << 1086 PL080_CONTROL_SWIDTH_SHIFT; 1087 break; 1088 default: 1089 BUG(); 1090 break; 1091 } 1092 1093 switch (dstwidth) { 1094 case 1: 1095 retbits |= PL080_WIDTH_8BIT << 1096 PL080_CONTROL_DWIDTH_SHIFT; 1097 break; 1098 case 2: 1099 retbits |= PL080_WIDTH_16BIT << 1100 PL080_CONTROL_DWIDTH_SHIFT; 1101 break; 1102 case 4: 1103 retbits |= PL080_WIDTH_32BIT << 1104 PL080_CONTROL_DWIDTH_SHIFT; 1105 break; 1106 default: 1107 BUG(); 1108 break; 1109 } 1110 1111 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK; 1112 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 1113 } 1114 1115 return retbits; 1116 } 1117 1118 struct pl08x_lli_build_data { 1119 struct pl08x_txd *txd; 1120 struct pl08x_bus_data srcbus; 1121 struct pl08x_bus_data dstbus; 1122 size_t remainder; 1123 u32 lli_bus; 1124 }; 1125 1126 /* 1127 * Autoselect a master bus to use for the transfer. Slave will be the chosen as 1128 * victim in case src & dest are not similarly aligned. i.e. If after aligning 1129 * masters address with width requirements of transfer (by sending few byte by 1130 * byte data), slave is still not aligned, then its width will be reduced to 1131 * BYTE. 1132 * - prefers the destination bus if both available 1133 * - prefers bus with fixed address (i.e. peripheral) 1134 */ 1135 static void pl08x_choose_master_bus(struct pl08x_driver_data *pl08x, 1136 struct pl08x_lli_build_data *bd, 1137 struct pl08x_bus_data **mbus, 1138 struct pl08x_bus_data **sbus, 1139 u32 cctl) 1140 { 1141 bool dst_incr; 1142 bool src_incr; 1143 1144 /* 1145 * The FTDMAC020 only supports memory-to-memory transfer, so 1146 * source and destination always increase. 1147 */ 1148 if (pl08x->vd->ftdmac020) { 1149 dst_incr = true; 1150 src_incr = true; 1151 } else { 1152 dst_incr = !!(cctl & PL080_CONTROL_DST_INCR); 1153 src_incr = !!(cctl & PL080_CONTROL_SRC_INCR); 1154 } 1155 1156 /* 1157 * If either bus is not advancing, i.e. it is a peripheral, that 1158 * one becomes master 1159 */ 1160 if (!dst_incr) { 1161 *mbus = &bd->dstbus; 1162 *sbus = &bd->srcbus; 1163 } else if (!src_incr) { 1164 *mbus = &bd->srcbus; 1165 *sbus = &bd->dstbus; 1166 } else { 1167 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { 1168 *mbus = &bd->dstbus; 1169 *sbus = &bd->srcbus; 1170 } else { 1171 *mbus = &bd->srcbus; 1172 *sbus = &bd->dstbus; 1173 } 1174 } 1175 } 1176 1177 /* 1178 * Fills in one LLI for a certain transfer descriptor and advance the counter 1179 */ 1180 static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, 1181 struct pl08x_lli_build_data *bd, 1182 int num_llis, int len, u32 cctl, u32 cctl2) 1183 { 1184 u32 offset = num_llis * pl08x->lli_words; 1185 u32 *llis_va = bd->txd->llis_va + offset; 1186 dma_addr_t llis_bus = bd->txd->llis_bus; 1187 1188 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 1189 1190 /* Advance the offset to next LLI. */ 1191 offset += pl08x->lli_words; 1192 1193 llis_va[PL080_LLI_SRC] = bd->srcbus.addr; 1194 llis_va[PL080_LLI_DST] = bd->dstbus.addr; 1195 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset); 1196 llis_va[PL080_LLI_LLI] |= bd->lli_bus; 1197 llis_va[PL080_LLI_CCTL] = cctl; 1198 if (pl08x->vd->pl080s) 1199 llis_va[PL080S_LLI_CCTL2] = cctl2; 1200 1201 if (pl08x->vd->ftdmac020) { 1202 /* FIXME: only memcpy so far so both increase */ 1203 bd->srcbus.addr += len; 1204 bd->dstbus.addr += len; 1205 } else { 1206 if (cctl & PL080_CONTROL_SRC_INCR) 1207 bd->srcbus.addr += len; 1208 if (cctl & PL080_CONTROL_DST_INCR) 1209 bd->dstbus.addr += len; 1210 } 1211 1212 BUG_ON(bd->remainder < len); 1213 1214 bd->remainder -= len; 1215 } 1216 1217 static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x, 1218 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len, 1219 int num_llis, size_t *total_bytes) 1220 { 1221 *cctl = pl08x_lli_control_bits(pl08x, *cctl, 1, 1, len); 1222 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len); 1223 (*total_bytes) += len; 1224 } 1225 1226 #if 1 1227 static void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 1228 const u32 *llis_va, int num_llis) 1229 { 1230 int i; 1231 1232 if (pl08x->vd->pl080s) { 1233 dev_vdbg(&pl08x->adev->dev, 1234 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n", 1235 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2"); 1236 for (i = 0; i < num_llis; i++) { 1237 dev_vdbg(&pl08x->adev->dev, 1238 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 1239 i, llis_va, llis_va[PL080_LLI_SRC], 1240 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 1241 llis_va[PL080_LLI_CCTL], 1242 llis_va[PL080S_LLI_CCTL2]); 1243 llis_va += pl08x->lli_words; 1244 } 1245 } else { 1246 dev_vdbg(&pl08x->adev->dev, 1247 "%-3s %-9s %-10s %-10s %-10s %s\n", 1248 "lli", "", "csrc", "cdst", "clli", "cctl"); 1249 for (i = 0; i < num_llis; i++) { 1250 dev_vdbg(&pl08x->adev->dev, 1251 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", 1252 i, llis_va, llis_va[PL080_LLI_SRC], 1253 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 1254 llis_va[PL080_LLI_CCTL]); 1255 llis_va += pl08x->lli_words; 1256 } 1257 } 1258 } 1259 #else 1260 static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 1261 const u32 *llis_va, int num_llis) {} 1262 #endif 1263 1264 /* 1265 * This fills in the table of LLIs for the transfer descriptor 1266 * Note that we assume we never have to change the burst sizes 1267 * Return 0 for error 1268 */ 1269 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 1270 struct pl08x_txd *txd) 1271 { 1272 struct pl08x_bus_data *mbus, *sbus; 1273 struct pl08x_lli_build_data bd; 1274 int num_llis = 0; 1275 u32 cctl, early_bytes = 0; 1276 size_t max_bytes_per_lli, total_bytes; 1277 u32 *llis_va, *last_lli; 1278 struct pl08x_sg *dsg; 1279 1280 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); 1281 if (!txd->llis_va) { 1282 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 1283 return 0; 1284 } 1285 1286 bd.txd = txd; 1287 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; 1288 cctl = txd->cctl; 1289 1290 /* Find maximum width of the source bus */ 1291 bd.srcbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, true); 1292 1293 /* Find maximum width of the destination bus */ 1294 bd.dstbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, false); 1295 1296 list_for_each_entry(dsg, &txd->dsg_list, node) { 1297 total_bytes = 0; 1298 cctl = txd->cctl; 1299 1300 bd.srcbus.addr = dsg->src_addr; 1301 bd.dstbus.addr = dsg->dst_addr; 1302 bd.remainder = dsg->len; 1303 bd.srcbus.buswidth = bd.srcbus.maxwidth; 1304 bd.dstbus.buswidth = bd.dstbus.maxwidth; 1305 1306 pl08x_choose_master_bus(pl08x, &bd, &mbus, &sbus, cctl); 1307 1308 dev_vdbg(&pl08x->adev->dev, 1309 "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n", 1310 (u64)bd.srcbus.addr, 1311 cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 1312 bd.srcbus.buswidth, 1313 (u64)bd.dstbus.addr, 1314 cctl & PL080_CONTROL_DST_INCR ? "+" : "", 1315 bd.dstbus.buswidth, 1316 bd.remainder); 1317 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 1318 mbus == &bd.srcbus ? "src" : "dst", 1319 sbus == &bd.srcbus ? "src" : "dst"); 1320 1321 /* 1322 * Zero length is only allowed if all these requirements are 1323 * met: 1324 * - flow controller is peripheral. 1325 * - src.addr is aligned to src.width 1326 * - dst.addr is aligned to dst.width 1327 * 1328 * sg_len == 1 should be true, as there can be two cases here: 1329 * 1330 * - Memory addresses are contiguous and are not scattered. 1331 * Here, Only one sg will be passed by user driver, with 1332 * memory address and zero length. We pass this to controller 1333 * and after the transfer it will receive the last burst 1334 * request from peripheral and so transfer finishes. 1335 * 1336 * - Memory addresses are scattered and are not contiguous. 1337 * Here, Obviously as DMA controller doesn't know when a lli's 1338 * transfer gets over, it can't load next lli. So in this 1339 * case, there has to be an assumption that only one lli is 1340 * supported. Thus, we can't have scattered addresses. 1341 */ 1342 if (!bd.remainder) { 1343 u32 fc; 1344 1345 /* FTDMAC020 only does memory-to-memory */ 1346 if (pl08x->vd->ftdmac020) 1347 fc = PL080_FLOW_MEM2MEM; 1348 else 1349 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> 1350 PL080_CONFIG_FLOW_CONTROL_SHIFT; 1351 if (!((fc >= PL080_FLOW_SRC2DST_DST) && 1352 (fc <= PL080_FLOW_SRC2DST_SRC))) { 1353 dev_err(&pl08x->adev->dev, "%s sg len can't be zero", 1354 __func__); 1355 return 0; 1356 } 1357 1358 if (!IS_BUS_ALIGNED(&bd.srcbus) || 1359 !IS_BUS_ALIGNED(&bd.dstbus)) { 1360 dev_err(&pl08x->adev->dev, 1361 "%s src & dst address must be aligned to src" 1362 " & dst width if peripheral is flow controller", 1363 __func__); 1364 return 0; 1365 } 1366 1367 cctl = pl08x_lli_control_bits(pl08x, cctl, 1368 bd.srcbus.buswidth, bd.dstbus.buswidth, 1369 0); 1370 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1371 0, cctl, 0); 1372 break; 1373 } 1374 1375 /* 1376 * Send byte by byte for following cases 1377 * - Less than a bus width available 1378 * - until master bus is aligned 1379 */ 1380 if (bd.remainder < mbus->buswidth) 1381 early_bytes = bd.remainder; 1382 else if (!IS_BUS_ALIGNED(mbus)) { 1383 early_bytes = mbus->buswidth - 1384 (mbus->addr & (mbus->buswidth - 1)); 1385 if ((bd.remainder - early_bytes) < mbus->buswidth) 1386 early_bytes = bd.remainder; 1387 } 1388 1389 if (early_bytes) { 1390 dev_vdbg(&pl08x->adev->dev, 1391 "%s byte width LLIs (remain 0x%08zx)\n", 1392 __func__, bd.remainder); 1393 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes, 1394 num_llis++, &total_bytes); 1395 } 1396 1397 if (bd.remainder) { 1398 /* 1399 * Master now aligned 1400 * - if slave is not then we must set its width down 1401 */ 1402 if (!IS_BUS_ALIGNED(sbus)) { 1403 dev_dbg(&pl08x->adev->dev, 1404 "%s set down bus width to one byte\n", 1405 __func__); 1406 1407 sbus->buswidth = 1; 1408 } 1409 1410 /* 1411 * Bytes transferred = tsize * src width, not 1412 * MIN(buswidths) 1413 */ 1414 max_bytes_per_lli = bd.srcbus.buswidth * 1415 pl08x->vd->max_transfer_size; 1416 dev_vdbg(&pl08x->adev->dev, 1417 "%s max bytes per lli = %zu\n", 1418 __func__, max_bytes_per_lli); 1419 1420 /* 1421 * Make largest possible LLIs until less than one bus 1422 * width left 1423 */ 1424 while (bd.remainder > (mbus->buswidth - 1)) { 1425 size_t lli_len, tsize, width; 1426 1427 /* 1428 * If enough left try to send max possible, 1429 * otherwise try to send the remainder 1430 */ 1431 lli_len = min(bd.remainder, max_bytes_per_lli); 1432 1433 /* 1434 * Check against maximum bus alignment: 1435 * Calculate actual transfer size in relation to 1436 * bus width an get a maximum remainder of the 1437 * highest bus width - 1 1438 */ 1439 width = max(mbus->buswidth, sbus->buswidth); 1440 lli_len = (lli_len / width) * width; 1441 tsize = lli_len / bd.srcbus.buswidth; 1442 1443 dev_vdbg(&pl08x->adev->dev, 1444 "%s fill lli with single lli chunk of " 1445 "size 0x%08zx (remainder 0x%08zx)\n", 1446 __func__, lli_len, bd.remainder); 1447 1448 cctl = pl08x_lli_control_bits(pl08x, cctl, 1449 bd.srcbus.buswidth, bd.dstbus.buswidth, 1450 tsize); 1451 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1452 lli_len, cctl, tsize); 1453 total_bytes += lli_len; 1454 } 1455 1456 /* 1457 * Send any odd bytes 1458 */ 1459 if (bd.remainder) { 1460 dev_vdbg(&pl08x->adev->dev, 1461 "%s align with boundary, send odd bytes (remain %zu)\n", 1462 __func__, bd.remainder); 1463 prep_byte_width_lli(pl08x, &bd, &cctl, 1464 bd.remainder, num_llis++, &total_bytes); 1465 } 1466 } 1467 1468 if (total_bytes != dsg->len) { 1469 dev_err(&pl08x->adev->dev, 1470 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 1471 __func__, total_bytes, dsg->len); 1472 return 0; 1473 } 1474 1475 if (num_llis >= MAX_NUM_TSFR_LLIS) { 1476 dev_err(&pl08x->adev->dev, 1477 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 1478 __func__, MAX_NUM_TSFR_LLIS); 1479 return 0; 1480 } 1481 } 1482 1483 llis_va = txd->llis_va; 1484 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words; 1485 1486 if (txd->cyclic) { 1487 /* Link back to the first LLI. */ 1488 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus; 1489 } else { 1490 /* The final LLI terminates the LLI. */ 1491 last_lli[PL080_LLI_LLI] = 0; 1492 /* The final LLI element shall also fire an interrupt. */ 1493 if (pl08x->vd->ftdmac020) 1494 last_lli[PL080_LLI_CCTL] &= ~FTDMAC020_LLI_TC_MSK; 1495 else 1496 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN; 1497 } 1498 1499 pl08x_dump_lli(pl08x, llis_va, num_llis); 1500 1501 return num_llis; 1502 } 1503 1504 static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 1505 struct pl08x_txd *txd) 1506 { 1507 struct pl08x_sg *dsg, *_dsg; 1508 1509 if (txd->llis_va) 1510 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 1511 1512 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { 1513 list_del(&dsg->node); 1514 kfree(dsg); 1515 } 1516 1517 kfree(txd); 1518 } 1519 1520 static void pl08x_desc_free(struct virt_dma_desc *vd) 1521 { 1522 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1523 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); 1524 1525 dma_descriptor_unmap(&vd->tx); 1526 if (!txd->done) 1527 pl08x_release_mux(plchan); 1528 1529 pl08x_free_txd(plchan->host, txd); 1530 } 1531 1532 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 1533 struct pl08x_dma_chan *plchan) 1534 { 1535 LIST_HEAD(head); 1536 1537 vchan_get_all_descriptors(&plchan->vc, &head); 1538 vchan_dma_desc_free_list(&plchan->vc, &head); 1539 } 1540 1541 /* 1542 * The DMA ENGINE API 1543 */ 1544 static void pl08x_free_chan_resources(struct dma_chan *chan) 1545 { 1546 /* Ensure all queued descriptors are freed */ 1547 vchan_free_chan_resources(to_virt_chan(chan)); 1548 } 1549 1550 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 1551 struct dma_chan *chan, unsigned long flags) 1552 { 1553 struct dma_async_tx_descriptor *retval = NULL; 1554 1555 return retval; 1556 } 1557 1558 /* 1559 * Code accessing dma_async_is_complete() in a tight loop may give problems. 1560 * If slaves are relying on interrupts to signal completion this function 1561 * must not be called with interrupts disabled. 1562 */ 1563 static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, 1564 dma_cookie_t cookie, struct dma_tx_state *txstate) 1565 { 1566 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1567 struct virt_dma_desc *vd; 1568 unsigned long flags; 1569 enum dma_status ret; 1570 size_t bytes = 0; 1571 1572 ret = dma_cookie_status(chan, cookie, txstate); 1573 if (ret == DMA_COMPLETE) 1574 return ret; 1575 1576 /* 1577 * There's no point calculating the residue if there's 1578 * no txstate to store the value. 1579 */ 1580 if (!txstate) { 1581 if (plchan->state == PL08X_CHAN_PAUSED) 1582 ret = DMA_PAUSED; 1583 return ret; 1584 } 1585 1586 spin_lock_irqsave(&plchan->vc.lock, flags); 1587 ret = dma_cookie_status(chan, cookie, txstate); 1588 if (ret != DMA_COMPLETE) { 1589 vd = vchan_find_desc(&plchan->vc, cookie); 1590 if (vd) { 1591 /* On the issued list, so hasn't been processed yet */ 1592 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1593 struct pl08x_sg *dsg; 1594 1595 list_for_each_entry(dsg, &txd->dsg_list, node) 1596 bytes += dsg->len; 1597 } else { 1598 bytes = pl08x_getbytes_chan(plchan); 1599 } 1600 } 1601 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1602 1603 /* 1604 * This cookie not complete yet 1605 * Get number of bytes left in the active transactions and queue 1606 */ 1607 dma_set_residue(txstate, bytes); 1608 1609 if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) 1610 ret = DMA_PAUSED; 1611 1612 /* Whether waiting or running, we're in progress */ 1613 return ret; 1614 } 1615 1616 /* PrimeCell DMA extension */ 1617 struct burst_table { 1618 u32 burstwords; 1619 u32 reg; 1620 }; 1621 1622 static const struct burst_table burst_sizes[] = { 1623 { 1624 .burstwords = 256, 1625 .reg = PL080_BSIZE_256, 1626 }, 1627 { 1628 .burstwords = 128, 1629 .reg = PL080_BSIZE_128, 1630 }, 1631 { 1632 .burstwords = 64, 1633 .reg = PL080_BSIZE_64, 1634 }, 1635 { 1636 .burstwords = 32, 1637 .reg = PL080_BSIZE_32, 1638 }, 1639 { 1640 .burstwords = 16, 1641 .reg = PL080_BSIZE_16, 1642 }, 1643 { 1644 .burstwords = 8, 1645 .reg = PL080_BSIZE_8, 1646 }, 1647 { 1648 .burstwords = 4, 1649 .reg = PL080_BSIZE_4, 1650 }, 1651 { 1652 .burstwords = 0, 1653 .reg = PL080_BSIZE_1, 1654 }, 1655 }; 1656 1657 /* 1658 * Given the source and destination available bus masks, select which 1659 * will be routed to each port. We try to have source and destination 1660 * on separate ports, but always respect the allowable settings. 1661 */ 1662 static u32 pl08x_select_bus(bool ftdmac020, u8 src, u8 dst) 1663 { 1664 u32 cctl = 0; 1665 u32 dst_ahb2; 1666 u32 src_ahb2; 1667 1668 /* The FTDMAC020 use different bits to indicate src/dst bus */ 1669 if (ftdmac020) { 1670 dst_ahb2 = FTDMAC020_LLI_DST_SEL; 1671 src_ahb2 = FTDMAC020_LLI_SRC_SEL; 1672 } else { 1673 dst_ahb2 = PL080_CONTROL_DST_AHB2; 1674 src_ahb2 = PL080_CONTROL_SRC_AHB2; 1675 } 1676 1677 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) 1678 cctl |= dst_ahb2; 1679 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) 1680 cctl |= src_ahb2; 1681 1682 return cctl; 1683 } 1684 1685 static u32 pl08x_cctl(u32 cctl) 1686 { 1687 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | 1688 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | 1689 PL080_CONTROL_PROT_MASK); 1690 1691 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1692 return cctl | PL080_CONTROL_PROT_SYS; 1693 } 1694 1695 static u32 pl08x_width(enum dma_slave_buswidth width) 1696 { 1697 switch (width) { 1698 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1699 return PL080_WIDTH_8BIT; 1700 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1701 return PL080_WIDTH_16BIT; 1702 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1703 return PL080_WIDTH_32BIT; 1704 default: 1705 return ~0; 1706 } 1707 } 1708 1709 static u32 pl08x_burst(u32 maxburst) 1710 { 1711 int i; 1712 1713 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) 1714 if (burst_sizes[i].burstwords <= maxburst) 1715 break; 1716 1717 return burst_sizes[i].reg; 1718 } 1719 1720 static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, 1721 enum dma_slave_buswidth addr_width, u32 maxburst) 1722 { 1723 u32 width, burst, cctl = 0; 1724 1725 width = pl08x_width(addr_width); 1726 if (width == ~0) 1727 return ~0; 1728 1729 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; 1730 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; 1731 1732 /* 1733 * If this channel will only request single transfers, set this 1734 * down to ONE element. Also select one element if no maxburst 1735 * is specified. 1736 */ 1737 if (plchan->cd->single) 1738 maxburst = 1; 1739 1740 burst = pl08x_burst(maxburst); 1741 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1742 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1743 1744 return pl08x_cctl(cctl); 1745 } 1746 1747 /* 1748 * Slave transactions callback to the slave device to allow 1749 * synchronization of slave DMA signals with the DMAC enable 1750 */ 1751 static void pl08x_issue_pending(struct dma_chan *chan) 1752 { 1753 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1754 unsigned long flags; 1755 1756 spin_lock_irqsave(&plchan->vc.lock, flags); 1757 if (vchan_issue_pending(&plchan->vc)) { 1758 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) 1759 pl08x_phy_alloc_and_start(plchan); 1760 } 1761 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1762 } 1763 1764 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) 1765 { 1766 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1767 1768 if (txd) 1769 INIT_LIST_HEAD(&txd->dsg_list); 1770 return txd; 1771 } 1772 1773 static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x) 1774 { 1775 u32 cctl = 0; 1776 1777 /* Conjure cctl */ 1778 switch (pl08x->pd->memcpy_burst_size) { 1779 default: 1780 dev_err(&pl08x->adev->dev, 1781 "illegal burst size for memcpy, set to 1\n"); 1782 /* Fall through */ 1783 case PL08X_BURST_SZ_1: 1784 cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT | 1785 PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT; 1786 break; 1787 case PL08X_BURST_SZ_4: 1788 cctl |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT | 1789 PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT; 1790 break; 1791 case PL08X_BURST_SZ_8: 1792 cctl |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT | 1793 PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT; 1794 break; 1795 case PL08X_BURST_SZ_16: 1796 cctl |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | 1797 PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT; 1798 break; 1799 case PL08X_BURST_SZ_32: 1800 cctl |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT | 1801 PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT; 1802 break; 1803 case PL08X_BURST_SZ_64: 1804 cctl |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT | 1805 PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT; 1806 break; 1807 case PL08X_BURST_SZ_128: 1808 cctl |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT | 1809 PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT; 1810 break; 1811 case PL08X_BURST_SZ_256: 1812 cctl |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT | 1813 PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT; 1814 break; 1815 } 1816 1817 switch (pl08x->pd->memcpy_bus_width) { 1818 default: 1819 dev_err(&pl08x->adev->dev, 1820 "illegal bus width for memcpy, set to 8 bits\n"); 1821 /* Fall through */ 1822 case PL08X_BUS_WIDTH_8_BITS: 1823 cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT | 1824 PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 1825 break; 1826 case PL08X_BUS_WIDTH_16_BITS: 1827 cctl |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT | 1828 PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 1829 break; 1830 case PL08X_BUS_WIDTH_32_BITS: 1831 cctl |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | 1832 PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 1833 break; 1834 } 1835 1836 /* Protection flags */ 1837 if (pl08x->pd->memcpy_prot_buff) 1838 cctl |= PL080_CONTROL_PROT_BUFF; 1839 if (pl08x->pd->memcpy_prot_cache) 1840 cctl |= PL080_CONTROL_PROT_CACHE; 1841 1842 /* We are the kernel, so we are in privileged mode */ 1843 cctl |= PL080_CONTROL_PROT_SYS; 1844 1845 /* Both to be incremented or the code will break */ 1846 cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1847 1848 if (pl08x->vd->dualmaster) 1849 cctl |= pl08x_select_bus(false, 1850 pl08x->mem_buses, 1851 pl08x->mem_buses); 1852 1853 return cctl; 1854 } 1855 1856 static u32 pl08x_ftdmac020_memcpy_cctl(struct pl08x_driver_data *pl08x) 1857 { 1858 u32 cctl = 0; 1859 1860 /* Conjure cctl */ 1861 switch (pl08x->pd->memcpy_bus_width) { 1862 default: 1863 dev_err(&pl08x->adev->dev, 1864 "illegal bus width for memcpy, set to 8 bits\n"); 1865 /* Fall through */ 1866 case PL08X_BUS_WIDTH_8_BITS: 1867 cctl |= PL080_WIDTH_8BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT | 1868 PL080_WIDTH_8BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT; 1869 break; 1870 case PL08X_BUS_WIDTH_16_BITS: 1871 cctl |= PL080_WIDTH_16BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT | 1872 PL080_WIDTH_16BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT; 1873 break; 1874 case PL08X_BUS_WIDTH_32_BITS: 1875 cctl |= PL080_WIDTH_32BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT | 1876 PL080_WIDTH_32BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT; 1877 break; 1878 } 1879 1880 /* 1881 * By default mask the TC IRQ on all LLIs, it will be unmasked on 1882 * the last LLI item by other code. 1883 */ 1884 cctl |= FTDMAC020_LLI_TC_MSK; 1885 1886 /* 1887 * Both to be incremented so leave bits FTDMAC020_LLI_SRCAD_CTL 1888 * and FTDMAC020_LLI_DSTAD_CTL as zero 1889 */ 1890 if (pl08x->vd->dualmaster) 1891 cctl |= pl08x_select_bus(true, 1892 pl08x->mem_buses, 1893 pl08x->mem_buses); 1894 1895 return cctl; 1896 } 1897 1898 /* 1899 * Initialize a descriptor to be used by memcpy submit 1900 */ 1901 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1902 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 1903 size_t len, unsigned long flags) 1904 { 1905 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1906 struct pl08x_driver_data *pl08x = plchan->host; 1907 struct pl08x_txd *txd; 1908 struct pl08x_sg *dsg; 1909 int ret; 1910 1911 txd = pl08x_get_txd(plchan); 1912 if (!txd) { 1913 dev_err(&pl08x->adev->dev, 1914 "%s no memory for descriptor\n", __func__); 1915 return NULL; 1916 } 1917 1918 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1919 if (!dsg) { 1920 pl08x_free_txd(pl08x, txd); 1921 return NULL; 1922 } 1923 list_add_tail(&dsg->node, &txd->dsg_list); 1924 1925 dsg->src_addr = src; 1926 dsg->dst_addr = dest; 1927 dsg->len = len; 1928 if (pl08x->vd->ftdmac020) { 1929 /* Writing CCFG zero ENABLES all interrupts */ 1930 txd->ccfg = 0; 1931 txd->cctl = pl08x_ftdmac020_memcpy_cctl(pl08x); 1932 } else { 1933 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 1934 PL080_CONFIG_TC_IRQ_MASK | 1935 PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1936 txd->cctl = pl08x_memcpy_cctl(pl08x); 1937 } 1938 1939 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1940 if (!ret) { 1941 pl08x_free_txd(pl08x, txd); 1942 return NULL; 1943 } 1944 1945 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1946 } 1947 1948 static struct pl08x_txd *pl08x_init_txd( 1949 struct dma_chan *chan, 1950 enum dma_transfer_direction direction, 1951 dma_addr_t *slave_addr) 1952 { 1953 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1954 struct pl08x_driver_data *pl08x = plchan->host; 1955 struct pl08x_txd *txd; 1956 enum dma_slave_buswidth addr_width; 1957 int ret, tmp; 1958 u8 src_buses, dst_buses; 1959 u32 maxburst, cctl; 1960 1961 txd = pl08x_get_txd(plchan); 1962 if (!txd) { 1963 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1964 return NULL; 1965 } 1966 1967 /* 1968 * Set up addresses, the PrimeCell configured address 1969 * will take precedence since this may configure the 1970 * channel target address dynamically at runtime. 1971 */ 1972 if (direction == DMA_MEM_TO_DEV) { 1973 cctl = PL080_CONTROL_SRC_INCR; 1974 *slave_addr = plchan->cfg.dst_addr; 1975 addr_width = plchan->cfg.dst_addr_width; 1976 maxburst = plchan->cfg.dst_maxburst; 1977 src_buses = pl08x->mem_buses; 1978 dst_buses = plchan->cd->periph_buses; 1979 } else if (direction == DMA_DEV_TO_MEM) { 1980 cctl = PL080_CONTROL_DST_INCR; 1981 *slave_addr = plchan->cfg.src_addr; 1982 addr_width = plchan->cfg.src_addr_width; 1983 maxburst = plchan->cfg.src_maxburst; 1984 src_buses = plchan->cd->periph_buses; 1985 dst_buses = pl08x->mem_buses; 1986 } else { 1987 pl08x_free_txd(pl08x, txd); 1988 dev_err(&pl08x->adev->dev, 1989 "%s direction unsupported\n", __func__); 1990 return NULL; 1991 } 1992 1993 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); 1994 if (cctl == ~0) { 1995 pl08x_free_txd(pl08x, txd); 1996 dev_err(&pl08x->adev->dev, 1997 "DMA slave configuration botched?\n"); 1998 return NULL; 1999 } 2000 2001 txd->cctl = cctl | pl08x_select_bus(false, src_buses, dst_buses); 2002 2003 if (plchan->cfg.device_fc) 2004 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 2005 PL080_FLOW_PER2MEM_PER; 2006 else 2007 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER : 2008 PL080_FLOW_PER2MEM; 2009 2010 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 2011 PL080_CONFIG_TC_IRQ_MASK | 2012 tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 2013 2014 ret = pl08x_request_mux(plchan); 2015 if (ret < 0) { 2016 pl08x_free_txd(pl08x, txd); 2017 dev_dbg(&pl08x->adev->dev, 2018 "unable to mux for transfer on %s due to platform restrictions\n", 2019 plchan->name); 2020 return NULL; 2021 } 2022 2023 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", 2024 plchan->signal, plchan->name); 2025 2026 /* Assign the flow control signal to this channel */ 2027 if (direction == DMA_MEM_TO_DEV) 2028 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; 2029 else 2030 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; 2031 2032 return txd; 2033 } 2034 2035 static int pl08x_tx_add_sg(struct pl08x_txd *txd, 2036 enum dma_transfer_direction direction, 2037 dma_addr_t slave_addr, 2038 dma_addr_t buf_addr, 2039 unsigned int len) 2040 { 2041 struct pl08x_sg *dsg; 2042 2043 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 2044 if (!dsg) 2045 return -ENOMEM; 2046 2047 list_add_tail(&dsg->node, &txd->dsg_list); 2048 2049 dsg->len = len; 2050 if (direction == DMA_MEM_TO_DEV) { 2051 dsg->src_addr = buf_addr; 2052 dsg->dst_addr = slave_addr; 2053 } else { 2054 dsg->src_addr = slave_addr; 2055 dsg->dst_addr = buf_addr; 2056 } 2057 2058 return 0; 2059 } 2060 2061 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 2062 struct dma_chan *chan, struct scatterlist *sgl, 2063 unsigned int sg_len, enum dma_transfer_direction direction, 2064 unsigned long flags, void *context) 2065 { 2066 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 2067 struct pl08x_driver_data *pl08x = plchan->host; 2068 struct pl08x_txd *txd; 2069 struct scatterlist *sg; 2070 int ret, tmp; 2071 dma_addr_t slave_addr; 2072 2073 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 2074 __func__, sg_dma_len(sgl), plchan->name); 2075 2076 txd = pl08x_init_txd(chan, direction, &slave_addr); 2077 if (!txd) 2078 return NULL; 2079 2080 for_each_sg(sgl, sg, sg_len, tmp) { 2081 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 2082 sg_dma_address(sg), 2083 sg_dma_len(sg)); 2084 if (ret) { 2085 pl08x_release_mux(plchan); 2086 pl08x_free_txd(pl08x, txd); 2087 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 2088 __func__); 2089 return NULL; 2090 } 2091 } 2092 2093 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 2094 if (!ret) { 2095 pl08x_release_mux(plchan); 2096 pl08x_free_txd(pl08x, txd); 2097 return NULL; 2098 } 2099 2100 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 2101 } 2102 2103 static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( 2104 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 2105 size_t period_len, enum dma_transfer_direction direction, 2106 unsigned long flags) 2107 { 2108 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 2109 struct pl08x_driver_data *pl08x = plchan->host; 2110 struct pl08x_txd *txd; 2111 int ret, tmp; 2112 dma_addr_t slave_addr; 2113 2114 dev_dbg(&pl08x->adev->dev, 2115 "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n", 2116 __func__, period_len, buf_len, 2117 direction == DMA_MEM_TO_DEV ? "to" : "from", 2118 plchan->name); 2119 2120 txd = pl08x_init_txd(chan, direction, &slave_addr); 2121 if (!txd) 2122 return NULL; 2123 2124 txd->cyclic = true; 2125 txd->cctl |= PL080_CONTROL_TC_IRQ_EN; 2126 for (tmp = 0; tmp < buf_len; tmp += period_len) { 2127 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 2128 buf_addr + tmp, period_len); 2129 if (ret) { 2130 pl08x_release_mux(plchan); 2131 pl08x_free_txd(pl08x, txd); 2132 return NULL; 2133 } 2134 } 2135 2136 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 2137 if (!ret) { 2138 pl08x_release_mux(plchan); 2139 pl08x_free_txd(pl08x, txd); 2140 return NULL; 2141 } 2142 2143 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 2144 } 2145 2146 static int pl08x_config(struct dma_chan *chan, 2147 struct dma_slave_config *config) 2148 { 2149 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 2150 struct pl08x_driver_data *pl08x = plchan->host; 2151 2152 if (!plchan->slave) 2153 return -EINVAL; 2154 2155 /* Reject definitely invalid configurations */ 2156 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 2157 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 2158 return -EINVAL; 2159 2160 if (config->device_fc && pl08x->vd->pl080s) { 2161 dev_err(&pl08x->adev->dev, 2162 "%s: PL080S does not support peripheral flow control\n", 2163 __func__); 2164 return -EINVAL; 2165 } 2166 2167 plchan->cfg = *config; 2168 2169 return 0; 2170 } 2171 2172 static int pl08x_terminate_all(struct dma_chan *chan) 2173 { 2174 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 2175 struct pl08x_driver_data *pl08x = plchan->host; 2176 unsigned long flags; 2177 2178 spin_lock_irqsave(&plchan->vc.lock, flags); 2179 if (!plchan->phychan && !plchan->at) { 2180 spin_unlock_irqrestore(&plchan->vc.lock, flags); 2181 return 0; 2182 } 2183 2184 plchan->state = PL08X_CHAN_IDLE; 2185 2186 if (plchan->phychan) { 2187 /* 2188 * Mark physical channel as free and free any slave 2189 * signal 2190 */ 2191 pl08x_phy_free(plchan); 2192 } 2193 /* Dequeue jobs and free LLIs */ 2194 if (plchan->at) { 2195 vchan_terminate_vdesc(&plchan->at->vd); 2196 plchan->at = NULL; 2197 } 2198 /* Dequeue jobs not yet fired as well */ 2199 pl08x_free_txd_list(pl08x, plchan); 2200 2201 spin_unlock_irqrestore(&plchan->vc.lock, flags); 2202 2203 return 0; 2204 } 2205 2206 static void pl08x_synchronize(struct dma_chan *chan) 2207 { 2208 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 2209 2210 vchan_synchronize(&plchan->vc); 2211 } 2212 2213 static int pl08x_pause(struct dma_chan *chan) 2214 { 2215 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 2216 unsigned long flags; 2217 2218 /* 2219 * Anything succeeds on channels with no physical allocation and 2220 * no queued transfers. 2221 */ 2222 spin_lock_irqsave(&plchan->vc.lock, flags); 2223 if (!plchan->phychan && !plchan->at) { 2224 spin_unlock_irqrestore(&plchan->vc.lock, flags); 2225 return 0; 2226 } 2227 2228 pl08x_pause_phy_chan(plchan->phychan); 2229 plchan->state = PL08X_CHAN_PAUSED; 2230 2231 spin_unlock_irqrestore(&plchan->vc.lock, flags); 2232 2233 return 0; 2234 } 2235 2236 static int pl08x_resume(struct dma_chan *chan) 2237 { 2238 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 2239 unsigned long flags; 2240 2241 /* 2242 * Anything succeeds on channels with no physical allocation and 2243 * no queued transfers. 2244 */ 2245 spin_lock_irqsave(&plchan->vc.lock, flags); 2246 if (!plchan->phychan && !plchan->at) { 2247 spin_unlock_irqrestore(&plchan->vc.lock, flags); 2248 return 0; 2249 } 2250 2251 pl08x_resume_phy_chan(plchan->phychan); 2252 plchan->state = PL08X_CHAN_RUNNING; 2253 2254 spin_unlock_irqrestore(&plchan->vc.lock, flags); 2255 2256 return 0; 2257 } 2258 2259 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 2260 { 2261 struct pl08x_dma_chan *plchan; 2262 char *name = chan_id; 2263 2264 /* Reject channels for devices not bound to this driver */ 2265 if (chan->device->dev->driver != &pl08x_amba_driver.drv) 2266 return false; 2267 2268 plchan = to_pl08x_chan(chan); 2269 2270 /* Check that the channel is not taken! */ 2271 if (!strcmp(plchan->name, name)) 2272 return true; 2273 2274 return false; 2275 } 2276 EXPORT_SYMBOL_GPL(pl08x_filter_id); 2277 2278 static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id) 2279 { 2280 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 2281 2282 return plchan->cd == chan_id; 2283 } 2284 2285 /* 2286 * Just check that the device is there and active 2287 * TODO: turn this bit on/off depending on the number of physical channels 2288 * actually used, if it is zero... well shut it off. That will save some 2289 * power. Cut the clock at the same time. 2290 */ 2291 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 2292 { 2293 /* The Nomadik variant does not have the config register */ 2294 if (pl08x->vd->nomadik) 2295 return; 2296 /* The FTDMAC020 variant does this in another register */ 2297 if (pl08x->vd->ftdmac020) { 2298 writel(PL080_CONFIG_ENABLE, pl08x->base + FTDMAC020_CSR); 2299 return; 2300 } 2301 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 2302 } 2303 2304 static irqreturn_t pl08x_irq(int irq, void *dev) 2305 { 2306 struct pl08x_driver_data *pl08x = dev; 2307 u32 mask = 0, err, tc, i; 2308 2309 /* check & clear - ERR & TC interrupts */ 2310 err = readl(pl08x->base + PL080_ERR_STATUS); 2311 if (err) { 2312 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", 2313 __func__, err); 2314 writel(err, pl08x->base + PL080_ERR_CLEAR); 2315 } 2316 tc = readl(pl08x->base + PL080_TC_STATUS); 2317 if (tc) 2318 writel(tc, pl08x->base + PL080_TC_CLEAR); 2319 2320 if (!err && !tc) 2321 return IRQ_NONE; 2322 2323 for (i = 0; i < pl08x->vd->channels; i++) { 2324 if ((BIT(i) & err) || (BIT(i) & tc)) { 2325 /* Locate physical channel */ 2326 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 2327 struct pl08x_dma_chan *plchan = phychan->serving; 2328 struct pl08x_txd *tx; 2329 2330 if (!plchan) { 2331 dev_err(&pl08x->adev->dev, 2332 "%s Error TC interrupt on unused channel: 0x%08x\n", 2333 __func__, i); 2334 continue; 2335 } 2336 2337 spin_lock(&plchan->vc.lock); 2338 tx = plchan->at; 2339 if (tx && tx->cyclic) { 2340 vchan_cyclic_callback(&tx->vd); 2341 } else if (tx) { 2342 plchan->at = NULL; 2343 /* 2344 * This descriptor is done, release its mux 2345 * reservation. 2346 */ 2347 pl08x_release_mux(plchan); 2348 tx->done = true; 2349 vchan_cookie_complete(&tx->vd); 2350 2351 /* 2352 * And start the next descriptor (if any), 2353 * otherwise free this channel. 2354 */ 2355 if (vchan_next_desc(&plchan->vc)) 2356 pl08x_start_next_txd(plchan); 2357 else 2358 pl08x_phy_free(plchan); 2359 } 2360 spin_unlock(&plchan->vc.lock); 2361 2362 mask |= BIT(i); 2363 } 2364 } 2365 2366 return mask ? IRQ_HANDLED : IRQ_NONE; 2367 } 2368 2369 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) 2370 { 2371 chan->slave = true; 2372 chan->name = chan->cd->bus_id; 2373 chan->cfg.src_addr = chan->cd->addr; 2374 chan->cfg.dst_addr = chan->cd->addr; 2375 } 2376 2377 /* 2378 * Initialise the DMAC memcpy/slave channels. 2379 * Make a local wrapper to hold required data 2380 */ 2381 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 2382 struct dma_device *dmadev, unsigned int channels, bool slave) 2383 { 2384 struct pl08x_dma_chan *chan; 2385 int i; 2386 2387 INIT_LIST_HEAD(&dmadev->channels); 2388 2389 /* 2390 * Register as many many memcpy as we have physical channels, 2391 * we won't always be able to use all but the code will have 2392 * to cope with that situation. 2393 */ 2394 for (i = 0; i < channels; i++) { 2395 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 2396 if (!chan) 2397 return -ENOMEM; 2398 2399 chan->host = pl08x; 2400 chan->state = PL08X_CHAN_IDLE; 2401 chan->signal = -1; 2402 2403 if (slave) { 2404 chan->cd = &pl08x->pd->slave_channels[i]; 2405 /* 2406 * Some implementations have muxed signals, whereas some 2407 * use a mux in front of the signals and need dynamic 2408 * assignment of signals. 2409 */ 2410 chan->signal = i; 2411 pl08x_dma_slave_init(chan); 2412 } else { 2413 chan->cd = kzalloc(sizeof(*chan->cd), GFP_KERNEL); 2414 if (!chan->cd) { 2415 kfree(chan); 2416 return -ENOMEM; 2417 } 2418 chan->cd->bus_id = "memcpy"; 2419 chan->cd->periph_buses = pl08x->pd->mem_buses; 2420 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 2421 if (!chan->name) { 2422 kfree(chan->cd); 2423 kfree(chan); 2424 return -ENOMEM; 2425 } 2426 } 2427 dev_dbg(&pl08x->adev->dev, 2428 "initialize virtual channel \"%s\"\n", 2429 chan->name); 2430 2431 chan->vc.desc_free = pl08x_desc_free; 2432 vchan_init(&chan->vc, dmadev); 2433 } 2434 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 2435 i, slave ? "slave" : "memcpy"); 2436 return i; 2437 } 2438 2439 static void pl08x_free_virtual_channels(struct dma_device *dmadev) 2440 { 2441 struct pl08x_dma_chan *chan = NULL; 2442 struct pl08x_dma_chan *next; 2443 2444 list_for_each_entry_safe(chan, 2445 next, &dmadev->channels, vc.chan.device_node) { 2446 list_del(&chan->vc.chan.device_node); 2447 kfree(chan); 2448 } 2449 } 2450 2451 #ifdef CONFIG_DEBUG_FS 2452 static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 2453 { 2454 switch (state) { 2455 case PL08X_CHAN_IDLE: 2456 return "idle"; 2457 case PL08X_CHAN_RUNNING: 2458 return "running"; 2459 case PL08X_CHAN_PAUSED: 2460 return "paused"; 2461 case PL08X_CHAN_WAITING: 2462 return "waiting"; 2463 default: 2464 break; 2465 } 2466 return "UNKNOWN STATE"; 2467 } 2468 2469 static int pl08x_debugfs_show(struct seq_file *s, void *data) 2470 { 2471 struct pl08x_driver_data *pl08x = s->private; 2472 struct pl08x_dma_chan *chan; 2473 struct pl08x_phy_chan *ch; 2474 unsigned long flags; 2475 int i; 2476 2477 seq_printf(s, "PL08x physical channels:\n"); 2478 seq_printf(s, "CHANNEL:\tUSER:\n"); 2479 seq_printf(s, "--------\t-----\n"); 2480 for (i = 0; i < pl08x->vd->channels; i++) { 2481 struct pl08x_dma_chan *virt_chan; 2482 2483 ch = &pl08x->phy_chans[i]; 2484 2485 spin_lock_irqsave(&ch->lock, flags); 2486 virt_chan = ch->serving; 2487 2488 seq_printf(s, "%d\t\t%s%s\n", 2489 ch->id, 2490 virt_chan ? virt_chan->name : "(none)", 2491 ch->locked ? " LOCKED" : ""); 2492 2493 spin_unlock_irqrestore(&ch->lock, flags); 2494 } 2495 2496 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 2497 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2498 seq_printf(s, "--------\t------\n"); 2499 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { 2500 seq_printf(s, "%s\t\t%s\n", chan->name, 2501 pl08x_state_str(chan->state)); 2502 } 2503 2504 if (pl08x->has_slave) { 2505 seq_printf(s, "\nPL08x virtual slave channels:\n"); 2506 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2507 seq_printf(s, "--------\t------\n"); 2508 list_for_each_entry(chan, &pl08x->slave.channels, 2509 vc.chan.device_node) { 2510 seq_printf(s, "%s\t\t%s\n", chan->name, 2511 pl08x_state_str(chan->state)); 2512 } 2513 } 2514 2515 return 0; 2516 } 2517 2518 DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs); 2519 2520 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2521 { 2522 /* Expose a simple debugfs interface to view all clocks */ 2523 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 2524 S_IFREG | S_IRUGO, NULL, pl08x, 2525 &pl08x_debugfs_fops); 2526 } 2527 2528 #else 2529 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2530 { 2531 } 2532 #endif 2533 2534 #ifdef CONFIG_OF 2535 static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x, 2536 u32 id) 2537 { 2538 struct pl08x_dma_chan *chan; 2539 2540 /* Trying to get a slave channel from something with no slave support */ 2541 if (!pl08x->has_slave) 2542 return NULL; 2543 2544 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2545 if (chan->signal == id) 2546 return &chan->vc.chan; 2547 } 2548 2549 return NULL; 2550 } 2551 2552 static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec, 2553 struct of_dma *ofdma) 2554 { 2555 struct pl08x_driver_data *pl08x = ofdma->of_dma_data; 2556 struct dma_chan *dma_chan; 2557 struct pl08x_dma_chan *plchan; 2558 2559 if (!pl08x) 2560 return NULL; 2561 2562 if (dma_spec->args_count != 2) { 2563 dev_err(&pl08x->adev->dev, 2564 "DMA channel translation requires two cells\n"); 2565 return NULL; 2566 } 2567 2568 dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); 2569 if (!dma_chan) { 2570 dev_err(&pl08x->adev->dev, 2571 "DMA slave channel not found\n"); 2572 return NULL; 2573 } 2574 2575 plchan = to_pl08x_chan(dma_chan); 2576 dev_dbg(&pl08x->adev->dev, 2577 "translated channel for signal %d\n", 2578 dma_spec->args[0]); 2579 2580 /* Augment channel data for applicable AHB buses */ 2581 plchan->cd->periph_buses = dma_spec->args[1]; 2582 return dma_get_slave_channel(dma_chan); 2583 } 2584 2585 static int pl08x_of_probe(struct amba_device *adev, 2586 struct pl08x_driver_data *pl08x, 2587 struct device_node *np) 2588 { 2589 struct pl08x_platform_data *pd; 2590 struct pl08x_channel_data *chanp = NULL; 2591 u32 val; 2592 int ret; 2593 int i; 2594 2595 pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); 2596 if (!pd) 2597 return -ENOMEM; 2598 2599 /* Eligible bus masters for fetching LLIs */ 2600 if (of_property_read_bool(np, "lli-bus-interface-ahb1")) 2601 pd->lli_buses |= PL08X_AHB1; 2602 if (of_property_read_bool(np, "lli-bus-interface-ahb2")) 2603 pd->lli_buses |= PL08X_AHB2; 2604 if (!pd->lli_buses) { 2605 dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n"); 2606 pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2; 2607 } 2608 2609 /* Eligible bus masters for memory access */ 2610 if (of_property_read_bool(np, "mem-bus-interface-ahb1")) 2611 pd->mem_buses |= PL08X_AHB1; 2612 if (of_property_read_bool(np, "mem-bus-interface-ahb2")) 2613 pd->mem_buses |= PL08X_AHB2; 2614 if (!pd->mem_buses) { 2615 dev_info(&adev->dev, "no bus masters for memory stated, assume all\n"); 2616 pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2; 2617 } 2618 2619 /* Parse the memcpy channel properties */ 2620 ret = of_property_read_u32(np, "memcpy-burst-size", &val); 2621 if (ret) { 2622 dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n"); 2623 val = 1; 2624 } 2625 switch (val) { 2626 default: 2627 dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n"); 2628 /* Fall through */ 2629 case 1: 2630 pd->memcpy_burst_size = PL08X_BURST_SZ_1; 2631 break; 2632 case 4: 2633 pd->memcpy_burst_size = PL08X_BURST_SZ_4; 2634 break; 2635 case 8: 2636 pd->memcpy_burst_size = PL08X_BURST_SZ_8; 2637 break; 2638 case 16: 2639 pd->memcpy_burst_size = PL08X_BURST_SZ_16; 2640 break; 2641 case 32: 2642 pd->memcpy_burst_size = PL08X_BURST_SZ_32; 2643 break; 2644 case 64: 2645 pd->memcpy_burst_size = PL08X_BURST_SZ_64; 2646 break; 2647 case 128: 2648 pd->memcpy_burst_size = PL08X_BURST_SZ_128; 2649 break; 2650 case 256: 2651 pd->memcpy_burst_size = PL08X_BURST_SZ_256; 2652 break; 2653 } 2654 2655 ret = of_property_read_u32(np, "memcpy-bus-width", &val); 2656 if (ret) { 2657 dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n"); 2658 val = 8; 2659 } 2660 switch (val) { 2661 default: 2662 dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); 2663 /* Fall through */ 2664 case 8: 2665 pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS; 2666 break; 2667 case 16: 2668 pd->memcpy_bus_width = PL08X_BUS_WIDTH_16_BITS; 2669 break; 2670 case 32: 2671 pd->memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS; 2672 break; 2673 } 2674 2675 /* 2676 * Allocate channel data for all possible slave channels (one 2677 * for each possible signal), channels will then be allocated 2678 * for a device and have it's AHB interfaces set up at 2679 * translation time. 2680 */ 2681 if (pl08x->vd->signals) { 2682 chanp = devm_kcalloc(&adev->dev, 2683 pl08x->vd->signals, 2684 sizeof(struct pl08x_channel_data), 2685 GFP_KERNEL); 2686 if (!chanp) 2687 return -ENOMEM; 2688 2689 pd->slave_channels = chanp; 2690 for (i = 0; i < pl08x->vd->signals; i++) { 2691 /* 2692 * chanp->periph_buses will be assigned at translation 2693 */ 2694 chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i); 2695 chanp++; 2696 } 2697 pd->num_slave_channels = pl08x->vd->signals; 2698 } 2699 2700 pl08x->pd = pd; 2701 2702 return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate, 2703 pl08x); 2704 } 2705 #else 2706 static inline int pl08x_of_probe(struct amba_device *adev, 2707 struct pl08x_driver_data *pl08x, 2708 struct device_node *np) 2709 { 2710 return -EINVAL; 2711 } 2712 #endif 2713 2714 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) 2715 { 2716 struct pl08x_driver_data *pl08x; 2717 struct vendor_data *vd = id->data; 2718 struct device_node *np = adev->dev.of_node; 2719 u32 tsfr_size; 2720 int ret = 0; 2721 int i; 2722 2723 ret = amba_request_regions(adev, NULL); 2724 if (ret) 2725 return ret; 2726 2727 /* Ensure that we can do DMA */ 2728 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32)); 2729 if (ret) 2730 goto out_no_pl08x; 2731 2732 /* Create the driver state holder */ 2733 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); 2734 if (!pl08x) { 2735 ret = -ENOMEM; 2736 goto out_no_pl08x; 2737 } 2738 2739 /* Assign useful pointers to the driver state */ 2740 pl08x->adev = adev; 2741 pl08x->vd = vd; 2742 2743 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); 2744 if (!pl08x->base) { 2745 ret = -ENOMEM; 2746 goto out_no_ioremap; 2747 } 2748 2749 if (vd->ftdmac020) { 2750 u32 val; 2751 2752 val = readl(pl08x->base + FTDMAC020_REVISION); 2753 dev_info(&pl08x->adev->dev, "FTDMAC020 %d.%d rel %d\n", 2754 (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff); 2755 val = readl(pl08x->base + FTDMAC020_FEATURE); 2756 dev_info(&pl08x->adev->dev, "FTDMAC020 %d channels, " 2757 "%s built-in bridge, %s, %s linked lists\n", 2758 (val >> 12) & 0x0f, 2759 (val & BIT(10)) ? "no" : "has", 2760 (val & BIT(9)) ? "AHB0 and AHB1" : "AHB0", 2761 (val & BIT(8)) ? "supports" : "does not support"); 2762 2763 /* Vendor data from feature register */ 2764 if (!(val & BIT(8))) 2765 dev_warn(&pl08x->adev->dev, 2766 "linked lists not supported, required\n"); 2767 vd->channels = (val >> 12) & 0x0f; 2768 vd->dualmaster = !!(val & BIT(9)); 2769 } 2770 2771 /* Initialize memcpy engine */ 2772 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 2773 pl08x->memcpy.dev = &adev->dev; 2774 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; 2775 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; 2776 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2777 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 2778 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 2779 pl08x->memcpy.device_config = pl08x_config; 2780 pl08x->memcpy.device_pause = pl08x_pause; 2781 pl08x->memcpy.device_resume = pl08x_resume; 2782 pl08x->memcpy.device_terminate_all = pl08x_terminate_all; 2783 pl08x->memcpy.device_synchronize = pl08x_synchronize; 2784 pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS; 2785 pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS; 2786 pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM); 2787 pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 2788 if (vd->ftdmac020) 2789 pl08x->memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES; 2790 2791 2792 /* 2793 * Initialize slave engine, if the block has no signals, that means 2794 * we have no slave support. 2795 */ 2796 if (vd->signals) { 2797 pl08x->has_slave = true; 2798 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 2799 dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask); 2800 pl08x->slave.dev = &adev->dev; 2801 pl08x->slave.device_free_chan_resources = 2802 pl08x_free_chan_resources; 2803 pl08x->slave.device_prep_dma_interrupt = 2804 pl08x_prep_dma_interrupt; 2805 pl08x->slave.device_tx_status = pl08x_dma_tx_status; 2806 pl08x->slave.device_issue_pending = pl08x_issue_pending; 2807 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 2808 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; 2809 pl08x->slave.device_config = pl08x_config; 2810 pl08x->slave.device_pause = pl08x_pause; 2811 pl08x->slave.device_resume = pl08x_resume; 2812 pl08x->slave.device_terminate_all = pl08x_terminate_all; 2813 pl08x->slave.device_synchronize = pl08x_synchronize; 2814 pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; 2815 pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; 2816 pl08x->slave.directions = 2817 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 2818 pl08x->slave.residue_granularity = 2819 DMA_RESIDUE_GRANULARITY_SEGMENT; 2820 } 2821 2822 /* Get the platform data */ 2823 pl08x->pd = dev_get_platdata(&adev->dev); 2824 if (!pl08x->pd) { 2825 if (np) { 2826 ret = pl08x_of_probe(adev, pl08x, np); 2827 if (ret) 2828 goto out_no_platdata; 2829 } else { 2830 dev_err(&adev->dev, "no platform data supplied\n"); 2831 ret = -EINVAL; 2832 goto out_no_platdata; 2833 } 2834 } else { 2835 pl08x->slave.filter.map = pl08x->pd->slave_map; 2836 pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len; 2837 pl08x->slave.filter.fn = pl08x_filter_fn; 2838 } 2839 2840 /* By default, AHB1 only. If dualmaster, from platform */ 2841 pl08x->lli_buses = PL08X_AHB1; 2842 pl08x->mem_buses = PL08X_AHB1; 2843 if (pl08x->vd->dualmaster) { 2844 pl08x->lli_buses = pl08x->pd->lli_buses; 2845 pl08x->mem_buses = pl08x->pd->mem_buses; 2846 } 2847 2848 if (vd->pl080s) 2849 pl08x->lli_words = PL080S_LLI_WORDS; 2850 else 2851 pl08x->lli_words = PL080_LLI_WORDS; 2852 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32); 2853 2854 /* A DMA memory pool for LLIs, align on 1-byte boundary */ 2855 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 2856 tsfr_size, PL08X_ALIGN, 0); 2857 if (!pl08x->pool) { 2858 ret = -ENOMEM; 2859 goto out_no_lli_pool; 2860 } 2861 2862 /* Turn on the PL08x */ 2863 pl08x_ensure_on(pl08x); 2864 2865 /* Clear any pending interrupts */ 2866 if (vd->ftdmac020) 2867 /* This variant has error IRQs in bits 16-19 */ 2868 writel(0x0000FFFF, pl08x->base + PL080_ERR_CLEAR); 2869 else 2870 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 2871 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 2872 2873 /* Attach the interrupt handler */ 2874 ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); 2875 if (ret) { 2876 dev_err(&adev->dev, "%s failed to request interrupt %d\n", 2877 __func__, adev->irq[0]); 2878 goto out_no_irq; 2879 } 2880 2881 /* Initialize physical channels */ 2882 pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), 2883 GFP_KERNEL); 2884 if (!pl08x->phy_chans) { 2885 ret = -ENOMEM; 2886 goto out_no_phychans; 2887 } 2888 2889 for (i = 0; i < vd->channels; i++) { 2890 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; 2891 2892 ch->id = i; 2893 ch->base = pl08x->base + PL080_Cx_BASE(i); 2894 if (vd->ftdmac020) { 2895 /* FTDMA020 has a special channel busy register */ 2896 ch->reg_busy = ch->base + FTDMAC020_CH_BUSY; 2897 ch->reg_config = ch->base + FTDMAC020_CH_CFG; 2898 ch->reg_control = ch->base + FTDMAC020_CH_CSR; 2899 ch->reg_src = ch->base + FTDMAC020_CH_SRC_ADDR; 2900 ch->reg_dst = ch->base + FTDMAC020_CH_DST_ADDR; 2901 ch->reg_lli = ch->base + FTDMAC020_CH_LLP; 2902 ch->ftdmac020 = true; 2903 } else { 2904 ch->reg_config = ch->base + vd->config_offset; 2905 ch->reg_control = ch->base + PL080_CH_CONTROL; 2906 ch->reg_src = ch->base + PL080_CH_SRC_ADDR; 2907 ch->reg_dst = ch->base + PL080_CH_DST_ADDR; 2908 ch->reg_lli = ch->base + PL080_CH_LLI; 2909 } 2910 if (vd->pl080s) 2911 ch->pl080s = true; 2912 2913 spin_lock_init(&ch->lock); 2914 2915 /* 2916 * Nomadik variants can have channels that are locked 2917 * down for the secure world only. Lock up these channels 2918 * by perpetually serving a dummy virtual channel. 2919 */ 2920 if (vd->nomadik) { 2921 u32 val; 2922 2923 val = readl(ch->reg_config); 2924 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) { 2925 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i); 2926 ch->locked = true; 2927 } 2928 } 2929 2930 dev_dbg(&adev->dev, "physical channel %d is %s\n", 2931 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 2932 } 2933 2934 /* Register as many memcpy channels as there are physical channels */ 2935 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 2936 pl08x->vd->channels, false); 2937 if (ret <= 0) { 2938 dev_warn(&pl08x->adev->dev, 2939 "%s failed to enumerate memcpy channels - %d\n", 2940 __func__, ret); 2941 goto out_no_memcpy; 2942 } 2943 2944 /* Register slave channels */ 2945 if (pl08x->has_slave) { 2946 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2947 pl08x->pd->num_slave_channels, true); 2948 if (ret < 0) { 2949 dev_warn(&pl08x->adev->dev, 2950 "%s failed to enumerate slave channels - %d\n", 2951 __func__, ret); 2952 goto out_no_slave; 2953 } 2954 } 2955 2956 ret = dma_async_device_register(&pl08x->memcpy); 2957 if (ret) { 2958 dev_warn(&pl08x->adev->dev, 2959 "%s failed to register memcpy as an async device - %d\n", 2960 __func__, ret); 2961 goto out_no_memcpy_reg; 2962 } 2963 2964 if (pl08x->has_slave) { 2965 ret = dma_async_device_register(&pl08x->slave); 2966 if (ret) { 2967 dev_warn(&pl08x->adev->dev, 2968 "%s failed to register slave as an async device - %d\n", 2969 __func__, ret); 2970 goto out_no_slave_reg; 2971 } 2972 } 2973 2974 amba_set_drvdata(adev, pl08x); 2975 init_pl08x_debugfs(pl08x); 2976 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n", 2977 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev), 2978 (unsigned long long)adev->res.start, adev->irq[0]); 2979 2980 return 0; 2981 2982 out_no_slave_reg: 2983 dma_async_device_unregister(&pl08x->memcpy); 2984 out_no_memcpy_reg: 2985 if (pl08x->has_slave) 2986 pl08x_free_virtual_channels(&pl08x->slave); 2987 out_no_slave: 2988 pl08x_free_virtual_channels(&pl08x->memcpy); 2989 out_no_memcpy: 2990 kfree(pl08x->phy_chans); 2991 out_no_phychans: 2992 free_irq(adev->irq[0], pl08x); 2993 out_no_irq: 2994 dma_pool_destroy(pl08x->pool); 2995 out_no_lli_pool: 2996 out_no_platdata: 2997 iounmap(pl08x->base); 2998 out_no_ioremap: 2999 kfree(pl08x); 3000 out_no_pl08x: 3001 amba_release_regions(adev); 3002 return ret; 3003 } 3004 3005 /* PL080 has 8 channels and the PL080 have just 2 */ 3006 static struct vendor_data vendor_pl080 = { 3007 .config_offset = PL080_CH_CONFIG, 3008 .channels = 8, 3009 .signals = 16, 3010 .dualmaster = true, 3011 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 3012 }; 3013 3014 static struct vendor_data vendor_nomadik = { 3015 .config_offset = PL080_CH_CONFIG, 3016 .channels = 8, 3017 .signals = 32, 3018 .dualmaster = true, 3019 .nomadik = true, 3020 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 3021 }; 3022 3023 static struct vendor_data vendor_pl080s = { 3024 .config_offset = PL080S_CH_CONFIG, 3025 .channels = 8, 3026 .signals = 32, 3027 .pl080s = true, 3028 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, 3029 }; 3030 3031 static struct vendor_data vendor_pl081 = { 3032 .config_offset = PL080_CH_CONFIG, 3033 .channels = 2, 3034 .signals = 16, 3035 .dualmaster = false, 3036 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 3037 }; 3038 3039 static struct vendor_data vendor_ftdmac020 = { 3040 .config_offset = PL080_CH_CONFIG, 3041 .ftdmac020 = true, 3042 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 3043 }; 3044 3045 static const struct amba_id pl08x_ids[] = { 3046 /* Samsung PL080S variant */ 3047 { 3048 .id = 0x0a141080, 3049 .mask = 0xffffffff, 3050 .data = &vendor_pl080s, 3051 }, 3052 /* PL080 */ 3053 { 3054 .id = 0x00041080, 3055 .mask = 0x000fffff, 3056 .data = &vendor_pl080, 3057 }, 3058 /* PL081 */ 3059 { 3060 .id = 0x00041081, 3061 .mask = 0x000fffff, 3062 .data = &vendor_pl081, 3063 }, 3064 /* Nomadik 8815 PL080 variant */ 3065 { 3066 .id = 0x00280080, 3067 .mask = 0x00ffffff, 3068 .data = &vendor_nomadik, 3069 }, 3070 /* Faraday Technology FTDMAC020 */ 3071 { 3072 .id = 0x0003b080, 3073 .mask = 0x000fffff, 3074 .data = &vendor_ftdmac020, 3075 }, 3076 { 0, 0 }, 3077 }; 3078 3079 MODULE_DEVICE_TABLE(amba, pl08x_ids); 3080 3081 static struct amba_driver pl08x_amba_driver = { 3082 .drv.name = DRIVER_NAME, 3083 .id_table = pl08x_ids, 3084 .probe = pl08x_probe, 3085 }; 3086 3087 static int __init pl08x_init(void) 3088 { 3089 int retval; 3090 retval = amba_driver_register(&pl08x_amba_driver); 3091 if (retval) 3092 printk(KERN_WARNING DRIVER_NAME 3093 "failed to register as an AMBA device (%d)\n", 3094 retval); 3095 return retval; 3096 } 3097 subsys_initcall(pl08x_init); 3098