amba-pl08x.c: diff between 640414171818c6293c23e74a28d1c69b2a1a7fe5 and ec5b103ecfde929004b691f29183255aeeadecd5
1/*
2 * Copyright (c) 2006 ARM Ltd.
3 * Copyright (c) 2010 ST-Ericsson SA
4 *
5 * Author: Peter Pearse <peter.pearse@arm.com>
6 * Author: Linus Walleij <linus.walleij@stericsson.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it

--- 10 unchanged lines hidden (view full) ---

19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * The full GNU General Public License is in this distribution in the file
23 * called COPYING.
24 *
25 * Documentation: ARM DDI 0196G == PL080
26 * Documentation: ARM DDI 0218E == PL081
1/*
2 * Copyright (c) 2006 ARM Ltd.
3 * Copyright (c) 2010 ST-Ericsson SA
4 *
5 * Author: Peter Pearse <peter.pearse@arm.com>
6 * Author: Linus Walleij <linus.walleij@stericsson.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it

--- 10 unchanged lines hidden (view full) ---

19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * The full GNU General Public License is in this distribution in the file
23 * called COPYING.
24 *
25 * Documentation: ARM DDI 0196G == PL080
26 * Documentation: ARM DDI 0218E == PL081
27 * Documentation: S3C6410 User's Manual == PL080S
27 *
28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
29 * channel.
30 *
31 * The PL080 has 8 channels available for simultaneous use, and the PL081
32 * has only two channels. So on these DMA controllers the number of channels
33 * and the number of incoming DMA signals are two totally different things.
34 * It is usually not possible to theoretically handle all physical signals,
35 * so a multiplexing scheme with possible denial of use is necessary.
36 *
37 * The PL080 has a dual bus master, PL081 has a single master.
38 *
28 *
29 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
30 * channel.
31 *
32 * The PL080 has 8 channels available for simultaneous use, and the PL081
33 * has only two channels. So on these DMA controllers the number of channels
34 * and the number of incoming DMA signals are two totally different things.
35 * It is usually not possible to theoretically handle all physical signals,
36 * so a multiplexing scheme with possible denial of use is necessary.
37 *
38 * The PL080 has a dual bus master, PL081 has a single master.
39 *
40 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
41 * It differs in following aspects:
42 * - CH_CONFIG register at different offset,
43 * - separate CH_CONTROL2 register for transfer size,
44 * - bigger maximum transfer size,
45 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
46 * - no support for peripheral flow control.
47 *
39 * Memory to peripheral transfer may be visualized as
40 * Get data from memory to DMAC
41 * Until no data left
42 * On burst request from peripheral
43 * Destination burst from DMAC to peripheral
44 * Clear burst request
45 * Raise terminal count interrupt
46 *

--- 12 unchanged lines hidden (view full) ---

59 * end of every LLI entry. Observed behaviour shows the DMAC listening
60 * to both the BREQ and SREQ signals (contrary to the documentation),
61 * transferring data if either is active. The LBREQ and LSREQ signals
62 * are ignored.
63 *
64 * - Peripheral flow control: the transfer size is ignored (and should be
65 * zero). The data is transferred from the current LLI entry, until
66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC
48 * Memory to peripheral transfer may be visualized as
49 * Get data from memory to DMAC
50 * Until no data left
51 * On burst request from peripheral
52 * Destination burst from DMAC to peripheral
53 * Clear burst request
54 * Raise terminal count interrupt
55 *

--- 12 unchanged lines hidden (view full) ---

68 * end of every LLI entry. Observed behaviour shows the DMAC listening
69 * to both the BREQ and SREQ signals (contrary to the documentation),
70 * transferring data if either is active. The LBREQ and LSREQ signals
71 * are ignored.
72 *
73 * - Peripheral flow control: the transfer size is ignored (and should be
74 * zero). The data is transferred from the current LLI entry, until
75 * after the final transfer signalled by LBREQ or LSREQ. The DMAC
67 * will then move to the next LLI entry.
68 *
69 * Global TODO:
70 * - Break out common code from arch/arm/mach-s3c64xx and share
76 * will then move to the next LLI entry. Unsupported by PL080S.
71 */
72#include <linux/amba/bus.h>
73#include <linux/amba/pl08x.h>
74#include <linux/debugfs.h>
75#include <linux/delay.h>
76#include <linux/device.h>
77#include <linux/dmaengine.h>
78#include <linux/dmapool.h>

--- 16 unchanged lines hidden (view full) ---

95
96/**
97 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
98 * @channels: the number of channels available in this variant
99 * @dualmaster: whether this version supports dual AHB masters or not.
100 * @nomadik: whether the channels have Nomadik security extension bits
101 * that need to be checked for permission before use and some registers are
102 * missing
77 */
78#include <linux/amba/bus.h>
79#include <linux/amba/pl08x.h>
80#include <linux/debugfs.h>
81#include <linux/delay.h>
82#include <linux/device.h>
83#include <linux/dmaengine.h>
84#include <linux/dmapool.h>

--- 16 unchanged lines hidden (view full) ---

101
102/**
103 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
104 * @channels: the number of channels available in this variant
105 * @dualmaster: whether this version supports dual AHB masters or not.
106 * @nomadik: whether the channels have Nomadik security extension bits
107 * that need to be checked for permission before use and some registers are
108 * missing
109 * @pl080s: whether this version is a PL080S, which has separate register and
110 * LLI word for transfer size.
103 */
104struct vendor_data {
111 */
112struct vendor_data {
113 u8 config_offset;
105 u8 channels;
106 bool dualmaster;
107 bool nomadik;
114 u8 channels;
115 bool dualmaster;
116 bool nomadik;
117 bool pl080s;
118 u32 max_transfer_size;
108};
109
119};
120
110/*
111 * PL08X private data structures
112 * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
113 * start & end do not - their bus bit info is in cctl. Also note that these
114 * are fixed 32-bit quantities.
115 */
116struct pl08x_lli {
117 u32 src;
118 u32 dst;
119 u32 lli;
120 u32 cctl;
121};
122
123/**
124 * struct pl08x_bus_data - information of source or destination
125 * busses for a transfer
126 * @addr: current address
127 * @maxwidth: the maximum width of a transfer on this bus
128 * @buswidth: the width of this bus in bytes: 1, 2 or 4
129 */
130struct pl08x_bus_data {

--- 11 unchanged lines hidden (view full) ---

142 * @serving: the virtual channel currently being served by this physical
143 * channel
144 * @locked: channel unavailable for the system, e.g. dedicated to secure
145 * world
146 */
147struct pl08x_phy_chan {
148 unsigned int id;
149 void __iomem *base;
121/**
122 * struct pl08x_bus_data - information of source or destination
123 * busses for a transfer
124 * @addr: current address
125 * @maxwidth: the maximum width of a transfer on this bus
126 * @buswidth: the width of this bus in bytes: 1, 2 or 4
127 */
128struct pl08x_bus_data {

--- 11 unchanged lines hidden (view full) ---

140 * @serving: the virtual channel currently being served by this physical
141 * channel
142 * @locked: channel unavailable for the system, e.g. dedicated to secure
143 * world
144 */
145struct pl08x_phy_chan {
146 unsigned int id;
147 void __iomem *base;
148 void __iomem *reg_config;
150 spinlock_t lock;
151 struct pl08x_dma_chan *serving;
152 bool locked;
153};
154
155/**
156 * struct pl08x_sg - structure containing data per sg
157 * @src_addr: src address of sg

--- 13 unchanged lines hidden (view full) ---

171 * @vd: virtual DMA descriptor
172 * @dsg_list: list of children sg's
173 * @llis_bus: DMA memory address (physical) start for the LLIs
174 * @llis_va: virtual memory address start for the LLIs
175 * @cctl: control reg values for current txd
176 * @ccfg: config reg values for current txd
177 * @done: this marks completed descriptors, which should not have their
178 * mux released.
149 spinlock_t lock;
150 struct pl08x_dma_chan *serving;
151 bool locked;
152};
153
154/**
155 * struct pl08x_sg - structure containing data per sg
156 * @src_addr: src address of sg

--- 13 unchanged lines hidden (view full) ---

170 * @vd: virtual DMA descriptor
171 * @dsg_list: list of children sg's
172 * @llis_bus: DMA memory address (physical) start for the LLIs
173 * @llis_va: virtual memory address start for the LLIs
174 * @cctl: control reg values for current txd
175 * @ccfg: config reg values for current txd
176 * @done: this marks completed descriptors, which should not have their
177 * mux released.
178 * @cyclic: indicate cyclic transfers
179 */
180struct pl08x_txd {
181 struct virt_dma_desc vd;
182 struct list_head dsg_list;
183 dma_addr_t llis_bus;
179 */
180struct pl08x_txd {
181 struct virt_dma_desc vd;
182 struct list_head dsg_list;
183 dma_addr_t llis_bus;
184 struct pl08x_lli *llis_va;
184 u32 *llis_va;
185 /* Default cctl value for LLIs */
186 u32 cctl;
187 /*
188 * Settings to be put into the physical channel when we
189 * trigger this txd. Other registers are in llis_va[0].
190 */
191 u32 ccfg;
192 bool done;
185 /* Default cctl value for LLIs */
186 u32 cctl;
187 /*
188 * Settings to be put into the physical channel when we
189 * trigger this txd. Other registers are in llis_va[0].
190 */
191 u32 ccfg;
192 bool done;
193 bool cyclic;
193};
194
195/**
196 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
197 * states
198 * @PL08X_CHAN_IDLE: the channel is idle
199 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
200 * channel and is running a transfer on it

--- 59 unchanged lines hidden (view full) ---

260 void __iomem *base;
261 struct amba_device *adev;
262 const struct vendor_data *vd;
263 struct pl08x_platform_data *pd;
264 struct pl08x_phy_chan *phy_chans;
265 struct dma_pool *pool;
266 u8 lli_buses;
267 u8 mem_buses;
194};
195
196/**
197 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
198 * states
199 * @PL08X_CHAN_IDLE: the channel is idle
200 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
201 * channel and is running a transfer on it

--- 59 unchanged lines hidden (view full) ---

261 void __iomem *base;
262 struct amba_device *adev;
263 const struct vendor_data *vd;
264 struct pl08x_platform_data *pd;
265 struct pl08x_phy_chan *phy_chans;
266 struct dma_pool *pool;
267 u8 lli_buses;
268 u8 mem_buses;
269 u8 lli_words;
268};
269
270/*
271 * PL08X specific defines
272 */
273
270};
271
272/*
273 * PL08X specific defines
274 */
275
274/* Size (bytes) of each LLI buffer allocated for one transfer */
275# define PL08X_LLI_TSFR_SIZE 0x2000
276/* The order of words in an LLI. */
277#define PL080_LLI_SRC 0
278#define PL080_LLI_DST 1
279#define PL080_LLI_LLI 2
280#define PL080_LLI_CCTL 3
281#define PL080S_LLI_CCTL2 4
276
282
277/* Maximum times we call dma_pool_alloc on this pool without freeing */
278#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
283/* Total words in an LLI. */
284#define PL080_LLI_WORDS 4
285#define PL080S_LLI_WORDS 8
286
287/*
288 * Number of LLIs in each LLI buffer allocated for one transfer
289 * (maximum times we call dma_pool_alloc on this pool without freeing)
290 */
291#define MAX_NUM_TSFR_LLIS 512
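/*
 * A minimal sketch (hypothetical helper, not taken from either revision)
 * of how these defines combine: LLIs are kept as a flat array of u32
 * words, entry i starts at word i * lli_words, and its PL080_LLI_LLI
 * word holds the bus address of entry i + 1, the same arithmetic that
 * pl08x_fill_lli_for_desc() uses further down.  The pool entry size is
 * then MAX_NUM_TSFR_LLIS * lli_words * sizeof(u32): 512 * 4 * 4 = 0x2000
 * bytes for PL080/PL081, matching the old PL08X_LLI_TSFR_SIZE, and
 * 512 * 8 * 4 = 0x4000 bytes for PL080S.
 */
static inline u32 example_lli_link_word(dma_addr_t llis_bus, int i,
					u32 lli_words)
{
	/* Word offset of entry i + 1 within this transfer's LLI buffer */
	u32 next_offset = (i + 1) * lli_words;

	/* Value for the PL080_LLI_LLI word of entry i (LM/bus bit omitted) */
	return (u32)(llis_bus + sizeof(u32) * next_offset);
}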
279#define PL08X_ALIGN 8
280
281static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
282{
283 return container_of(chan, struct pl08x_dma_chan, vc.chan);
284}
285
286static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)

--- 44 unchanged lines hidden (view full) ---

331 * Physical channel handling
332 */
333
334/* Whether a certain channel is busy or not */
335static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
336{
337 unsigned int val;
338
292#define PL08X_ALIGN 8
293
294static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
295{
296 return container_of(chan, struct pl08x_dma_chan, vc.chan);
297}
298
299static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)

--- 44 unchanged lines hidden (view full) ---

344 * Physical channel handling
345 */
346
347/* Whether a certain channel is busy or not */
348static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
349{
350 unsigned int val;
351
339 val = readl(ch->base + PL080_CH_CONFIG);
352 val = readl(ch->reg_config);
340 return val & PL080_CONFIG_ACTIVE;
341}
342
353 return val & PL080_CONFIG_ACTIVE;
354}
355
356static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
357 struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
358{
359 if (pl08x->vd->pl080s)
360 dev_vdbg(&pl08x->adev->dev,
361 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
362 "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
363 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
364 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
365 lli[PL080S_LLI_CCTL2], ccfg);
366 else
367 dev_vdbg(&pl08x->adev->dev,
368 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
369 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
370 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
371 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
372
373 writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
374 writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
375 writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
376 writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
377
378 if (pl08x->vd->pl080s)
379 writel_relaxed(lli[PL080S_LLI_CCTL2],
380 phychan->base + PL080S_CH_CONTROL2);
381
382 writel(ccfg, phychan->reg_config);
383}
384
343/*
344 * Set the initial DMA register values i.e. those for the first LLI
345 * The next LLI pointer and the configuration interrupt bit have
346 * been set when the LLIs were constructed. Poke them into the hardware
347 * and start the transfer.
348 */
349static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
350{
351 struct pl08x_driver_data *pl08x = plchan->host;
352 struct pl08x_phy_chan *phychan = plchan->phychan;
353 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
354 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
385/*
386 * Set the initial DMA register values i.e. those for the first LLI
387 * The next LLI pointer and the configuration interrupt bit have
388 * been set when the LLIs were constructed. Poke them into the hardware
389 * and start the transfer.
390 */
391static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
392{
393 struct pl08x_driver_data *pl08x = plchan->host;
394 struct pl08x_phy_chan *phychan = plchan->phychan;
395 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
396 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
355 struct pl08x_lli *lli;
356 u32 val;
357
358 list_del(&txd->vd.node);
359
360 plchan->at = txd;
361
362 /* Wait for channel inactive */
363 while (pl08x_phy_channel_busy(phychan))
364 cpu_relax();
365
397 u32 val;
398
399 list_del(&txd->vd.node);
400
401 plchan->at = txd;
402
403 /* Wait for channel inactive */
404 while (pl08x_phy_channel_busy(phychan))
405 cpu_relax();
406
366 lli = &txd->llis_va[0];
407 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
367
408
368 dev_vdbg(&pl08x->adev->dev,
369 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
370 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
371 phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
372 txd->ccfg);
373
374 writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
375 writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
376 writel(lli->lli, phychan->base + PL080_CH_LLI);
377 writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
378 writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
379
380 /* Enable the DMA channel */
381 /* Do not access config register until channel shows as disabled */
382 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
383 cpu_relax();
384
385 /* Do not access config register until channel shows as inactive */
409 /* Enable the DMA channel */
410 /* Do not access config register until channel shows as disabled */
411 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
412 cpu_relax();
413
414 /* Do not access config register until channel shows as inactive */
386 val = readl(phychan->base + PL080_CH_CONFIG);
415 val = readl(phychan->reg_config);
387 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
416 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
388 val = readl(phychan->base + PL080_CH_CONFIG);
417 val = readl(phychan->reg_config);
389
418
390 writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
419 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
391}
392
393/*
394 * Pause the channel by setting the HALT bit.
395 *
396 * For M->P transfers, pause the DMAC first and then stop the peripheral -
397 * the FIFO can only drain if the peripheral is still requesting data.
398 * (note: this can still timeout if the DMAC FIFO never drains of data.)
399 *
400 * For P->M transfers, disable the peripheral first to stop it filling
401 * the DMAC FIFO, and then pause the DMAC.
402 */
403static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
404{
405 u32 val;
406 int timeout;
407
408 /* Set the HALT bit and wait for the FIFO to drain */
420}
421
422/*
423 * Pause the channel by setting the HALT bit.
424 *
425 * For M->P transfers, pause the DMAC first and then stop the peripheral -
426 * the FIFO can only drain if the peripheral is still requesting data.
427 * (note: this can still timeout if the DMAC FIFO never drains of data.)
428 *
429 * For P->M transfers, disable the peripheral first to stop it filling
430 * the DMAC FIFO, and then pause the DMAC.
431 */
432static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
433{
434 u32 val;
435 int timeout;
436
437 /* Set the HALT bit and wait for the FIFO to drain */
409 val = readl(ch->base + PL080_CH_CONFIG);
438 val = readl(ch->reg_config);
410 val |= PL080_CONFIG_HALT;
439 val |= PL080_CONFIG_HALT;
411 writel(val, ch->base + PL080_CH_CONFIG);
440 writel(val, ch->reg_config);
412
413 /* Wait for channel inactive */
414 for (timeout = 1000; timeout; timeout--) {
415 if (!pl08x_phy_channel_busy(ch))
416 break;
417 udelay(1);
418 }
419 if (pl08x_phy_channel_busy(ch))
420 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
421}
422
423static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
424{
425 u32 val;
426
427 /* Clear the HALT bit */
441
442 /* Wait for channel inactive */
443 for (timeout = 1000; timeout; timeout--) {
444 if (!pl08x_phy_channel_busy(ch))
445 break;
446 udelay(1);
447 }
448 if (pl08x_phy_channel_busy(ch))
449 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
450}
451
452static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
453{
454 u32 val;
455
456 /* Clear the HALT bit */
428 val = readl(ch->base + PL080_CH_CONFIG);
457 val = readl(ch->reg_config);
429 val &= ~PL080_CONFIG_HALT;
458 val &= ~PL080_CONFIG_HALT;
430 writel(val, ch->base + PL080_CH_CONFIG);
459 writel(val, ch->reg_config);
431}
432
433/*
434 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
435 * clears any pending interrupt status. This should not be used for
436 * an on-going transfer, but as a method of shutting down a channel
437 * (eg, when it's no longer used) or terminating a transfer.
438 */
439static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
440 struct pl08x_phy_chan *ch)
441{
460}
461
462/*
463 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
464 * clears any pending interrupt status. This should not be used for
465 * an on-going transfer, but as a method of shutting down a channel
466 * (eg, when it's no longer used) or terminating a transfer.
467 */
468static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
469 struct pl08x_phy_chan *ch)
470{
442 u32 val = readl(ch->base + PL080_CH_CONFIG);
471 u32 val = readl(ch->reg_config);
443
444 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
445 PL080_CONFIG_TC_IRQ_MASK);
446
472
473 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
474 PL080_CONFIG_TC_IRQ_MASK);
475
447 writel(val, ch->base + PL080_CH_CONFIG);
476 writel(val, ch->reg_config);
448
449 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
450 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
451}
452
453static inline u32 get_bytes_in_cctl(u32 cctl)
454{
455 /* The source width defines the number of bytes */
456 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
457
477
478 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
479 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
480}
481
482static inline u32 get_bytes_in_cctl(u32 cctl)
483{
484 /* The source width defines the number of bytes */
485 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
486
487 cctl &= PL080_CONTROL_SWIDTH_MASK;
488
458 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
459 case PL080_WIDTH_8BIT:
460 break;
461 case PL080_WIDTH_16BIT:
462 bytes *= 2;
463 break;
464 case PL080_WIDTH_32BIT:
465 bytes *= 4;
466 break;
467 }
468 return bytes;
469}
470
489 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
490 case PL080_WIDTH_8BIT:
491 break;
492 case PL080_WIDTH_16BIT:
493 bytes *= 2;
494 break;
495 case PL080_WIDTH_32BIT:
496 bytes *= 4;
497 break;
498 }
499 return bytes;
500}
501
502static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
503{
504 /* The source width defines the number of bytes */
505 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
506
507 cctl &= PL080_CONTROL_SWIDTH_MASK;
508
509 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
510 case PL080_WIDTH_8BIT:
511 break;
512 case PL080_WIDTH_16BIT:
513 bytes *= 2;
514 break;
515 case PL080_WIDTH_32BIT:
516 bytes *= 4;
517 break;
518 }
519 return bytes;
520}
521
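/*
 * Worked example (numbers purely illustrative): with a 32-bit source
 * width in CCTL and a remaining transfer size of 0x100 in CCTL2, the
 * PL080S variant above reports 0x100 * 4 = 1024 bytes outstanding; on a
 * plain PL080 the same count comes from the 12-bit transfer size field
 * in CCTL itself.
 */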
471/* The channel should be paused when calling this */
472static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
473{
522/* The channel should be paused when calling this */
523static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
524{
525 struct pl08x_driver_data *pl08x = plchan->host;
526 const u32 *llis_va, *llis_va_limit;
474 struct pl08x_phy_chan *ch;
527 struct pl08x_phy_chan *ch;
528 dma_addr_t llis_bus;
475 struct pl08x_txd *txd;
529 struct pl08x_txd *txd;
476 size_t bytes = 0;
530 u32 llis_max_words;
531 size_t bytes;
532 u32 clli;
477
478 ch = plchan->phychan;
479 txd = plchan->at;
480
533
534 ch = plchan->phychan;
535 txd = plchan->at;
536
537 if (!ch || !txd)
538 return 0;
539
481 /*
482 * Follow the LLIs to get the number of remaining
483 * bytes in the currently active transaction.
484 */
540 /*
541 * Follow the LLIs to get the number of remaining
542 * bytes in the currently active transaction.
543 */
485 if (ch && txd) {
486 u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
544 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
487
545
488 /* First get the remaining bytes in the active transfer */
546 /* First get the remaining bytes in the active transfer */
547 if (pl08x->vd->pl080s)
548 bytes = get_bytes_in_cctl_pl080s(
549 readl(ch->base + PL080_CH_CONTROL),
550 readl(ch->base + PL080S_CH_CONTROL2));
551 else
489 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
490
552 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
553
491 if (clli) {
492 struct pl08x_lli *llis_va = txd->llis_va;
493 dma_addr_t llis_bus = txd->llis_bus;
494 int index;
554 if (!clli)
555 return bytes;
495
556
496 BUG_ON(clli < llis_bus || clli >= llis_bus +
497 sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
557 llis_va = txd->llis_va;
558 llis_bus = txd->llis_bus;
498
559
499 /*
500 * Locate the next LLI - as this is an array,
501 * it's simple maths to find.
502 */
503 index = (clli - llis_bus) / sizeof(struct pl08x_lli);
560 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
561 BUG_ON(clli < llis_bus || clli >= llis_bus +
562 sizeof(u32) * llis_max_words);
504
563
505 for (; index < MAX_NUM_TSFR_LLIS; index++) {
506 bytes += get_bytes_in_cctl(llis_va[index].cctl);
564 /*
565 * Locate the next LLI - as this is an array,
566 * it's simple maths to find.
567 */
568 llis_va += (clli - llis_bus) / sizeof(u32);
507
569
508 /*
509 * A LLI pointer of 0 terminates the LLI list
510 */
511 if (!llis_va[index].lli)
512 break;
513 }
514 }
570 llis_va_limit = llis_va + llis_max_words;
571
572 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
573 if (pl08x->vd->pl080s)
574 bytes += get_bytes_in_cctl_pl080s(
575 llis_va[PL080_LLI_CCTL],
576 llis_va[PL080S_LLI_CCTL2]);
577 else
578 bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
579
580 /*
581 * A LLI pointer going backward terminates the LLI list
582 */
583 if (llis_va[PL080_LLI_LLI] <= clli)
584 break;
515 }
516
517 return bytes;
518}
519
520/*
521 * Allocate a physical channel for a virtual channel
522 *

--- 194 unchanged lines hidden (view full) ---

717 case 4:
718 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
719 break;
720 default:
721 BUG();
722 break;
723 }
724
585 }
586
587 return bytes;
588}
589
590/*
591 * Allocate a physical channel for a virtual channel
592 *

--- 194 unchanged lines hidden (view full) ---

787 case 4:
788 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
789 break;
790 default:
791 BUG();
792 break;
793 }
794
795 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
725 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
726 return retbits;
727}
728
729struct pl08x_lli_build_data {
730 struct pl08x_txd *txd;
731 struct pl08x_bus_data srcbus;
732 struct pl08x_bus_data dstbus;

--- 28 unchanged lines hidden (view full) ---

761 *sbus = &bd->dstbus;
762 }
763 }
764}
765
766/*
767 * Fills in one LLI for a certain transfer descriptor and advance the counter
768 */
796 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
797 return retbits;
798}
799
800struct pl08x_lli_build_data {
801 struct pl08x_txd *txd;
802 struct pl08x_bus_data srcbus;
803 struct pl08x_bus_data dstbus;

--- 28 unchanged lines hidden (view full) ---

832 *sbus = &bd->dstbus;
833 }
834 }
835}
836
837/*
838 * Fills in one LLI for a certain transfer descriptor and advance the counter
839 */
769static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
770 int num_llis, int len, u32 cctl)
840static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
841 struct pl08x_lli_build_data *bd,
842 int num_llis, int len, u32 cctl, u32 cctl2)
771{
843{
772 struct pl08x_lli *llis_va = bd->txd->llis_va;
844 u32 offset = num_llis * pl08x->lli_words;
845 u32 *llis_va = bd->txd->llis_va + offset;
773 dma_addr_t llis_bus = bd->txd->llis_bus;
774
775 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
776
846 dma_addr_t llis_bus = bd->txd->llis_bus;
847
848 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
849
777 llis_va[num_llis].cctl = cctl;
778 llis_va[num_llis].src = bd->srcbus.addr;
779 llis_va[num_llis].dst = bd->dstbus.addr;
780 llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
781 sizeof(struct pl08x_lli);
782 llis_va[num_llis].lli |= bd->lli_bus;
850 /* Advance the offset to next LLI. */
851 offset += pl08x->lli_words;
783
852
853 llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
854 llis_va[PL080_LLI_DST] = bd->dstbus.addr;
855 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
856 llis_va[PL080_LLI_LLI] |= bd->lli_bus;
857 llis_va[PL080_LLI_CCTL] = cctl;
858 if (pl08x->vd->pl080s)
859 llis_va[PL080S_LLI_CCTL2] = cctl2;
860
784 if (cctl & PL080_CONTROL_SRC_INCR)
785 bd->srcbus.addr += len;
786 if (cctl & PL080_CONTROL_DST_INCR)
787 bd->dstbus.addr += len;
788
789 BUG_ON(bd->remainder < len);
790
791 bd->remainder -= len;
792}
793
861 if (cctl & PL080_CONTROL_SRC_INCR)
862 bd->srcbus.addr += len;
863 if (cctl & PL080_CONTROL_DST_INCR)
864 bd->dstbus.addr += len;
865
866 BUG_ON(bd->remainder < len);
867
868 bd->remainder -= len;
869}
870
794static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
795 u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
871static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
872 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
873 int num_llis, size_t *total_bytes)
796{
797 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
874{
875 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
798 pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
876 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
799 (*total_bytes) += len;
800}
801
877 (*total_bytes) += len;
878}
879
880#ifdef VERBOSE_DEBUG
881static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
882 const u32 *llis_va, int num_llis)
883{
884 int i;
885
886 if (pl08x->vd->pl080s) {
887 dev_vdbg(&pl08x->adev->dev,
888 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
889 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
890 for (i = 0; i < num_llis; i++) {
891 dev_vdbg(&pl08x->adev->dev,
892 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
893 i, llis_va, llis_va[PL080_LLI_SRC],
894 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
895 llis_va[PL080_LLI_CCTL],
896 llis_va[PL080S_LLI_CCTL2]);
897 llis_va += pl08x->lli_words;
898 }
899 } else {
900 dev_vdbg(&pl08x->adev->dev,
901 "%-3s %-9s %-10s %-10s %-10s %s\n",
902 "lli", "", "csrc", "cdst", "clli", "cctl");
903 for (i = 0; i < num_llis; i++) {
904 dev_vdbg(&pl08x->adev->dev,
905 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
906 i, llis_va, llis_va[PL080_LLI_SRC],
907 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
908 llis_va[PL080_LLI_CCTL]);
909 llis_va += pl08x->lli_words;
910 }
911 }
912}
913#else
914static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
915 const u32 *llis_va, int num_llis) {}
916#endif
917
802/*
803 * This fills in the table of LLIs for the transfer descriptor
804 * Note that we assume we never have to change the burst sizes
805 * Return 0 for error
806 */
807static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
808 struct pl08x_txd *txd)
809{
810 struct pl08x_bus_data *mbus, *sbus;
811 struct pl08x_lli_build_data bd;
812 int num_llis = 0;
813 u32 cctl, early_bytes = 0;
814 size_t max_bytes_per_lli, total_bytes;
918/*
919 * This fills in the table of LLIs for the transfer descriptor
920 * Note that we assume we never have to change the burst sizes
921 * Return 0 for error
922 */
923static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
924 struct pl08x_txd *txd)
925{
926 struct pl08x_bus_data *mbus, *sbus;
927 struct pl08x_lli_build_data bd;
928 int num_llis = 0;
929 u32 cctl, early_bytes = 0;
930 size_t max_bytes_per_lli, total_bytes;
815 struct pl08x_lli *llis_va;
931 u32 *llis_va, *last_lli;
816 struct pl08x_sg *dsg;
817
818 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
819 if (!txd->llis_va) {
820 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
821 return 0;
822 }
823

--- 73 unchanged lines hidden (view full) ---

897 "%s src & dst address must be aligned to src"
898 " & dst width if peripheral is flow controller",
899 __func__);
900 return 0;
901 }
902
903 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
904 bd.dstbus.buswidth, 0);
932 struct pl08x_sg *dsg;
933
934 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
935 if (!txd->llis_va) {
936 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
937 return 0;
938 }
939

--- 73 unchanged lines hidden (view full) ---

1013 "%s src & dst address must be aligned to src"
1014 " & dst width if peripheral is flow controller",
1015 __func__);
1016 return 0;
1017 }
1018
1019 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
1020 bd.dstbus.buswidth, 0);
905 pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
1021 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
1022 0, cctl, 0);
906 break;
907 }
908
909 /*
910 * Send byte by byte for following cases
911 * - Less than a bus width available
912 * - until master bus is aligned
913 */

--- 5 unchanged lines hidden (view full) ---

919 if ((bd.remainder - early_bytes) < mbus->buswidth)
920 early_bytes = bd.remainder;
921 }
922
923 if (early_bytes) {
924 dev_vdbg(&pl08x->adev->dev,
925 "%s byte width LLIs (remain 0x%08x)\n",
926 __func__, bd.remainder);
1023 break;
1024 }
1025
1026 /*
1027 * Send byte by byte for following cases
1028 * - Less than a bus width available
1029 * - until master bus is aligned
1030 */

--- 5 unchanged lines hidden (view full) ---

1036 if ((bd.remainder - early_bytes) < mbus->buswidth)
1037 early_bytes = bd.remainder;
1038 }
1039
1040 if (early_bytes) {
1041 dev_vdbg(&pl08x->adev->dev,
1042 "%s byte width LLIs (remain 0x%08x)\n",
1043 __func__, bd.remainder);
927 prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
928 &total_bytes);
1044 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
1045 num_llis++, &total_bytes);
929 }
930
931 if (bd.remainder) {
932 /*
933 * Master now aligned
934 * - if slave is not then we must set its width down
935 */
936 if (!IS_BUS_ALIGNED(sbus)) {

--- 4 unchanged lines hidden (view full) ---

941 sbus->buswidth = 1;
942 }
943
944 /*
945 * Bytes transferred = tsize * src width, not
946 * MIN(buswidths)
947 */
948 max_bytes_per_lli = bd.srcbus.buswidth *
1046 }
1047
1048 if (bd.remainder) {
1049 /*
1050 * Master now aligned
1051 * - if slave is not then we must set its width down
1052 */
1053 if (!IS_BUS_ALIGNED(sbus)) {

--- 4 unchanged lines hidden (view full) ---

1058 sbus->buswidth = 1;
1059 }
1060
1061 /*
1062 * Bytes transferred = tsize * src width, not
1063 * MIN(buswidths)
1064 */
1065 max_bytes_per_lli = bd.srcbus.buswidth *
949 PL080_CONTROL_TRANSFER_SIZE_MASK;
1066 pl08x->vd->max_transfer_size;
950 dev_vdbg(&pl08x->adev->dev,
951 "%s max bytes per lli = %zu\n",
952 __func__, max_bytes_per_lli);
953
954 /*
955 * Make largest possible LLIs until less than one bus
956 * width left
957 */

--- 18 unchanged lines hidden (view full) ---

976
977 dev_vdbg(&pl08x->adev->dev,
978 "%s fill lli with single lli chunk of "
979 "size 0x%08zx (remainder 0x%08zx)\n",
980 __func__, lli_len, bd.remainder);
981
982 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
983 bd.dstbus.buswidth, tsize);
1067 dev_vdbg(&pl08x->adev->dev,
1068 "%s max bytes per lli = %zu\n",
1069 __func__, max_bytes_per_lli);
1070
1071 /*
1072 * Make largest possible LLIs until less than one bus
1073 * width left
1074 */

--- 18 unchanged lines hidden (view full) ---

1093
1094 dev_vdbg(&pl08x->adev->dev,
1095 "%s fill lli with single lli chunk of "
1096 "size 0x%08zx (remainder 0x%08zx)\n",
1097 __func__, lli_len, bd.remainder);
1098
1099 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
1100 bd.dstbus.buswidth, tsize);
984 pl08x_fill_lli_for_desc(&bd, num_llis++,
985 lli_len, cctl);
1101 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
1102 lli_len, cctl, tsize);
986 total_bytes += lli_len;
987 }
988
989 /*
990 * Send any odd bytes
991 */
992 if (bd.remainder) {
993 dev_vdbg(&pl08x->adev->dev,
994 "%s align with boundary, send odd bytes (remain %zu)\n",
995 __func__, bd.remainder);
1103 total_bytes += lli_len;
1104 }
1105
1106 /*
1107 * Send any odd bytes
1108 */
1109 if (bd.remainder) {
1110 dev_vdbg(&pl08x->adev->dev,
1111 "%s align with boundary, send odd bytes (remain %zu)\n",
1112 __func__, bd.remainder);
996 prep_byte_width_lli(&bd, &cctl, bd.remainder,
997 num_llis++, &total_bytes);
1113 prep_byte_width_lli(pl08x, &bd, &cctl,
1114 bd.remainder, num_llis++, &total_bytes);
998 }
999 }
1000
1001 if (total_bytes != dsg->len) {
1002 dev_err(&pl08x->adev->dev,
1003 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
1004 __func__, total_bytes, dsg->len);
1005 return 0;
1006 }
1007
1008 if (num_llis >= MAX_NUM_TSFR_LLIS) {
1009 dev_err(&pl08x->adev->dev,
1010 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
1115 }
1116 }
1117
1118 if (total_bytes != dsg->len) {
1119 dev_err(&pl08x->adev->dev,
1120 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
1121 __func__, total_bytes, dsg->len);
1122 return 0;
1123 }
1124
1125 if (num_llis >= MAX_NUM_TSFR_LLIS) {
1126 dev_err(&pl08x->adev->dev,
1127 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
1011 __func__, (u32) MAX_NUM_TSFR_LLIS);
1128 __func__, MAX_NUM_TSFR_LLIS);
1012 return 0;
1013 }
1014 }
1015
1016 llis_va = txd->llis_va;
1129 return 0;
1130 }
1131 }
1132
1133 llis_va = txd->llis_va;
1017 /* The final LLI terminates the LLI list. */
1018 llis_va[num_llis - 1].lli = 0;
1019 /* The final LLI element shall also fire an interrupt. */
1020 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
1134 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
1021
1135
1022#ifdef VERBOSE_DEBUG
1023 {
1024 int i;
1025
1026 dev_vdbg(&pl08x->adev->dev,
1027 "%-3s %-9s %-10s %-10s %-10s %s\n",
1028 "lli", "", "csrc", "cdst", "clli", "cctl");
1029 for (i = 0; i < num_llis; i++) {
1030 dev_vdbg(&pl08x->adev->dev,
1031 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1032 i, &llis_va[i], llis_va[i].src,
1033 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
1034 );
1035 }
1136 if (txd->cyclic) {
1137 /* Link back to the first LLI. */
1138 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
1139 } else {
1140 /* The final LLI terminates the LLI list. */
1141 last_lli[PL080_LLI_LLI] = 0;
1142 /* The final LLI element shall also fire an interrupt. */
1143 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
1036 }
1144 }
1037#endif
1038
1145
1146 pl08x_dump_lli(pl08x, llis_va, num_llis);
1147
1039 return num_llis;
1040}
1041
1042static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
1043 struct pl08x_txd *txd)
1044{
1045 struct pl08x_sg *dsg, *_dsg;
1046

--- 258 unchanged lines hidden (view full) ---

1305
1306 return pl08x_cctl(cctl);
1307}
1308
1309static int dma_set_runtime_config(struct dma_chan *chan,
1310 struct dma_slave_config *config)
1311{
1312 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1148 return num_llis;
1149}
1150
1151static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
1152 struct pl08x_txd *txd)
1153{
1154 struct pl08x_sg *dsg, *_dsg;
1155

--- 258 unchanged lines hidden (view full) ---

1414
1415 return pl08x_cctl(cctl);
1416}
1417
1418static int dma_set_runtime_config(struct dma_chan *chan,
1419 struct dma_slave_config *config)
1420{
1421 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1422 struct pl08x_driver_data *pl08x = plchan->host;
1313
1314 if (!plchan->slave)
1315 return -EINVAL;
1316
1317 /* Reject definitely invalid configurations */
1318 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1319 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1320 return -EINVAL;
1321
1423
1424 if (!plchan->slave)
1425 return -EINVAL;
1426
1427 /* Reject definitely invalid configurations */
1428 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1429 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1430 return -EINVAL;
1431
1432 if (config->device_fc && pl08x->vd->pl080s) {
1433 dev_err(&pl08x->adev->dev,
1434 "%s: PL080S does not support peripheral flow control\n",
1435 __func__);
1436 return -EINVAL;
1437 }
1438
1322 plchan->cfg = *config;
1323
1324 return 0;
1325}
1326
1327/*
1328 * Slave transactions callback to the slave device to allow
1329 * synchronization of slave DMA signals with the DMAC enable

--- 74 unchanged lines hidden (view full) ---

1404 if (!ret) {
1405 pl08x_free_txd(pl08x, txd);
1406 return NULL;
1407 }
1408
1409 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1410}
1411
1439 plchan->cfg = *config;
1440
1441 return 0;
1442}
1443
1444/*
1445 * Slave transactions callback to the slave device to allow
1446 * synchronization of slave DMA signals with the DMAC enable

--- 74 unchanged lines hidden (view full) ---

1521 if (!ret) {
1522 pl08x_free_txd(pl08x, txd);
1523 return NULL;
1524 }
1525
1526 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1527}
1528
1412static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1413 struct dma_chan *chan, struct scatterlist *sgl,
1414 unsigned int sg_len, enum dma_transfer_direction direction,
1415 unsigned long flags, void *context)
1529static struct pl08x_txd *pl08x_init_txd(
1530 struct dma_chan *chan,
1531 enum dma_transfer_direction direction,
1532 dma_addr_t *slave_addr)
1416{
1417 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1418 struct pl08x_driver_data *pl08x = plchan->host;
1419 struct pl08x_txd *txd;
1533{
1534 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1535 struct pl08x_driver_data *pl08x = plchan->host;
1536 struct pl08x_txd *txd;
1420 struct pl08x_sg *dsg;
1421 struct scatterlist *sg;
1422 enum dma_slave_buswidth addr_width;
1537 enum dma_slave_buswidth addr_width;
1423 dma_addr_t slave_addr;
1424 int ret, tmp;
1425 u8 src_buses, dst_buses;
1426 u32 maxburst, cctl;
1427
1538 int ret, tmp;
1539 u8 src_buses, dst_buses;
1540 u32 maxburst, cctl;
1541
1428 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1429 __func__, sg_dma_len(sgl), plchan->name);
1430
1431 txd = pl08x_get_txd(plchan);
1432 if (!txd) {
1433 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1434 return NULL;
1435 }
1436
1437 /*
1438 * Set up addresses, the PrimeCell configured address
1439 * will take precedence since this may configure the
1440 * channel target address dynamically at runtime.
1441 */
1442 if (direction == DMA_MEM_TO_DEV) {
1443 cctl = PL080_CONTROL_SRC_INCR;
1542 txd = pl08x_get_txd(plchan);
1543 if (!txd) {
1544 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1545 return NULL;
1546 }
1547
1548 /*
1549 * Set up addresses, the PrimeCell configured address
1550 * will take precedence since this may configure the
1551 * channel target address dynamically at runtime.
1552 */
1553 if (direction == DMA_MEM_TO_DEV) {
1554 cctl = PL080_CONTROL_SRC_INCR;
1444 slave_addr = plchan->cfg.dst_addr;
1555 *slave_addr = plchan->cfg.dst_addr;
1445 addr_width = plchan->cfg.dst_addr_width;
1446 maxburst = plchan->cfg.dst_maxburst;
1447 src_buses = pl08x->mem_buses;
1448 dst_buses = plchan->cd->periph_buses;
1449 } else if (direction == DMA_DEV_TO_MEM) {
1450 cctl = PL080_CONTROL_DST_INCR;
1556 addr_width = plchan->cfg.dst_addr_width;
1557 maxburst = plchan->cfg.dst_maxburst;
1558 src_buses = pl08x->mem_buses;
1559 dst_buses = plchan->cd->periph_buses;
1560 } else if (direction == DMA_DEV_TO_MEM) {
1561 cctl = PL080_CONTROL_DST_INCR;
1451 slave_addr = plchan->cfg.src_addr;
1562 *slave_addr = plchan->cfg.src_addr;
1452 addr_width = plchan->cfg.src_addr_width;
1453 maxburst = plchan->cfg.src_maxburst;
1454 src_buses = plchan->cd->periph_buses;
1455 dst_buses = pl08x->mem_buses;
1456 } else {
1457 pl08x_free_txd(pl08x, txd);
1458 dev_err(&pl08x->adev->dev,
1459 "%s direction unsupported\n", __func__);

--- 32 unchanged lines hidden (view full) ---

1492 plchan->signal, plchan->name);
1493
1494 /* Assign the flow control signal to this channel */
1495 if (direction == DMA_MEM_TO_DEV)
1496 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
1497 else
1498 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1499
1563 addr_width = plchan->cfg.src_addr_width;
1564 maxburst = plchan->cfg.src_maxburst;
1565 src_buses = plchan->cd->periph_buses;
1566 dst_buses = pl08x->mem_buses;
1567 } else {
1568 pl08x_free_txd(pl08x, txd);
1569 dev_err(&pl08x->adev->dev,
1570 "%s direction unsupported\n", __func__);

--- 32 unchanged lines hidden (view full) ---

1603 plchan->signal, plchan->name);
1604
1605 /* Assign the flow control signal to this channel */
1606 if (direction == DMA_MEM_TO_DEV)
1607 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
1608 else
1609 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1610
1611 return txd;
1612}
1613
1614static int pl08x_tx_add_sg(struct pl08x_txd *txd,
1615 enum dma_transfer_direction direction,
1616 dma_addr_t slave_addr,
1617 dma_addr_t buf_addr,
1618 unsigned int len)
1619{
1620 struct pl08x_sg *dsg;
1621
1622 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1623 if (!dsg)
1624 return -ENOMEM;
1625
1626 list_add_tail(&dsg->node, &txd->dsg_list);
1627
1628 dsg->len = len;
1629 if (direction == DMA_MEM_TO_DEV) {
1630 dsg->src_addr = buf_addr;
1631 dsg->dst_addr = slave_addr;
1632 } else {
1633 dsg->src_addr = slave_addr;
1634 dsg->dst_addr = buf_addr;
1635 }
1636
1637 return 0;
1638}
1639
1640static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1641 struct dma_chan *chan, struct scatterlist *sgl,
1642 unsigned int sg_len, enum dma_transfer_direction direction,
1643 unsigned long flags, void *context)
1644{
1645 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1646 struct pl08x_driver_data *pl08x = plchan->host;
1647 struct pl08x_txd *txd;
1648 struct scatterlist *sg;
1649 int ret, tmp;
1650 dma_addr_t slave_addr;
1651
1652 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1653 __func__, sg_dma_len(sgl), plchan->name);
1654
1655 txd = pl08x_init_txd(chan, direction, &slave_addr);
1656 if (!txd)
1657 return NULL;
1658
1500 for_each_sg(sgl, sg, sg_len, tmp) {
1659 for_each_sg(sgl, sg, sg_len, tmp) {
1501 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1502 if (!dsg) {
1660 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1661 sg_dma_address(sg),
1662 sg_dma_len(sg));
1663 if (ret) {
1503 pl08x_release_mux(plchan);
1504 pl08x_free_txd(pl08x, txd);
1505 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1506 __func__);
1507 return NULL;
1508 }
1664 pl08x_release_mux(plchan);
1665 pl08x_free_txd(pl08x, txd);
1666 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1667 __func__);
1668 return NULL;
1669 }
1509 list_add_tail(&dsg->node, &txd->dsg_list);
1670 }
1510
1671
1511 dsg->len = sg_dma_len(sg);
1512 if (direction == DMA_MEM_TO_DEV) {
1513 dsg->src_addr = sg_dma_address(sg);
1514 dsg->dst_addr = slave_addr;
1515 } else {
1516 dsg->src_addr = slave_addr;
1517 dsg->dst_addr = sg_dma_address(sg);
1672 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1673 if (!ret) {
1674 pl08x_release_mux(plchan);
1675 pl08x_free_txd(pl08x, txd);
1676 return NULL;
1677 }
1678
1679 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1680}
1681
1682static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
1683 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1684 size_t period_len, enum dma_transfer_direction direction,
1685 unsigned long flags, void *context)
1686{
1687 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1688 struct pl08x_driver_data *pl08x = plchan->host;
1689 struct pl08x_txd *txd;
1690 int ret, tmp;
1691 dma_addr_t slave_addr;
1692
1693 dev_dbg(&pl08x->adev->dev,
1694 "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
1695 __func__, period_len, buf_len,
1696 direction == DMA_MEM_TO_DEV ? "to" : "from",
1697 plchan->name);
1698
1699 txd = pl08x_init_txd(chan, direction, &slave_addr);
1700 if (!txd)
1701 return NULL;
1702
1703 txd->cyclic = true;
1704 txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
1705 for (tmp = 0; tmp < buf_len; tmp += period_len) {
1706 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1707 buf_addr + tmp, period_len);
1708 if (ret) {
1709 pl08x_release_mux(plchan);
1710 pl08x_free_txd(pl08x, txd);
1711 return NULL;
1518 }
1519 }
1520
1521 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1522 if (!ret) {
1523 pl08x_release_mux(plchan);
1524 pl08x_free_txd(pl08x, txd);
1525 return NULL;

--- 126 unchanged lines hidden (view full) ---

1652 dev_err(&pl08x->adev->dev,
1653 "%s Error TC interrupt on unused channel: 0x%08x\n",
1654 __func__, i);
1655 continue;
1656 }
1657
1658 spin_lock(&plchan->vc.lock);
1659 tx = plchan->at;
1712 }
1713 }
1714
1715 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1716 if (!ret) {
1717 pl08x_release_mux(plchan);
1718 pl08x_free_txd(pl08x, txd);
1719 return NULL;

--- 126 unchanged lines hidden (view full) ---

1846 dev_err(&pl08x->adev->dev,
1847 "%s Error TC interrupt on unused channel: 0x%08x\n",
1848 __func__, i);
1849 continue;
1850 }
1851
1852 spin_lock(&plchan->vc.lock);
1853 tx = plchan->at;
1660 if (tx) {
1854 if (tx && tx->cyclic) {
1855 vchan_cyclic_callback(&tx->vd);
1856 } else if (tx) {
1661 plchan->at = NULL;
1662 /*
1663 * This descriptor is done, release its mux
1664 * reservation.
1665 */
1666 pl08x_release_mux(plchan);
1667 tx->done = true;
1668 vchan_cookie_complete(&tx->vd);

--- 177 unchanged lines hidden (view full) ---

1846{
1847}
1848#endif
1849
1850static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1851{
1852 struct pl08x_driver_data *pl08x;
1853 const struct vendor_data *vd = id->data;
1857 plchan->at = NULL;
1858 /*
1859 * This descriptor is done, release its mux
1860 * reservation.
1861 */
1862 pl08x_release_mux(plchan);
1863 tx->done = true;
1864 vchan_cookie_complete(&tx->vd);

--- 177 unchanged lines hidden (view full) ---

2042{
2043}
2044#endif
2045
2046static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2047{
2048 struct pl08x_driver_data *pl08x;
2049 const struct vendor_data *vd = id->data;
2050 u32 tsfr_size;
1854 int ret = 0;
1855 int i;
1856
1857 ret = amba_request_regions(adev, NULL);
1858 if (ret)
1859 return ret;
1860
1861 /* Create the driver state holder */

--- 11 unchanged lines hidden (view full) ---

1873 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
1874 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1875 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
1876 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
1877 pl08x->memcpy.device_control = pl08x_control;
1878
1879 /* Initialize slave engine */
1880 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
2051 int ret = 0;
2052 int i;
2053
2054 ret = amba_request_regions(adev, NULL);
2055 if (ret)
2056 return ret;
2057
2058 /* Create the driver state holder */

--- 11 unchanged lines hidden (view full) ---

2070 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
2071 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2072 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
2073 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
2074 pl08x->memcpy.device_control = pl08x_control;
2075
2076 /* Initialize slave engine */
2077 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
2078 dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
1881 pl08x->slave.dev = &adev->dev;
1882 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1883 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
1884 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1885 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
1886 pl08x->slave.device_issue_pending = pl08x_issue_pending;
1887 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2079 pl08x->slave.dev = &adev->dev;
2080 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
2081 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
2082 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2083 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
2084 pl08x->slave.device_issue_pending = pl08x_issue_pending;
2085 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2086 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
1888 pl08x->slave.device_control = pl08x_control;
1889
1890 /* Get the platform data */
1891 pl08x->pd = dev_get_platdata(&adev->dev);
1892 if (!pl08x->pd) {
1893 dev_err(&adev->dev, "no platform data supplied\n");
1894 ret = -EINVAL;
1895 goto out_no_platdata;

--- 6 unchanged lines hidden (view full) ---

1902 /* By default, AHB1 only. If dualmaster, from platform */
1903 pl08x->lli_buses = PL08X_AHB1;
1904 pl08x->mem_buses = PL08X_AHB1;
1905 if (pl08x->vd->dualmaster) {
1906 pl08x->lli_buses = pl08x->pd->lli_buses;
1907 pl08x->mem_buses = pl08x->pd->mem_buses;
1908 }
1909
2087 pl08x->slave.device_control = pl08x_control;
2088
2089 /* Get the platform data */
2090 pl08x->pd = dev_get_platdata(&adev->dev);
2091 if (!pl08x->pd) {
2092 dev_err(&adev->dev, "no platform data supplied\n");
2093 ret = -EINVAL;
2094 goto out_no_platdata;

--- 6 unchanged lines hidden (view full) ---

2101 /* By default, AHB1 only. If dualmaster, from platform */
2102 pl08x->lli_buses = PL08X_AHB1;
2103 pl08x->mem_buses = PL08X_AHB1;
2104 if (pl08x->vd->dualmaster) {
2105 pl08x->lli_buses = pl08x->pd->lli_buses;
2106 pl08x->mem_buses = pl08x->pd->mem_buses;
2107 }
2108
2109 if (vd->pl080s)
2110 pl08x->lli_words = PL080S_LLI_WORDS;
2111 else
2112 pl08x->lli_words = PL080_LLI_WORDS;
2113 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
2114
1910 /* A DMA memory pool for LLIs, align on 1-byte boundary */
1911 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
2115 /* A DMA memory pool for LLIs, align on 1-byte boundary */
2116 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1912 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
2117 tsfr_size, PL08X_ALIGN, 0);
1913 if (!pl08x->pool) {
1914 ret = -ENOMEM;
1915 goto out_no_lli_pool;
1916 }
1917
1918 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
1919 if (!pl08x->base) {
1920 ret = -ENOMEM;

--- 26 unchanged lines hidden (view full) ---

1947 goto out_no_phychans;
1948 }
1949
1950 for (i = 0; i < vd->channels; i++) {
1951 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
1952
1953 ch->id = i;
1954 ch->base = pl08x->base + PL080_Cx_BASE(i);
2118 if (!pl08x->pool) {
2119 ret = -ENOMEM;
2120 goto out_no_lli_pool;
2121 }
2122
2123 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
2124 if (!pl08x->base) {
2125 ret = -ENOMEM;

--- 26 unchanged lines hidden (view full) ---

2152 goto out_no_phychans;
2153 }
2154
2155 for (i = 0; i < vd->channels; i++) {
2156 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
2157
2158 ch->id = i;
2159 ch->base = pl08x->base + PL080_Cx_BASE(i);
2160 ch->reg_config = ch->base + vd->config_offset;
1955 spin_lock_init(&ch->lock);
1956
1957 /*
1958 * Nomadik variants can have channels that are locked
1959 * down for the secure world only. Lock up these channels
1960 * by perpetually serving a dummy virtual channel.
1961 */
1962 if (vd->nomadik) {
1963 u32 val;
1964
2161 spin_lock_init(&ch->lock);
2162
2163 /*
2164 * Nomadik variants can have channels that are locked
2165 * down for the secure world only. Lock up these channels
2166 * by perpetually serving a dummy virtual channel.
2167 */
2168 if (vd->nomadik) {
2169 u32 val;
2170
1965 val = readl(ch->base + PL080_CH_CONFIG);
2171 val = readl(ch->reg_config);
1966 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
1967 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
1968 ch->locked = true;
1969 }
1970 }
1971
1972 dev_dbg(&adev->dev, "physical channel %d is %s\n",
1973 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");

--- 34 unchanged lines hidden (view full) ---

2008 dev_warn(&pl08x->adev->dev,
2009 "%s failed to register slave as an async device - %d\n",
2010 __func__, ret);
2011 goto out_no_slave_reg;
2012 }
2013
2014 amba_set_drvdata(adev, pl08x);
2015 init_pl08x_debugfs(pl08x);
2172 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
2173 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
2174 ch->locked = true;
2175 }
2176 }
2177
2178 dev_dbg(&adev->dev, "physical channel %d is %s\n",
2179 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");

--- 34 unchanged lines hidden (view full) ---

2214 dev_warn(&pl08x->adev->dev,
2215 "%s failed to register slave as an async device - %d\n",
2216 __func__, ret);
2217 goto out_no_slave_reg;
2218 }
2219
2220 amba_set_drvdata(adev, pl08x);
2221 init_pl08x_debugfs(pl08x);
2016 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
2017 amba_part(adev), amba_rev(adev),
2222 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
2223 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
2018 (unsigned long long)adev->res.start, adev->irq[0]);
2019
2020 return 0;
2021
2022out_no_slave_reg:
2023 dma_async_device_unregister(&pl08x->memcpy);
2024out_no_memcpy_reg:
2025 pl08x_free_virtual_channels(&pl08x->slave);

--- 12 unchanged lines hidden (view full) ---

2038 kfree(pl08x);
2039out_no_pl08x:
2040 amba_release_regions(adev);
2041 return ret;
2042}
2043
2044/* PL080 has 8 channels and the PL081 has just 2 */
2045static struct vendor_data vendor_pl080 = {
2224 (unsigned long long)adev->res.start, adev->irq[0]);
2225
2226 return 0;
2227
2228out_no_slave_reg:
2229 dma_async_device_unregister(&pl08x->memcpy);
2230out_no_memcpy_reg:
2231 pl08x_free_virtual_channels(&pl08x->slave);

--- 12 unchanged lines hidden (view full) ---

2244 kfree(pl08x);
2245out_no_pl08x:
2246 amba_release_regions(adev);
2247 return ret;
2248}
2249
2250/* PL080 has 8 channels and the PL081 has just 2 */
2251static struct vendor_data vendor_pl080 = {
2252 .config_offset = PL080_CH_CONFIG,
2046 .channels = 8,
2047 .dualmaster = true,
2253 .channels = 8,
2254 .dualmaster = true,
2255 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2048};
2049
2050static struct vendor_data vendor_nomadik = {
2256};
2257
2258static struct vendor_data vendor_nomadik = {
2259 .config_offset = PL080_CH_CONFIG,
2051 .channels = 8,
2052 .dualmaster = true,
2053 .nomadik = true,
2260 .channels = 8,
2261 .dualmaster = true,
2262 .nomadik = true,
2263 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2054};
2055
2264};
2265
2266static struct vendor_data vendor_pl080s = {
2267 .config_offset = PL080S_CH_CONFIG,
2268 .channels = 8,
2269 .pl080s = true,
2270 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
2271};
2272
2056static struct vendor_data vendor_pl081 = {
2273static struct vendor_data vendor_pl081 = {
2274 .config_offset = PL080_CH_CONFIG,
2057 .channels = 2,
2058 .dualmaster = false,
2275 .channels = 2,
2276 .dualmaster = false,
2277 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2059};
2060
2061static struct amba_id pl08x_ids[] = {
2278};
2279
2280static struct amba_id pl08x_ids[] = {
2281 /* Samsung PL080S variant */
2282 {
2283 .id = 0x0a141080,
2284 .mask = 0xffffffff,
2285 .data = &vendor_pl080s,
2286 },
2062 /* PL080 */
2063 {
2064 .id = 0x00041080,
2065 .mask = 0x000fffff,
2066 .data = &vendor_pl080,
2067 },
2068 /* PL081 */
2069 {

--- 32 unchanged lines hidden ---
2287 /* PL080 */
2288 {
2289 .id = 0x00041080,
2290 .mask = 0x000fffff,
2291 .data = &vendor_pl080,
2292 },
2293 /* PL081 */
2294 {

--- 32 unchanged lines hidden ---