1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c8acd6aaSZhangfei Gao /*
3c8acd6aaSZhangfei Gao * Copyright 2012 Marvell International Ltd.
4c8acd6aaSZhangfei Gao */
52b7f65b1SJoe Perches
67331205aSThierry Reding #include <linux/err.h>
7c8acd6aaSZhangfei Gao #include <linux/module.h>
8c8acd6aaSZhangfei Gao #include <linux/init.h>
9c8acd6aaSZhangfei Gao #include <linux/types.h>
10c8acd6aaSZhangfei Gao #include <linux/interrupt.h>
11c8acd6aaSZhangfei Gao #include <linux/dma-mapping.h>
12c8acd6aaSZhangfei Gao #include <linux/slab.h>
13c8acd6aaSZhangfei Gao #include <linux/dmaengine.h>
14c8acd6aaSZhangfei Gao #include <linux/platform_device.h>
15c8acd6aaSZhangfei Gao #include <linux/device.h>
16c8acd6aaSZhangfei Gao #include <linux/platform_data/mmp_dma.h>
17c8acd6aaSZhangfei Gao #include <linux/dmapool.h>
18c8acd6aaSZhangfei Gao #include <linux/of_device.h>
19a9a7cf08SDaniel Mack #include <linux/of_dma.h>
20c8acd6aaSZhangfei Gao #include <linux/of.h>
21c8acd6aaSZhangfei Gao
22c8acd6aaSZhangfei Gao #include "dmaengine.h"
23c8acd6aaSZhangfei Gao
24c8acd6aaSZhangfei Gao #define DCSR 0x0000
25c8acd6aaSZhangfei Gao #define DALGN 0x00a0
26c8acd6aaSZhangfei Gao #define DINT 0x00f0
27c8acd6aaSZhangfei Gao #define DDADR 0x0200
281b38da26SDaniel Mack #define DSADR(n) (0x0204 + ((n) << 4))
291b38da26SDaniel Mack #define DTADR(n) (0x0208 + ((n) << 4))
30c8acd6aaSZhangfei Gao #define DCMD 0x020c
31c8acd6aaSZhangfei Gao
322b7f65b1SJoe Perches #define DCSR_RUN BIT(31) /* Run Bit (read / write) */
332b7f65b1SJoe Perches #define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */
342b7f65b1SJoe Perches #define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */
352b7f65b1SJoe Perches #define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */
362b7f65b1SJoe Perches #define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */
372b7f65b1SJoe Perches #define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */
382b7f65b1SJoe Perches #define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */
392b7f65b1SJoe Perches #define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */
40c8acd6aaSZhangfei Gao
412b7f65b1SJoe Perches #define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */
422b7f65b1SJoe Perches #define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */
432b7f65b1SJoe Perches #define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
442b7f65b1SJoe Perches #define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
452b7f65b1SJoe Perches #define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
462b7f65b1SJoe Perches #define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
472b7f65b1SJoe Perches #define DCSR_EORINTR BIT(9) /* The end of Receive */
48c8acd6aaSZhangfei Gao
492b7f65b1SJoe Perches #define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
502b7f65b1SJoe Perches #define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */
51c8acd6aaSZhangfei Gao #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
52c8acd6aaSZhangfei Gao
53c8acd6aaSZhangfei Gao #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
542b7f65b1SJoe Perches #define DDADR_STOP BIT(0) /* Stop (read / write) */
55c8acd6aaSZhangfei Gao
562b7f65b1SJoe Perches #define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */
572b7f65b1SJoe Perches #define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */
582b7f65b1SJoe Perches #define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */
592b7f65b1SJoe Perches #define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */
602b7f65b1SJoe Perches #define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */
612b7f65b1SJoe Perches #define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */
622b7f65b1SJoe Perches #define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */
63c8acd6aaSZhangfei Gao #define DCMD_BURST8 (1 << 16) /* 8 byte burst */
64c8acd6aaSZhangfei Gao #define DCMD_BURST16 (2 << 16) /* 16 byte burst */
65c8acd6aaSZhangfei Gao #define DCMD_BURST32 (3 << 16) /* 32 byte burst */
66c8acd6aaSZhangfei Gao #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
67c8acd6aaSZhangfei Gao #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
68c8acd6aaSZhangfei Gao #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
69c8acd6aaSZhangfei Gao #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
70c8acd6aaSZhangfei Gao
711ac0e845SDaniel Mack #define PDMA_MAX_DESC_BYTES DCMD_LENGTH
72c8acd6aaSZhangfei Gao
/*
 * Hardware descriptor as fetched by the controller through DDADR.
 * The layout mirrors the DDADR/DSADR/DTADR/DCMD register set.
 * NOTE(review): the 32-byte alignment presumably matches the controller's
 * descriptor-fetch alignment requirement — confirm against the datasheet.
 */
struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);
79c8acd6aaSZhangfei Gao
/*
 * Software wrapper around one hardware descriptor.  'desc' is the first
 * member, so the dma_pool DMA address of this struct (stored in
 * async_tx.phys) is also the address programmed into DDADR / ddadr links.
 */
struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;	/* must stay first (see above) */
	struct list_head node;		/* entry in chain_pending/chain_running */
	struct list_head tx_list;	/* all descriptors of one transaction */
	struct dma_async_tx_descriptor async_tx;
};
86c8acd6aaSZhangfei Gao
87c8acd6aaSZhangfei Gao struct mmp_pdma_phy;
88c8acd6aaSZhangfei Gao
/*
 * Virtual DMA channel.  A virtual channel is bound to a physical channel
 * (struct mmp_pdma_phy) only while it has work to run; binding happens in
 * lookup_phy() and is dropped again via mmp_pdma_free_phy().
 */
struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;			/* dmaengine channel handle */
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;		/* bound physical channel, or NULL */
	enum dma_transfer_direction dir;	/* direction of current transfer */
	struct dma_slave_config slave_config;	/* cached config, applied at prep time */

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;		/* DCMD template merged into each descriptor */
	u32 drcmr;		/* peripheral request line number */
	u32 dev_addr;		/* device address for slave transfers */

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;		/* transfer has non-8-byte-aligned addrs */

	struct dma_pool *desc_pool;	/* Descriptors pool */
};
115c8acd6aaSZhangfei Gao
/* One physical channel of the DMA controller. */
struct mmp_pdma_phy {
	int idx;			/* hardware channel index */
	void __iomem *base;		/* controller register base */
	struct mmp_pdma_chan *vchan;	/* virtual channel using this phy, or NULL */
};
121c8acd6aaSZhangfei Gao
/* Per-controller state embedding the dmaengine device. */
struct mmp_pdma_device {
	int dma_channels;		/* number of physical channels */
	void __iomem *base;		/* memory-mapped register base */
	struct device *dev;
	struct dma_device device;	/* dmaengine registration */
	struct mmp_pdma_phy *phy;	/* array of dma_channels entries */
	spinlock_t phy_lock;		/* protect alloc/free phy channels */
};
130c8acd6aaSZhangfei Gao
1312b7f65b1SJoe Perches #define tx_to_mmp_pdma_desc(tx) \
1322b7f65b1SJoe Perches container_of(tx, struct mmp_pdma_desc_sw, async_tx)
1332b7f65b1SJoe Perches #define to_mmp_pdma_desc(lh) \
1342b7f65b1SJoe Perches container_of(lh, struct mmp_pdma_desc_sw, node)
1352b7f65b1SJoe Perches #define to_mmp_pdma_chan(dchan) \
1362b7f65b1SJoe Perches container_of(dchan, struct mmp_pdma_chan, chan)
1372b7f65b1SJoe Perches #define to_mmp_pdma_dev(dmadev) \
1382b7f65b1SJoe Perches container_of(dmadev, struct mmp_pdma_device, device)
139c8acd6aaSZhangfei Gao
14056b94b02SVinod Koul static int mmp_pdma_config_write(struct dma_chan *dchan,
14156b94b02SVinod Koul struct dma_slave_config *cfg,
14256b94b02SVinod Koul enum dma_transfer_direction direction);
14356b94b02SVinod Koul
/* Program the address of the first hw descriptor into the channel's DDADR. */
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	writel(addr, phy->base + DDADR + (phy->idx << 4));
}
150c8acd6aaSZhangfei Gao
enable_chan(struct mmp_pdma_phy * phy)151c8acd6aaSZhangfei Gao static void enable_chan(struct mmp_pdma_phy *phy)
152c8acd6aaSZhangfei Gao {
1536fc4573cSDaniel Mack u32 reg, dalgn;
154c8acd6aaSZhangfei Gao
155c8acd6aaSZhangfei Gao if (!phy->vchan)
156c8acd6aaSZhangfei Gao return;
157c8acd6aaSZhangfei Gao
1588b298dedSDaniel Mack reg = DRCMR(phy->vchan->drcmr);
159c8acd6aaSZhangfei Gao writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
160c8acd6aaSZhangfei Gao
1616fc4573cSDaniel Mack dalgn = readl(phy->base + DALGN);
1626fc4573cSDaniel Mack if (phy->vchan->byte_align)
1636fc4573cSDaniel Mack dalgn |= 1 << phy->idx;
1646fc4573cSDaniel Mack else
1656fc4573cSDaniel Mack dalgn &= ~(1 << phy->idx);
1666fc4573cSDaniel Mack writel(dalgn, phy->base + DALGN);
1676fc4573cSDaniel Mack
168c8acd6aaSZhangfei Gao reg = (phy->idx << 2) + DCSR;
1692b7f65b1SJoe Perches writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
170c8acd6aaSZhangfei Gao }
171c8acd6aaSZhangfei Gao
disable_chan(struct mmp_pdma_phy * phy)172c8acd6aaSZhangfei Gao static void disable_chan(struct mmp_pdma_phy *phy)
173c8acd6aaSZhangfei Gao {
174c8acd6aaSZhangfei Gao u32 reg;
175c8acd6aaSZhangfei Gao
1762b7f65b1SJoe Perches if (!phy)
1772b7f65b1SJoe Perches return;
1782b7f65b1SJoe Perches
179c8acd6aaSZhangfei Gao reg = (phy->idx << 2) + DCSR;
1802b7f65b1SJoe Perches writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
181c8acd6aaSZhangfei Gao }
182c8acd6aaSZhangfei Gao
clear_chan_irq(struct mmp_pdma_phy * phy)183c8acd6aaSZhangfei Gao static int clear_chan_irq(struct mmp_pdma_phy *phy)
184c8acd6aaSZhangfei Gao {
185c8acd6aaSZhangfei Gao u32 dcsr;
186c8acd6aaSZhangfei Gao u32 dint = readl(phy->base + DINT);
187c8acd6aaSZhangfei Gao u32 reg = (phy->idx << 2) + DCSR;
188c8acd6aaSZhangfei Gao
1892b7f65b1SJoe Perches if (!(dint & BIT(phy->idx)))
1902b7f65b1SJoe Perches return -EAGAIN;
1912b7f65b1SJoe Perches
192c8acd6aaSZhangfei Gao /* clear irq */
193c8acd6aaSZhangfei Gao dcsr = readl(phy->base + reg);
194c8acd6aaSZhangfei Gao writel(dcsr, phy->base + reg);
195c8acd6aaSZhangfei Gao if ((dcsr & DCSR_BUSERR) && (phy->vchan))
196c8acd6aaSZhangfei Gao dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
1972b7f65b1SJoe Perches
198c8acd6aaSZhangfei Gao return 0;
199c8acd6aaSZhangfei Gao }
200c8acd6aaSZhangfei Gao
/*
 * Per-channel interrupt handler: acknowledge the channel's interrupt and
 * defer completion handling to the channel's tasklet.  Returns IRQ_NONE
 * when this channel had no interrupt pending.
 */
static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;
}
211c8acd6aaSZhangfei Gao
mmp_pdma_int_handler(int irq,void * dev_id)212c8acd6aaSZhangfei Gao static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
213c8acd6aaSZhangfei Gao {
214c8acd6aaSZhangfei Gao struct mmp_pdma_device *pdev = dev_id;
215c8acd6aaSZhangfei Gao struct mmp_pdma_phy *phy;
216c8acd6aaSZhangfei Gao u32 dint = readl(pdev->base + DINT);
217c8acd6aaSZhangfei Gao int i, ret;
218c8acd6aaSZhangfei Gao int irq_num = 0;
219c8acd6aaSZhangfei Gao
220c8acd6aaSZhangfei Gao while (dint) {
221c8acd6aaSZhangfei Gao i = __ffs(dint);
2223a314f14SQiao Zhou /* only handle interrupts belonging to pdma driver*/
2233a314f14SQiao Zhou if (i >= pdev->dma_channels)
2243a314f14SQiao Zhou break;
225c8acd6aaSZhangfei Gao dint &= (dint - 1);
226c8acd6aaSZhangfei Gao phy = &pdev->phy[i];
227c8acd6aaSZhangfei Gao ret = mmp_pdma_chan_handler(irq, phy);
228c8acd6aaSZhangfei Gao if (ret == IRQ_HANDLED)
229c8acd6aaSZhangfei Gao irq_num++;
230c8acd6aaSZhangfei Gao }
231c8acd6aaSZhangfei Gao
232c8acd6aaSZhangfei Gao if (irq_num)
233c8acd6aaSZhangfei Gao return IRQ_HANDLED;
2342b7f65b1SJoe Perches
235c8acd6aaSZhangfei Gao return IRQ_NONE;
236c8acd6aaSZhangfei Gao }
237c8acd6aaSZhangfei Gao
238c8acd6aaSZhangfei Gao /* lookup free phy channel as descending priority */
lookup_phy(struct mmp_pdma_chan * pchan)239c8acd6aaSZhangfei Gao static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
240c8acd6aaSZhangfei Gao {
241c8acd6aaSZhangfei Gao int prio, i;
242c8acd6aaSZhangfei Gao struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
243638a542cSDaniel Mack struct mmp_pdma_phy *phy, *found = NULL;
244027f28b7SXiang Wang unsigned long flags;
245c8acd6aaSZhangfei Gao
246c8acd6aaSZhangfei Gao /*
247c8acd6aaSZhangfei Gao * dma channel priorities
248c8acd6aaSZhangfei Gao * ch 0 - 3, 16 - 19 <--> (0)
249c8acd6aaSZhangfei Gao * ch 4 - 7, 20 - 23 <--> (1)
250c8acd6aaSZhangfei Gao * ch 8 - 11, 24 - 27 <--> (2)
251c8acd6aaSZhangfei Gao * ch 12 - 15, 28 - 31 <--> (3)
252c8acd6aaSZhangfei Gao */
253027f28b7SXiang Wang
254027f28b7SXiang Wang spin_lock_irqsave(&pdev->phy_lock, flags);
2552b7f65b1SJoe Perches for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
256c8acd6aaSZhangfei Gao for (i = 0; i < pdev->dma_channels; i++) {
2572b7f65b1SJoe Perches if (prio != (i & 0xf) >> 2)
258c8acd6aaSZhangfei Gao continue;
259c8acd6aaSZhangfei Gao phy = &pdev->phy[i];
260c8acd6aaSZhangfei Gao if (!phy->vchan) {
261c8acd6aaSZhangfei Gao phy->vchan = pchan;
262638a542cSDaniel Mack found = phy;
263638a542cSDaniel Mack goto out_unlock;
264c8acd6aaSZhangfei Gao }
265c8acd6aaSZhangfei Gao }
266c8acd6aaSZhangfei Gao }
267c8acd6aaSZhangfei Gao
268638a542cSDaniel Mack out_unlock:
269027f28b7SXiang Wang spin_unlock_irqrestore(&pdev->phy_lock, flags);
270638a542cSDaniel Mack return found;
271c8acd6aaSZhangfei Gao }
272c8acd6aaSZhangfei Gao
mmp_pdma_free_phy(struct mmp_pdma_chan * pchan)273027f28b7SXiang Wang static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
274027f28b7SXiang Wang {
275027f28b7SXiang Wang struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
276027f28b7SXiang Wang unsigned long flags;
27726a2dfdeSXiang Wang u32 reg;
278027f28b7SXiang Wang
279027f28b7SXiang Wang if (!pchan->phy)
280027f28b7SXiang Wang return;
281027f28b7SXiang Wang
28226a2dfdeSXiang Wang /* clear the channel mapping in DRCMR */
283a2a7c176SLaurent Pinchart reg = DRCMR(pchan->drcmr);
28426a2dfdeSXiang Wang writel(0, pchan->phy->base + reg);
28526a2dfdeSXiang Wang
286027f28b7SXiang Wang spin_lock_irqsave(&pdev->phy_lock, flags);
287027f28b7SXiang Wang pchan->phy->vchan = NULL;
288027f28b7SXiang Wang pchan->phy = NULL;
289027f28b7SXiang Wang spin_unlock_irqrestore(&pdev->phy_lock, flags);
290027f28b7SXiang Wang }
291027f28b7SXiang Wang
2926cfb8321SLee Jones /*
293c8acd6aaSZhangfei Gao * start_pending_queue - transfer any pending transactions
294c8acd6aaSZhangfei Gao * pending list ==> running list
295c8acd6aaSZhangfei Gao */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still in running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	/* grab a physical channel now if we do not hold one yet */
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}
337c8acd6aaSZhangfei Gao
338c8acd6aaSZhangfei Gao
339c8acd6aaSZhangfei Gao /* desc->tx_list ==> pending list */
mmp_pdma_tx_submit(struct dma_async_tx_descriptor * tx)340c8acd6aaSZhangfei Gao static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
341c8acd6aaSZhangfei Gao {
342c8acd6aaSZhangfei Gao struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
343c8acd6aaSZhangfei Gao struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
344c8acd6aaSZhangfei Gao struct mmp_pdma_desc_sw *child;
345c8acd6aaSZhangfei Gao unsigned long flags;
346c8acd6aaSZhangfei Gao dma_cookie_t cookie = -EBUSY;
347c8acd6aaSZhangfei Gao
348c8acd6aaSZhangfei Gao spin_lock_irqsave(&chan->desc_lock, flags);
349c8acd6aaSZhangfei Gao
350c8acd6aaSZhangfei Gao list_for_each_entry(child, &desc->tx_list, node) {
351c8acd6aaSZhangfei Gao cookie = dma_cookie_assign(&child->async_tx);
352c8acd6aaSZhangfei Gao }
353c8acd6aaSZhangfei Gao
3540cd61561SDaniel Mack /* softly link to pending list - desc->tx_list ==> pending list */
3550cd61561SDaniel Mack list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
356c8acd6aaSZhangfei Gao
357c8acd6aaSZhangfei Gao spin_unlock_irqrestore(&chan->desc_lock, flags);
358c8acd6aaSZhangfei Gao
359c8acd6aaSZhangfei Gao return cookie;
360c8acd6aaSZhangfei Gao }
361c8acd6aaSZhangfei Gao
36269c9f0aeSJingoo Han static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan * chan)36369c9f0aeSJingoo Han mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
364c8acd6aaSZhangfei Gao {
365c8acd6aaSZhangfei Gao struct mmp_pdma_desc_sw *desc;
366c8acd6aaSZhangfei Gao dma_addr_t pdesc;
367c8acd6aaSZhangfei Gao
3681c85a844SJulia Lawall desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
369c8acd6aaSZhangfei Gao if (!desc) {
370c8acd6aaSZhangfei Gao dev_err(chan->dev, "out of memory for link descriptor\n");
371c8acd6aaSZhangfei Gao return NULL;
372c8acd6aaSZhangfei Gao }
373c8acd6aaSZhangfei Gao
374c8acd6aaSZhangfei Gao INIT_LIST_HEAD(&desc->tx_list);
375c8acd6aaSZhangfei Gao dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
376c8acd6aaSZhangfei Gao /* each desc has submit */
377c8acd6aaSZhangfei Gao desc->async_tx.tx_submit = mmp_pdma_tx_submit;
378c8acd6aaSZhangfei Gao desc->async_tx.phys = pdesc;
379c8acd6aaSZhangfei Gao
380c8acd6aaSZhangfei Gao return desc;
381c8acd6aaSZhangfei Gao }
382c8acd6aaSZhangfei Gao
3836cfb8321SLee Jones /*
384c8acd6aaSZhangfei Gao * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
385c8acd6aaSZhangfei Gao *
386c8acd6aaSZhangfei Gao * This function will create a dma pool for descriptor allocation.
387c8acd6aaSZhangfei Gao * Request irq only when channel is requested
388c8acd6aaSZhangfei Gao * Return - The number of allocated descriptors.
389c8acd6aaSZhangfei Gao */
390c8acd6aaSZhangfei Gao
mmp_pdma_alloc_chan_resources(struct dma_chan * dchan)391c8acd6aaSZhangfei Gao static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
392c8acd6aaSZhangfei Gao {
393c8acd6aaSZhangfei Gao struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
394c8acd6aaSZhangfei Gao
395c8acd6aaSZhangfei Gao if (chan->desc_pool)
396c8acd6aaSZhangfei Gao return 1;
397c8acd6aaSZhangfei Gao
3982b7f65b1SJoe Perches chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
3992b7f65b1SJoe Perches chan->dev,
400c8acd6aaSZhangfei Gao sizeof(struct mmp_pdma_desc_sw),
4012b7f65b1SJoe Perches __alignof__(struct mmp_pdma_desc_sw),
4022b7f65b1SJoe Perches 0);
403c8acd6aaSZhangfei Gao if (!chan->desc_pool) {
404c8acd6aaSZhangfei Gao dev_err(chan->dev, "unable to allocate descriptor pool\n");
405c8acd6aaSZhangfei Gao return -ENOMEM;
406c8acd6aaSZhangfei Gao }
4072b7f65b1SJoe Perches
408027f28b7SXiang Wang mmp_pdma_free_phy(chan);
409c8acd6aaSZhangfei Gao chan->idle = true;
410c8acd6aaSZhangfei Gao chan->dev_addr = 0;
411c8acd6aaSZhangfei Gao return 1;
412c8acd6aaSZhangfei Gao }
413c8acd6aaSZhangfei Gao
mmp_pdma_free_desc_list(struct mmp_pdma_chan * chan,struct list_head * list)414c8acd6aaSZhangfei Gao static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
415c8acd6aaSZhangfei Gao struct list_head *list)
416c8acd6aaSZhangfei Gao {
417c8acd6aaSZhangfei Gao struct mmp_pdma_desc_sw *desc, *_desc;
418c8acd6aaSZhangfei Gao
419c8acd6aaSZhangfei Gao list_for_each_entry_safe(desc, _desc, list, node) {
420c8acd6aaSZhangfei Gao list_del(&desc->node);
421c8acd6aaSZhangfei Gao dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
422c8acd6aaSZhangfei Gao }
423c8acd6aaSZhangfei Gao }
424c8acd6aaSZhangfei Gao
mmp_pdma_free_chan_resources(struct dma_chan * dchan)425c8acd6aaSZhangfei Gao static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
426c8acd6aaSZhangfei Gao {
427c8acd6aaSZhangfei Gao struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
428c8acd6aaSZhangfei Gao unsigned long flags;
429c8acd6aaSZhangfei Gao
430c8acd6aaSZhangfei Gao spin_lock_irqsave(&chan->desc_lock, flags);
431c8acd6aaSZhangfei Gao mmp_pdma_free_desc_list(chan, &chan->chain_pending);
432c8acd6aaSZhangfei Gao mmp_pdma_free_desc_list(chan, &chan->chain_running);
433c8acd6aaSZhangfei Gao spin_unlock_irqrestore(&chan->desc_lock, flags);
434c8acd6aaSZhangfei Gao
435c8acd6aaSZhangfei Gao dma_pool_destroy(chan->desc_pool);
436c8acd6aaSZhangfei Gao chan->desc_pool = NULL;
437c8acd6aaSZhangfei Gao chan->idle = true;
438c8acd6aaSZhangfei Gao chan->dev_addr = 0;
439027f28b7SXiang Wang mmp_pdma_free_phy(chan);
440c8acd6aaSZhangfei Gao return;
441c8acd6aaSZhangfei Gao }
442c8acd6aaSZhangfei Gao
443c8acd6aaSZhangfei Gao static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan * dchan,dma_addr_t dma_dst,dma_addr_t dma_src,size_t len,unsigned long flags)444c8acd6aaSZhangfei Gao mmp_pdma_prep_memcpy(struct dma_chan *dchan,
445c8acd6aaSZhangfei Gao dma_addr_t dma_dst, dma_addr_t dma_src,
446c8acd6aaSZhangfei Gao size_t len, unsigned long flags)
447c8acd6aaSZhangfei Gao {
448c8acd6aaSZhangfei Gao struct mmp_pdma_chan *chan;
449c8acd6aaSZhangfei Gao struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
450c8acd6aaSZhangfei Gao size_t copy = 0;
451c8acd6aaSZhangfei Gao
452c8acd6aaSZhangfei Gao if (!dchan)
453c8acd6aaSZhangfei Gao return NULL;
454c8acd6aaSZhangfei Gao
455c8acd6aaSZhangfei Gao if (!len)
456c8acd6aaSZhangfei Gao return NULL;
457c8acd6aaSZhangfei Gao
458c8acd6aaSZhangfei Gao chan = to_mmp_pdma_chan(dchan);
4596fc4573cSDaniel Mack chan->byte_align = false;
460c8acd6aaSZhangfei Gao
461c8acd6aaSZhangfei Gao if (!chan->dir) {
462c8acd6aaSZhangfei Gao chan->dir = DMA_MEM_TO_MEM;
463c8acd6aaSZhangfei Gao chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
464c8acd6aaSZhangfei Gao chan->dcmd |= DCMD_BURST32;
465c8acd6aaSZhangfei Gao }
466c8acd6aaSZhangfei Gao
467c8acd6aaSZhangfei Gao do {
468c8acd6aaSZhangfei Gao /* Allocate the link descriptor from DMA pool */
469c8acd6aaSZhangfei Gao new = mmp_pdma_alloc_descriptor(chan);
470c8acd6aaSZhangfei Gao if (!new) {
471c8acd6aaSZhangfei Gao dev_err(chan->dev, "no memory for desc\n");
472c8acd6aaSZhangfei Gao goto fail;
473c8acd6aaSZhangfei Gao }
474c8acd6aaSZhangfei Gao
475c8acd6aaSZhangfei Gao copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
4766fc4573cSDaniel Mack if (dma_src & 0x7 || dma_dst & 0x7)
4776fc4573cSDaniel Mack chan->byte_align = true;
478c8acd6aaSZhangfei Gao
479c8acd6aaSZhangfei Gao new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
480c8acd6aaSZhangfei Gao new->desc.dsadr = dma_src;
481c8acd6aaSZhangfei Gao new->desc.dtadr = dma_dst;
482c8acd6aaSZhangfei Gao
483c8acd6aaSZhangfei Gao if (!first)
484c8acd6aaSZhangfei Gao first = new;
485c8acd6aaSZhangfei Gao else
486c8acd6aaSZhangfei Gao prev->desc.ddadr = new->async_tx.phys;
487c8acd6aaSZhangfei Gao
488c8acd6aaSZhangfei Gao new->async_tx.cookie = 0;
489c8acd6aaSZhangfei Gao async_tx_ack(&new->async_tx);
490c8acd6aaSZhangfei Gao
491c8acd6aaSZhangfei Gao prev = new;
492c8acd6aaSZhangfei Gao len -= copy;
493c8acd6aaSZhangfei Gao
494c8acd6aaSZhangfei Gao if (chan->dir == DMA_MEM_TO_DEV) {
495c8acd6aaSZhangfei Gao dma_src += copy;
496c8acd6aaSZhangfei Gao } else if (chan->dir == DMA_DEV_TO_MEM) {
497c8acd6aaSZhangfei Gao dma_dst += copy;
498c8acd6aaSZhangfei Gao } else if (chan->dir == DMA_MEM_TO_MEM) {
499c8acd6aaSZhangfei Gao dma_src += copy;
500c8acd6aaSZhangfei Gao dma_dst += copy;
501c8acd6aaSZhangfei Gao }
502c8acd6aaSZhangfei Gao
503c8acd6aaSZhangfei Gao /* Insert the link descriptor to the LD ring */
504c8acd6aaSZhangfei Gao list_add_tail(&new->node, &first->tx_list);
505c8acd6aaSZhangfei Gao } while (len);
506c8acd6aaSZhangfei Gao
507c8acd6aaSZhangfei Gao first->async_tx.flags = flags; /* client is in control of this ack */
508c8acd6aaSZhangfei Gao first->async_tx.cookie = -EBUSY;
509c8acd6aaSZhangfei Gao
510c8acd6aaSZhangfei Gao /* last desc and fire IRQ */
511c8acd6aaSZhangfei Gao new->desc.ddadr = DDADR_STOP;
512c8acd6aaSZhangfei Gao new->desc.dcmd |= DCMD_ENDIRQEN;
513c8acd6aaSZhangfei Gao
51450440d74SDaniel Mack chan->cyclic_first = NULL;
51550440d74SDaniel Mack
516c8acd6aaSZhangfei Gao return &first->async_tx;
517c8acd6aaSZhangfei Gao
518c8acd6aaSZhangfei Gao fail:
519c8acd6aaSZhangfei Gao if (first)
520c8acd6aaSZhangfei Gao mmp_pdma_free_desc_list(chan, &first->tx_list);
521c8acd6aaSZhangfei Gao return NULL;
522c8acd6aaSZhangfei Gao }
523c8acd6aaSZhangfei Gao
524c8acd6aaSZhangfei Gao static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan * dchan,struct scatterlist * sgl,unsigned int sg_len,enum dma_transfer_direction dir,unsigned long flags,void * context)525c8acd6aaSZhangfei Gao mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
526c8acd6aaSZhangfei Gao unsigned int sg_len, enum dma_transfer_direction dir,
527c8acd6aaSZhangfei Gao unsigned long flags, void *context)
528c8acd6aaSZhangfei Gao {
529c8acd6aaSZhangfei Gao struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
530c8acd6aaSZhangfei Gao struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
531c8acd6aaSZhangfei Gao size_t len, avail;
532c8acd6aaSZhangfei Gao struct scatterlist *sg;
533c8acd6aaSZhangfei Gao dma_addr_t addr;
534c8acd6aaSZhangfei Gao int i;
535c8acd6aaSZhangfei Gao
536c8acd6aaSZhangfei Gao if ((sgl == NULL) || (sg_len == 0))
537c8acd6aaSZhangfei Gao return NULL;
538c8acd6aaSZhangfei Gao
5396fc4573cSDaniel Mack chan->byte_align = false;
5406fc4573cSDaniel Mack
54156b94b02SVinod Koul mmp_pdma_config_write(dchan, &chan->slave_config, dir);
54256b94b02SVinod Koul
543c8acd6aaSZhangfei Gao for_each_sg(sgl, sg, sg_len, i) {
544c8acd6aaSZhangfei Gao addr = sg_dma_address(sg);
545c8acd6aaSZhangfei Gao avail = sg_dma_len(sgl);
546c8acd6aaSZhangfei Gao
547c8acd6aaSZhangfei Gao do {
548c8acd6aaSZhangfei Gao len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
5496fc4573cSDaniel Mack if (addr & 0x7)
5506fc4573cSDaniel Mack chan->byte_align = true;
551c8acd6aaSZhangfei Gao
552c8acd6aaSZhangfei Gao /* allocate and populate the descriptor */
553c8acd6aaSZhangfei Gao new = mmp_pdma_alloc_descriptor(chan);
554c8acd6aaSZhangfei Gao if (!new) {
555c8acd6aaSZhangfei Gao dev_err(chan->dev, "no memory for desc\n");
556c8acd6aaSZhangfei Gao goto fail;
557c8acd6aaSZhangfei Gao }
558c8acd6aaSZhangfei Gao
559c8acd6aaSZhangfei Gao new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
560c8acd6aaSZhangfei Gao if (dir == DMA_MEM_TO_DEV) {
561c8acd6aaSZhangfei Gao new->desc.dsadr = addr;
562c8acd6aaSZhangfei Gao new->desc.dtadr = chan->dev_addr;
563c8acd6aaSZhangfei Gao } else {
564c8acd6aaSZhangfei Gao new->desc.dsadr = chan->dev_addr;
565c8acd6aaSZhangfei Gao new->desc.dtadr = addr;
566c8acd6aaSZhangfei Gao }
567c8acd6aaSZhangfei Gao
568c8acd6aaSZhangfei Gao if (!first)
569c8acd6aaSZhangfei Gao first = new;
570c8acd6aaSZhangfei Gao else
571c8acd6aaSZhangfei Gao prev->desc.ddadr = new->async_tx.phys;
572c8acd6aaSZhangfei Gao
573c8acd6aaSZhangfei Gao new->async_tx.cookie = 0;
574c8acd6aaSZhangfei Gao async_tx_ack(&new->async_tx);
575c8acd6aaSZhangfei Gao prev = new;
576c8acd6aaSZhangfei Gao
577c8acd6aaSZhangfei Gao /* Insert the link descriptor to the LD ring */
578c8acd6aaSZhangfei Gao list_add_tail(&new->node, &first->tx_list);
579c8acd6aaSZhangfei Gao
580c8acd6aaSZhangfei Gao /* update metadata */
581c8acd6aaSZhangfei Gao addr += len;
582c8acd6aaSZhangfei Gao avail -= len;
583c8acd6aaSZhangfei Gao } while (avail);
584c8acd6aaSZhangfei Gao }
585c8acd6aaSZhangfei Gao
586c8acd6aaSZhangfei Gao first->async_tx.cookie = -EBUSY;
587c8acd6aaSZhangfei Gao first->async_tx.flags = flags;
588c8acd6aaSZhangfei Gao
589c8acd6aaSZhangfei Gao /* last desc and fire IRQ */
590c8acd6aaSZhangfei Gao new->desc.ddadr = DDADR_STOP;
591c8acd6aaSZhangfei Gao new->desc.dcmd |= DCMD_ENDIRQEN;
592c8acd6aaSZhangfei Gao
59350440d74SDaniel Mack chan->dir = dir;
59450440d74SDaniel Mack chan->cyclic_first = NULL;
59550440d74SDaniel Mack
59650440d74SDaniel Mack return &first->async_tx;
59750440d74SDaniel Mack
59850440d74SDaniel Mack fail:
59950440d74SDaniel Mack if (first)
60050440d74SDaniel Mack mmp_pdma_free_desc_list(chan, &first->tx_list);
60150440d74SDaniel Mack return NULL;
60250440d74SDaniel Mack }
60350440d74SDaniel Mack
/*
 * Prepare a cyclic (circular) DMA transaction.
 *
 * The buffer at @buf_addr of @len bytes is split into @len / @period_len
 * hardware descriptors, chained into a ring: the last descriptor's ddadr
 * points back at the first, so the transfer repeats until the channel is
 * terminated.  Every descriptor has DCMD_ENDIRQEN set so an interrupt is
 * raised at each period boundary.
 *
 * Returns the async_tx descriptor of the first link on success, or NULL
 * on invalid parameters or descriptor-allocation failure.
 */
static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	/* one period maps to exactly one hardware descriptor */
	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	mmp_pdma_config_write(dchan, &chan->slave_config, direction);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		/* interrupt at the end of every period */
		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		/* only the memory-side address advances between periods */
		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
690c8acd6aaSZhangfei Gao
mmp_pdma_config_write(struct dma_chan * dchan,struct dma_slave_config * cfg,enum dma_transfer_direction direction)69156b94b02SVinod Koul static int mmp_pdma_config_write(struct dma_chan *dchan,
69256b94b02SVinod Koul struct dma_slave_config *cfg,
69356b94b02SVinod Koul enum dma_transfer_direction direction)
694c8acd6aaSZhangfei Gao {
695c8acd6aaSZhangfei Gao struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
696c8acd6aaSZhangfei Gao u32 maxburst = 0, addr = 0;
697c8acd6aaSZhangfei Gao enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
698c8acd6aaSZhangfei Gao
699c8acd6aaSZhangfei Gao if (!dchan)
700c8acd6aaSZhangfei Gao return -EINVAL;
701c8acd6aaSZhangfei Gao
70256b94b02SVinod Koul if (direction == DMA_DEV_TO_MEM) {
703c8acd6aaSZhangfei Gao chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
704c8acd6aaSZhangfei Gao maxburst = cfg->src_maxburst;
705c8acd6aaSZhangfei Gao width = cfg->src_addr_width;
706c8acd6aaSZhangfei Gao addr = cfg->src_addr;
70756b94b02SVinod Koul } else if (direction == DMA_MEM_TO_DEV) {
708c8acd6aaSZhangfei Gao chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
709c8acd6aaSZhangfei Gao maxburst = cfg->dst_maxburst;
710c8acd6aaSZhangfei Gao width = cfg->dst_addr_width;
711c8acd6aaSZhangfei Gao addr = cfg->dst_addr;
712c8acd6aaSZhangfei Gao }
713c8acd6aaSZhangfei Gao
714c8acd6aaSZhangfei Gao if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
715c8acd6aaSZhangfei Gao chan->dcmd |= DCMD_WIDTH1;
716c8acd6aaSZhangfei Gao else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
717c8acd6aaSZhangfei Gao chan->dcmd |= DCMD_WIDTH2;
718c8acd6aaSZhangfei Gao else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
719c8acd6aaSZhangfei Gao chan->dcmd |= DCMD_WIDTH4;
720c8acd6aaSZhangfei Gao
721c8acd6aaSZhangfei Gao if (maxburst == 8)
722c8acd6aaSZhangfei Gao chan->dcmd |= DCMD_BURST8;
723c8acd6aaSZhangfei Gao else if (maxburst == 16)
724c8acd6aaSZhangfei Gao chan->dcmd |= DCMD_BURST16;
725c8acd6aaSZhangfei Gao else if (maxburst == 32)
726c8acd6aaSZhangfei Gao chan->dcmd |= DCMD_BURST32;
727c8acd6aaSZhangfei Gao
72856b94b02SVinod Koul chan->dir = direction;
729c8acd6aaSZhangfei Gao chan->dev_addr = addr;
730a0abd671SMaxime Ripard
731a0abd671SMaxime Ripard return 0;
732c8acd6aaSZhangfei Gao }
733c8acd6aaSZhangfei Gao
mmp_pdma_config(struct dma_chan * dchan,struct dma_slave_config * cfg)73456b94b02SVinod Koul static int mmp_pdma_config(struct dma_chan *dchan,
73556b94b02SVinod Koul struct dma_slave_config *cfg)
73656b94b02SVinod Koul {
73756b94b02SVinod Koul struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
73856b94b02SVinod Koul
73956b94b02SVinod Koul memcpy(&chan->slave_config, cfg, sizeof(*cfg));
74056b94b02SVinod Koul return 0;
74156b94b02SVinod Koul }
74256b94b02SVinod Koul
mmp_pdma_terminate_all(struct dma_chan * dchan)743a0abd671SMaxime Ripard static int mmp_pdma_terminate_all(struct dma_chan *dchan)
744a0abd671SMaxime Ripard {
745a0abd671SMaxime Ripard struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
746a0abd671SMaxime Ripard unsigned long flags;
747a0abd671SMaxime Ripard
748a0abd671SMaxime Ripard if (!dchan)
749a0abd671SMaxime Ripard return -EINVAL;
750a0abd671SMaxime Ripard
751a0abd671SMaxime Ripard disable_chan(chan->phy);
752a0abd671SMaxime Ripard mmp_pdma_free_phy(chan);
753a0abd671SMaxime Ripard spin_lock_irqsave(&chan->desc_lock, flags);
754a0abd671SMaxime Ripard mmp_pdma_free_desc_list(chan, &chan->chain_pending);
755a0abd671SMaxime Ripard mmp_pdma_free_desc_list(chan, &chan->chain_running);
756a0abd671SMaxime Ripard spin_unlock_irqrestore(&chan->desc_lock, flags);
757a0abd671SMaxime Ripard chan->idle = true;
758a0abd671SMaxime Ripard
7592b7f65b1SJoe Perches return 0;
760c8acd6aaSZhangfei Gao }
761c8acd6aaSZhangfei Gao
/*
 * Compute the residue (bytes not yet transferred) for @cookie on @chan
 * by comparing the channel's current source/target address register
 * against the descriptors of the running chain.
 */
static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
				     dma_cookie_t cookie)
{
	struct mmp_pdma_desc_sw *sw;
	u32 curr, residue = 0;
	bool passed = false;
	bool cyclic = chan->cyclic_first != NULL;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	/* read the memory-side address pointer for the transfer direction */
	if (chan->dir == DMA_DEV_TO_MEM)
		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
	else
		curr = readl(chan->phy->base + DSADR(chan->phy->idx));

	list_for_each_entry(sw, &chan->chain_running, node) {
		u32 start, end, len;

		if (chan->dir == DMA_DEV_TO_MEM)
			start = sw->desc.dtadr;
		else
			start = sw->desc.dsadr;

		len = sw->desc.dcmd & DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor which
		 * lies inside the boundaries of the curr pointer. All
		 * descriptors that occur in the list _after_ we found that
		 * partially handled descriptor are still to be processed and
		 * are hence added to the residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}

		/*
		 * Descriptors that have the ENDIRQEN bit set mark the end of a
		 * transaction chain, and the cookie assigned with it has been
		 * returned previously from mmp_pdma_tx_submit().
		 *
		 * In case we have multiple transactions in the running chain,
		 * and the cookie does not match the one the user asked us
		 * about, reset the state variables and start over.
		 *
		 * This logic does not apply to cyclic transactions, where all
		 * descriptors have the ENDIRQEN bit set, and for which we
		 * can't have multiple transactions on one channel anyway.
		 */
		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
			continue;

		if (sw->async_tx.cookie == cookie) {
			return residue;
		} else {
			residue = 0;
			passed = false;
		}
	}

	/* We should only get here in case of cyclic transactions */
	return residue;
}
8351b38da26SDaniel Mack
/*
 * dmaengine .device_tx_status callback: report the cookie's completion
 * state, augmented with the residue unless the status is DMA_ERROR.
 */
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status status = dma_cookie_status(dchan, cookie, txstate);

	if (status == DMA_ERROR)
		return status;

	dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
	return status;
}
849c8acd6aaSZhangfei Gao
8506cfb8321SLee Jones /*
851c8acd6aaSZhangfei Gao * mmp_pdma_issue_pending - Issue the DMA start command
852c8acd6aaSZhangfei Gao * pending list ==> running list
853c8acd6aaSZhangfei Gao */
mmp_pdma_issue_pending(struct dma_chan * dchan)854c8acd6aaSZhangfei Gao static void mmp_pdma_issue_pending(struct dma_chan *dchan)
855c8acd6aaSZhangfei Gao {
856c8acd6aaSZhangfei Gao struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
857c8acd6aaSZhangfei Gao unsigned long flags;
858c8acd6aaSZhangfei Gao
859c8acd6aaSZhangfei Gao spin_lock_irqsave(&chan->desc_lock, flags);
860c8acd6aaSZhangfei Gao start_pending_queue(chan);
861c8acd6aaSZhangfei Gao spin_unlock_irqrestore(&chan->desc_lock, flags);
862c8acd6aaSZhangfei Gao }
863c8acd6aaSZhangfei Gao
/*
 * dma_do_tasklet
 * Do call back
 * Start pending list
 */
static void dma_do_tasklet(struct tasklet_struct *t)
{
	struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;
	struct dmaengine_desc_callback cb;

	/*
	 * Cyclic transfers never retire descriptors: just invoke the
	 * client callback for the period interrupt and keep running.
	 */
	if (chan->cyclic_first) {
		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		/* callback is invoked without the lock held */
		dmaengine_desc_callback_invoke(&cb, NULL);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;
			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(txd, &cb);
		dmaengine_desc_callback_invoke(&cb, NULL);

		/* descriptor memory goes back to the DMA pool */
		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}
934c8acd6aaSZhangfei Gao
mmp_pdma_remove(struct platform_device * op)9354bf27b8bSGreg Kroah-Hartman static int mmp_pdma_remove(struct platform_device *op)
936c8acd6aaSZhangfei Gao {
937c8acd6aaSZhangfei Gao struct mmp_pdma_device *pdev = platform_get_drvdata(op);
938a4601892SVinod Koul struct mmp_pdma_phy *phy;
939a4601892SVinod Koul int i, irq = 0, irq_num = 0;
940a4601892SVinod Koul
94139716c56SChuhong Yuan if (op->dev.of_node)
94239716c56SChuhong Yuan of_dma_controller_free(op->dev.of_node);
943a4601892SVinod Koul
944a4601892SVinod Koul for (i = 0; i < pdev->dma_channels; i++) {
945a4601892SVinod Koul if (platform_get_irq(op, i) > 0)
946a4601892SVinod Koul irq_num++;
947a4601892SVinod Koul }
948a4601892SVinod Koul
949a4601892SVinod Koul if (irq_num != pdev->dma_channels) {
950a4601892SVinod Koul irq = platform_get_irq(op, 0);
951a4601892SVinod Koul devm_free_irq(&op->dev, irq, pdev);
952a4601892SVinod Koul } else {
953a4601892SVinod Koul for (i = 0; i < pdev->dma_channels; i++) {
954a4601892SVinod Koul phy = &pdev->phy[i];
955a4601892SVinod Koul irq = platform_get_irq(op, i);
956a4601892SVinod Koul devm_free_irq(&op->dev, irq, phy);
957a4601892SVinod Koul }
958a4601892SVinod Koul }
959c8acd6aaSZhangfei Gao
960c8acd6aaSZhangfei Gao dma_async_device_unregister(&pdev->device);
961c8acd6aaSZhangfei Gao return 0;
962c8acd6aaSZhangfei Gao }
963c8acd6aaSZhangfei Gao
mmp_pdma_chan_init(struct mmp_pdma_device * pdev,int idx,int irq)9642b7f65b1SJoe Perches static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
965c8acd6aaSZhangfei Gao {
966c8acd6aaSZhangfei Gao struct mmp_pdma_phy *phy = &pdev->phy[idx];
967c8acd6aaSZhangfei Gao struct mmp_pdma_chan *chan;
968c8acd6aaSZhangfei Gao int ret;
969c8acd6aaSZhangfei Gao
970593d9c2eSLaurent Pinchart chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
971c8acd6aaSZhangfei Gao if (chan == NULL)
972c8acd6aaSZhangfei Gao return -ENOMEM;
973c8acd6aaSZhangfei Gao
974c8acd6aaSZhangfei Gao phy->idx = idx;
975c8acd6aaSZhangfei Gao phy->base = pdev->base;
976c8acd6aaSZhangfei Gao
977c8acd6aaSZhangfei Gao if (irq) {
978f0b50777SChao Xie ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
979f0b50777SChao Xie IRQF_SHARED, "pdma", phy);
980c8acd6aaSZhangfei Gao if (ret) {
981c8acd6aaSZhangfei Gao dev_err(pdev->dev, "channel request irq fail!\n");
982c8acd6aaSZhangfei Gao return ret;
983c8acd6aaSZhangfei Gao }
984c8acd6aaSZhangfei Gao }
985c8acd6aaSZhangfei Gao
986c8acd6aaSZhangfei Gao spin_lock_init(&chan->desc_lock);
987c8acd6aaSZhangfei Gao chan->dev = pdev->dev;
988c8acd6aaSZhangfei Gao chan->chan.device = &pdev->device;
98977a4f4f7SAllen Pais tasklet_setup(&chan->tasklet, dma_do_tasklet);
990c8acd6aaSZhangfei Gao INIT_LIST_HEAD(&chan->chain_pending);
991c8acd6aaSZhangfei Gao INIT_LIST_HEAD(&chan->chain_running);
992c8acd6aaSZhangfei Gao
993c8acd6aaSZhangfei Gao /* register virt channel to dma engine */
9942b7f65b1SJoe Perches list_add_tail(&chan->chan.device_node, &pdev->device.channels);
995c8acd6aaSZhangfei Gao
996c8acd6aaSZhangfei Gao return 0;
997c8acd6aaSZhangfei Gao }
998c8acd6aaSZhangfei Gao
/* Device-tree match table: this driver binds to "marvell,pdma-1.0" nodes. */
static const struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
1004c8acd6aaSZhangfei Gao
mmp_pdma_dma_xlate(struct of_phandle_args * dma_spec,struct of_dma * ofdma)1005a9a7cf08SDaniel Mack static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
1006a9a7cf08SDaniel Mack struct of_dma *ofdma)
1007a9a7cf08SDaniel Mack {
1008a9a7cf08SDaniel Mack struct mmp_pdma_device *d = ofdma->of_dma_data;
10098010dad5SStephen Warren struct dma_chan *chan;
1010a9a7cf08SDaniel Mack
10118010dad5SStephen Warren chan = dma_get_any_slave_channel(&d->device);
10128010dad5SStephen Warren if (!chan)
1013a9a7cf08SDaniel Mack return NULL;
1014a9a7cf08SDaniel Mack
10152b7f65b1SJoe Perches to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
10162b7f65b1SJoe Perches
10172b7f65b1SJoe Perches return chan;
1018a9a7cf08SDaniel Mack }
1019a9a7cf08SDaniel Mack
/*
 * Probe the pdma controller: map the register block, determine the
 * channel count (DT "dma-channels" / deprecated "#dma-channels",
 * platform data, or a default of 32), wire up either one shared demuxed
 * IRQ or one IRQ per channel, and register the dmaengine device (plus
 * the OF translation hook when probed from the device tree).
 */
static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	pdev->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id) {
		/* Parse new and deprecated dma-channels properties */
		if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
					 &dma_channels))
			of_property_read_u32(pdev->dev->of_node, "#dma-channels",
					     &dma_channels);
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
	} else {
		dma_channels = 32;	/* default 32 channel */
	}
	pdev->dma_channels = dma_channels;

	/* count how many channels come with a dedicated IRQ line */
	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq_optional(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_SHARED, "pdma", pdev);
		if (ret)
			return ret;
	}

	/* irq == 0 tells chan_init to rely on the shared handler */
	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_config = mmp_pdma_config;
	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
	pdev->device.src_addr_widths = widths;
	pdev->device.dst_addr_widths = widths;
	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			dma_async_device_unregister(&pdev->device);
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}
1131c8acd6aaSZhangfei Gao
/* Legacy (non-DT) platform-device name match. */
static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};
1136c8acd6aaSZhangfei Gao
/* Platform driver glue: matches by DT compatible or legacy device name. */
static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");
1152