/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 * Copyright 2018 Angelo Dureghello <angelo@sysam.it>
 */
#ifndef _FSL_EDMA_COMMON_H_
#define _FSL_EDMA_COMMON_H_

#include <linux/dma-direction.h>
#include <linux/platform_device.h>
#include "virt-dma.h"

#define EDMA_CR_EDBG		BIT(1)
#define EDMA_CR_ERCA		BIT(2)
#define EDMA_CR_ERGA		BIT(3)
#define EDMA_CR_HOE		BIT(4)
#define EDMA_CR_HALT		BIT(5)
#define EDMA_CR_CLM		BIT(6)
#define EDMA_CR_EMLM		BIT(7)
#define EDMA_CR_ECX		BIT(16)
#define EDMA_CR_CX		BIT(17)

#define EDMA_SEEI_SEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CEEI_CEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CINT_CINT(x)	((x) & GENMASK(4, 0))
#define EDMA_CERR_CERR(x)	((x) & GENMASK(4, 0))

#define EDMA_TCD_ATTR_DSIZE(x)	((x) & GENMASK(2, 0))
#define EDMA_TCD_ATTR_DMOD(x)	(((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)	(((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x)	(((x) & GENMASK(4, 0)) << 11)
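
/*
 * The 3-bit SSIZE/DSIZE fields encode the access width as a power of two
 * (0 = 8-bit, 1 = 16-bit, 2 = 32-bit, 3 = 64-bit, per the eDMA reference
 * manuals). Illustrative only, not a line taken from the driver: a TCD
 * moving 32-bit words on both sides would program
 *
 *	attr = EDMA_TCD_ATTR_SSIZE(2) | EDMA_TCD_ATTR_DSIZE(2);
 */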

#define EDMA_TCD_ITER_MASK	GENMASK(14, 0)
#define EDMA_TCD_CITER_CITER(x)	((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_BITER_BITER(x)	((x) & EDMA_TCD_ITER_MASK)

#define EDMA_TCD_CSR_START	BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR	BIT(1)
#define EDMA_TCD_CSR_INT_HALF	BIT(2)
#define EDMA_TCD_CSR_D_REQ	BIT(3)
#define EDMA_TCD_CSR_E_SG	BIT(4)
#define EDMA_TCD_CSR_E_LINK	BIT(5)
#define EDMA_TCD_CSR_ACTIVE	BIT(6)
#define EDMA_TCD_CSR_DONE	BIT(7)

#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x)	((x) & GENMASK(9, 0))
#define EDMA_V3_TCD_NBYTES_MLOFF(x)		((x) << 10)
#define EDMA_V3_TCD_NBYTES_DMLOE		BIT(30)
#define EDMA_V3_TCD_NBYTES_SMLOE		BIT(31)
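
/*
 * With minor loop mapping enabled (EDMA_CR_EMLM), the NBYTES word is shared
 * with a signed minor loop offset: SMLOE/DMLOE select whether MLOFF is added
 * to the source and/or destination address after each minor loop completes
 * (description per the eDMA reference manuals).
 */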

#define EDMAMUX_CHCFG_DIS	0x0
#define EDMAMUX_CHCFG_ENBL	0x80
#define EDMAMUX_CHCFG_SOURCE(n)	((n) & 0x3F)

#define DMAMUX_NR	2

#define EDMA_TCD	0x1000

#define FSL_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

#define EDMA_V3_CH_SBR_RD	BIT(22)
#define EDMA_V3_CH_SBR_WR	BIT(21)
#define EDMA_V3_CH_CSR_ERQ	BIT(0)
#define EDMA_V3_CH_CSR_EARQ	BIT(1)
#define EDMA_V3_CH_CSR_EEI	BIT(2)
#define EDMA_V3_CH_CSR_DONE	BIT(30)
#define EDMA_V3_CH_CSR_ACTIVE	BIT(31)

enum fsl_edma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};

struct fsl_edma_hw_tcd {
	__le32	saddr;
	__le16	soff;
	__le16	attr;
	__le32	nbytes;
	__le32	slast;
	__le32	daddr;
	__le16	doff;
	__le16	citer;
	__le32	dlast_sga;
	__le16	csr;
	__le16	biter;
};
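
/*
 * The layout above mirrors the 32-byte hardware TCD, so a memory-resident
 * TCD can be copied field by field into the channel's TCD registers. The
 * csr word is conventionally written last, since it may carry the START or
 * E_SG bits that arm the channel.
 */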

struct fsl_edma3_ch_reg {
	__le32	ch_csr;
	__le32	ch_es;
	__le32	ch_int;
	__le32	ch_sbr;
	__le32	ch_pri;
	__le32	ch_mux;
	__le32	ch_mattr;	/* edma4, reserved for edma3 */
	__le32	ch_reserved;
	struct fsl_edma_hw_tcd tcd;
} __packed;
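
/*
 * On edma3/edma4 (FSL_EDMA_DRV_SPLIT_REG) fsl_edma_chan::tcd points at the
 * tcd member of this per-channel block, which is why edma_readl_chreg() and
 * edma_writel_chreg() below can reach CH_CSR/CH_ES/CH_SBR/CH_MUX through
 * container_of().
 */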

/*
 * iomem pointers to the global eDMA registers, valid for both the v32
 * (32-channel) and v64 (64-channel) register layouts.
 */
struct edma_regs {
	void __iomem *cr;
	void __iomem *es;
	void __iomem *erqh;
	void __iomem *erql;	/* aka erq on v32 */
	void __iomem *eeih;
	void __iomem *eeil;	/* aka eei on v32 */
	void __iomem *seei;
	void __iomem *ceei;
	void __iomem *serq;
	void __iomem *cerq;
	void __iomem *cint;
	void __iomem *cerr;
	void __iomem *ssrt;
	void __iomem *cdne;
	void __iomem *inth;
	void __iomem *intl;
	void __iomem *errh;
	void __iomem *errl;
};

struct fsl_edma_sw_tcd {
	dma_addr_t			ptcd;
	struct fsl_edma_hw_tcd		*vtcd;
};
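
/*
 * One entry per hardware TCD of a transfer: ptcd is the bus address handed
 * to the controller (e.g. for scatter/gather linking through dlast_sga) and
 * vtcd the CPU mapping used to fill it in, typically both obtained from the
 * channel's tcd_pool.
 */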

struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;
	enum fsl_edma_pm_state		pm_state;
	bool				idle;
	u32				slave_id;
	struct fsl_edma_engine		*edma;
	struct fsl_edma_desc		*edesc;
	struct dma_slave_config		cfg;
	u32				attr;
	bool				is_sw;
	struct dma_pool			*tcd_pool;
	dma_addr_t			dma_dev_addr;
	u32				dma_dev_size;
	enum dma_data_direction		dma_dir;
	char				chan_name[32];
	struct fsl_edma_hw_tcd __iomem	*tcd;
	void __iomem			*mux_addr;
	u32				real_count;
	struct work_struct		issue_worker;
	struct platform_device		*pdev;
	struct device			*pd_dev;
	u32				srcid;
	struct clk			*clk;
	int				priority;
	int				hw_chanid;
	int				txirq;
	bool				is_rxchan;
	bool				is_remote;
	bool				is_multi_fifo;
};

struct fsl_edma_desc {
	struct virt_dma_desc		vdesc;
	struct fsl_edma_chan		*echan;
	bool				iscyclic;
	enum dma_transfer_direction	dirn;
	unsigned int			n_tcds;
	struct fsl_edma_sw_tcd		tcd[];
};

#define FSL_EDMA_DRV_HAS_DMACLK		BIT(0)
#define FSL_EDMA_DRV_MUX_SWAP		BIT(1)
#define FSL_EDMA_DRV_CONFIG32		BIT(2)
#define FSL_EDMA_DRV_WRAP_IO		BIT(3)
#define FSL_EDMA_DRV_EDMA64		BIT(4)
#define FSL_EDMA_DRV_HAS_PD		BIT(5)
#define FSL_EDMA_DRV_HAS_CHCLK		BIT(6)
#define FSL_EDMA_DRV_HAS_CHMUX		BIT(7)
#define FSL_EDMA_DRV_MEM_REMOTE		BIT(8)
/* channel control and status registers live in the TCD address space (edma3 register layout) */
#define FSL_EDMA_DRV_SPLIT_REG		BIT(9)
#define FSL_EDMA_DRV_BUS_8BYTE		BIT(10)
#define FSL_EDMA_DRV_DEV_TO_DEV		BIT(11)
#define FSL_EDMA_DRV_ALIGN_64BYTE	BIT(12)
/* CHn_CSR[DONE] must be cleared before enabling the TCD's E_SG bit */
#define FSL_EDMA_DRV_CLEAR_DONE_E_SG	BIT(13)
/* CHn_CSR[DONE] must be cleared before enabling the TCD's major loop E_LINK bit */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK	BIT(14)

#define FSL_EDMA_DRV_EDMA3	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_SG |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)

#define FSL_EDMA_DRV_EDMA4	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)

struct fsl_edma_drvdata {
	u32			dmamuxs;	/* only used before v3 */
	u32			chreg_off;
	u32			chreg_space_sz;
	u32			flags;
	u32			mux_off;	/* channel mux register offset */
	u32			mux_skip;	/* register stride between per-channel mux entries */
	int			(*setup_irq)(struct platform_device *pdev,
					     struct fsl_edma_engine *fsl_edma);
};
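
/*
 * Illustrative sketch of a drvdata entry; the field values and the
 * setup_irq callback name are hypothetical, not copied from any real SoC
 * description:
 *
 *	static const struct fsl_edma_drvdata example_drvdata = {
 *		.flags		= FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_HAS_PD,
 *		.chreg_space_sz	= 0x10000,
 *		.chreg_off	= 0x10000,
 *		.setup_irq	= example_setup_irq,
 *	};
 */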

struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;
	void __iomem		*muxbase[DMAMUX_NR];
	struct clk		*muxclk[DMAMUX_NR];
	struct clk		*dmaclk;
	struct clk		*chclk;
	struct mutex		fsl_edma_mutex;
	const struct fsl_edma_drvdata *drvdata;
	u32			n_chans;
	int			txirq;
	int			errirq;
	bool			big_endian;
	struct edma_regs	regs;
	u64			chan_masked;
	struct fsl_edma_chan	chans[];
};

#define edma_read_tcdreg(chan, __name)				\
(sizeof(chan->tcd->__name) == sizeof(u32) ?			\
	edma_readl(chan->edma, &chan->tcd->__name) :		\
	edma_readw(chan->edma, &chan->tcd->__name))

#define edma_write_tcdreg(chan, val, __name)			\
(sizeof(chan->tcd->__name) == sizeof(u32) ?			\
	edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) :	\
	edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))

#define edma_readl_chreg(chan, __name)				\
	edma_readl(chan->edma,					\
		   (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))

#define edma_writel_chreg(chan, val, __name)			\
	edma_writel(chan->edma, val,				\
		    (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
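
/*
 * Illustrative use of the accessors above (hypothetical call site, shown
 * only to clarify the container_of() indirection):
 *
 *	u32 csr = edma_readl_chreg(fsl_chan, ch_csr);
 *	edma_writel_chreg(fsl_chan, csr | EDMA_V3_CH_CSR_ERQ, ch_csr);
 */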
251
/*
 * R/W functions for big- or little-endian registers:
 * the eDMA controller's endianness is independent of the CPU core's.
 * For the big-endian IP module, the offsets of 8-bit and 16-bit registers
 * must also be byte-swapped relative to the little-endian layout.
 */
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
	if (edma->big_endian)
		return ioread32be(addr);
	else
		return ioread32(addr);
}

static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
{
	if (edma->big_endian)
		return ioread16be(addr);
	else
		return ioread16(addr);
}

static inline void edma_writeb(struct fsl_edma_engine *edma,
			       u8 val, void __iomem *addr)
{
	/* swap the reg offset for these in big-endian mode */
	if (edma->big_endian)
		iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
	else
		iowrite8(val, addr);
}

static inline void edma_writew(struct fsl_edma_engine *edma,
			       u16 val, void __iomem *addr)
{
	/* swap the reg offset for these in big-endian mode */
	if (edma->big_endian)
		iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
	else
		iowrite16(val, addr);
}

static inline void edma_writel(struct fsl_edma_engine *edma,
			       u32 val, void __iomem *addr)
{
	if (edma->big_endian)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);
}
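
/*
 * Illustrative use of the endian-aware helpers (hypothetical call site,
 * "ch" stands for a channel index):
 *
 *	u32 err = edma_readl(fsl_edma, fsl_edma->regs.errl);
 *	edma_writeb(fsl_edma, EDMA_SEEI_SEEI(ch), fsl_edma->regs.seei);
 */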

static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}

static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
{
	return fsl_chan->edma->drvdata->flags;
}

static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}

static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	fsl_chan->status = DMA_ERROR;
	fsl_chan->idle = true;
}
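
/*
 * Hedged sketch, not part of the driver API: the helper name below is made
 * up for illustration and only shows how the hardware ACTIVE status could
 * be read, using the per-channel CSR on split-register (edma3/edma4)
 * variants and the TCD CSR otherwise.
 */
static inline bool fsl_edma_hw_chan_active(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return !!(edma_readl_chreg(fsl_chan, ch_csr) &
			  EDMA_V3_CH_CSR_ACTIVE);

	return !!(edma_read_tcdreg(fsl_chan, csr) & EDMA_TCD_CSR_ACTIVE);
}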

void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable);
void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
int fsl_edma_terminate_all(struct dma_chan *chan);
int fsl_edma_pause(struct dma_chan *chan);
int fsl_edma_resume(struct dma_chan *chan);
int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg);
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate);
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(
		struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
		size_t len, unsigned long flags);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
void fsl_edma_issue_pending(struct dma_chan *chan);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
void fsl_edma_free_chan_resources(struct dma_chan *chan);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
void fsl_edma_setup_regs(struct fsl_edma_engine *edma);

#endif /* _FSL_EDMA_COMMON_H_ */