1b092529eSPeng Ma // SPDX-License-Identifier: GPL-2.0
2b092529eSPeng Ma // Copyright 2014-2015 Freescale
3b092529eSPeng Ma // Copyright 2018 NXP
4b092529eSPeng Ma
5b092529eSPeng Ma /*
6b092529eSPeng Ma * Driver for NXP Layerscape Queue Direct Memory Access Controller
7b092529eSPeng Ma *
8b092529eSPeng Ma * Author:
9b092529eSPeng Ma * Wen He <wen.he_1@nxp.com>
10b092529eSPeng Ma * Jiaheng Fan <jiaheng.fan@nxp.com>
11b092529eSPeng Ma *
12b092529eSPeng Ma */
13b092529eSPeng Ma
14b092529eSPeng Ma #include <linux/module.h>
15b092529eSPeng Ma #include <linux/delay.h>
16897500c7SRob Herring #include <linux/of.h>
17b092529eSPeng Ma #include <linux/of_dma.h>
18b092529eSPeng Ma #include <linux/dma-mapping.h>
19897500c7SRob Herring #include <linux/platform_device.h>
20b092529eSPeng Ma
21b092529eSPeng Ma #include "virt-dma.h"
22b092529eSPeng Ma #include "fsldma.h"
23b092529eSPeng Ma
24b092529eSPeng Ma /* Register related definition */
25b092529eSPeng Ma #define FSL_QDMA_DMR 0x0
26b092529eSPeng Ma #define FSL_QDMA_DSR 0x4
27b092529eSPeng Ma #define FSL_QDMA_DEIER 0xe00
28b092529eSPeng Ma #define FSL_QDMA_DEDR 0xe04
29b092529eSPeng Ma #define FSL_QDMA_DECFDW0R 0xe10
30b092529eSPeng Ma #define FSL_QDMA_DECFDW1R 0xe14
31b092529eSPeng Ma #define FSL_QDMA_DECFDW2R 0xe18
32b092529eSPeng Ma #define FSL_QDMA_DECFDW3R 0xe1c
33b092529eSPeng Ma #define FSL_QDMA_DECFQIDR 0xe30
34b092529eSPeng Ma #define FSL_QDMA_DECBR 0xe34
35b092529eSPeng Ma
36b092529eSPeng Ma #define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
37b092529eSPeng Ma #define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
38b092529eSPeng Ma #define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
39b092529eSPeng Ma #define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
40b092529eSPeng Ma #define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
41b092529eSPeng Ma #define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
42b092529eSPeng Ma #define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
43b092529eSPeng Ma #define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
44b092529eSPeng Ma
45b092529eSPeng Ma #define FSL_QDMA_SQDPAR 0x80c
46b092529eSPeng Ma #define FSL_QDMA_SQEPAR 0x814
47b092529eSPeng Ma #define FSL_QDMA_BSQMR 0x800
48b092529eSPeng Ma #define FSL_QDMA_BSQSR 0x804
49b092529eSPeng Ma #define FSL_QDMA_BSQICR 0x828
50b092529eSPeng Ma #define FSL_QDMA_CQMR 0xa00
51b092529eSPeng Ma #define FSL_QDMA_CQDSCR1 0xa08
52b092529eSPeng Ma #define FSL_QDMA_CQDSCR2 0xa0c
53b092529eSPeng Ma #define FSL_QDMA_CQIER 0xa10
54b092529eSPeng Ma #define FSL_QDMA_CQEDR 0xa14
55b092529eSPeng Ma #define FSL_QDMA_SQCCMR 0xa20
56b092529eSPeng Ma
57b092529eSPeng Ma /* Registers for bit and genmask */
58b092529eSPeng Ma #define FSL_QDMA_CQIDR_SQT BIT(15)
59ab6041e4SKoehrer Mathias (ETAS/EES-SL) #define QDMA_CCDF_FORMAT BIT(29)
60b092529eSPeng Ma #define QDMA_CCDF_SER BIT(30)
61b092529eSPeng Ma #define QDMA_SG_FIN BIT(30)
62b092529eSPeng Ma #define QDMA_SG_LEN_MASK GENMASK(29, 0)
63b092529eSPeng Ma #define QDMA_CCDF_MASK GENMASK(28, 20)
64b092529eSPeng Ma
65b092529eSPeng Ma #define FSL_QDMA_DEDR_CLEAR GENMASK(31, 0)
66b092529eSPeng Ma #define FSL_QDMA_BCQIDR_CLEAR GENMASK(31, 0)
67b092529eSPeng Ma #define FSL_QDMA_DEIER_CLEAR GENMASK(31, 0)
68b092529eSPeng Ma
69b092529eSPeng Ma #define FSL_QDMA_BCQIER_CQTIE BIT(15)
70b092529eSPeng Ma #define FSL_QDMA_BCQIER_CQPEIE BIT(23)
71b092529eSPeng Ma #define FSL_QDMA_BSQICR_ICEN BIT(31)
72b092529eSPeng Ma
73b092529eSPeng Ma #define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
74b092529eSPeng Ma #define FSL_QDMA_CQIER_MEIE BIT(31)
75b092529eSPeng Ma #define FSL_QDMA_CQIER_TEIE BIT(0)
76b092529eSPeng Ma #define FSL_QDMA_SQCCMR_ENTER_WM BIT(21)
77b092529eSPeng Ma
78b092529eSPeng Ma #define FSL_QDMA_BCQMR_EN BIT(31)
79b092529eSPeng Ma #define FSL_QDMA_BCQMR_EI BIT(30)
80b092529eSPeng Ma #define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
81b092529eSPeng Ma #define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
82b092529eSPeng Ma
83b092529eSPeng Ma #define FSL_QDMA_BCQSR_QF BIT(16)
84b092529eSPeng Ma #define FSL_QDMA_BCQSR_XOFF BIT(0)
85b092529eSPeng Ma
86b092529eSPeng Ma #define FSL_QDMA_BSQMR_EN BIT(31)
87b092529eSPeng Ma #define FSL_QDMA_BSQMR_DI BIT(30)
88b092529eSPeng Ma #define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
89b092529eSPeng Ma
90b092529eSPeng Ma #define FSL_QDMA_BSQSR_QE BIT(17)
91b092529eSPeng Ma
92b092529eSPeng Ma #define FSL_QDMA_DMR_DQD BIT(30)
93b092529eSPeng Ma #define FSL_QDMA_DSR_DB BIT(31)
94b092529eSPeng Ma
95b092529eSPeng Ma /* Size related definition */
96b092529eSPeng Ma #define FSL_QDMA_QUEUE_MAX 8
97b092529eSPeng Ma #define FSL_QDMA_COMMAND_BUFFER_SIZE 64
98b092529eSPeng Ma #define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
99b092529eSPeng Ma #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
100b092529eSPeng Ma #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
101b092529eSPeng Ma #define FSL_QDMA_QUEUE_NUM_MAX 8
102b092529eSPeng Ma
103b092529eSPeng Ma /* Field definition for CMD */
104b092529eSPeng Ma #define FSL_QDMA_CMD_RWTTYPE 0x4
105b092529eSPeng Ma #define FSL_QDMA_CMD_LWC 0x2
106b092529eSPeng Ma #define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
107b092529eSPeng Ma #define FSL_QDMA_CMD_NS_OFFSET 27
108b092529eSPeng Ma #define FSL_QDMA_CMD_DQOS_OFFSET 24
109b092529eSPeng Ma #define FSL_QDMA_CMD_WTHROTL_OFFSET 20
110b092529eSPeng Ma #define FSL_QDMA_CMD_DSEN_OFFSET 19
111b092529eSPeng Ma #define FSL_QDMA_CMD_LWC_OFFSET 16
1125b696e9cSPeng Ma #define FSL_QDMA_CMD_PF BIT(17)
113b092529eSPeng Ma
114ab6041e4SKoehrer Mathias (ETAS/EES-SL) /* Field definition for Descriptor status */
115ab6041e4SKoehrer Mathias (ETAS/EES-SL) #define QDMA_CCDF_STATUS_RTE BIT(5)
116ab6041e4SKoehrer Mathias (ETAS/EES-SL) #define QDMA_CCDF_STATUS_WTE BIT(4)
117ab6041e4SKoehrer Mathias (ETAS/EES-SL) #define QDMA_CCDF_STATUS_CDE BIT(2)
118ab6041e4SKoehrer Mathias (ETAS/EES-SL) #define QDMA_CCDF_STATUS_SDE BIT(1)
119ab6041e4SKoehrer Mathias (ETAS/EES-SL) #define QDMA_CCDF_STATUS_DDE BIT(0)
120ab6041e4SKoehrer Mathias (ETAS/EES-SL) #define QDMA_CCDF_STATUS_MASK (QDMA_CCDF_STATUS_RTE | \
121ab6041e4SKoehrer Mathias (ETAS/EES-SL) QDMA_CCDF_STATUS_WTE | \
122ab6041e4SKoehrer Mathias (ETAS/EES-SL) QDMA_CCDF_STATUS_CDE | \
123ab6041e4SKoehrer Mathias (ETAS/EES-SL) QDMA_CCDF_STATUS_SDE | \
124ab6041e4SKoehrer Mathias (ETAS/EES-SL) QDMA_CCDF_STATUS_DDE)
125ab6041e4SKoehrer Mathias (ETAS/EES-SL)
126b092529eSPeng Ma /* Field definition for Descriptor offset */
127b092529eSPeng Ma #define QDMA_CCDF_OFFSET 20
1288f95adcfSPeng Ma #define QDMA_SDDF_CMD(x) (((u64)(x)) << 32)
129b092529eSPeng Ma
130b092529eSPeng Ma /* Field definition for safe loop count*/
131b092529eSPeng Ma #define FSL_QDMA_HALT_COUNT 1500
132b092529eSPeng Ma #define FSL_QDMA_MAX_SIZE 16385
133b092529eSPeng Ma #define FSL_QDMA_COMP_TIMEOUT 1000
134b092529eSPeng Ma #define FSL_COMMAND_QUEUE_OVERFLLOW 10
135b092529eSPeng Ma
136b092529eSPeng Ma #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
137b092529eSPeng Ma (((fsl_qdma_engine)->block_offset) * (x))
138b092529eSPeng Ma
/**
 * struct fsl_qdma_format - Hardware layout of one 16-byte qDMA descriptor
 *			    entry. The same layout serves as command
 *			    descriptor, status notification and compound
 *			    S/G entry.
 * @status:	Command status and enqueue status notification.
 * @cfg:	Frame offset and frame format.
 * @addr_lo:	Lower 32 bits of the 40-bit memory address held by
 *		this descriptor.
 * @addr_hi:	Upper 8 bits of the same 40-bit memory address.
 * @__reserved1: Reserved field.
 * @cfg8b_w1:	Compound descriptor command queue origin produced
 *		by qDMA and dynamic debug field.
 * @data:	Raw 64-bit view overlaying the address fields; the low
 *		40 bits describe DMA source/destination information.
 */
struct fsl_qdma_format {
	__le32 status;
	__le32 cfg;
	union {
		struct {
			__le32 addr_lo;
			u8 addr_hi;
			u8 __reserved1[2];
			u8 cfg8b_w1;
		} __packed;
		__le64 data;
	};
} __packed;
167b092529eSPeng Ma
/*
 * qDMA status notification pre information: the completion address and
 * queue id seen in the previously processed status entry, so duplicate
 * notifications can be recognized.
 */
struct fsl_pre_status {
	u64 addr;	/* completion address of the last status entry */
	u8 queue;	/* queue id of the last status entry */
};

/* Per-CPU record of the most recently processed status notification. */
static DEFINE_PER_CPU(struct fsl_pre_status, pre);
175b092529eSPeng Ma
/* Per-channel state: a virt-dma channel bound to one command queue. */
struct fsl_qdma_chan {
	struct virt_dma_chan vchan;	/* core virt-dma channel */
	struct virt_dma_desc vdesc;	/* embedded virt-dma descriptor */
	enum dma_status status;		/* last reported channel status */
	struct fsl_qdma_engine *qdma;	/* owning engine */
	struct fsl_qdma_queue *queue;	/* command queue serving this channel */
};
183b092529eSPeng Ma
/* One circular command (or status) queue plus its descriptor pools. */
struct fsl_qdma_queue {
	struct fsl_qdma_format *virt_head;	/* head cursor into the ring (CPU address) */
	struct fsl_qdma_format *virt_tail;	/* tail cursor into the ring (CPU address) */
	struct list_head comp_used;		/* completion descs handed to hardware */
	struct list_head comp_free;		/* pre-allocated spare completion descs */
	struct dma_pool *comp_pool;		/* pool backing command buffers */
	struct dma_pool *desc_pool;		/* pool backing S/G descriptor buffers */
	spinlock_t queue_lock;			/* protects the two lists and ring cursors */
	dma_addr_t bus_addr;			/* DMA address of the ring */
	u32 n_cq;				/* number of entries in the ring */
	u32 id;					/* queue index within its block */
	struct fsl_qdma_format *cq;		/* ring base (CPU address) */
	void __iomem *block_base;		/* register base of the owning block */
};
198b092529eSPeng Ma
/* One prepared transfer: a command buffer plus an S/G descriptor buffer. */
struct fsl_qdma_comp {
	dma_addr_t bus_addr;			/* DMA address of the command buffer */
	dma_addr_t desc_bus_addr;		/* DMA address of the descriptor buffer */
	struct fsl_qdma_format *virt_addr;	/* command buffer (CPU address) */
	struct fsl_qdma_format *desc_virt_addr;	/* descriptor buffer (CPU address) */
	struct fsl_qdma_chan *qchan;		/* channel this transfer belongs to */
	struct virt_dma_desc vdesc;		/* virt-dma bookkeeping */
	struct list_head list;			/* entry in comp_free / comp_used */
};
208b092529eSPeng Ma
/* Top-level driver state for one qDMA controller instance. */
struct fsl_qdma_engine {
	struct dma_device dma_dev;	/* dmaengine registration */
	void __iomem *ctrl_base;	/* controller register region */
	void __iomem *status_base;	/* status register region */
	void __iomem *block_base;	/* first DMA block's register region */
	u32 n_chans;			/* number of virtual channels */
	u32 n_queues;			/* command queues per block */
	struct mutex fsl_qdma_mutex;	/* serializes engine-wide state */
	int error_irq;			/* controller error interrupt line */
	int *queue_irq;			/* per-block queue interrupt lines */
	u32 feature;			/* feature flags — set at probe; not used in this chunk */
	struct fsl_qdma_queue *queue;	/* command queue array (queues x blocks) */
	struct fsl_qdma_queue **status;	/* per-block status queues */
	struct fsl_qdma_chan *chans;	/* channel array */
	int block_number;		/* number of DMA blocks */
	int block_offset;		/* register stride between blocks */
	int irq_base;			/* presumably first queue IRQ index — confirm against probe */
	int desc_allocated;		/* channels currently holding DMA pools (decremented on free) */

};
229b092529eSPeng Ma
230b092529eSPeng Ma static inline u64
qdma_ccdf_addr_get64(const struct fsl_qdma_format * ccdf)231b092529eSPeng Ma qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
232b092529eSPeng Ma {
233b092529eSPeng Ma return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
234b092529eSPeng Ma }
235b092529eSPeng Ma
236b092529eSPeng Ma static inline void
qdma_desc_addr_set64(struct fsl_qdma_format * ccdf,u64 addr)237b092529eSPeng Ma qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
238b092529eSPeng Ma {
239b092529eSPeng Ma ccdf->addr_hi = upper_32_bits(addr);
240b092529eSPeng Ma ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
241b092529eSPeng Ma }
242b092529eSPeng Ma
243b092529eSPeng Ma static inline u8
qdma_ccdf_get_queue(const struct fsl_qdma_format * ccdf)244b092529eSPeng Ma qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
245b092529eSPeng Ma {
246b092529eSPeng Ma return ccdf->cfg8b_w1 & U8_MAX;
247b092529eSPeng Ma }
248b092529eSPeng Ma
249b092529eSPeng Ma static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format * ccdf)250b092529eSPeng Ma qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
251b092529eSPeng Ma {
252b092529eSPeng Ma return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
253b092529eSPeng Ma }
254b092529eSPeng Ma
255b092529eSPeng Ma static inline void
qdma_ccdf_set_format(struct fsl_qdma_format * ccdf,int offset)256b092529eSPeng Ma qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
257b092529eSPeng Ma {
258ab6041e4SKoehrer Mathias (ETAS/EES-SL) ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT |
259ab6041e4SKoehrer Mathias (ETAS/EES-SL) (offset << QDMA_CCDF_OFFSET));
260b092529eSPeng Ma }
261b092529eSPeng Ma
262b092529eSPeng Ma static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format * ccdf)263b092529eSPeng Ma qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
264b092529eSPeng Ma {
265ab6041e4SKoehrer Mathias (ETAS/EES-SL) return (le32_to_cpu(ccdf->status) & QDMA_CCDF_STATUS_MASK);
266b092529eSPeng Ma }
267b092529eSPeng Ma
268b092529eSPeng Ma static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format * ccdf,int status)269b092529eSPeng Ma qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
270b092529eSPeng Ma {
271b092529eSPeng Ma ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
272b092529eSPeng Ma }
273b092529eSPeng Ma
qdma_csgf_set_len(struct fsl_qdma_format * csgf,int len)274b092529eSPeng Ma static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
275b092529eSPeng Ma {
276b092529eSPeng Ma csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
277b092529eSPeng Ma }
278b092529eSPeng Ma
qdma_csgf_set_f(struct fsl_qdma_format * csgf,int len)279b092529eSPeng Ma static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
280b092529eSPeng Ma {
281b092529eSPeng Ma csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
282b092529eSPeng Ma }
283b092529eSPeng Ma
/* 32-bit register read through FSL_DMA_IN, honouring engine endianness. */
static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
{
	return FSL_DMA_IN(qdma, addr, 32);
}
288b092529eSPeng Ma
/* 32-bit register write through FSL_DMA_OUT, honouring engine endianness. */
static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
			void __iomem *addr)
{
	FSL_DMA_OUT(qdma, addr, val, 32);
}
294b092529eSPeng Ma
/* Map a generic dma_chan back to its containing fsl_qdma_chan. */
static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
}
299b092529eSPeng Ma
/* Map a virt-dma descriptor back to its containing fsl_qdma_comp. */
static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_qdma_comp, vdesc);
}
304b092529eSPeng Ma
/*
 * dmaengine free_chan_resources hook: drop all pending virtual
 * descriptors, then return every completion descriptor (used and free)
 * to the queue's DMA pools and destroy the pools themselves.
 */
static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;
	unsigned long flags;
	LIST_HEAD(head);

	/* Detach all outstanding virtual descriptors under the channel lock. */
	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	/*
	 * Nothing more to tear down if the pools were never allocated.
	 * NOTE(review): the pools are created/destroyed as a pair elsewhere;
	 * this bails only when BOTH are absent — confirm the pair invariant
	 * holds on all alloc paths.
	 */
	if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
		return;

	/* Return in-flight completion descriptors to their pools. */
	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_used, list) {
		dma_pool_free(fsl_queue->comp_pool,
			      comp_temp->virt_addr,
			      comp_temp->bus_addr);
		dma_pool_free(fsl_queue->desc_pool,
			      comp_temp->desc_virt_addr,
			      comp_temp->desc_bus_addr);
		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	/* Likewise for the pre-allocated spares on the free list. */
	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_free, list) {
		dma_pool_free(fsl_queue->comp_pool,
			      comp_temp->virt_addr,
			      comp_temp->bus_addr);
		dma_pool_free(fsl_queue->desc_pool,
			      comp_temp->desc_virt_addr,
			      comp_temp->desc_bus_addr);
		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	/* dma_pool_destroy() accepts NULL, so this is safe either way. */
	dma_pool_destroy(fsl_queue->comp_pool);
	dma_pool_destroy(fsl_queue->desc_pool);

	fsl_qdma->desc_allocated--;
	fsl_queue->comp_pool = NULL;
	fsl_queue->desc_pool = NULL;
}
354b092529eSPeng Ma
/*
 * Fill a pre-allocated completion descriptor set for a memcpy transfer:
 * the head frame descriptor, the compound S/G table (descriptor, source
 * and destination entries) and the source/destination command words.
 */
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
				      dma_addr_t dst, dma_addr_t src, u32 len)
{
	u32 cmd;
	struct fsl_qdma_format *sdf, *ddf;
	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

	/* The command buffer is four consecutive 16-byte entries. */
	ccdf = fsl_comp->virt_addr;
	csgf_desc = fsl_comp->virt_addr + 1;
	csgf_src = fsl_comp->virt_addr + 2;
	csgf_dest = fsl_comp->virt_addr + 3;
	/* The descriptor buffer holds the source and destination commands. */
	sdf = fsl_comp->desc_virt_addr;
	ddf = fsl_comp->desc_virt_addr + 1;

	memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
	memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
	/* Head Command Descriptor (Frame Descriptor): points at csgf_desc. */
	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
	/* Status notification is enqueued to status queue. */
	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
	/* Compound Command Descriptor (Frame List Table). */
	qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
	/* It must be 32 as Compound S/G Descriptor. */
	qdma_csgf_set_len(csgf_desc, 32);
	qdma_desc_addr_set64(csgf_src, src);
	qdma_csgf_set_len(csgf_src, len);
	qdma_desc_addr_set64(csgf_dest, dst);
	qdma_csgf_set_len(csgf_dest, len);
	/* This entry is the last entry. */
	qdma_csgf_set_f(csgf_dest, len);
	/*
	 * Descriptor Buffer.
	 * NOTE(review): cmd is a plain u32 yet is assigned cpu_to_le32()
	 * values before QDMA_SDDF_CMD() widens it into the __le64 data
	 * word; the byte-order handling looks inconsistent on big-endian
	 * hosts — confirm against the qDMA reference manual.
	 */
	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
			  FSL_QDMA_CMD_RWTTYPE_OFFSET) |
			  FSL_QDMA_CMD_PF;
	sdf->data = QDMA_SDDF_CMD(cmd);

	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
	ddf->data = QDMA_SDDF_CMD(cmd);
}
397b092529eSPeng Ma
/*
 * Pre-request full command descriptors for enqueue.
 *
 * Allocate n_cq + FSL_COMMAND_QUEUE_OVERFLLOW completion descriptors up
 * front — each backed by one command buffer and one S/G descriptor
 * buffer from the queue's DMA pools — and park them on comp_free.
 * On any failure everything allocated so far is unwound and -ENOMEM
 * is returned.
 */
static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
{
	int i;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;

	for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLLOW; i++) {
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
		if (!comp_temp)
			goto err_alloc;
		comp_temp->virt_addr =
			dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
				       &comp_temp->bus_addr);
		if (!comp_temp->virt_addr)
			goto err_dma_alloc;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_desc_dma_alloc;

		list_add_tail(&comp_temp->list, &queue->comp_free);
	}

	return 0;

/* Unwind the partially-built descriptor, then everything on comp_free. */
err_desc_dma_alloc:
	dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
		      comp_temp->bus_addr);

err_dma_alloc:
	kfree(comp_temp);

err_alloc:
	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &queue->comp_free, list) {
		if (comp_temp->virt_addr)
			dma_pool_free(queue->comp_pool,
				      comp_temp->virt_addr,
				      comp_temp->bus_addr);
		if (comp_temp->desc_virt_addr)
			dma_pool_free(queue->desc_pool,
				      comp_temp->desc_virt_addr,
				      comp_temp->desc_bus_addr);

		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	return -ENOMEM;
}
452b092529eSPeng Ma
453b092529eSPeng Ma /*
454b092529eSPeng Ma * Request a command descriptor for enqueue.
455b092529eSPeng Ma */
456b092529eSPeng Ma static struct fsl_qdma_comp
fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan * fsl_chan)457b092529eSPeng Ma *fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
458b092529eSPeng Ma {
459b092529eSPeng Ma unsigned long flags;
460b092529eSPeng Ma struct fsl_qdma_comp *comp_temp;
461b092529eSPeng Ma int timeout = FSL_QDMA_COMP_TIMEOUT;
462b092529eSPeng Ma struct fsl_qdma_queue *queue = fsl_chan->queue;
463b092529eSPeng Ma
464b092529eSPeng Ma while (timeout--) {
465b092529eSPeng Ma spin_lock_irqsave(&queue->queue_lock, flags);
466b092529eSPeng Ma if (!list_empty(&queue->comp_free)) {
467b092529eSPeng Ma comp_temp = list_first_entry(&queue->comp_free,
468b092529eSPeng Ma struct fsl_qdma_comp,
469b092529eSPeng Ma list);
470b092529eSPeng Ma list_del(&comp_temp->list);
471b092529eSPeng Ma
472b092529eSPeng Ma spin_unlock_irqrestore(&queue->queue_lock, flags);
473b092529eSPeng Ma comp_temp->qchan = fsl_chan;
474b092529eSPeng Ma return comp_temp;
475b092529eSPeng Ma }
476b092529eSPeng Ma spin_unlock_irqrestore(&queue->queue_lock, flags);
477b092529eSPeng Ma udelay(1);
478b092529eSPeng Ma }
479b092529eSPeng Ma
480b092529eSPeng Ma return NULL;
481b092529eSPeng Ma }
482b092529eSPeng Ma
/*
 * Allocate and initialize the command-queue descriptors for every block:
 * read per-queue ring sizes from the "queue-sizes" property, allocate a
 * coherent ring for each (queue, block) pair and initialize its cursors,
 * list and lock. Returns the queue array or NULL on failure (all
 * allocations are devm/dmam managed).
 */
static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
				struct fsl_qdma_engine *fsl_qdma)
{
	int ret, len, i, j;
	int queue_num, block_number;
	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
	struct fsl_qdma_queue *queue_head, *queue_temp;

	queue_num = fsl_qdma->n_queues;
	block_number = fsl_qdma->block_number;

	/* Clamp to the hardware maximum of FSL_QDMA_QUEUE_MAX queues. */
	if (queue_num > FSL_QDMA_QUEUE_MAX)
		queue_num = FSL_QDMA_QUEUE_MAX;
	len = sizeof(*queue_head) * queue_num * block_number;
	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!queue_head)
		return NULL;

	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
					     queue_size, queue_num);
	if (ret) {
		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
		return NULL;
	}
	for (j = 0; j < block_number; j++) {
		for (i = 0; i < queue_num; i++) {
			/* Ring sizes must fall within the hardware limits. */
			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
				dev_err(&pdev->dev,
					"Get wrong queue-sizes.\n");
				return NULL;
			}
			/* Queues are laid out block-major in the array. */
			queue_temp = queue_head + i + (j * queue_num);

			queue_temp->cq =
			dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct fsl_qdma_format) *
					    queue_size[i],
					    &queue_temp->bus_addr,
					    GFP_KERNEL);
			if (!queue_temp->cq)
				return NULL;
			queue_temp->block_base = fsl_qdma->block_base +
				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
			queue_temp->n_cq = queue_size[i];
			queue_temp->id = i;
			/* Empty ring: both cursors start at the base. */
			queue_temp->virt_head = queue_temp->cq;
			queue_temp->virt_tail = queue_temp->cq;
			/*
			 * List for queue command buffer
			 */
			INIT_LIST_HEAD(&queue_temp->comp_used);
			spin_lock_init(&queue_temp->queue_lock);
		}
	}
	return queue_head;
}
541b092529eSPeng Ma
/*
 * Allocate and initialize one status queue: read its ring size from the
 * "status-sizes" DT property, validate it against the hardware limits
 * and allocate a coherent ring for it. Returns NULL on any failure.
 */
static struct fsl_qdma_queue
*fsl_qdma_prep_status_queue(struct platform_device *pdev)
{
	int ret;
	unsigned int status_size;
	struct fsl_qdma_queue *status_head;
	struct device_node *np = pdev->dev.of_node;

	ret = of_property_read_u32(np, "status-sizes", &status_size);
	if (ret) {
		dev_err(&pdev->dev, "Can't get status-sizes.\n");
		return NULL;
	}
	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
		dev_err(&pdev->dev, "Get wrong status_size.\n");
		return NULL;
	}
	status_head = devm_kzalloc(&pdev->dev,
				   sizeof(*status_head), GFP_KERNEL);
	if (!status_head)
		return NULL;

	/*
	 * Buffer for queue command
	 */
	status_head->cq = dmam_alloc_coherent(&pdev->dev,
					      sizeof(struct fsl_qdma_format) *
					      status_size,
					      &status_head->bus_addr,
					      GFP_KERNEL);
	if (!status_head->cq) {
		/* Release the devm allocation early; probe will not retry. */
		devm_kfree(&pdev->dev, status_head);
		return NULL;
	}
	status_head->n_cq = status_size;
	/* Empty ring: both cursors start at the base; no pool is used. */
	status_head->virt_head = status_head->cq;
	status_head->virt_tail = status_head->cq;
	status_head->comp_pool = NULL;

	return status_head;
}
584b092529eSPeng Ma
/*
 * Halt the engine: disable dequeueing (DMR.DQD), zero every command
 * queue mode register, wait for the busy bit (DSR.DB) to clear, then
 * disable the status queues and clear any latched queue interrupts.
 * Returns 0 on success or -EBUSY if the engine never goes idle.
 */
static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
	u32 reg;
	int i, j, count = FSL_QDMA_HALT_COUNT;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

	/* Disable the command queue and wait for idle state. */
	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
	reg |= FSL_QDMA_DMR_DQD;
	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
			qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
	}
	/* Poll the busy bit, 100us per try, up to FSL_QDMA_HALT_COUNT tries. */
	while (1) {
		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
		if (!(reg & FSL_QDMA_DSR_DB))
			break;
		if (count-- < 0)
			return -EBUSY;
		udelay(100);
	}

	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

		/* Disable status queue. */
		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);

		/*
		 * clear the command queue interrupt detect register for
		 * all queues.
		 */
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
			    block + FSL_QDMA_BCQIDR(0));
	}

	return 0;
}
627b092529eSPeng Ma
/*
 * Drain the per-block status queue and complete the matching command
 * descriptors.
 *
 * @fsl_qdma:	the qDMA engine
 * @block:	ioremapped base of the block whose status queue to drain
 * @id:		index of the block (selects fsl_qdma->status[id])
 *
 * Returns 0 when the status queue has been drained, -EAGAIN when a
 * status entry does not match any in-flight descriptor.
 */
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
				 void *block,
				 int id)
{
	bool duplicate;
	u32 reg, i, count;
	u8 completion_status;
	struct fsl_qdma_queue *temp_queue;
	struct fsl_qdma_format *status_addr;
	struct fsl_qdma_comp *fsl_comp = NULL;
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];

	/* Bound the drain loop so a misbehaving queue cannot spin forever. */
	count = FSL_QDMA_MAX_SIZE;

	while (count--) {
		duplicate = 0;
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
		/* Status queue empty: nothing left to complete. */
		if (reg & FSL_QDMA_BSQSR_QE)
			return 0;

		status_addr = fsl_status->virt_head;

		/*
		 * The hardware can report the same completion twice; compare
		 * against the per-CPU record of the previously handled entry
		 * to detect a duplicate report.
		 */
		if (qdma_ccdf_get_queue(status_addr) ==
		   __this_cpu_read(pre.queue) &&
			qdma_ccdf_addr_get64(status_addr) ==
			__this_cpu_read(pre.addr))
			duplicate = 1;
		/* Flat index of the command queue this status entry targets. */
		i = qdma_ccdf_get_queue(status_addr) +
			id * fsl_qdma->n_queues;
		__this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
		__this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
		temp_queue = fsl_queue + i;

		spin_lock(&temp_queue->queue_lock);
		if (list_empty(&temp_queue->comp_used)) {
			/* Nothing in flight: only a duplicate is benign. */
			if (!duplicate) {
				spin_unlock(&temp_queue->queue_lock);
				return -EAGAIN;
			}
		} else {
			fsl_comp = list_first_entry(&temp_queue->comp_used,
						    struct fsl_qdma_comp, list);
			/*
			 * The status entry should reference the oldest
			 * in-flight descriptor (bus_addr + 16); otherwise
			 * it must be a duplicate to be acceptable.
			 */
			if (fsl_comp->bus_addr + 16 !=
				__this_cpu_read(pre.addr)) {
				if (!duplicate) {
					spin_unlock(&temp_queue->queue_lock);
					return -EAGAIN;
				}
			}
		}

		if (duplicate) {
			/* Consume the duplicate status entry and move on. */
			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
			reg |= FSL_QDMA_BSQMR_DI;
			qdma_desc_addr_set64(status_addr, 0x0);
			fsl_status->virt_head++;
			/* Wrap the circular status queue head pointer. */
			if (fsl_status->virt_head == fsl_status->cq
						   + fsl_status->n_cq)
				fsl_status->virt_head = fsl_status->cq;
			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
			spin_unlock(&temp_queue->queue_lock);
			continue;
		}
		list_del(&fsl_comp->list);

		/* Snapshot the status before the entry is recycled below. */
		completion_status = qdma_ccdf_get_status(status_addr);

		/* Pop the status entry and tell the hardware it was consumed. */
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
		reg |= FSL_QDMA_BSQMR_DI;
		qdma_desc_addr_set64(status_addr, 0x0);
		fsl_status->virt_head++;
		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
			fsl_status->virt_head = fsl_status->cq;
		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
		spin_unlock(&temp_queue->queue_lock);

		/* The completion_status is evaluated here
		 * (outside of spin lock)
		 */
		if (completion_status) {
			/* A completion error occurred! */
			if (completion_status & QDMA_CCDF_STATUS_WTE) {
				/* Write transaction error */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_WRITE_FAILED;
			} else if (completion_status & QDMA_CCDF_STATUS_RTE) {
				/* Read transaction error */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_READ_FAILED;
			} else {
				/* Command/source/destination
				 * description error
				 */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_ABORTED;
				dev_err(fsl_qdma->dma_dev.dev,
					"DMA status descriptor error %x\n",
					completion_status);
			}
		}

		/* Report completion to the dmaengine/virt-dma core. */
		spin_lock(&fsl_comp->qchan->vchan.lock);
		vchan_cookie_complete(&fsl_comp->vdesc);
		fsl_comp->qchan->status = DMA_COMPLETE;
		spin_unlock(&fsl_comp->qchan->vchan.lock);
	}

	return 0;
}
739b092529eSPeng Ma
fsl_qdma_error_handler(int irq,void * dev_id)740b092529eSPeng Ma static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
741b092529eSPeng Ma {
742b092529eSPeng Ma unsigned int intr;
743b092529eSPeng Ma struct fsl_qdma_engine *fsl_qdma = dev_id;
744b092529eSPeng Ma void __iomem *status = fsl_qdma->status_base;
745ab6041e4SKoehrer Mathias (ETAS/EES-SL) unsigned int decfdw0r;
746ab6041e4SKoehrer Mathias (ETAS/EES-SL) unsigned int decfdw1r;
747ab6041e4SKoehrer Mathias (ETAS/EES-SL) unsigned int decfdw2r;
748ab6041e4SKoehrer Mathias (ETAS/EES-SL) unsigned int decfdw3r;
749b092529eSPeng Ma
750b092529eSPeng Ma intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
751b092529eSPeng Ma
752ab6041e4SKoehrer Mathias (ETAS/EES-SL) if (intr) {
753ab6041e4SKoehrer Mathias (ETAS/EES-SL) decfdw0r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R);
754ab6041e4SKoehrer Mathias (ETAS/EES-SL) decfdw1r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R);
755ab6041e4SKoehrer Mathias (ETAS/EES-SL) decfdw2r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R);
756ab6041e4SKoehrer Mathias (ETAS/EES-SL) decfdw3r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R);
757ab6041e4SKoehrer Mathias (ETAS/EES-SL) dev_err(fsl_qdma->dma_dev.dev,
758ab6041e4SKoehrer Mathias (ETAS/EES-SL) "DMA transaction error! (%x: %x-%x-%x-%x)\n",
759ab6041e4SKoehrer Mathias (ETAS/EES-SL) intr, decfdw0r, decfdw1r, decfdw2r, decfdw3r);
760ab6041e4SKoehrer Mathias (ETAS/EES-SL) }
761b092529eSPeng Ma
762b092529eSPeng Ma qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
763b092529eSPeng Ma return IRQ_HANDLED;
764b092529eSPeng Ma }
765b092529eSPeng Ma
fsl_qdma_queue_handler(int irq,void * dev_id)766b092529eSPeng Ma static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
767b092529eSPeng Ma {
768b092529eSPeng Ma int id;
769b092529eSPeng Ma unsigned int intr, reg;
770b092529eSPeng Ma struct fsl_qdma_engine *fsl_qdma = dev_id;
771b092529eSPeng Ma void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
772b092529eSPeng Ma
773b092529eSPeng Ma id = irq - fsl_qdma->irq_base;
774b092529eSPeng Ma if (id < 0 && id > fsl_qdma->block_number) {
775b092529eSPeng Ma dev_err(fsl_qdma->dma_dev.dev,
776b092529eSPeng Ma "irq %d is wrong irq_base is %d\n",
777b092529eSPeng Ma irq, fsl_qdma->irq_base);
778b092529eSPeng Ma }
779b092529eSPeng Ma
780b092529eSPeng Ma block = fsl_qdma->block_base +
781b092529eSPeng Ma FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
782b092529eSPeng Ma
783b092529eSPeng Ma intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
784b092529eSPeng Ma
785b092529eSPeng Ma if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
786b092529eSPeng Ma intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
787b092529eSPeng Ma
788b092529eSPeng Ma if (intr != 0) {
789b092529eSPeng Ma reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
790b092529eSPeng Ma reg |= FSL_QDMA_DMR_DQD;
791b092529eSPeng Ma qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
792b092529eSPeng Ma qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
793b092529eSPeng Ma dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
794b092529eSPeng Ma }
795b092529eSPeng Ma
796b092529eSPeng Ma /* Clear all detected events and interrupts. */
797b092529eSPeng Ma qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
798b092529eSPeng Ma block + FSL_QDMA_BCQIDR(0));
799b092529eSPeng Ma
800b092529eSPeng Ma return IRQ_HANDLED;
801b092529eSPeng Ma }
802b092529eSPeng Ma
803b092529eSPeng Ma static int
fsl_qdma_irq_init(struct platform_device * pdev,struct fsl_qdma_engine * fsl_qdma)804b092529eSPeng Ma fsl_qdma_irq_init(struct platform_device *pdev,
805b092529eSPeng Ma struct fsl_qdma_engine *fsl_qdma)
806b092529eSPeng Ma {
807b092529eSPeng Ma int i;
808b092529eSPeng Ma int cpu;
809b092529eSPeng Ma int ret;
8109f119924SVinod Koul char irq_name[32];
811b092529eSPeng Ma
812b092529eSPeng Ma fsl_qdma->error_irq =
813b092529eSPeng Ma platform_get_irq_byname(pdev, "qdma-error");
814e17be6e1SStephen Boyd if (fsl_qdma->error_irq < 0)
815b092529eSPeng Ma return fsl_qdma->error_irq;
816b092529eSPeng Ma
817b092529eSPeng Ma ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
818b092529eSPeng Ma fsl_qdma_error_handler, 0,
819b092529eSPeng Ma "qDMA error", fsl_qdma);
820b092529eSPeng Ma if (ret) {
821b092529eSPeng Ma dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
822b092529eSPeng Ma return ret;
823b092529eSPeng Ma }
824b092529eSPeng Ma
825b092529eSPeng Ma for (i = 0; i < fsl_qdma->block_number; i++) {
826b092529eSPeng Ma sprintf(irq_name, "qdma-queue%d", i);
827b092529eSPeng Ma fsl_qdma->queue_irq[i] =
828b092529eSPeng Ma platform_get_irq_byname(pdev, irq_name);
829b092529eSPeng Ma
830e17be6e1SStephen Boyd if (fsl_qdma->queue_irq[i] < 0)
831b092529eSPeng Ma return fsl_qdma->queue_irq[i];
832b092529eSPeng Ma
833b092529eSPeng Ma ret = devm_request_irq(&pdev->dev,
834b092529eSPeng Ma fsl_qdma->queue_irq[i],
835b092529eSPeng Ma fsl_qdma_queue_handler,
836b092529eSPeng Ma 0,
837b092529eSPeng Ma "qDMA queue",
838b092529eSPeng Ma fsl_qdma);
839b092529eSPeng Ma if (ret) {
840b092529eSPeng Ma dev_err(&pdev->dev,
841b092529eSPeng Ma "Can't register qDMA queue IRQ.\n");
842b092529eSPeng Ma return ret;
843b092529eSPeng Ma }
844b092529eSPeng Ma
845b092529eSPeng Ma cpu = i % num_online_cpus();
846b092529eSPeng Ma ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
847b092529eSPeng Ma get_cpu_mask(cpu));
848b092529eSPeng Ma if (ret) {
849b092529eSPeng Ma dev_err(&pdev->dev,
850b092529eSPeng Ma "Can't set cpu %d affinity to IRQ %d.\n",
851b092529eSPeng Ma cpu,
852b092529eSPeng Ma fsl_qdma->queue_irq[i]);
853b092529eSPeng Ma return ret;
854b092529eSPeng Ma }
855b092529eSPeng Ma }
856b092529eSPeng Ma
857b092529eSPeng Ma return 0;
858b092529eSPeng Ma }
859b092529eSPeng Ma
fsl_qdma_irq_exit(struct platform_device * pdev,struct fsl_qdma_engine * fsl_qdma)860b092529eSPeng Ma static void fsl_qdma_irq_exit(struct platform_device *pdev,
861b092529eSPeng Ma struct fsl_qdma_engine *fsl_qdma)
862b092529eSPeng Ma {
863b092529eSPeng Ma int i;
864b092529eSPeng Ma
865b092529eSPeng Ma devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
866b092529eSPeng Ma for (i = 0; i < fsl_qdma->block_number; i++)
867b092529eSPeng Ma devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
868b092529eSPeng Ma }
869b092529eSPeng Ma
/*
 * Program the qDMA hardware: halt the engine, point every command and
 * status queue at its DMA-coherent ring, set queue/interrupt modes, and
 * finally re-enable dequeue.
 *
 * Returns 0 on success or the error from fsl_qdma_halt().
 */
static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
	u32 reg;
	int i, j, ret;
	struct fsl_qdma_queue *temp;
	void __iomem *status = fsl_qdma->status_base;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

	/* Try to halt the qDMA engine first. */
	ret = fsl_qdma_halt(fsl_qdma);
	if (ret) {
		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
		return ret;
	}

	for (i = 0; i < fsl_qdma->block_number; i++) {
		/*
		 * Clear the command queue interrupt detect register for
		 * all queues.
		 */

		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
			    block + FSL_QDMA_BCQIDR(0));
	}

	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < fsl_qdma->n_queues; i++) {
			/* Queue i of block j in the flat queue array. */
			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
			/*
			 * Initialize Command Queue registers to
			 * point to the first
			 * command descriptor in memory.
			 * Dequeue Pointer Address Registers
			 * Enqueue Pointer Address Registers
			 */

			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQDPA_SADDR(i));
			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQEPA_SADDR(i));

			/* Initialize the queue mode. */
			reg = FSL_QDMA_BCQMR_EN;
			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
		}

		/*
		 * Workaround for erratum: ERR010812.
		 * We must enable XOFF to avoid the enqueue rejection occurs.
		 * Setting SQCCMR ENTER_WM to 0x20.
		 */

		qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
			    block + FSL_QDMA_SQCCMR);

		/*
		 * Initialize status queue registers to point to the first
		 * command descriptor in memory.
		 * Dequeue Pointer Address Registers
		 * Enqueue Pointer Address Registers
		 */

		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
			    block + FSL_QDMA_SQEPAR);
		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
			    block + FSL_QDMA_SQDPAR);
		/* Initialize status queue interrupt. */
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
			    block + FSL_QDMA_BCQIER(0));
		qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
				   FSL_QDMA_BSQICR_ICST(5) | 0x8000,
				   block + FSL_QDMA_BSQICR);
		qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
				   FSL_QDMA_CQIER_TEIE,
				   block + FSL_QDMA_CQIER);

		/* Initialize the status queue mode. */
		reg = FSL_QDMA_BSQMR_EN;
		reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2
			(fsl_qdma->status[j]->n_cq) - 6);

		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
		/* Read back to post the status-queue mode write. */
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
	}

	/* Initialize controller interrupt register. */
	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
	qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);

	/* Clear DQD to re-enable dequeue after the halt above. */
	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
	reg &= ~FSL_QDMA_DMR_DQD;
	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

	return 0;
}
972b092529eSPeng Ma
973b092529eSPeng Ma static struct dma_async_tx_descriptor *
fsl_qdma_prep_memcpy(struct dma_chan * chan,dma_addr_t dst,dma_addr_t src,size_t len,unsigned long flags)974b092529eSPeng Ma fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
975b092529eSPeng Ma dma_addr_t src, size_t len, unsigned long flags)
976b092529eSPeng Ma {
977b092529eSPeng Ma struct fsl_qdma_comp *fsl_comp;
978b092529eSPeng Ma struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
979b092529eSPeng Ma
980b092529eSPeng Ma fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
981b092529eSPeng Ma
982b092529eSPeng Ma if (!fsl_comp)
983b092529eSPeng Ma return NULL;
984b092529eSPeng Ma
985b092529eSPeng Ma fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
986b092529eSPeng Ma
987b092529eSPeng Ma return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
988b092529eSPeng Ma }
989b092529eSPeng Ma
/*
 * Push the next pending virt-dma descriptor into the channel's hardware
 * command queue and ring the enqueue doorbell.
 *
 * Caller must hold fsl_queue->queue_lock and the vchan lock (see
 * fsl_qdma_issue_pending()).
 */
static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	u32 reg;
	struct virt_dma_desc *vdesc;
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	void __iomem *block = fsl_queue->block_base;

	/* Bail out if the command queue is full or enqueue is XOFF'ed. */
	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
		return;
	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	list_del(&vdesc->node);
	fsl_comp = to_fsl_qdma_comp(vdesc);

	/* Copy the prepared descriptor into the circular command queue. */
	memcpy(fsl_queue->virt_head++,
	       fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
		fsl_queue->virt_head = fsl_queue->cq;

	/* Track the in-flight descriptor for completion matching. */
	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
	/* Keep the descriptor write ordered before the doorbell below. */
	barrier();
	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
	reg |= FSL_QDMA_BCQMR_EI;
	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
	fsl_chan->status = DMA_IN_PROGRESS;
}
1019b092529eSPeng Ma
fsl_qdma_free_desc(struct virt_dma_desc * vdesc)1020b092529eSPeng Ma static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
1021b092529eSPeng Ma {
1022b092529eSPeng Ma unsigned long flags;
1023b092529eSPeng Ma struct fsl_qdma_comp *fsl_comp;
1024b092529eSPeng Ma struct fsl_qdma_queue *fsl_queue;
1025b092529eSPeng Ma
1026b092529eSPeng Ma fsl_comp = to_fsl_qdma_comp(vdesc);
1027b092529eSPeng Ma fsl_queue = fsl_comp->qchan->queue;
1028b092529eSPeng Ma
1029b092529eSPeng Ma spin_lock_irqsave(&fsl_queue->queue_lock, flags);
1030b092529eSPeng Ma list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
1031b092529eSPeng Ma spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
1032b092529eSPeng Ma }
1033b092529eSPeng Ma
fsl_qdma_issue_pending(struct dma_chan * chan)1034b092529eSPeng Ma static void fsl_qdma_issue_pending(struct dma_chan *chan)
1035b092529eSPeng Ma {
1036b092529eSPeng Ma unsigned long flags;
1037b092529eSPeng Ma struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
1038b092529eSPeng Ma struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
1039b092529eSPeng Ma
1040b092529eSPeng Ma spin_lock_irqsave(&fsl_queue->queue_lock, flags);
1041b092529eSPeng Ma spin_lock(&fsl_chan->vchan.lock);
1042b092529eSPeng Ma if (vchan_issue_pending(&fsl_chan->vchan))
1043b092529eSPeng Ma fsl_qdma_enqueue_desc(fsl_chan);
1044b092529eSPeng Ma spin_unlock(&fsl_chan->vchan.lock);
1045b092529eSPeng Ma spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
1046b092529eSPeng Ma }
1047b092529eSPeng Ma
fsl_qdma_synchronize(struct dma_chan * chan)1048b092529eSPeng Ma static void fsl_qdma_synchronize(struct dma_chan *chan)
1049b092529eSPeng Ma {
1050b092529eSPeng Ma struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
1051b092529eSPeng Ma
1052b092529eSPeng Ma vchan_synchronize(&fsl_chan->vchan);
1053b092529eSPeng Ma }
1054b092529eSPeng Ma
fsl_qdma_terminate_all(struct dma_chan * chan)1055b092529eSPeng Ma static int fsl_qdma_terminate_all(struct dma_chan *chan)
1056b092529eSPeng Ma {
1057b092529eSPeng Ma LIST_HEAD(head);
1058b092529eSPeng Ma unsigned long flags;
1059b092529eSPeng Ma struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
1060b092529eSPeng Ma
1061b092529eSPeng Ma spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
1062b092529eSPeng Ma vchan_get_all_descriptors(&fsl_chan->vchan, &head);
1063b092529eSPeng Ma spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
1064b092529eSPeng Ma vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
1065b092529eSPeng Ma return 0;
1066b092529eSPeng Ma }
1067b092529eSPeng Ma
fsl_qdma_alloc_chan_resources(struct dma_chan * chan)1068b092529eSPeng Ma static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
1069b092529eSPeng Ma {
1070b092529eSPeng Ma int ret;
1071b092529eSPeng Ma struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
1072b092529eSPeng Ma struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
1073b092529eSPeng Ma struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
1074b092529eSPeng Ma
1075b092529eSPeng Ma if (fsl_queue->comp_pool && fsl_queue->desc_pool)
1076b092529eSPeng Ma return fsl_qdma->desc_allocated;
1077b092529eSPeng Ma
1078b092529eSPeng Ma INIT_LIST_HEAD(&fsl_queue->comp_free);
1079b092529eSPeng Ma
1080b092529eSPeng Ma /*
1081b092529eSPeng Ma * The dma pool for queue command buffer
1082b092529eSPeng Ma */
1083b092529eSPeng Ma fsl_queue->comp_pool =
1084b092529eSPeng Ma dma_pool_create("comp_pool",
1085b092529eSPeng Ma chan->device->dev,
1086b092529eSPeng Ma FSL_QDMA_COMMAND_BUFFER_SIZE,
1087b092529eSPeng Ma 64, 0);
1088b092529eSPeng Ma if (!fsl_queue->comp_pool)
1089b092529eSPeng Ma return -ENOMEM;
1090b092529eSPeng Ma
1091b092529eSPeng Ma /*
1092b092529eSPeng Ma * The dma pool for Descriptor(SD/DD) buffer
1093b092529eSPeng Ma */
1094b092529eSPeng Ma fsl_queue->desc_pool =
1095b092529eSPeng Ma dma_pool_create("desc_pool",
1096b092529eSPeng Ma chan->device->dev,
1097b092529eSPeng Ma FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
1098b092529eSPeng Ma 32, 0);
1099b092529eSPeng Ma if (!fsl_queue->desc_pool)
1100b092529eSPeng Ma goto err_desc_pool;
1101b092529eSPeng Ma
1102b092529eSPeng Ma ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
1103b092529eSPeng Ma if (ret) {
1104b092529eSPeng Ma dev_err(chan->device->dev,
1105b092529eSPeng Ma "failed to alloc dma buffer for S/G descriptor\n");
1106b092529eSPeng Ma goto err_mem;
1107b092529eSPeng Ma }
1108b092529eSPeng Ma
1109b092529eSPeng Ma fsl_qdma->desc_allocated++;
1110b092529eSPeng Ma return fsl_qdma->desc_allocated;
1111b092529eSPeng Ma
1112b092529eSPeng Ma err_mem:
1113b092529eSPeng Ma dma_pool_destroy(fsl_queue->desc_pool);
1114b092529eSPeng Ma err_desc_pool:
1115b092529eSPeng Ma dma_pool_destroy(fsl_queue->comp_pool);
1116b092529eSPeng Ma return -ENOMEM;
1117b092529eSPeng Ma }
1118b092529eSPeng Ma
/*
 * Probe: parse DT properties, allocate engine/channel/queue state, map
 * the three register regions, initialize the hardware, request IRQs,
 * and register with the dmaengine framework.
 *
 * All allocations and IRQs are devm-managed; error paths simply return.
 */
static int fsl_qdma_probe(struct platform_device *pdev)
{
	int ret, i;
	int blk_num, blk_off;
	u32 len, chans, queues;
	struct fsl_qdma_chan *fsl_chan;
	struct fsl_qdma_engine *fsl_qdma;
	struct device_node *np = pdev->dev.of_node;

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-offset", &blk_off);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-offset.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-number", &blk_num);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-number.\n");
		return ret;
	}

	/* No point using more blocks than there are CPUs to serve them. */
	blk_num = min_t(int, blk_num, num_online_cpus());

	len = sizeof(*fsl_qdma);
	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma)
		return -ENOMEM;

	len = sizeof(*fsl_chan) * chans;
	fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->chans)
		return -ENOMEM;

	/* One status queue pointer per block. */
	len = sizeof(struct fsl_qdma_queue *) * blk_num;
	fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->status)
		return -ENOMEM;

	/* One queue IRQ number per block. */
	len = sizeof(int) * blk_num;
	fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->queue_irq)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
	if (ret) {
		dev_err(&pdev->dev, "Can't get queues.\n");
		return ret;
	}

	fsl_qdma->desc_allocated = 0;
	fsl_qdma->n_chans = chans;
	fsl_qdma->n_queues = queues;
	fsl_qdma->block_number = blk_num;
	fsl_qdma->block_offset = blk_off;

	mutex_init(&fsl_qdma->fsl_qdma_mutex);

	for (i = 0; i < fsl_qdma->block_number; i++) {
		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
		if (!fsl_qdma->status[i])
			return -ENOMEM;
	}
	/* Region 0: control, region 1: status, region 2: per-block queues. */
	fsl_qdma->ctrl_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fsl_qdma->ctrl_base))
		return PTR_ERR(fsl_qdma->ctrl_base);

	fsl_qdma->status_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(fsl_qdma->status_base))
		return PTR_ERR(fsl_qdma->status_base);

	fsl_qdma->block_base = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(fsl_qdma->block_base))
		return PTR_ERR(fsl_qdma->block_base);
	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
	if (!fsl_qdma->queue)
		return -ENOMEM;

	/* The queue IRQ handler derives the block id from irq_base. */
	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
	if (fsl_qdma->irq_base < 0)
		return fsl_qdma->irq_base;

	/* "feature" selects big-endian register access in qdma_readl/writel. */
	fsl_qdma->feature = of_property_read_bool(np, "big-endian");
	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);

	for (i = 0; i < fsl_qdma->n_chans; i++) {
		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

		fsl_chan->qdma = fsl_qdma;
		/* Channels share the queues round-robin across all blocks. */
		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
							fsl_qdma->block_number);
		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
	}

	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);

	fsl_qdma->dma_dev.dev = &pdev->dev;
	fsl_qdma->dma_dev.device_free_chan_resources =
		fsl_qdma_free_chan_resources;
	fsl_qdma->dma_dev.device_alloc_chan_resources =
		fsl_qdma_alloc_chan_resources;
	fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
	fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
	fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;

	/* The qDMA engine addresses 40 bits of DMA address space. */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
	if (ret) {
		dev_err(&pdev->dev, "dma_set_mask failure.\n");
		return ret;
	}

	platform_set_drvdata(pdev, fsl_qdma);

	ret = fsl_qdma_reg_init(fsl_qdma);
	if (ret) {
		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
		return ret;
	}

	/* IRQs are requested only after the hardware is fully programmed. */
	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
	if (ret)
		return ret;

	ret = dma_async_device_register(&fsl_qdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
		return ret;
	}

	return 0;
}
1258b092529eSPeng Ma
fsl_qdma_cleanup_vchan(struct dma_device * dmadev)1259b092529eSPeng Ma static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
1260b092529eSPeng Ma {
1261b092529eSPeng Ma struct fsl_qdma_chan *chan, *_chan;
1262b092529eSPeng Ma
1263b092529eSPeng Ma list_for_each_entry_safe(chan, _chan,
1264b092529eSPeng Ma &dmadev->channels, vchan.chan.device_node) {
1265b092529eSPeng Ma list_del(&chan->vchan.chan.device_node);
1266b092529eSPeng Ma tasklet_kill(&chan->vchan.task);
1267b092529eSPeng Ma }
1268b092529eSPeng Ma }
1269b092529eSPeng Ma
/*
 * Device removal: release IRQs, kill the channel tasklets, and
 * unregister from the dmaengine framework.  Memory and mappings are
 * devm-managed and freed automatically.
 */
static int fsl_qdma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

	fsl_qdma_irq_exit(pdev, fsl_qdma);
	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
	/*
	 * NOTE(review): of_dma_controller_free() is called here but no
	 * matching of_dma_controller_register() is visible in probe —
	 * confirm whether this call is actually needed.
	 */
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_qdma->dma_dev);

	return 0;
}
1282b092529eSPeng Ma
/* Devices handled by this driver (matched against the DT compatible). */
static const struct of_device_id fsl_qdma_dt_ids[] = {
	{ .compatible = "fsl,ls1021a-qdma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);

static struct platform_driver fsl_qdma_driver = {
	.driver		= {
		.name = "fsl-qdma",
		.of_match_table = fsl_qdma_dt_ids,
	},
	.probe          = fsl_qdma_probe,
	.remove		= fsl_qdma_remove,
};

module_platform_driver(fsl_qdma_driver);

MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
1303