1  // SPDX-License-Identifier: GPL-2.0
2  // Copyright 2014-2015 Freescale
3  // Copyright 2018 NXP
4  
5  /*
6   * Driver for NXP Layerscape Queue Direct Memory Access Controller
7   *
8   * Author:
9   *  Wen He <wen.he_1@nxp.com>
10   *  Jiaheng Fan <jiaheng.fan@nxp.com>
11   *
12   */
13  
14  #include <linux/module.h>
15  #include <linux/delay.h>
16  #include <linux/of.h>
17  #include <linux/of_dma.h>
18  #include <linux/dma-mapping.h>
19  #include <linux/platform_device.h>
20  
21  #include "virt-dma.h"
22  #include "fsldma.h"
23  
24  /* Register related definition */
25  #define FSL_QDMA_DMR			0x0
26  #define FSL_QDMA_DSR			0x4
27  #define FSL_QDMA_DEIER			0xe00
28  #define FSL_QDMA_DEDR			0xe04
29  #define FSL_QDMA_DECFDW0R		0xe10
30  #define FSL_QDMA_DECFDW1R		0xe14
31  #define FSL_QDMA_DECFDW2R		0xe18
32  #define FSL_QDMA_DECFDW3R		0xe1c
33  #define FSL_QDMA_DECFQIDR		0xe30
34  #define FSL_QDMA_DECBR			0xe34
35  
36  #define FSL_QDMA_BCQMR(x)		(0xc0 + 0x100 * (x))
37  #define FSL_QDMA_BCQSR(x)		(0xc4 + 0x100 * (x))
38  #define FSL_QDMA_BCQEDPA_SADDR(x)	(0xc8 + 0x100 * (x))
39  #define FSL_QDMA_BCQDPA_SADDR(x)	(0xcc + 0x100 * (x))
40  #define FSL_QDMA_BCQEEPA_SADDR(x)	(0xd0 + 0x100 * (x))
41  #define FSL_QDMA_BCQEPA_SADDR(x)	(0xd4 + 0x100 * (x))
42  #define FSL_QDMA_BCQIER(x)		(0xe0 + 0x100 * (x))
43  #define FSL_QDMA_BCQIDR(x)		(0xe4 + 0x100 * (x))
44  
45  #define FSL_QDMA_SQDPAR			0x80c
46  #define FSL_QDMA_SQEPAR			0x814
47  #define FSL_QDMA_BSQMR			0x800
48  #define FSL_QDMA_BSQSR			0x804
49  #define FSL_QDMA_BSQICR			0x828
50  #define FSL_QDMA_CQMR			0xa00
51  #define FSL_QDMA_CQDSCR1		0xa08
52  #define FSL_QDMA_CQDSCR2                0xa0c
53  #define FSL_QDMA_CQIER			0xa10
54  #define FSL_QDMA_CQEDR			0xa14
55  #define FSL_QDMA_SQCCMR			0xa20
56  
57  /* Registers for bit and genmask */
58  #define FSL_QDMA_CQIDR_SQT		BIT(15)
59  #define QDMA_CCDF_FORMAT		BIT(29)
60  #define QDMA_CCDF_SER			BIT(30)
61  #define QDMA_SG_FIN			BIT(30)
62  #define QDMA_SG_LEN_MASK		GENMASK(29, 0)
63  #define QDMA_CCDF_MASK			GENMASK(28, 20)
64  
65  #define FSL_QDMA_DEDR_CLEAR		GENMASK(31, 0)
66  #define FSL_QDMA_BCQIDR_CLEAR		GENMASK(31, 0)
67  #define FSL_QDMA_DEIER_CLEAR		GENMASK(31, 0)
68  
69  #define FSL_QDMA_BCQIER_CQTIE		BIT(15)
70  #define FSL_QDMA_BCQIER_CQPEIE		BIT(23)
71  #define FSL_QDMA_BSQICR_ICEN		BIT(31)
72  
73  #define FSL_QDMA_BSQICR_ICST(x)		((x) << 16)
74  #define FSL_QDMA_CQIER_MEIE		BIT(31)
75  #define FSL_QDMA_CQIER_TEIE		BIT(0)
76  #define FSL_QDMA_SQCCMR_ENTER_WM	BIT(21)
77  
78  #define FSL_QDMA_BCQMR_EN		BIT(31)
79  #define FSL_QDMA_BCQMR_EI		BIT(30)
80  #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
81  #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
82  
83  #define FSL_QDMA_BCQSR_QF		BIT(16)
84  #define FSL_QDMA_BCQSR_XOFF		BIT(0)
85  
86  #define FSL_QDMA_BSQMR_EN		BIT(31)
87  #define FSL_QDMA_BSQMR_DI		BIT(30)
88  #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
89  
90  #define FSL_QDMA_BSQSR_QE		BIT(17)
91  
92  #define FSL_QDMA_DMR_DQD		BIT(30)
93  #define FSL_QDMA_DSR_DB		BIT(31)
94  
95  /* Size related definition */
96  #define FSL_QDMA_QUEUE_MAX		8
97  #define FSL_QDMA_COMMAND_BUFFER_SIZE	64
98  #define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
99  #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
100  #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
101  #define FSL_QDMA_QUEUE_NUM_MAX		8
102  
103  /* Field definition for CMD */
104  #define FSL_QDMA_CMD_RWTTYPE		0x4
105  #define FSL_QDMA_CMD_LWC                0x2
106  #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
107  #define FSL_QDMA_CMD_NS_OFFSET		27
108  #define FSL_QDMA_CMD_DQOS_OFFSET	24
109  #define FSL_QDMA_CMD_WTHROTL_OFFSET	20
110  #define FSL_QDMA_CMD_DSEN_OFFSET	19
111  #define FSL_QDMA_CMD_LWC_OFFSET		16
112  #define FSL_QDMA_CMD_PF			BIT(17)
113  
114  /* Field definition for Descriptor status */
115  #define QDMA_CCDF_STATUS_RTE		BIT(5)
116  #define QDMA_CCDF_STATUS_WTE		BIT(4)
117  #define QDMA_CCDF_STATUS_CDE		BIT(2)
118  #define QDMA_CCDF_STATUS_SDE		BIT(1)
119  #define QDMA_CCDF_STATUS_DDE		BIT(0)
120  #define QDMA_CCDF_STATUS_MASK		(QDMA_CCDF_STATUS_RTE | \
121  					QDMA_CCDF_STATUS_WTE | \
122  					QDMA_CCDF_STATUS_CDE | \
123  					QDMA_CCDF_STATUS_SDE | \
124  					QDMA_CCDF_STATUS_DDE)
125  
126  /* Field definition for Descriptor offset */
127  #define QDMA_CCDF_OFFSET		20
128  #define QDMA_SDDF_CMD(x)		(((u64)(x)) << 32)
129  
130  /* Field definition for safe loop count */
131  #define FSL_QDMA_HALT_COUNT		1500
132  #define FSL_QDMA_MAX_SIZE		16385
133  #define	FSL_QDMA_COMP_TIMEOUT		1000
134  #define FSL_COMMAND_QUEUE_OVERFLLOW	10
135  
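/* Each register block starts block_offset bytes after the previous one. */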
136  #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)			\
137  	(((fsl_qdma_engine)->block_offset) * (x))
138  
139  /**
140   * struct fsl_qdma_format - This is the struct describing the compound
141   *			    descriptor format used by qDMA.
142   * @status:		    Command status and enqueue status notification.
143   * @cfg:		    Frame offset and frame format.
144   * @addr_lo:		    Lower 32 bits of the 40-bit address of the
145   *			    compound descriptor in memory.
146   * @addr_hi:		    Upper 8 bits of the same 40-bit memory
147   *			    address.
148   * @__reserved1:	    Reserved field.
149   * @cfg8b_w1:		    Compound descriptor command queue origin produced
150   *			    by qDMA and dynamic debug field.
151   * @data:		    Pointer to the 40-bit memory address; describes
152   *			    the DMA source and DMA destination information.
153   */
154  struct fsl_qdma_format {
155  	__le32 status;
156  	__le32 cfg;
157  	union {
158  		struct {
159  			__le32 addr_lo;
160  			u8 addr_hi;
161  			u8 __reserved1[2];
162  			u8 cfg8b_w1;
163  		} __packed;
164  		__le64 data;
165  	};
166  } __packed;
167  
168  /* qDMA status notification pre information */
169  struct fsl_pre_status {
170  	u64 addr;
171  	u8 queue;
172  };
173  
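/*
 * Per-CPU record of the most recently consumed status-queue entry
 * (completion address and queue id), used by the completion handler to
 * detect duplicate status notifications.
 */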
174  static DEFINE_PER_CPU(struct fsl_pre_status, pre);
175  
176  struct fsl_qdma_chan {
177  	struct virt_dma_chan		vchan;
178  	struct virt_dma_desc		vdesc;
179  	enum dma_status			status;
180  	struct fsl_qdma_engine		*qdma;
181  	struct fsl_qdma_queue		*queue;
182  };
183  
184  struct fsl_qdma_queue {
185  	struct fsl_qdma_format	*virt_head;
186  	struct fsl_qdma_format	*virt_tail;
187  	struct list_head	comp_used;
188  	struct list_head	comp_free;
189  	struct dma_pool		*comp_pool;
190  	struct dma_pool		*desc_pool;
191  	spinlock_t		queue_lock;
192  	dma_addr_t		bus_addr;
193  	u32                     n_cq;
194  	u32			id;
195  	struct fsl_qdma_format	*cq;
196  	void __iomem		*block_base;
197  };
198  
199  struct fsl_qdma_comp {
200  	dma_addr_t              bus_addr;
201  	dma_addr_t              desc_bus_addr;
202  	struct fsl_qdma_format	*virt_addr;
203  	struct fsl_qdma_format	*desc_virt_addr;
204  	struct fsl_qdma_chan	*qchan;
205  	struct virt_dma_desc    vdesc;
206  	struct list_head	list;
207  };
208  
209  struct fsl_qdma_engine {
210  	struct dma_device	dma_dev;
211  	void __iomem		*ctrl_base;
212  	void __iomem            *status_base;
213  	void __iomem		*block_base;
214  	u32			n_chans;
215  	u32			n_queues;
216  	struct mutex            fsl_qdma_mutex;
217  	int			error_irq;
218  	int			*queue_irq;
219  	u32			feature;
220  	struct fsl_qdma_queue	*queue;
221  	struct fsl_qdma_queue	**status;
222  	struct fsl_qdma_chan	*chans;
223  	int			block_number;
224  	int			block_offset;
225  	int			irq_base;
226  	int			desc_allocated;
227  
228  };
229  
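
/* The 40-bit bus address is carried in the low 40 bits of @data. */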
230  static inline u64
231  qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
232  {
233  	return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
234  }
235  
236  static inline void
237  qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
238  {
239  	ccdf->addr_hi = upper_32_bits(addr);
240  	ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
241  }
242  
243  static inline u8
244  qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
245  {
246  	return ccdf->cfg8b_w1 & U8_MAX;
247  }
248  
249  static inline int
250  qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
251  {
252  	return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
253  }
254  
255  static inline void
256  qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
257  {
258  	ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT |
259  				(offset << QDMA_CCDF_OFFSET));
260  }
261  
262  static inline int
263  qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
264  {
265  	return (le32_to_cpu(ccdf->status) & QDMA_CCDF_STATUS_MASK);
266  }
267  
268  static inline void
269  qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
270  {
271  	ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
272  }
273  
274  static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
275  {
276  	csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
277  }
278  
279  static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
280  {
281  	csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
282  }
283  
284  static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
285  {
286  	return FSL_DMA_IN(qdma, addr, 32);
287  }
288  
289  static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
290  			void __iomem *addr)
291  {
292  	FSL_DMA_OUT(qdma, addr, val, 32);
293  }
294  
295  static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
296  {
297  	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
298  }
299  
300  static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
301  {
302  	return container_of(vd, struct fsl_qdma_comp, vdesc);
303  }
304  
305  static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
306  {
307  	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
308  	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
309  	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
310  	struct fsl_qdma_comp *comp_temp, *_comp_temp;
311  	unsigned long flags;
312  	LIST_HEAD(head);
313  
314  	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
315  	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
316  	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
317  
318  	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
319  
320  	if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
321  		return;
322  
323  	list_for_each_entry_safe(comp_temp, _comp_temp,
324  				 &fsl_queue->comp_used,	list) {
325  		dma_pool_free(fsl_queue->comp_pool,
326  			      comp_temp->virt_addr,
327  			      comp_temp->bus_addr);
328  		dma_pool_free(fsl_queue->desc_pool,
329  			      comp_temp->desc_virt_addr,
330  			      comp_temp->desc_bus_addr);
331  		list_del(&comp_temp->list);
332  		kfree(comp_temp);
333  	}
334  
335  	list_for_each_entry_safe(comp_temp, _comp_temp,
336  				 &fsl_queue->comp_free, list) {
337  		dma_pool_free(fsl_queue->comp_pool,
338  			      comp_temp->virt_addr,
339  			      comp_temp->bus_addr);
340  		dma_pool_free(fsl_queue->desc_pool,
341  			      comp_temp->desc_virt_addr,
342  			      comp_temp->desc_bus_addr);
343  		list_del(&comp_temp->list);
344  		kfree(comp_temp);
345  	}
346  
347  	dma_pool_destroy(fsl_queue->comp_pool);
348  	dma_pool_destroy(fsl_queue->desc_pool);
349  
350  	fsl_qdma->desc_allocated--;
351  	fsl_queue->comp_pool = NULL;
352  	fsl_queue->desc_pool = NULL;
353  }
354  
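/*
 * Build one memcpy transfer: fsl_comp->virt_addr holds four 16-byte
 * fsl_qdma_format entries (the compound command descriptor followed by
 * the frame-list table, source and destination entries), while
 * fsl_comp->desc_virt_addr holds the source and destination descriptors
 * referenced by the frame list.
 */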
355  static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
356  				      dma_addr_t dst, dma_addr_t src, u32 len)
357  {
358  	u32 cmd;
359  	struct fsl_qdma_format *sdf, *ddf;
360  	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
361  
362  	ccdf = fsl_comp->virt_addr;
363  	csgf_desc = fsl_comp->virt_addr + 1;
364  	csgf_src = fsl_comp->virt_addr + 2;
365  	csgf_dest = fsl_comp->virt_addr + 3;
366  	sdf = fsl_comp->desc_virt_addr;
367  	ddf = fsl_comp->desc_virt_addr + 1;
368  
369  	memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
370  	memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
371  	/* Head Command Descriptor (Frame Descriptor) */
372  	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
373  	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
374  	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
375  	/* Status notification is enqueued to status queue. */
376  	/* Compound Command Descriptor (Frame List Table) */
377  	qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
378  	/* The length must be 32, the size of the descriptor (SD/DD) buffer */
379  	qdma_csgf_set_len(csgf_desc, 32);
380  	qdma_desc_addr_set64(csgf_src, src);
381  	qdma_csgf_set_len(csgf_src, len);
382  	qdma_desc_addr_set64(csgf_dest, dst);
383  	qdma_csgf_set_len(csgf_dest, len);
384  	/* This entry is the last entry. */
385  	qdma_csgf_set_f(csgf_dest, len);
386  	/* Descriptor Buffer */
387  	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
388  			  FSL_QDMA_CMD_RWTTYPE_OFFSET) |
389  			  FSL_QDMA_CMD_PF;
390  	sdf->data = QDMA_SDDF_CMD(cmd);
391  
392  	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
393  			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
394  	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
395  	ddf->data = QDMA_SDDF_CMD(cmd);
396  }
397  
398  /*
399   * Pre-request a full set of command descriptors for enqueue.
400   */
401  static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
402  {
403  	int i;
404  	struct fsl_qdma_comp *comp_temp, *_comp_temp;
405  
406  	for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLLOW; i++) {
407  		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
408  		if (!comp_temp)
409  			goto err_alloc;
410  		comp_temp->virt_addr =
411  			dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
412  				       &comp_temp->bus_addr);
413  		if (!comp_temp->virt_addr)
414  			goto err_dma_alloc;
415  
416  		comp_temp->desc_virt_addr =
417  			dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
418  				       &comp_temp->desc_bus_addr);
419  		if (!comp_temp->desc_virt_addr)
420  			goto err_desc_dma_alloc;
421  
422  		list_add_tail(&comp_temp->list, &queue->comp_free);
423  	}
424  
425  	return 0;
426  
427  err_desc_dma_alloc:
428  	dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
429  		      comp_temp->bus_addr);
430  
431  err_dma_alloc:
432  	kfree(comp_temp);
433  
434  err_alloc:
435  	list_for_each_entry_safe(comp_temp, _comp_temp,
436  				 &queue->comp_free, list) {
437  		if (comp_temp->virt_addr)
438  			dma_pool_free(queue->comp_pool,
439  				      comp_temp->virt_addr,
440  				      comp_temp->bus_addr);
441  		if (comp_temp->desc_virt_addr)
442  			dma_pool_free(queue->desc_pool,
443  				      comp_temp->desc_virt_addr,
444  				      comp_temp->desc_bus_addr);
445  
446  		list_del(&comp_temp->list);
447  		kfree(comp_temp);
448  	}
449  
450  	return -ENOMEM;
451  }
452  
453  /*
454   * Request a command descriptor for enqueue.
455   */
456  static struct fsl_qdma_comp
457  *fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
458  {
459  	unsigned long flags;
460  	struct fsl_qdma_comp *comp_temp;
461  	int timeout = FSL_QDMA_COMP_TIMEOUT;
462  	struct fsl_qdma_queue *queue = fsl_chan->queue;
463  
464  	while (timeout--) {
465  		spin_lock_irqsave(&queue->queue_lock, flags);
466  		if (!list_empty(&queue->comp_free)) {
467  			comp_temp = list_first_entry(&queue->comp_free,
468  						     struct fsl_qdma_comp,
469  						     list);
470  			list_del(&comp_temp->list);
471  
472  			spin_unlock_irqrestore(&queue->queue_lock, flags);
473  			comp_temp->qchan = fsl_chan;
474  			return comp_temp;
475  		}
476  		spin_unlock_irqrestore(&queue->queue_lock, flags);
477  		udelay(1);
478  	}
479  
480  	return NULL;
481  }
482  
483  static struct fsl_qdma_queue
484  *fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
485  				struct fsl_qdma_engine *fsl_qdma)
486  {
487  	int ret, len, i, j;
488  	int queue_num, block_number;
489  	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
490  	struct fsl_qdma_queue *queue_head, *queue_temp;
491  
492  	queue_num = fsl_qdma->n_queues;
493  	block_number = fsl_qdma->block_number;
494  
495  	if (queue_num > FSL_QDMA_QUEUE_MAX)
496  		queue_num = FSL_QDMA_QUEUE_MAX;
497  	len = sizeof(*queue_head) * queue_num * block_number;
498  	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
499  	if (!queue_head)
500  		return NULL;
501  
502  	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
503  					     queue_size, queue_num);
504  	if (ret) {
505  		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
506  		return NULL;
507  	}
508  	for (j = 0; j < block_number; j++) {
509  		for (i = 0; i < queue_num; i++) {
510  			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
511  			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
512  				dev_err(&pdev->dev,
513  					"Invalid queue-sizes.\n");
514  				return NULL;
515  			}
516  			queue_temp = queue_head + i + (j * queue_num);
517  
518  			queue_temp->cq =
519  			dmam_alloc_coherent(&pdev->dev,
520  					    sizeof(struct fsl_qdma_format) *
521  					    queue_size[i],
522  					    &queue_temp->bus_addr,
523  					    GFP_KERNEL);
524  			if (!queue_temp->cq)
525  				return NULL;
526  			queue_temp->block_base = fsl_qdma->block_base +
527  				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
528  			queue_temp->n_cq = queue_size[i];
529  			queue_temp->id = i;
530  			queue_temp->virt_head = queue_temp->cq;
531  			queue_temp->virt_tail = queue_temp->cq;
532  			/*
533  			 * List for queue command buffer
534  			 */
535  			INIT_LIST_HEAD(&queue_temp->comp_used);
536  			spin_lock_init(&queue_temp->queue_lock);
537  		}
538  	}
539  	return queue_head;
540  }
541  
542  static struct fsl_qdma_queue
543  *fsl_qdma_prep_status_queue(struct platform_device *pdev)
544  {
545  	int ret;
546  	unsigned int status_size;
547  	struct fsl_qdma_queue *status_head;
548  	struct device_node *np = pdev->dev.of_node;
549  
550  	ret = of_property_read_u32(np, "status-sizes", &status_size);
551  	if (ret) {
552  		dev_err(&pdev->dev, "Can't get status-sizes.\n");
553  		return NULL;
554  	}
555  	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
556  	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
557  		dev_err(&pdev->dev, "Invalid status_size.\n");
558  		return NULL;
559  	}
560  	status_head = devm_kzalloc(&pdev->dev,
561  				   sizeof(*status_head), GFP_KERNEL);
562  	if (!status_head)
563  		return NULL;
564  
565  	/*
566  	 * Buffer for queue command
567  	 */
568  	status_head->cq = dmam_alloc_coherent(&pdev->dev,
569  					      sizeof(struct fsl_qdma_format) *
570  					      status_size,
571  					      &status_head->bus_addr,
572  					      GFP_KERNEL);
573  	if (!status_head->cq) {
574  		devm_kfree(&pdev->dev, status_head);
575  		return NULL;
576  	}
577  	status_head->n_cq = status_size;
578  	status_head->virt_head = status_head->cq;
579  	status_head->virt_tail = status_head->cq;
580  	status_head->comp_pool = NULL;
581  
582  	return status_head;
583  }
584  
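/*
 * Quiesce the engine: set the DQD bit in DMR, disable every command
 * queue, poll DSR until the engine reports idle, then disable the
 * status queues and clear any pending queue interrupts.
 */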
585  static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
586  {
587  	u32 reg;
588  	int i, j, count = FSL_QDMA_HALT_COUNT;
589  	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
590  
591  	/* Disable the command queue and wait for idle state. */
592  	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
593  	reg |= FSL_QDMA_DMR_DQD;
594  	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
595  	for (j = 0; j < fsl_qdma->block_number; j++) {
596  		block = fsl_qdma->block_base +
597  			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
598  		for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
599  			qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
600  	}
601  	while (1) {
602  		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
603  		if (!(reg & FSL_QDMA_DSR_DB))
604  			break;
605  		if (count-- < 0)
606  			return -EBUSY;
607  		udelay(100);
608  	}
609  
610  	for (j = 0; j < fsl_qdma->block_number; j++) {
611  		block = fsl_qdma->block_base +
612  			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
613  
614  		/* Disable status queue. */
615  		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
616  
617  		/*
618  		 * clear the command queue interrupt detect register for
619  		 * all queues.
620  		 */
621  		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
622  			    block + FSL_QDMA_BCQIDR(0));
623  	}
624  
625  	return 0;
626  }
627  
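/*
 * Drain the per-block status queue: for each completed status entry,
 * match it against the command descriptor at the head of the owning
 * queue's comp_used list, acknowledge the entry by advancing the status
 * queue (DI bit in BSQMR), record any error status, and complete the
 * virt-dma cookie.
 */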
628  static int
629  fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
630  				 void *block,
631  				 int id)
632  {
633  	bool duplicate;
634  	u32 reg, i, count;
635  	u8 completion_status;
636  	struct fsl_qdma_queue *temp_queue;
637  	struct fsl_qdma_format *status_addr;
638  	struct fsl_qdma_comp *fsl_comp = NULL;
639  	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
640  	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
641  
642  	count = FSL_QDMA_MAX_SIZE;
643  
644  	while (count--) {
645  		duplicate = 0;
646  		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
647  		if (reg & FSL_QDMA_BSQSR_QE)
648  			return 0;
649  
650  		status_addr = fsl_status->virt_head;
651  
652  		if (qdma_ccdf_get_queue(status_addr) ==
653  		   __this_cpu_read(pre.queue) &&
654  			qdma_ccdf_addr_get64(status_addr) ==
655  			__this_cpu_read(pre.addr))
656  			duplicate = 1;
657  		i = qdma_ccdf_get_queue(status_addr) +
658  			id * fsl_qdma->n_queues;
659  		__this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
660  		__this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
661  		temp_queue = fsl_queue + i;
662  
663  		spin_lock(&temp_queue->queue_lock);
664  		if (list_empty(&temp_queue->comp_used)) {
665  			if (!duplicate) {
666  				spin_unlock(&temp_queue->queue_lock);
667  				return -EAGAIN;
668  			}
669  		} else {
670  			fsl_comp = list_first_entry(&temp_queue->comp_used,
671  						    struct fsl_qdma_comp, list);
672  			if (fsl_comp->bus_addr + 16 !=
673  				__this_cpu_read(pre.addr)) {
674  				if (!duplicate) {
675  					spin_unlock(&temp_queue->queue_lock);
676  					return -EAGAIN;
677  				}
678  			}
679  		}
680  
681  		if (duplicate) {
682  			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
683  			reg |= FSL_QDMA_BSQMR_DI;
684  			qdma_desc_addr_set64(status_addr, 0x0);
685  			fsl_status->virt_head++;
686  			if (fsl_status->virt_head == fsl_status->cq
687  						   + fsl_status->n_cq)
688  				fsl_status->virt_head = fsl_status->cq;
689  			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
690  			spin_unlock(&temp_queue->queue_lock);
691  			continue;
692  		}
693  		list_del(&fsl_comp->list);
694  
695  		completion_status = qdma_ccdf_get_status(status_addr);
696  
697  		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
698  		reg |= FSL_QDMA_BSQMR_DI;
699  		qdma_desc_addr_set64(status_addr, 0x0);
700  		fsl_status->virt_head++;
701  		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
702  			fsl_status->virt_head = fsl_status->cq;
703  		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
704  		spin_unlock(&temp_queue->queue_lock);
705  
706  		/* The completion_status is evaluated here
707  		 * (outside of spin lock)
708  		 */
709  		if (completion_status) {
710  			/* A completion error occurred! */
711  			if (completion_status & QDMA_CCDF_STATUS_WTE) {
712  				/* Write transaction error */
713  				fsl_comp->vdesc.tx_result.result =
714  					DMA_TRANS_WRITE_FAILED;
715  			} else if (completion_status & QDMA_CCDF_STATUS_RTE) {
716  				/* Read transaction error */
717  				fsl_comp->vdesc.tx_result.result =
718  					DMA_TRANS_READ_FAILED;
719  			} else {
720  				/* Command/source/destination
721  				 * description error
722  				 */
723  				fsl_comp->vdesc.tx_result.result =
724  					DMA_TRANS_ABORTED;
725  				dev_err(fsl_qdma->dma_dev.dev,
726  					"DMA status descriptor error %x\n",
727  					completion_status);
728  			}
729  		}
730  
731  		spin_lock(&fsl_comp->qchan->vchan.lock);
732  		vchan_cookie_complete(&fsl_comp->vdesc);
733  		fsl_comp->qchan->status = DMA_COMPLETE;
734  		spin_unlock(&fsl_comp->qchan->vchan.lock);
735  	}
736  
737  	return 0;
738  }
739  
740  static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
741  {
742  	unsigned int intr;
743  	struct fsl_qdma_engine *fsl_qdma = dev_id;
744  	void __iomem *status = fsl_qdma->status_base;
745  	unsigned int decfdw0r;
746  	unsigned int decfdw1r;
747  	unsigned int decfdw2r;
748  	unsigned int decfdw3r;
749  
750  	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
751  
752  	if (intr) {
753  		decfdw0r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R);
754  		decfdw1r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R);
755  		decfdw2r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R);
756  		decfdw3r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R);
757  		dev_err(fsl_qdma->dma_dev.dev,
758  			"DMA transaction error! (%x: %x-%x-%x-%x)\n",
759  			intr, decfdw0r, decfdw1r, decfdw2r, decfdw3r);
760  	}
761  
762  	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
763  	return IRQ_HANDLED;
764  }
765  
766  static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
767  {
768  	int id;
769  	unsigned int intr, reg;
770  	struct fsl_qdma_engine *fsl_qdma = dev_id;
771  	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
772  
773  	id = irq - fsl_qdma->irq_base;
774  	if (id < 0 || id >= fsl_qdma->block_number) {
775  		dev_err(fsl_qdma->dma_dev.dev,
776  			"irq %d is wrong, irq_base is %d\n",
777  			irq, fsl_qdma->irq_base);
778  	}
779  
780  	block = fsl_qdma->block_base +
781  		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
782  
783  	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
784  
785  	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
786  		intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
787  
788  	if (intr != 0) {
789  		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
790  		reg |= FSL_QDMA_DMR_DQD;
791  		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
792  		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
793  		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
794  	}
795  
796  	/* Clear all detected events and interrupts. */
797  	qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
798  		    block + FSL_QDMA_BCQIDR(0));
799  
800  	return IRQ_HANDLED;
801  }
802  
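/*
 * Request the controller error IRQ plus one completion IRQ per block,
 * and spread the per-block IRQs across the online CPUs.
 */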
803  static int
804  fsl_qdma_irq_init(struct platform_device *pdev,
805  		  struct fsl_qdma_engine *fsl_qdma)
806  {
807  	int i;
808  	int cpu;
809  	int ret;
810  	char irq_name[32];
811  
812  	fsl_qdma->error_irq =
813  		platform_get_irq_byname(pdev, "qdma-error");
814  	if (fsl_qdma->error_irq < 0)
815  		return fsl_qdma->error_irq;
816  
817  	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
818  			       fsl_qdma_error_handler, 0,
819  			       "qDMA error", fsl_qdma);
820  	if (ret) {
821  		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
822  		return  ret;
823  	}
824  
825  	for (i = 0; i < fsl_qdma->block_number; i++) {
826  		sprintf(irq_name, "qdma-queue%d", i);
827  		fsl_qdma->queue_irq[i] =
828  				platform_get_irq_byname(pdev, irq_name);
829  
830  		if (fsl_qdma->queue_irq[i] < 0)
831  			return fsl_qdma->queue_irq[i];
832  
833  		ret = devm_request_irq(&pdev->dev,
834  				       fsl_qdma->queue_irq[i],
835  				       fsl_qdma_queue_handler,
836  				       0,
837  				       "qDMA queue",
838  				       fsl_qdma);
839  		if (ret) {
840  			dev_err(&pdev->dev,
841  				"Can't register qDMA queue IRQ.\n");
842  			return  ret;
843  		}
844  
845  		cpu = i % num_online_cpus();
846  		ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
847  					    get_cpu_mask(cpu));
848  		if (ret) {
849  			dev_err(&pdev->dev,
850  				"Can't set cpu %d affinity to IRQ %d.\n",
851  				cpu,
852  				fsl_qdma->queue_irq[i]);
853  			return  ret;
854  		}
855  	}
856  
857  	return 0;
858  }
859  
860  static void fsl_qdma_irq_exit(struct platform_device *pdev,
861  			      struct fsl_qdma_engine *fsl_qdma)
862  {
863  	int i;
864  
865  	devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
866  	for (i = 0; i < fsl_qdma->block_number; i++)
867  		devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
868  }
869  
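/*
 * Program the hardware: halt the engine, set up the command queue and
 * status queue base/size registers for every block, enable the
 * completion and error interrupts, and finally clear DQD in DMR so the
 * engine starts dispatching.
 */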
870  static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
871  {
872  	u32 reg;
873  	int i, j, ret;
874  	struct fsl_qdma_queue *temp;
875  	void __iomem *status = fsl_qdma->status_base;
876  	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
877  	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
878  
879  	/* Try to halt the qDMA engine first. */
880  	ret = fsl_qdma_halt(fsl_qdma);
881  	if (ret) {
882  		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
883  		return ret;
884  	}
885  
886  	for (i = 0; i < fsl_qdma->block_number; i++) {
887  		/*
888  		 * Clear the command queue interrupt detect register for
889  		 * all queues.
890  		 */
891  
892  		block = fsl_qdma->block_base +
893  			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
894  		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
895  			    block + FSL_QDMA_BCQIDR(0));
896  	}
897  
898  	for (j = 0; j < fsl_qdma->block_number; j++) {
899  		block = fsl_qdma->block_base +
900  			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
901  		for (i = 0; i < fsl_qdma->n_queues; i++) {
902  			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
903  			/*
904  			 * Initialize Command Queue registers to
905  			 * point to the first
906  			 * command descriptor in memory.
907  			 * Dequeue Pointer Address Registers
908  			 * Enqueue Pointer Address Registers
909  			 */
910  
911  			qdma_writel(fsl_qdma, temp->bus_addr,
912  				    block + FSL_QDMA_BCQDPA_SADDR(i));
913  			qdma_writel(fsl_qdma, temp->bus_addr,
914  				    block + FSL_QDMA_BCQEPA_SADDR(i));
915  
916  			/* Initialize the queue mode. */
917  			reg = FSL_QDMA_BCQMR_EN;
918  			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
919  			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
920  			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
921  		}
922  
923  		/*
924  		 * Workaround for erratum ERR010812:
925  		 * XOFF must be enabled to avoid enqueue rejections,
926  		 * so set SQCCMR ENTER_WM to 0x20.
927  		 */
928  
929  		qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
930  			    block + FSL_QDMA_SQCCMR);
931  
932  		/*
933  		 * Initialize status queue registers to point to the first
934  		 * command descriptor in memory.
935  		 * Dequeue Pointer Address Registers
936  		 * Enqueue Pointer Address Registers
937  		 */
938  
939  		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
940  			    block + FSL_QDMA_SQEPAR);
941  		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
942  			    block + FSL_QDMA_SQDPAR);
943  		/* Initialize status queue interrupt. */
944  		qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
945  			    block + FSL_QDMA_BCQIER(0));
946  		qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
947  				   FSL_QDMA_BSQICR_ICST(5) | 0x8000,
948  				   block + FSL_QDMA_BSQICR);
949  		qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
950  				   FSL_QDMA_CQIER_TEIE,
951  				   block + FSL_QDMA_CQIER);
952  
953  		/* Initialize the status queue mode. */
954  		reg = FSL_QDMA_BSQMR_EN;
955  		reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2
956  			(fsl_qdma->status[j]->n_cq) - 6);
957  
958  		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
959  		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
960  	}
961  
962  	/* Initialize controller interrupt register. */
963  	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
964  	qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);
965  
966  	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
967  	reg &= ~FSL_QDMA_DMR_DQD;
968  	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
969  
970  	return 0;
971  }
972  
973  static struct dma_async_tx_descriptor *
974  fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
975  		     dma_addr_t src, size_t len, unsigned long flags)
976  {
977  	struct fsl_qdma_comp *fsl_comp;
978  	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
979  
980  	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
981  
982  	if (!fsl_comp)
983  		return NULL;
984  
985  	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
986  
987  	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
988  }
989  
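/*
 * Copy the next pending compound descriptor into the command queue at
 * virt_head and kick the hardware by setting the EI bit in BCQMR.
 * Nothing is enqueued while the queue reports full (QF) or XOFF.
 */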
990  static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
991  {
992  	u32 reg;
993  	struct virt_dma_desc *vdesc;
994  	struct fsl_qdma_comp *fsl_comp;
995  	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
996  	void __iomem *block = fsl_queue->block_base;
997  
998  	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
999  	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
1000  		return;
1001  	vdesc = vchan_next_desc(&fsl_chan->vchan);
1002  	if (!vdesc)
1003  		return;
1004  	list_del(&vdesc->node);
1005  	fsl_comp = to_fsl_qdma_comp(vdesc);
1006  
1007  	memcpy(fsl_queue->virt_head++,
1008  	       fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
1009  	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
1010  		fsl_queue->virt_head = fsl_queue->cq;
1011  
1012  	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
1013  	barrier();
1014  	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
1015  	reg |= FSL_QDMA_BCQMR_EI;
1016  	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
1017  	fsl_chan->status = DMA_IN_PROGRESS;
1018  }
1019  
1020  static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
1021  {
1022  	unsigned long flags;
1023  	struct fsl_qdma_comp *fsl_comp;
1024  	struct fsl_qdma_queue *fsl_queue;
1025  
1026  	fsl_comp = to_fsl_qdma_comp(vdesc);
1027  	fsl_queue = fsl_comp->qchan->queue;
1028  
1029  	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
1030  	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
1031  	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
1032  }
1033  
1034  static void fsl_qdma_issue_pending(struct dma_chan *chan)
1035  {
1036  	unsigned long flags;
1037  	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
1038  	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
1039  
1040  	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
1041  	spin_lock(&fsl_chan->vchan.lock);
1042  	if (vchan_issue_pending(&fsl_chan->vchan))
1043  		fsl_qdma_enqueue_desc(fsl_chan);
1044  	spin_unlock(&fsl_chan->vchan.lock);
1045  	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
1046  }
1047  
1048  static void fsl_qdma_synchronize(struct dma_chan *chan)
1049  {
1050  	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
1051  
1052  	vchan_synchronize(&fsl_chan->vchan);
1053  }
1054  
1055  static int fsl_qdma_terminate_all(struct dma_chan *chan)
1056  {
1057  	LIST_HEAD(head);
1058  	unsigned long flags;
1059  	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
1060  
1061  	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
1062  	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
1063  	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
1064  	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
1065  	return 0;
1066  }
1067  
1068  static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
1069  {
1070  	int ret;
1071  	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
1072  	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
1073  	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
1074  
1075  	if (fsl_queue->comp_pool && fsl_queue->desc_pool)
1076  		return fsl_qdma->desc_allocated;
1077  
1078  	INIT_LIST_HEAD(&fsl_queue->comp_free);
1079  
1080  	/*
1081  	 * The dma pool for queue command buffer
1082  	 */
1083  	fsl_queue->comp_pool =
1084  	dma_pool_create("comp_pool",
1085  			chan->device->dev,
1086  			FSL_QDMA_COMMAND_BUFFER_SIZE,
1087  			64, 0);
1088  	if (!fsl_queue->comp_pool)
1089  		return -ENOMEM;
1090  
1091  	/*
1092  	 * The dma pool for Descriptor(SD/DD) buffer
1093  	 */
1094  	fsl_queue->desc_pool =
1095  	dma_pool_create("desc_pool",
1096  			chan->device->dev,
1097  			FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
1098  			32, 0);
1099  	if (!fsl_queue->desc_pool)
1100  		goto err_desc_pool;
1101  
1102  	ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
1103  	if (ret) {
1104  		dev_err(chan->device->dev,
1105  			"failed to alloc dma buffer for S/G descriptor\n");
1106  		goto err_mem;
1107  	}
1108  
1109  	fsl_qdma->desc_allocated++;
1110  	return fsl_qdma->desc_allocated;
1111  
1112  err_mem:
1113  	dma_pool_destroy(fsl_queue->desc_pool);
1114  err_desc_pool:
1115  	dma_pool_destroy(fsl_queue->comp_pool);
1116  	return -ENOMEM;
1117  }
1118  
1119  static int fsl_qdma_probe(struct platform_device *pdev)
1120  {
1121  	int ret, i;
1122  	int blk_num, blk_off;
1123  	u32 len, chans, queues;
1124  	struct fsl_qdma_chan *fsl_chan;
1125  	struct fsl_qdma_engine *fsl_qdma;
1126  	struct device_node *np = pdev->dev.of_node;
1127  
1128  	ret = of_property_read_u32(np, "dma-channels", &chans);
1129  	if (ret) {
1130  		dev_err(&pdev->dev, "Can't get dma-channels.\n");
1131  		return ret;
1132  	}
1133  
1134  	ret = of_property_read_u32(np, "block-offset", &blk_off);
1135  	if (ret) {
1136  		dev_err(&pdev->dev, "Can't get block-offset.\n");
1137  		return ret;
1138  	}
1139  
1140  	ret = of_property_read_u32(np, "block-number", &blk_num);
1141  	if (ret) {
1142  		dev_err(&pdev->dev, "Can't get block-number.\n");
1143  		return ret;
1144  	}
1145  
1146  	blk_num = min_t(int, blk_num, num_online_cpus());
1147  
1148  	len = sizeof(*fsl_qdma);
1149  	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1150  	if (!fsl_qdma)
1151  		return -ENOMEM;
1152  
1153  	len = sizeof(*fsl_chan) * chans;
1154  	fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1155  	if (!fsl_qdma->chans)
1156  		return -ENOMEM;
1157  
1158  	len = sizeof(struct fsl_qdma_queue *) * blk_num;
1159  	fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1160  	if (!fsl_qdma->status)
1161  		return -ENOMEM;
1162  
1163  	len = sizeof(int) * blk_num;
1164  	fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1165  	if (!fsl_qdma->queue_irq)
1166  		return -ENOMEM;
1167  
1168  	ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
1169  	if (ret) {
1170  		dev_err(&pdev->dev, "Can't get queues.\n");
1171  		return ret;
1172  	}
1173  
1174  	fsl_qdma->desc_allocated = 0;
1175  	fsl_qdma->n_chans = chans;
1176  	fsl_qdma->n_queues = queues;
1177  	fsl_qdma->block_number = blk_num;
1178  	fsl_qdma->block_offset = blk_off;
1179  
1180  	mutex_init(&fsl_qdma->fsl_qdma_mutex);
1181  
1182  	for (i = 0; i < fsl_qdma->block_number; i++) {
1183  		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
1184  		if (!fsl_qdma->status[i])
1185  			return -ENOMEM;
1186  	}
1187  	fsl_qdma->ctrl_base = devm_platform_ioremap_resource(pdev, 0);
1188  	if (IS_ERR(fsl_qdma->ctrl_base))
1189  		return PTR_ERR(fsl_qdma->ctrl_base);
1190  
1191  	fsl_qdma->status_base = devm_platform_ioremap_resource(pdev, 1);
1192  	if (IS_ERR(fsl_qdma->status_base))
1193  		return PTR_ERR(fsl_qdma->status_base);
1194  
1195  	fsl_qdma->block_base = devm_platform_ioremap_resource(pdev, 2);
1196  	if (IS_ERR(fsl_qdma->block_base))
1197  		return PTR_ERR(fsl_qdma->block_base);
1198  	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
1199  	if (!fsl_qdma->queue)
1200  		return -ENOMEM;
1201  
1202  	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
1203  	if (fsl_qdma->irq_base < 0)
1204  		return fsl_qdma->irq_base;
1205  
1206  	fsl_qdma->feature = of_property_read_bool(np, "big-endian");
1207  	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
1208  
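	/*
	 * Distribute the channels round-robin over all command queues of
	 * all blocks.
	 */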
1209  	for (i = 0; i < fsl_qdma->n_chans; i++) {
1210  		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
1211  
1212  		fsl_chan->qdma = fsl_qdma;
1213  		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
1214  							fsl_qdma->block_number);
1215  		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
1216  		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
1217  	}
1218  
1219  	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
1220  
1221  	fsl_qdma->dma_dev.dev = &pdev->dev;
1222  	fsl_qdma->dma_dev.device_free_chan_resources =
1223  		fsl_qdma_free_chan_resources;
1224  	fsl_qdma->dma_dev.device_alloc_chan_resources =
1225  		fsl_qdma_alloc_chan_resources;
1226  	fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
1227  	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
1228  	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
1229  	fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
1230  	fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
1231  
1232  	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
1233  	if (ret) {
1234  		dev_err(&pdev->dev, "dma_set_mask failure.\n");
1235  		return ret;
1236  	}
1237  
1238  	platform_set_drvdata(pdev, fsl_qdma);
1239  
1240  	ret = fsl_qdma_reg_init(fsl_qdma);
1241  	if (ret) {
1242  		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
1243  		return ret;
1244  	}
1245  
1246  	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
1247  	if (ret)
1248  		return ret;
1249  
1250  	ret = dma_async_device_register(&fsl_qdma->dma_dev);
1251  	if (ret) {
1252  		dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
1253  		return ret;
1254  	}
1255  
1256  	return 0;
1257  }
1258  
1259  static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
1260  {
1261  	struct fsl_qdma_chan *chan, *_chan;
1262  
1263  	list_for_each_entry_safe(chan, _chan,
1264  				 &dmadev->channels, vchan.chan.device_node) {
1265  		list_del(&chan->vchan.chan.device_node);
1266  		tasklet_kill(&chan->vchan.task);
1267  	}
1268  }
1269  
1270  static int fsl_qdma_remove(struct platform_device *pdev)
1271  {
1272  	struct device_node *np = pdev->dev.of_node;
1273  	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
1274  
1275  	fsl_qdma_irq_exit(pdev, fsl_qdma);
1276  	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
1277  	of_dma_controller_free(np);
1278  	dma_async_device_unregister(&fsl_qdma->dma_dev);
1279  
1280  	return 0;
1281  }
1282  
1283  static const struct of_device_id fsl_qdma_dt_ids[] = {
1284  	{ .compatible = "fsl,ls1021a-qdma", },
1285  	{ /* sentinel */ }
1286  };
1287  MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
1288  
1289  static struct platform_driver fsl_qdma_driver = {
1290  	.driver		= {
1291  		.name	= "fsl-qdma",
1292  		.of_match_table = fsl_qdma_dt_ids,
1293  	},
1294  	.probe          = fsl_qdma_probe,
1295  	.remove		= fsl_qdma_remove,
1296  };
1297  
1298  module_platform_driver(fsl_qdma_driver);
1299  
1300  MODULE_ALIAS("platform:fsl-qdma");
1301  MODULE_LICENSE("GPL v2");
1302  MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
1303