xref: /openbmc/linux/drivers/dma/ti/edma.c (revision 96d3c5a7d20ec546e44695983fe0508c6f904248)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * TI EDMA DMA engine driver
4   *
5   * Copyright 2012 Texas Instruments
6   */
7  
8  #include <linux/dmaengine.h>
9  #include <linux/dma-mapping.h>
10  #include <linux/bitmap.h>
11  #include <linux/err.h>
12  #include <linux/init.h>
13  #include <linux/interrupt.h>
14  #include <linux/list.h>
15  #include <linux/module.h>
16  #include <linux/platform_device.h>
17  #include <linux/slab.h>
18  #include <linux/spinlock.h>
19  #include <linux/of.h>
20  #include <linux/of_dma.h>
21  #include <linux/of_irq.h>
22  #include <linux/of_address.h>
23  #include <linux/pm_runtime.h>
24  
25  #include <linux/platform_data/edma.h>
26  
27  #include "../dmaengine.h"
28  #include "../virt-dma.h"
29  
30  /* Offsets matching "struct edmacc_param" */
31  #define PARM_OPT		0x00
32  #define PARM_SRC		0x04
33  #define PARM_A_B_CNT		0x08
34  #define PARM_DST		0x0c
35  #define PARM_SRC_DST_BIDX	0x10
36  #define PARM_LINK_BCNTRLD	0x14
37  #define PARM_SRC_DST_CIDX	0x18
38  #define PARM_CCNT		0x1c
39  
40  #define PARM_SIZE		0x20
41  
42  /* Offsets for EDMA CC global channel registers and their shadows */
43  #define SH_ER			0x00	/* 64 bits */
44  #define SH_ECR			0x08	/* 64 bits */
45  #define SH_ESR			0x10	/* 64 bits */
46  #define SH_CER			0x18	/* 64 bits */
47  #define SH_EER			0x20	/* 64 bits */
48  #define SH_EECR			0x28	/* 64 bits */
49  #define SH_EESR			0x30	/* 64 bits */
50  #define SH_SER			0x38	/* 64 bits */
51  #define SH_SECR			0x40	/* 64 bits */
52  #define SH_IER			0x50	/* 64 bits */
53  #define SH_IECR			0x58	/* 64 bits */
54  #define SH_IESR			0x60	/* 64 bits */
55  #define SH_IPR			0x68	/* 64 bits */
56  #define SH_ICR			0x70	/* 64 bits */
57  #define SH_IEVAL		0x78
58  #define SH_QER			0x80
59  #define SH_QEER			0x84
60  #define SH_QEECR		0x88
61  #define SH_QEESR		0x8c
62  #define SH_QSER			0x90
63  #define SH_QSECR		0x94
64  #define SH_SIZE			0x200
65  
66  /* Offsets for EDMA CC global registers */
67  #define EDMA_REV		0x0000
68  #define EDMA_CCCFG		0x0004
69  #define EDMA_QCHMAP		0x0200	/* 8 registers */
70  #define EDMA_DMAQNUM		0x0240	/* 8 registers (4 on OMAP-L1xx) */
71  #define EDMA_QDMAQNUM		0x0260
72  #define EDMA_QUETCMAP		0x0280
73  #define EDMA_QUEPRI		0x0284
74  #define EDMA_EMR		0x0300	/* 64 bits */
75  #define EDMA_EMCR		0x0308	/* 64 bits */
76  #define EDMA_QEMR		0x0310
77  #define EDMA_QEMCR		0x0314
78  #define EDMA_CCERR		0x0318
79  #define EDMA_CCERRCLR		0x031c
80  #define EDMA_EEVAL		0x0320
81  #define EDMA_DRAE		0x0340	/* 4 x 64 bits */
82  #define EDMA_QRAE		0x0380	/* 4 registers */
83  #define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
84  #define EDMA_QSTAT		0x0600	/* 2 registers */
85  #define EDMA_QWMTHRA		0x0620
86  #define EDMA_QWMTHRB		0x0624
87  #define EDMA_CCSTAT		0x0640
88  
89  #define EDMA_M			0x1000	/* global channel registers */
90  #define EDMA_ECR		0x1008
91  #define EDMA_ECRH		0x100C
92  #define EDMA_SHADOW0		0x2000	/* 4 shadow regions */
93  #define EDMA_PARM		0x4000	/* PaRAM entries */
94  
95  #define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
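/*
 * Worked example (illustrative): each PaRAM set is PARM_SIZE (0x20) bytes,
 * so PARM_OFFSET(3) = 0x4000 + (3 << 5) = 0x4060, i.e. slot 3 starts 96
 * bytes into the PaRAM area.
 */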
96  
97  #define EDMA_DCHMAP		0x0100  /* 64 registers */
98  
99  /* CCCFG register */
100  #define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
101  #define GET_NUM_QDMACH(x)	((x & 0x70) >> 4) /* bits 4-6 */
102  #define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
103  #define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
104  #define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
105  #define CHMAP_EXIST		BIT(24)
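/*
 * Illustrative decode with a made-up value: for CCCFG = 0x00235445,
 * GET_NUM_DMACH() = 5, GET_NUM_QDMACH() = 4, GET_NUM_PAENTRY() = 5,
 * GET_NUM_EVQUE() = 3, GET_NUM_REGN() = 2, and CHMAP_EXIST (bit 24) is
 * clear. The probe code translates these encoded fields into actual
 * resource counts.
 */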
106  
107  /* CCSTAT register */
108  #define EDMA_CCSTAT_ACTV	BIT(4)
109  
110  /*
111   * Max of 20 segments per channel to conserve PaRAM slots
112   * Also note that MAX_NR_SG should be at least the number of periods
113   * that are required for ASoC, otherwise DMA prep calls will
114   * fail. Today davinci-pcm is the only user of this driver and
115   * requires at least 17 slots, so we set up the default to 20.
116   */
117  #define MAX_NR_SG		20
118  #define EDMA_MAX_SLOTS		MAX_NR_SG
119  #define EDMA_DESCRIPTORS	16
120  
121  #define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
122  #define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
123  #define EDMA_CONT_PARAMS_ANY		 1001
124  #define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
125  #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
126  
127  /*
128   * 64-bit array registers are split into two 32-bit registers:
129   * reg0: channel/event 0-31
130   * reg1: channel/event 32-63
131   *
132   * bit 5 of the channel number selects the array index (0/1),
133   * bits 0-4 (0x1f) give the bit offset within the register
134   */
135  #define EDMA_REG_ARRAY_INDEX(channel)	((channel) >> 5)
136  #define EDMA_CHANNEL_BIT(channel)	(BIT((channel) & 0x1f))
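/*
 * Worked example (illustrative): for channel 37,
 * EDMA_REG_ARRAY_INDEX(37) = 37 >> 5 = 1 and EDMA_CHANNEL_BIT(37) =
 * BIT(37 & 0x1f) = BIT(5), i.e. bit 5 of the second (channels 32-63)
 * 32-bit register.
 */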
137  
138  /* PaRAM slots are laid out like this */
139  struct edmacc_param {
140  	u32 opt;
141  	u32 src;
142  	u32 a_b_cnt;
143  	u32 dst;
144  	u32 src_dst_bidx;
145  	u32 link_bcntrld;
146  	u32 src_dst_cidx;
147  	u32 ccnt;
148  } __packed;
149  
150  /* fields in edmacc_param.opt */
151  #define SAM		BIT(0)
152  #define DAM		BIT(1)
153  #define SYNCDIM		BIT(2)
154  #define STATIC		BIT(3)
155  #define EDMA_FWID	(0x07 << 8)
156  #define TCCMODE		BIT(11)
157  #define EDMA_TCC(t)	((t) << 12)
158  #define TCINTEN		BIT(20)
159  #define ITCINTEN	BIT(21)
160  #define TCCHEN		BIT(22)
161  #define ITCCHEN		BIT(23)
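/*
 * Sketch of how these bits are typically combined into an OPT word
 * (mirrors what edma_config_pset() further below does; shown here only
 * for illustration):
 *
 *	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
 *	param->opt |= SYNCDIM;	// AB-synchronized transfer
 *	param->opt |= TCINTEN;	// interrupt on transfer completion
 */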
162  
163  struct edma_pset {
164  	u32				len;
165  	dma_addr_t			addr;
166  	struct edmacc_param		param;
167  };
168  
169  struct edma_desc {
170  	struct virt_dma_desc		vdesc;
171  	struct list_head		node;
172  	enum dma_transfer_direction	direction;
173  	int				cyclic;
174  	bool				polled;
175  	int				absync;
176  	int				pset_nr;
177  	struct edma_chan		*echan;
178  	int				processed;
179  
180  	/*
181  	 * The following 4 elements are used for residue accounting.
182  	 *
183  	 * - processed_stat: the number of SG elements we have traversed
184  	 * so far to cover accounting. This is updated directly to processed
185  	 * during edma_completion_handler() and is always <= processed,
186  	 * because processed refers to the number of pending transfers
187  	 * (programmed to the EDMA controller), whereas processed_stat tracks
188  	 * the number of transfers accounted for so far.
189  	 *
190  	 * - residue: The number of bytes we have left to transfer for this desc
191  	 *
192  	 * - residue_stat: The residue in bytes of data we have covered
193  	 * so far for accounting. This is updated directly to residue
194  	 * during callbacks to keep it current.
195  	 *
196  	 * - sg_len: Tracks the length of the current intermediate transfer,
197  	 * this is required to update the residue during intermediate transfer
198  	 * completion callback.
199  	 */
200  	int				processed_stat;
201  	u32				sg_len;
202  	u32				residue;
203  	u32				residue_stat;
204  
205  	struct edma_pset		pset[];
206  };
207  
208  struct edma_cc;
209  
210  struct edma_tc {
211  	struct device_node		*node;
212  	u16				id;
213  };
214  
215  struct edma_chan {
216  	struct virt_dma_chan		vchan;
217  	struct list_head		node;
218  	struct edma_desc		*edesc;
219  	struct edma_cc			*ecc;
220  	struct edma_tc			*tc;
221  	int				ch_num;
222  	bool				alloced;
223  	bool				hw_triggered;
224  	int				slot[EDMA_MAX_SLOTS];
225  	int				missed;
226  	struct dma_slave_config		cfg;
227  };
228  
229  struct edma_cc {
230  	struct device			*dev;
231  	struct edma_soc_info		*info;
232  	void __iomem			*base;
233  	int				id;
234  	bool				legacy_mode;
235  
236  	/* eDMA3 resource information */
237  	unsigned			num_channels;
238  	unsigned			num_qchannels;
239  	unsigned			num_region;
240  	unsigned			num_slots;
241  	unsigned			num_tc;
242  	bool				chmap_exist;
243  	enum dma_event_q		default_queue;
244  
245  	unsigned int			ccint;
246  	unsigned int			ccerrint;
247  
248  	/*
249  	 * The slot_inuse bit for each PaRAM slot is clear unless the slot is
250  	 * in use by Linux or is allocated to be used by the DSP.
251  	 */
252  	unsigned long *slot_inuse;
253  
254  	/*
255  	 * For tracking reserved channels used by DSP.
256  	 * If the bit is cleared, the channel is allocated to be used by DSP
257  	 * and Linux must not touch it.
258  	 */
259  	unsigned long *channels_mask;
260  
261  	struct dma_device		dma_slave;
262  	struct dma_device		*dma_memcpy;
263  	struct edma_chan		*slave_chans;
264  	struct edma_tc			*tc_list;
265  	int				dummy_slot;
266  };
267  
268  /* dummy param set used to (re)initialize parameter RAM slots */
269  static const struct edmacc_param dummy_paramset = {
270  	.link_bcntrld = 0xffff,
271  	.ccnt = 1,
272  };
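/*
 * Note: 0xffff in the link field is the eDMA null link, and the set
 * describes an empty transfer (ccnt = 1, all other counts zero), so a
 * channel whose active set links here absorbs further events without
 * moving any data.
 */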
273  
274  #define EDMA_BINDING_LEGACY	0
275  #define EDMA_BINDING_TPCC	1
276  static const u32 edma_binding_type[] = {
277  	[EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
278  	[EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
279  };
280  
281  static const struct of_device_id edma_of_ids[] = {
282  	{
283  		.compatible = "ti,edma3",
284  		.data = &edma_binding_type[EDMA_BINDING_LEGACY],
285  	},
286  	{
287  		.compatible = "ti,edma3-tpcc",
288  		.data = &edma_binding_type[EDMA_BINDING_TPCC],
289  	},
290  	{}
291  };
292  MODULE_DEVICE_TABLE(of, edma_of_ids);
293  
294  static const struct of_device_id edma_tptc_of_ids[] = {
295  	{ .compatible = "ti,edma3-tptc", },
296  	{}
297  };
298  MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
299  
300  static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
301  {
302  	return (unsigned int)__raw_readl(ecc->base + offset);
303  }
304  
305  static inline void edma_write(struct edma_cc *ecc, int offset, int val)
306  {
307  	__raw_writel(val, ecc->base + offset);
308  }
309  
310  static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
311  			       unsigned or)
312  {
313  	unsigned val = edma_read(ecc, offset);
314  
315  	val &= and;
316  	val |= or;
317  	edma_write(ecc, offset, val);
318  }
319  
320  static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
321  {
322  	unsigned val = edma_read(ecc, offset);
323  
324  	val |= or;
325  	edma_write(ecc, offset, val);
326  }
327  
328  static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
329  					   int i)
330  {
331  	return edma_read(ecc, offset + (i << 2));
332  }
333  
334  static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
335  				    unsigned val)
336  {
337  	edma_write(ecc, offset + (i << 2), val);
338  }
339  
340  static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
341  				     unsigned and, unsigned or)
342  {
343  	edma_modify(ecc, offset + (i << 2), and, or);
344  }
345  
346  static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
347  				  unsigned or)
348  {
349  	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
350  }
351  
352  static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
353  				     int j, unsigned val)
354  {
355  	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
356  }
357  
358  static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
359  						   int offset, int i)
360  {
361  	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
362  }
363  
364  static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
365  				      unsigned val)
366  {
367  	edma_write(ecc, EDMA_SHADOW0 + offset, val);
368  }
369  
370  static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
371  					    int i, unsigned val)
372  {
373  	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
374  }
375  
376  static inline void edma_param_modify(struct edma_cc *ecc, int offset,
377  				     int param_no, unsigned and, unsigned or)
378  {
379  	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
380  }
381  
382  static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
383  					  int priority)
384  {
385  	int bit = queue_no * 4;
386  
387  	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
388  }
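/*
 * Worked example (illustrative): queue_no * 4 gives the LSB of the queue's
 * priority field in QUEPRI, so edma_assign_priority_to_queue(ecc, 2, 3)
 * clears bits 10-8 and writes priority 3 there.
 */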
389  
390  static void edma_set_chmap(struct edma_chan *echan, int slot)
391  {
392  	struct edma_cc *ecc = echan->ecc;
393  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
394  
395  	if (ecc->chmap_exist) {
396  		slot = EDMA_CHAN_SLOT(slot);
397  		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
398  	}
399  }
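/*
 * The slot number is shifted by 5 because that is where the PAENTRY field
 * sits in a DCHMAP register. Worked example (illustrative): mapping
 * channel 12 to slot 64 writes 64 << 5 = 0x800 to DCHMAP[12].
 */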
400  
401  static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
402  {
403  	struct edma_cc *ecc = echan->ecc;
404  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
405  	int idx = EDMA_REG_ARRAY_INDEX(channel);
406  	int ch_bit = EDMA_CHANNEL_BIT(channel);
407  
408  	if (enable) {
409  		edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
410  		edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
411  	} else {
412  		edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
413  	}
414  }
415  
416  /*
417   * paRAM slot management functions
418   */
419  static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
420  			    const struct edmacc_param *param)
421  {
422  	slot = EDMA_CHAN_SLOT(slot);
423  	if (slot >= ecc->num_slots)
424  		return;
425  	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
426  }
427  
428  static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
429  			   struct edmacc_param *param)
430  {
431  	slot = EDMA_CHAN_SLOT(slot);
432  	if (slot >= ecc->num_slots)
433  		return -EINVAL;
434  	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
435  
436  	return 0;
437  }
438  
439  /**
440   * edma_alloc_slot - allocate DMA parameter RAM
441   * @ecc: pointer to edma_cc struct
442   * @slot: specific slot to allocate; negative for "any unused slot"
443   *
444   * This allocates a parameter RAM slot, initializing it to hold a
445   * dummy transfer.  Slots allocated using this routine have not been
446   * mapped to a hardware DMA channel, and will normally be used by
447   * linking to them from a slot associated with a DMA channel.
448   *
449   * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
450   * slots may be allocated on behalf of DSP firmware.
451   *
452   * Returns the number of the slot, else negative errno.
453   */
454  static int edma_alloc_slot(struct edma_cc *ecc, int slot)
455  {
456  	if (slot >= 0) {
457  		slot = EDMA_CHAN_SLOT(slot);
458  		/* Requesting entry paRAM slot for a HW triggered channel. */
459  		if (ecc->chmap_exist && slot < ecc->num_channels)
460  			slot = EDMA_SLOT_ANY;
461  	}
462  
463  	if (slot < 0) {
464  		if (ecc->chmap_exist)
465  			slot = 0;
466  		else
467  			slot = ecc->num_channels;
468  		for (;;) {
469  			slot = find_next_zero_bit(ecc->slot_inuse,
470  						  ecc->num_slots,
471  						  slot);
472  			if (slot == ecc->num_slots)
473  				return -ENOMEM;
474  			if (!test_and_set_bit(slot, ecc->slot_inuse))
475  				break;
476  		}
477  	} else if (slot >= ecc->num_slots) {
478  		return -EINVAL;
479  	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
480  		return -EBUSY;
481  	}
482  
483  	edma_write_slot(ecc, slot, &dummy_paramset);
484  
485  	return EDMA_CTLR_CHAN(ecc->id, slot);
486  }
487  
488  static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
489  {
490  	slot = EDMA_CHAN_SLOT(slot);
491  	if (slot >= ecc->num_slots)
492  		return;
493  
494  	edma_write_slot(ecc, slot, &dummy_paramset);
495  	clear_bit(slot, ecc->slot_inuse);
496  }
497  
498  /**
499   * edma_link - link one parameter RAM slot to another
500   * @ecc: pointer to edma_cc struct
501   * @from: parameter RAM slot originating the link
502   * @to: parameter RAM slot which is the link target
503   *
504   * The originating slot should not be part of any active DMA transfer.
505   */
506  static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
507  {
508  	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
509  		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");
510  
511  	from = EDMA_CHAN_SLOT(from);
512  	to = EDMA_CHAN_SLOT(to);
513  	if (from >= ecc->num_slots || to >= ecc->num_slots)
514  		return;
515  
516  	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
517  			  PARM_OFFSET(to));
518  }
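/*
 * Worked example (illustrative): edma_link(ecc, 5, 6) keeps the BCNTRLD
 * half (bits 31-16) of slot 5's PARM_LINK_BCNTRLD and writes
 * PARM_OFFSET(6) = 0x40c0 into the 16-bit LINK half, so the controller
 * reloads slot 6's PaRAM set when slot 5 completes.
 */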
519  
520  /**
521   * edma_get_position - returns the current transfer point
522   * @ecc: pointer to edma_cc struct
523   * @slot: parameter RAM slot being examined
524   * @dst:  true selects the dest position, false the source
525   *
526   * Returns the position of the current active slot
527   */
528  static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
529  				    bool dst)
530  {
531  	u32 offs;
532  
533  	slot = EDMA_CHAN_SLOT(slot);
534  	offs = PARM_OFFSET(slot);
535  	offs += dst ? PARM_DST : PARM_SRC;
536  
537  	return edma_read(ecc, offs);
538  }
539  
540  /*
541   * Channels with event associations will be triggered by their hardware
542   * events, and channels without such associations will be triggered by
543   * software.  (At this writing there is no interface for using software
544   * triggers except with channels that don't support hardware triggers.)
545   */
546  static void edma_start(struct edma_chan *echan)
547  {
548  	struct edma_cc *ecc = echan->ecc;
549  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
550  	int idx = EDMA_REG_ARRAY_INDEX(channel);
551  	int ch_bit = EDMA_CHANNEL_BIT(channel);
552  
553  	if (!echan->hw_triggered) {
554  		/* EDMA channels without event association */
555  		dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
556  			edma_shadow0_read_array(ecc, SH_ESR, idx));
557  		edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
558  	} else {
559  		/* EDMA channel with event association */
560  		dev_dbg(ecc->dev, "ER%d %08x\n", idx,
561  			edma_shadow0_read_array(ecc, SH_ER, idx));
562  		/* Clear any pending event or error */
563  		edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
564  		edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
565  		/* Clear any SER */
566  		edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
567  		edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
568  		dev_dbg(ecc->dev, "EER%d %08x\n", idx,
569  			edma_shadow0_read_array(ecc, SH_EER, idx));
570  	}
571  }
572  
573  static void edma_stop(struct edma_chan *echan)
574  {
575  	struct edma_cc *ecc = echan->ecc;
576  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
577  	int idx = EDMA_REG_ARRAY_INDEX(channel);
578  	int ch_bit = EDMA_CHANNEL_BIT(channel);
579  
580  	edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
581  	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
582  	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
583  	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
584  
585  	/* clear possibly pending completion interrupt */
586  	edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
587  
588  	dev_dbg(ecc->dev, "EER%d %08x\n", idx,
589  		edma_shadow0_read_array(ecc, SH_EER, idx));
590  
591  	/* REVISIT:  consider guarding against inappropriate event
592  	 * chaining by overwriting with dummy_paramset.
593  	 */
594  }
595  
596  /*
597   * Temporarily disable EDMA hardware events on the specified channel,
598   * preventing them from triggering new transfers
599   */
600  static void edma_pause(struct edma_chan *echan)
601  {
602  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
603  
604  	edma_shadow0_write_array(echan->ecc, SH_EECR,
605  				 EDMA_REG_ARRAY_INDEX(channel),
606  				 EDMA_CHANNEL_BIT(channel));
607  }
608  
609  /* Re-enable EDMA hardware events on the specified channel.  */
610  static void edma_resume(struct edma_chan *echan)
611  {
612  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
613  
614  	edma_shadow0_write_array(echan->ecc, SH_EESR,
615  				 EDMA_REG_ARRAY_INDEX(channel),
616  				 EDMA_CHANNEL_BIT(channel));
617  }
618  
619  static void edma_trigger_channel(struct edma_chan *echan)
620  {
621  	struct edma_cc *ecc = echan->ecc;
622  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
623  	int idx = EDMA_REG_ARRAY_INDEX(channel);
624  	int ch_bit = EDMA_CHANNEL_BIT(channel);
625  
626  	edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
627  
628  	dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
629  		edma_shadow0_read_array(ecc, SH_ESR, idx));
630  }
631  
632  static void edma_clean_channel(struct edma_chan *echan)
633  {
634  	struct edma_cc *ecc = echan->ecc;
635  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
636  	int idx = EDMA_REG_ARRAY_INDEX(channel);
637  	int ch_bit = EDMA_CHANNEL_BIT(channel);
638  
639  	dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
640  		edma_read_array(ecc, EDMA_EMR, idx));
641  	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
642  	/* Clear the corresponding EMR bits */
643  	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
644  	/* Clear any SER */
645  	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
646  	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
647  }
648  
649  /* Move channel to a specific event queue */
650  static void edma_assign_channel_eventq(struct edma_chan *echan,
651  				       enum dma_event_q eventq_no)
652  {
653  	struct edma_cc *ecc = echan->ecc;
654  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
655  	int bit = (channel & 0x7) * 4;
656  
657  	/* default to low priority queue */
658  	if (eventq_no == EVENTQ_DEFAULT)
659  		eventq_no = ecc->default_queue;
660  	if (eventq_no >= ecc->num_tc)
661  		return;
662  
663  	eventq_no &= 7;
664  	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
665  			  eventq_no << bit);
666  }
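/*
 * Worked example (illustrative): each DMAQNUM register holds eight 4-bit
 * queue-number fields, so moving channel 11 to a queue touches
 * DMAQNUM[11 >> 3] = DMAQNUM[1] at bits 14-12 (bit = (11 & 0x7) * 4 = 12).
 */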
667  
668  static int edma_alloc_channel(struct edma_chan *echan,
669  			      enum dma_event_q eventq_no)
670  {
671  	struct edma_cc *ecc = echan->ecc;
672  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
673  
674  	if (!test_bit(echan->ch_num, ecc->channels_mask)) {
675  		dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n",
676  			echan->ch_num);
677  		return -EINVAL;
678  	}
679  
680  	/* ensure access through shadow region 0 */
681  	edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
682  		       EDMA_CHANNEL_BIT(channel));
683  
684  	/* ensure no events are pending */
685  	edma_stop(echan);
686  
687  	edma_setup_interrupt(echan, true);
688  
689  	edma_assign_channel_eventq(echan, eventq_no);
690  
691  	return 0;
692  }
693  
694  static void edma_free_channel(struct edma_chan *echan)
695  {
696  	/* ensure no events are pending */
697  	edma_stop(echan);
698  	/* REVISIT should probably take out of shadow region 0 */
699  	edma_setup_interrupt(echan, false);
700  }
701  
702  static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
703  {
704  	return container_of(c, struct edma_chan, vchan.chan);
705  }
706  
707  static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
708  {
709  	return container_of(tx, struct edma_desc, vdesc.tx);
710  }
711  
712  static void edma_desc_free(struct virt_dma_desc *vdesc)
713  {
714  	kfree(container_of(vdesc, struct edma_desc, vdesc));
715  }
716  
717  /* Dispatch a queued descriptor to the controller (caller holds lock) */
718  static void edma_execute(struct edma_chan *echan)
719  {
720  	struct edma_cc *ecc = echan->ecc;
721  	struct virt_dma_desc *vdesc;
722  	struct edma_desc *edesc;
723  	struct device *dev = echan->vchan.chan.device->dev;
724  	int i, j, left, nslots;
725  
726  	if (!echan->edesc) {
727  		/* Setup is needed for the first transfer */
728  		vdesc = vchan_next_desc(&echan->vchan);
729  		if (!vdesc)
730  			return;
731  		list_del(&vdesc->node);
732  		echan->edesc = to_edma_desc(&vdesc->tx);
733  	}
734  
735  	edesc = echan->edesc;
736  
737  	/* Find out how many psets are left */
738  	left = edesc->pset_nr - edesc->processed;
739  	nslots = min(MAX_NR_SG, left);
740  	edesc->sg_len = 0;
741  
742  	/* Write descriptor PaRAM set(s) */
743  	for (i = 0; i < nslots; i++) {
744  		j = i + edesc->processed;
745  		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
746  		edesc->sg_len += edesc->pset[j].len;
747  		dev_vdbg(dev,
748  			 "\n pset[%d]:\n"
749  			 "  chnum\t%d\n"
750  			 "  slot\t%d\n"
751  			 "  opt\t%08x\n"
752  			 "  src\t%08x\n"
753  			 "  dst\t%08x\n"
754  			 "  abcnt\t%08x\n"
755  			 "  ccnt\t%08x\n"
756  			 "  bidx\t%08x\n"
757  			 "  cidx\t%08x\n"
758  			 "  lkrld\t%08x\n",
759  			 j, echan->ch_num, echan->slot[i],
760  			 edesc->pset[j].param.opt,
761  			 edesc->pset[j].param.src,
762  			 edesc->pset[j].param.dst,
763  			 edesc->pset[j].param.a_b_cnt,
764  			 edesc->pset[j].param.ccnt,
765  			 edesc->pset[j].param.src_dst_bidx,
766  			 edesc->pset[j].param.src_dst_cidx,
767  			 edesc->pset[j].param.link_bcntrld);
768  		/* Link to the previous slot if not the last set */
769  		if (i != (nslots - 1))
770  			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
771  	}
772  
773  	edesc->processed += nslots;
774  
775  	/*
776  	 * If this is the last set in a series of SG-list transactions
777  	 * then set up a link to the dummy slot; this results in all future
778  	 * events being absorbed and that's OK because we're done.
779  	 */
780  	if (edesc->processed == edesc->pset_nr) {
781  		if (edesc->cyclic)
782  			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
783  		else
784  			edma_link(ecc, echan->slot[nslots - 1],
785  				  echan->ecc->dummy_slot);
786  	}
787  
788  	if (echan->missed) {
789  		/*
790  		 * This happens due to setup times between intermediate
791  		 * transfers in long SG lists which have to be broken up into
792  		 * transfers of MAX_NR_SG
793  		 */
794  		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
795  		edma_clean_channel(echan);
796  		edma_stop(echan);
797  		edma_start(echan);
798  		edma_trigger_channel(echan);
799  		echan->missed = 0;
800  	} else if (edesc->processed <= MAX_NR_SG) {
801  		dev_dbg(dev, "first transfer starting on channel %d\n",
802  			echan->ch_num);
803  		edma_start(echan);
804  	} else {
805  		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
806  			echan->ch_num, edesc->processed);
807  		edma_resume(echan);
808  	}
809  }
810  
811  static int edma_terminate_all(struct dma_chan *chan)
812  {
813  	struct edma_chan *echan = to_edma_chan(chan);
814  	unsigned long flags;
815  	LIST_HEAD(head);
816  
817  	spin_lock_irqsave(&echan->vchan.lock, flags);
818  
819  	/*
820  	 * Stop DMA activity: we assume the callback will not be called
821  	 * after edma_terminate_all() returns (even if it does, it will see
822  	 * echan->edesc is NULL and exit.)
823  	 */
824  	if (echan->edesc) {
825  		edma_stop(echan);
826  		/* Move the cyclic channel back to default queue */
827  		if (!echan->tc && echan->edesc->cyclic)
828  			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
829  
830  		vchan_terminate_vdesc(&echan->edesc->vdesc);
831  		echan->edesc = NULL;
832  	}
833  
834  	vchan_get_all_descriptors(&echan->vchan, &head);
835  	spin_unlock_irqrestore(&echan->vchan.lock, flags);
836  	vchan_dma_desc_free_list(&echan->vchan, &head);
837  
838  	return 0;
839  }
840  
841  static void edma_synchronize(struct dma_chan *chan)
842  {
843  	struct edma_chan *echan = to_edma_chan(chan);
844  
845  	vchan_synchronize(&echan->vchan);
846  }
847  
848  static int edma_slave_config(struct dma_chan *chan,
849  	struct dma_slave_config *cfg)
850  {
851  	struct edma_chan *echan = to_edma_chan(chan);
852  
853  	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
854  	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
855  		return -EINVAL;
856  
857  	if (cfg->src_maxburst > chan->device->max_burst ||
858  	    cfg->dst_maxburst > chan->device->max_burst)
859  		return -EINVAL;
860  
861  	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
862  
863  	return 0;
864  }
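/*
 * A minimal client-side sketch (illustrative, not part of this driver) of
 * the configuration this callback receives via dmaengine_slave_config();
 * the FIFO address and burst size are made-up values:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = 0x48060020,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * An 8-byte bus width or a maxburst above the device limit would be
 * rejected with -EINVAL by the checks above.
 */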
865  
866  static int edma_dma_pause(struct dma_chan *chan)
867  {
868  	struct edma_chan *echan = to_edma_chan(chan);
869  
870  	if (!echan->edesc)
871  		return -EINVAL;
872  
873  	edma_pause(echan);
874  	return 0;
875  }
876  
877  static int edma_dma_resume(struct dma_chan *chan)
878  {
879  	struct edma_chan *echan = to_edma_chan(chan);
880  
881  	edma_resume(echan);
882  	return 0;
883  }
884  
885  /*
886   * A PaRAM set configuration abstraction used by other modes
887   * @chan: Channel whose PaRAM set we're configuring
888   * @epset: PaRAM set to initialize and set up
889   * @src_addr: Source address of the DMA
890   * @dst_addr: Destination address of the DMA
891   * @burst: In units of acnt, how much to send
892   * @acnt: Size in bytes of one array element (the device bus width)
893   * @dma_length: Total length of the DMA transfer
894   * @direction: Direction of the transfer
895   */
896  static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
897  			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
898  			    unsigned int acnt, unsigned int dma_length,
899  			    enum dma_transfer_direction direction)
900  {
901  	struct edma_chan *echan = to_edma_chan(chan);
902  	struct device *dev = chan->device->dev;
903  	struct edmacc_param *param = &epset->param;
904  	int bcnt, ccnt, cidx;
905  	int src_bidx, dst_bidx, src_cidx, dst_cidx;
906  	int absync;
907  
908  	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
909  	if (!burst)
910  		burst = 1;
911  	/*
912  	 * If the maxburst is equal to the fifo width, use
913  	 * A-synced transfers. This allows for large contiguous
914  	 * buffer transfers using only one PaRAM set.
915  	 */
916  	if (burst == 1) {
917  		/*
918  		 * For the A-sync case, bcnt and ccnt are the remainder
919  		 * and quotient respectively of the division of
920  		 * (dma_length / acnt) by (SZ_64K - 1). This is so
921  		 * that in case bcnt overflows, we have ccnt to use.
922  		 * Note: bcntrld is used in A-sync transfers only, and it
923  		 * only applies for sg_dma_len(sg) >= SZ_64K.
924  		 * In this case, the approach adopted is: bcnt for the
925  		 * first frame will be the remainder below. Then for
926  		 * every successive frame, bcnt will be SZ_64K - 1. This
927  		 * is assured as bcntrld = 0xffff at the end of this function.
928  		 */
929  		absync = false;
930  		ccnt = dma_length / acnt / (SZ_64K - 1);
931  		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
932  		/*
933  		 * If bcnt is non-zero, we have a remainder and hence an
934  		 * extra frame to transfer, so increment ccnt.
935  		 */
936  		if (bcnt)
937  			ccnt++;
938  		else
939  			bcnt = SZ_64K - 1;
940  		cidx = acnt;
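		/*
		 * Worked example (illustrative numbers, not from the
		 * source): dma_length = 400000 bytes with acnt = 4 gives
		 * 100000 elements; ccnt = 100000 / 65535 = 1 and
		 * bcnt = 100000 - 65535 = 34465, so ccnt is bumped to 2 to
		 * cover the remainder frame, with cidx = acnt = 4.
		 */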
941  	} else {
942  		/*
943  		 * If maxburst is greater than the fifo address_width,
944  		 * use AB-synced transfers where A count is the fifo
945  		 * address_width and B count is the maxburst. In this
946  		 * case, we are limited to transfers of C count frames
947  		 * of (address_width * maxburst) where C count is limited
948  		 * to SZ_64K-1. This places an upper bound on the length
949  		 * of an SG segment that can be handled.
950  		 */
951  		absync = true;
952  		bcnt = burst;
953  		ccnt = dma_length / (acnt * bcnt);
954  		if (ccnt > (SZ_64K - 1)) {
955  			dev_err(dev, "Exceeded max SG segment size\n");
956  			return -EINVAL;
957  		}
958  		cidx = acnt * bcnt;
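		/*
		 * Worked example (illustrative numbers): acnt = 4 and
		 * burst = 8 give 32-byte frames, so a 64 KiB segment needs
		 * ccnt = 65536 / 32 = 2048 frames, well under the
		 * SZ_64K - 1 frame limit checked above.
		 */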
959  	}
960  
961  	epset->len = dma_length;
962  
963  	if (direction == DMA_MEM_TO_DEV) {
964  		src_bidx = acnt;
965  		src_cidx = cidx;
966  		dst_bidx = 0;
967  		dst_cidx = 0;
968  		epset->addr = src_addr;
969  	} else if (direction == DMA_DEV_TO_MEM)  {
970  		src_bidx = 0;
971  		src_cidx = 0;
972  		dst_bidx = acnt;
973  		dst_cidx = cidx;
974  		epset->addr = dst_addr;
975  	} else if (direction == DMA_MEM_TO_MEM)  {
976  		src_bidx = acnt;
977  		src_cidx = cidx;
978  		dst_bidx = acnt;
979  		dst_cidx = cidx;
980  		epset->addr = src_addr;
981  	} else {
982  		dev_err(dev, "%s: direction not implemented yet\n", __func__);
983  		return -EINVAL;
984  	}
985  
986  	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
987  	/* Configure A or AB synchronized transfers */
988  	if (absync)
989  		param->opt |= SYNCDIM;
990  
991  	param->src = src_addr;
992  	param->dst = dst_addr;
993  
994  	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
995  	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
996  
997  	param->a_b_cnt = bcnt << 16 | acnt;
998  	param->ccnt = ccnt;
999  	/*
1000  	 * The only time (bcntrld) auto reload is required is the
1001  	 * A-sync case, and then only a reload value of SZ_64K - 1 is
1002  	 * needed. 'link' is initially set to NULL and will be populated
1003  	 * later by edma_execute.
1004  	 */
1005  	param->link_bcntrld = 0xffffffff;
1006  	return absync;
1007  }
1008  
1009  static struct dma_async_tx_descriptor *edma_prep_slave_sg(
1010  	struct dma_chan *chan, struct scatterlist *sgl,
1011  	unsigned int sg_len, enum dma_transfer_direction direction,
1012  	unsigned long tx_flags, void *context)
1013  {
1014  	struct edma_chan *echan = to_edma_chan(chan);
1015  	struct device *dev = chan->device->dev;
1016  	struct edma_desc *edesc;
1017  	dma_addr_t src_addr = 0, dst_addr = 0;
1018  	enum dma_slave_buswidth dev_width;
1019  	u32 burst;
1020  	struct scatterlist *sg;
1021  	int i, nslots, ret;
1022  
1023  	if (unlikely(!echan || !sgl || !sg_len))
1024  		return NULL;
1025  
1026  	if (direction == DMA_DEV_TO_MEM) {
1027  		src_addr = echan->cfg.src_addr;
1028  		dev_width = echan->cfg.src_addr_width;
1029  		burst = echan->cfg.src_maxburst;
1030  	} else if (direction == DMA_MEM_TO_DEV) {
1031  		dst_addr = echan->cfg.dst_addr;
1032  		dev_width = echan->cfg.dst_addr_width;
1033  		burst = echan->cfg.dst_maxburst;
1034  	} else {
1035  		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1036  		return NULL;
1037  	}
1038  
1039  	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1040  		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1041  		return NULL;
1042  	}
1043  
1044  	edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
1045  	if (!edesc)
1046  		return NULL;
1047  
1048  	edesc->pset_nr = sg_len;
1049  	edesc->residue = 0;
1050  	edesc->direction = direction;
1051  	edesc->echan = echan;
1052  
1053  	/* Allocate a PaRAM slot, if needed */
1054  	nslots = min_t(unsigned, MAX_NR_SG, sg_len);
1055  
1056  	for (i = 0; i < nslots; i++) {
1057  		if (echan->slot[i] < 0) {
1058  			echan->slot[i] =
1059  				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1060  			if (echan->slot[i] < 0) {
1061  				kfree(edesc);
1062  				dev_err(dev, "%s: Failed to allocate slot\n",
1063  					__func__);
1064  				return NULL;
1065  			}
1066  		}
1067  	}
1068  
1069  	/* Configure PaRAM sets for each SG */
1070  	for_each_sg(sgl, sg, sg_len, i) {
1071  		/* Get address for each SG */
1072  		if (direction == DMA_DEV_TO_MEM)
1073  			dst_addr = sg_dma_address(sg);
1074  		else
1075  			src_addr = sg_dma_address(sg);
1076  
1077  		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1078  				       dst_addr, burst, dev_width,
1079  				       sg_dma_len(sg), direction);
1080  		if (ret < 0) {
1081  			kfree(edesc);
1082  			return NULL;
1083  		}
1084  
1085  		edesc->absync = ret;
1086  		edesc->residue += sg_dma_len(sg);
1087  
1088  		if (i == sg_len - 1)
1089  			/* Enable completion interrupt */
1090  			edesc->pset[i].param.opt |= TCINTEN;
1091  		else if (!((i+1) % MAX_NR_SG))
1092  			/*
1093  			 * Enable early completion interrupt for the
1094  			 * intermediate set. In this case the driver will be
1095  			 * notified when the paRAM set is submitted to TC. This
1096  			 * will allow more time to set up the next set of slots.
1097  			 */
1098  			edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
1099  	}
1100  	edesc->residue_stat = edesc->residue;
1101  
1102  	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1103  }
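/*
 * Client-side sketch (illustrative, not part of this driver) of driving
 * the prep callback above through the generic dmaengine API; my_done_cb
 * is a hypothetical completion callback:
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = my_done_cb;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */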
1104  
1105  static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1106  	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1107  	size_t len, unsigned long tx_flags)
1108  {
1109  	int ret, nslots;
1110  	struct edma_desc *edesc;
1111  	struct device *dev = chan->device->dev;
1112  	struct edma_chan *echan = to_edma_chan(chan);
1113  	unsigned int width, pset_len, array_size;
1114  
1115  	if (unlikely(!echan || !len))
1116  		return NULL;
1117  
1118  	/* Align the array size (acnt block) with the transfer properties */
1119  	switch (__ffs((src | dest | len))) {
1120  	case 0:
1121  		array_size = SZ_32K - 1;
1122  		break;
1123  	case 1:
1124  		array_size = SZ_32K - 2;
1125  		break;
1126  	default:
1127  		array_size = SZ_32K - 4;
1128  		break;
1129  	}
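	/*
	 * Illustrative example: if src, dest and len are all even but not
	 * 4-byte aligned, __ffs(src | dest | len) is 1 and array_size
	 * becomes SZ_32K - 2, so ACNT stays a multiple of the common
	 * 2-byte alignment of the transfer.
	 */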
1130  
1131  	if (len < SZ_64K) {
1132  		/*
1133  		 * Transfer size less than 64K can be handled with one paRAM
1134  		 * slot and with one burst.
1135  		 * ACNT = length
1136  		 */
1137  		width = len;
1138  		pset_len = len;
1139  		nslots = 1;
1140  	} else {
1141  		/*
1142  		 * Transfer size bigger than 64K will be handled with maximum of
1143  		 * two paRAM slots.
1144  		 * slot1: (full_length / 32767) times 32767 bytes bursts.
1145  		 *	  ACNT = 32767, length1: (full_length / 32767) * 32767
1146  		 * slot2: the remaining amount of data after slot1.
1147  		 *	  ACNT = full_length - length1, length2 = ACNT
1148  		 *
1149  		 * When the full_length is a multiple of 32767 one slot can be
1150  		 * used to complete the transfer.
1151  		 */
1152  		width = array_size;
1153  		pset_len = rounddown(len, width);
1154  		/* One slot is enough for lengths that are a multiple of array_size */
1155  		if (unlikely(pset_len == len))
1156  			nslots = 1;
1157  		else
1158  			nslots = 2;
1159  	}
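	/*
	 * Worked example (illustrative numbers): len = 100000 with
	 * array_size = 32767 gives pset_len = rounddown(100000, 32767) =
	 * 98301 (three 32767-byte bursts) for the first slot, leaving
	 * 100000 - 98301 = 1699 bytes for the second slot.
	 */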
1160  
1161  	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
1162  	if (!edesc)
1163  		return NULL;
1164  
1165  	edesc->pset_nr = nslots;
1166  	edesc->residue = edesc->residue_stat = len;
1167  	edesc->direction = DMA_MEM_TO_MEM;
1168  	edesc->echan = echan;
1169  
1170  	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
1171  			       width, pset_len, DMA_MEM_TO_MEM);
1172  	if (ret < 0) {
1173  		kfree(edesc);
1174  		return NULL;
1175  	}
1176  
1177  	edesc->absync = ret;
1178  
1179  	edesc->pset[0].param.opt |= ITCCHEN;
1180  	if (nslots == 1) {
1181  		/* Enable transfer complete interrupt if requested */
1182  		if (tx_flags & DMA_PREP_INTERRUPT)
1183  			edesc->pset[0].param.opt |= TCINTEN;
1184  	} else {
1185  		/* Enable transfer complete chaining for the first slot */
1186  		edesc->pset[0].param.opt |= TCCHEN;
1187  
1188  		if (echan->slot[1] < 0) {
1189  			echan->slot[1] = edma_alloc_slot(echan->ecc,
1190  							 EDMA_SLOT_ANY);
1191  			if (echan->slot[1] < 0) {
1192  				kfree(edesc);
1193  				dev_err(dev, "%s: Failed to allocate slot\n",
1194  					__func__);
1195  				return NULL;
1196  			}
1197  		}
1198  		dest += pset_len;
1199  		src += pset_len;
1200  		pset_len = width = len % array_size;
1201  
1202  		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
1203  				       width, pset_len, DMA_MEM_TO_MEM);
1204  		if (ret < 0) {
1205  			kfree(edesc);
1206  			return NULL;
1207  		}
1208  
1209  		edesc->pset[1].param.opt |= ITCCHEN;
1210  		/* Enable transfer complete interrupt if requested */
1211  		if (tx_flags & DMA_PREP_INTERRUPT)
1212  			edesc->pset[1].param.opt |= TCINTEN;
1213  	}
1214  
1215  	if (!(tx_flags & DMA_PREP_INTERRUPT))
1216  		edesc->polled = true;
1217  
1218  	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1219  }
1220  
1221  static struct dma_async_tx_descriptor *
1222  edma_prep_dma_interleaved(struct dma_chan *chan,
1223  			  struct dma_interleaved_template *xt,
1224  			  unsigned long tx_flags)
1225  {
1226  	struct device *dev = chan->device->dev;
1227  	struct edma_chan *echan = to_edma_chan(chan);
1228  	struct edmacc_param *param;
1229  	struct edma_desc *edesc;
1230  	size_t src_icg, dst_icg;
1231  	int src_bidx, dst_bidx;
1232  
1233  	/* Slave mode is not supported */
1234  	if (is_slave_direction(xt->dir))
1235  		return NULL;
1236  
1237  	if (xt->frame_size != 1 || xt->numf == 0)
1238  		return NULL;
1239  
1240  	if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
1241  		return NULL;
1242  
1243  	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
1244  	if (src_icg) {
1245  		src_bidx = src_icg + xt->sgl[0].size;
1246  	} else if (xt->src_inc) {
1247  		src_bidx = xt->sgl[0].size;
1248  	} else {
1249  		dev_err(dev, "%s: SRC constant addressing is not supported\n",
1250  			__func__);
1251  		return NULL;
1252  	}
1253  
1254  	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
1255  	if (dst_icg) {
1256  		dst_bidx = dst_icg + xt->sgl[0].size;
1257  	} else if (xt->dst_inc) {
1258  		dst_bidx = xt->sgl[0].size;
1259  	} else {
1260  		dev_err(dev, "%s: DST constant addressing is not supported\n",
1261  			__func__);
1262  		return NULL;
1263  	}
1264  
1265  	if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
1266  		return NULL;
1267  
1268  	edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
1269  	if (!edesc)
1270  		return NULL;
1271  
1272  	edesc->direction = DMA_MEM_TO_MEM;
1273  	edesc->echan = echan;
1274  	edesc->pset_nr = 1;
1275  
1276  	param = &edesc->pset[0].param;
1277  
1278  	param->src = xt->src_start;
1279  	param->dst = xt->dst_start;
1280  	param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
1281  	param->ccnt = 1;
1282  	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
1283  	param->src_dst_cidx = 0;
1284  
1285  	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
1286  	param->opt |= ITCCHEN;
1287  	/* Enable transfer complete interrupt if requested */
1288  	if (tx_flags & DMA_PREP_INTERRUPT)
1289  		param->opt |= TCINTEN;
1290  	else
1291  		edesc->polled = true;
1292  
1293  	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1294  }
1295  
1296  static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1297  	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1298  	size_t period_len, enum dma_transfer_direction direction,
1299  	unsigned long tx_flags)
1300  {
1301  	struct edma_chan *echan = to_edma_chan(chan);
1302  	struct device *dev = chan->device->dev;
1303  	struct edma_desc *edesc;
1304  	dma_addr_t src_addr, dst_addr;
1305  	enum dma_slave_buswidth dev_width;
1306  	bool use_intermediate = false;
1307  	u32 burst;
1308  	int i, ret, nslots;
1309  
1310  	if (unlikely(!echan || !buf_len || !period_len))
1311  		return NULL;
1312  
1313  	if (direction == DMA_DEV_TO_MEM) {
1314  		src_addr = echan->cfg.src_addr;
1315  		dst_addr = buf_addr;
1316  		dev_width = echan->cfg.src_addr_width;
1317  		burst = echan->cfg.src_maxburst;
1318  	} else if (direction == DMA_MEM_TO_DEV) {
1319  		src_addr = buf_addr;
1320  		dst_addr = echan->cfg.dst_addr;
1321  		dev_width = echan->cfg.dst_addr_width;
1322  		burst = echan->cfg.dst_maxburst;
1323  	} else {
1324  		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1325  		return NULL;
1326  	}
1327  
1328  	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1329  		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1330  		return NULL;
1331  	}
1332  
1333  	if (unlikely(buf_len % period_len)) {
1334  		dev_err(dev, "Buffer length should be a multiple of the period length\n");
1335  		return NULL;
1336  	}
1337  
1338  	nslots = (buf_len / period_len) + 1;
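	/*
	 * Worked example (illustrative numbers): an 8 KiB ring with 2 KiB
	 * periods gives nslots = 8192 / 2048 + 1 = 5: four period slots
	 * plus one extra slot that receives a copy of the first period's
	 * set so the ring can be closed on itself (see the linking done in
	 * edma_execute()).
	 */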
1339  
1340  	/*
1341  	 * Cyclic DMA users such as audio cannot tolerate delays introduced
1342  	 * by cases where the number of periods is more than the maximum
1343  	 * number of SGs the EDMA driver can handle at a time. For DMA types
1344  	 * such as Slave SGs, such delays are tolerable and synchronized,
1345  	 * but the synchronization is difficult to achieve with Cyclic and
1346  	 * cannot be guaranteed, so we error out early.
1347  	 */
1348  	if (nslots > MAX_NR_SG) {
1349  		/*
1350  		 * If the burst and period sizes are the same, we can put
1351  		 * the full buffer into a single period and activate
1352  		 * intermediate interrupts. This will produce interrupts
1353  		 * after each burst, which is also after each desired period.
1354  		 */
1355  		if (burst == period_len) {
1356  			period_len = buf_len;
1357  			nslots = 2;
1358  			use_intermediate = true;
1359  		} else {
1360  			return NULL;
1361  		}
1362  	}
1363  
1364  	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
1365  	if (!edesc)
1366  		return NULL;
1367  
1368  	edesc->cyclic = 1;
1369  	edesc->pset_nr = nslots;
1370  	edesc->residue = edesc->residue_stat = buf_len;
1371  	edesc->direction = direction;
1372  	edesc->echan = echan;
1373  
1374  	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
1375  		__func__, echan->ch_num, nslots, period_len, buf_len);
1376  
1377  	for (i = 0; i < nslots; i++) {
1378  		/* Allocate a PaRAM slot, if needed */
1379  		if (echan->slot[i] < 0) {
1380  			echan->slot[i] =
1381  				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1382  			if (echan->slot[i] < 0) {
1383  				kfree(edesc);
1384  				dev_err(dev, "%s: Failed to allocate slot\n",
1385  					__func__);
1386  				return NULL;
1387  			}
1388  		}
1389  
1390  		if (i == nslots - 1) {
1391  			memcpy(&edesc->pset[i], &edesc->pset[0],
1392  			       sizeof(edesc->pset[0]));
1393  			break;
1394  		}
1395  
1396  		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1397  				       dst_addr, burst, dev_width, period_len,
1398  				       direction);
1399  		if (ret < 0) {
1400  			kfree(edesc);
1401  			return NULL;
1402  		}
1403  
1404  		if (direction == DMA_DEV_TO_MEM)
1405  			dst_addr += period_len;
1406  		else
1407  			src_addr += period_len;
1408  
1409  		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
1410  		dev_vdbg(dev,
1411  			"\n pset[%d]:\n"
1412  			"  chnum\t%d\n"
1413  			"  slot\t%d\n"
1414  			"  opt\t%08x\n"
1415  			"  src\t%08x\n"
1416  			"  dst\t%08x\n"
1417  			"  abcnt\t%08x\n"
1418  			"  ccnt\t%08x\n"
1419  			"  bidx\t%08x\n"
1420  			"  cidx\t%08x\n"
1421  			"  lkrld\t%08x\n",
1422  			i, echan->ch_num, echan->slot[i],
1423  			edesc->pset[i].param.opt,
1424  			edesc->pset[i].param.src,
1425  			edesc->pset[i].param.dst,
1426  			edesc->pset[i].param.a_b_cnt,
1427  			edesc->pset[i].param.ccnt,
1428  			edesc->pset[i].param.src_dst_bidx,
1429  			edesc->pset[i].param.src_dst_cidx,
1430  			edesc->pset[i].param.link_bcntrld);
1431  
1432  		edesc->absync = ret;
1433  
1434  		/*
1435  		 * Enable period interrupt only if it is requested
1436  		 */
1437  		if (tx_flags & DMA_PREP_INTERRUPT) {
1438  			edesc->pset[i].param.opt |= TCINTEN;
1439  
1440  			/* Also enable intermediate interrupts if necessary */
1441  			if (use_intermediate)
1442  				edesc->pset[i].param.opt |= ITCINTEN;
1443  		}
1444  	}
1445  
1446  	/* Place the cyclic channel to highest priority queue */
1447  	if (!echan->tc)
1448  		edma_assign_channel_eventq(echan, EVENTQ_0);
1449  
1450  	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1451  }
1452  
1453  static void edma_completion_handler(struct edma_chan *echan)
1454  {
1455  	struct device *dev = echan->vchan.chan.device->dev;
1456  	struct edma_desc *edesc;
1457  
1458  	spin_lock(&echan->vchan.lock);
1459  	edesc = echan->edesc;
1460  	if (edesc) {
1461  		if (edesc->cyclic) {
1462  			vchan_cyclic_callback(&edesc->vdesc);
1463  			spin_unlock(&echan->vchan.lock);
1464  			return;
1465  		} else if (edesc->processed == edesc->pset_nr) {
1466  			edesc->residue = 0;
1467  			edma_stop(echan);
1468  			vchan_cookie_complete(&edesc->vdesc);
1469  			echan->edesc = NULL;
1470  
1471  			dev_dbg(dev, "Transfer completed on channel %d\n",
1472  				echan->ch_num);
1473  		} else {
1474  			dev_dbg(dev, "Sub transfer completed on channel %d\n",
1475  				echan->ch_num);
1476  
1477  			edma_pause(echan);
1478  
1479  			/* Update statistics for tx_status */
1480  			edesc->residue -= edesc->sg_len;
1481  			edesc->residue_stat = edesc->residue;
1482  			edesc->processed_stat = edesc->processed;
1483  		}
1484  		edma_execute(echan);
1485  	}
1486  
1487  	spin_unlock(&echan->vchan.lock);
1488  }
1489  
1490  /* eDMA interrupt handler */
1491  static irqreturn_t dma_irq_handler(int irq, void *data)
1492  {
1493  	struct edma_cc *ecc = data;
1494  	int ctlr;
1495  	u32 sh_ier;
1496  	u32 sh_ipr;
1497  	u32 bank;
1498  
1499  	ctlr = ecc->id;
1500  	if (ctlr < 0)
1501  		return IRQ_NONE;
1502  
1503  	dev_vdbg(ecc->dev, "dma_irq_handler\n");
1504  
1505  	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
1506  	if (!sh_ipr) {
1507  		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
1508  		if (!sh_ipr)
1509  			return IRQ_NONE;
1510  		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
1511  		bank = 1;
1512  	} else {
1513  		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
1514  		bank = 0;
1515  	}
1516  
1517  	do {
1518  		u32 slot;
1519  		u32 channel;
1520  
1521  		slot = __ffs(sh_ipr);
1522  		sh_ipr &= ~(BIT(slot));
1523  
1524  		if (sh_ier & BIT(slot)) {
1525  			channel = (bank << 5) | slot;
1526  			/* Clear the corresponding IPR bits */
1527  			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
1528  			edma_completion_handler(&ecc->slave_chans[channel]);
1529  		}
1530  	} while (sh_ipr);
1531  
1532  	edma_shadow0_write(ecc, SH_IEVAL, 1);
1533  	return IRQ_HANDLED;
1534  }
1535  
1536  static void edma_error_handler(struct edma_chan *echan)
1537  {
1538  	struct edma_cc *ecc = echan->ecc;
1539  	struct device *dev = echan->vchan.chan.device->dev;
1540  	struct edmacc_param p;
1541  	int err;
1542  
1543  	if (!echan->edesc)
1544  		return;
1545  
1546  	spin_lock(&echan->vchan.lock);
1547  
1548  	err = edma_read_slot(ecc, echan->slot[0], &p);
1549  
1550  	/*
1551  	 * Issue later based on the missed flag, which is sure to be
1552  	 * acted on, as either:
1553  	 * (1) we finished transmitting an intermediate slot and
1554  	 *     edma_execute is coming up, or
1555  	 * (2) we finished the current transfer and issue will
1556  	 *     call edma_execute.
1557  	 *
1558  	 * Important note: issuing can be dangerous here and
1559  	 * lead to some nasty recursion when we are in a NULL
1560  	 * slot. So we avoid doing so and set the missed flag.
1561  	 */
1562  	if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
1563  		dev_dbg(dev, "Error on null slot, setting miss\n");
1564  		echan->missed = 1;
1565  	} else {
1566  		/*
1567  		 * The slot is already programmed but the event got
1568  		 * missed, so it's safe to issue it here.
1569  		 */
1570  		dev_dbg(dev, "Missed event, TRIGGERING\n");
1571  		edma_clean_channel(echan);
1572  		edma_stop(echan);
1573  		edma_start(echan);
1574  		edma_trigger_channel(echan);
1575  	}
1576  	spin_unlock(&echan->vchan.lock);
1577  }
1578  
1579  static inline bool edma_error_pending(struct edma_cc *ecc)
1580  {
1581  	if (edma_read_array(ecc, EDMA_EMR, 0) ||
1582  	    edma_read_array(ecc, EDMA_EMR, 1) ||
1583  	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
1584  		return true;
1585  
1586  	return false;
1587  }
1588  
1589  /* eDMA error interrupt handler */
1590  static irqreturn_t dma_ccerr_handler(int irq, void *data)
1591  {
1592  	struct edma_cc *ecc = data;
1593  	int i, j;
1594  	int ctlr;
1595  	unsigned int cnt = 0;
1596  	unsigned int val;
1597  
1598  	ctlr = ecc->id;
1599  	if (ctlr < 0)
1600  		return IRQ_NONE;
1601  
1602  	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
1603  
1604  	if (!edma_error_pending(ecc)) {
1605  		/*
1606  		 * The registers indicate no pending error event but the irq
1607  		 * handler has been called.
1608  		 * Ask eDMA to re-evaluate the error registers.
1609  		 */
1610  		dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
1611  			__func__);
1612  		edma_write(ecc, EDMA_EEVAL, 1);
1613  		return IRQ_NONE;
1614  	}
1615  
1616  	while (1) {
1617  		/* Event missed register(s) */
1618  		for (j = 0; j < 2; j++) {
1619  			unsigned long emr;
1620  
1621  			val = edma_read_array(ecc, EDMA_EMR, j);
1622  			if (!val)
1623  				continue;
1624  
1625  			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
1626  			emr = val;
1627  			for_each_set_bit(i, &emr, 32) {
1628  				int k = (j << 5) + i;
1629  
1630  				/* Clear the corresponding EMR bits */
1631  				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
1632  				/* Clear any SER */
1633  				edma_shadow0_write_array(ecc, SH_SECR, j,
1634  							 BIT(i));
1635  				edma_error_handler(&ecc->slave_chans[k]);
1636  			}
1637  		}
1638  
1639  		val = edma_read(ecc, EDMA_QEMR);
1640  		if (val) {
1641  			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
1642  			/* Not reported, just clear the interrupt reason. */
1643  			edma_write(ecc, EDMA_QEMCR, val);
1644  			edma_shadow0_write(ecc, SH_QSECR, val);
1645  		}
1646  
1647  		val = edma_read(ecc, EDMA_CCERR);
1648  		if (val) {
1649  			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
1650  			/* Not reported, just clear the interrupt reason. */
1651  			edma_write(ecc, EDMA_CCERRCLR, val);
1652  		}
1653  
1654  		if (!edma_error_pending(ecc))
1655  			break;
1656  		cnt++;
1657  		if (cnt > 10)
1658  			break;
1659  	}
1660  	edma_write(ecc, EDMA_EEVAL, 1);
1661  	return IRQ_HANDLED;
1662  }
1663  
1664  /* Alloc channel resources */
1665  static int edma_alloc_chan_resources(struct dma_chan *chan)
1666  {
1667  	struct edma_chan *echan = to_edma_chan(chan);
1668  	struct edma_cc *ecc = echan->ecc;
1669  	struct device *dev = ecc->dev;
1670  	enum dma_event_q eventq_no = EVENTQ_DEFAULT;
1671  	int ret;
1672  
1673  	if (echan->tc) {
1674  		eventq_no = echan->tc->id;
1675  	} else if (ecc->tc_list) {
1676  		/* memcpy channel */
1677  		echan->tc = &ecc->tc_list[ecc->info->default_queue];
1678  		eventq_no = echan->tc->id;
1679  	}
1680  
1681  	ret = edma_alloc_channel(echan, eventq_no);
1682  	if (ret)
1683  		return ret;
1684  
1685  	echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
1686  	if (echan->slot[0] < 0) {
1687  		dev_err(dev, "Entry slot allocation failed for channel %u\n",
1688  			EDMA_CHAN_SLOT(echan->ch_num));
1689  		ret = echan->slot[0];
1690  		goto err_slot;
1691  	}
1692  
1693  	/* Set up channel -> slot mapping for the entry slot */
1694  	edma_set_chmap(echan, echan->slot[0]);
1695  	echan->alloced = true;
1696  
1697  	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
1698  		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
1699  		echan->hw_triggered ? "HW" : "SW");
1700  
1701  	return 0;
1702  
1703  err_slot:
1704  	edma_free_channel(echan);
1705  	return ret;
1706  }
1707  
1708  /* Free channel resources */
1709  static void edma_free_chan_resources(struct dma_chan *chan)
1710  {
1711  	struct edma_chan *echan = to_edma_chan(chan);
1712  	struct device *dev = echan->ecc->dev;
1713  	int i;
1714  
1715  	/* Terminate transfers */
1716  	edma_stop(echan);
1717  
1718  	vchan_free_chan_resources(&echan->vchan);
1719  
1720  	/* Free EDMA PaRAM slots */
1721  	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
1722  		if (echan->slot[i] >= 0) {
1723  			edma_free_slot(echan->ecc, echan->slot[i]);
1724  			echan->slot[i] = -1;
1725  		}
1726  	}
1727  
1728  	/* Set entry slot to the dummy slot */
1729  	edma_set_chmap(echan, echan->ecc->dummy_slot);
1730  
1731  	/* Free EDMA channel */
1732  	if (echan->alloced) {
1733  		edma_free_channel(echan);
1734  		echan->alloced = false;
1735  	}
1736  
1737  	echan->tc = NULL;
1738  	echan->hw_triggered = false;
1739  
1740  	dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
1741  		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
1742  }
1743  
1744  /* Send pending descriptor to hardware */
1745  static void edma_issue_pending(struct dma_chan *chan)
1746  {
1747  	struct edma_chan *echan = to_edma_chan(chan);
1748  	unsigned long flags;
1749  
1750  	spin_lock_irqsave(&echan->vchan.lock, flags);
1751  	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
1752  		edma_execute(echan);
1753  	spin_unlock_irqrestore(&echan->vchan.lock, flags);
1754  }
1755  
1756  /*
1757   * This limit exists to avoid a possible infinite loop when waiting for proof
1758   * that a particular transfer is completed. This limit can be hit if there
1759   * are large bursts to/from slow devices or the CPU is never able to catch
1760   * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
1761   * RX-FIFO, as many as 55 loops have been seen.
1762   */
1763  #define EDMA_MAX_TR_WAIT_LOOPS 1000
1764  
1765  static u32 edma_residue(struct edma_desc *edesc)
1766  {
1767  	bool dst = edesc->direction == DMA_DEV_TO_MEM;
1768  	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
1769  	struct edma_chan *echan = edesc->echan;
1770  	struct edma_pset *pset = edesc->pset;
1771  	dma_addr_t done, pos, pos_old;
1772  	int channel = EDMA_CHAN_SLOT(echan->ch_num);
1773  	int idx = EDMA_REG_ARRAY_INDEX(channel);
1774  	int ch_bit = EDMA_CHANNEL_BIT(channel);
1775  	int event_reg;
1776  	int i;
1777  
1778  	/*
1779  	 * We always read the dst/src position from the first PaRAM
1780  	 * set (pset). That's the one which is active now.
1781  	 */
1782  	pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1783  
1784  	/*
1785  	 * "pos" may represent a transfer request that is still being
1786  	 * processed by the EDMACC or EDMATC. We busy-wait until one of
1787  	 * the following occurs:
1788  	 *   1. the event is no longer pending for the channel
1789  	 *   2. the position has been updated
1790  	 *   3. we hit the loop limit
1791  	 */
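      	/*
      	 * Slave transfers are HW-triggered, so a pending event is visible
      	 * in ER; memcpy transfers are started in software by writing ESR,
      	 * so poll that register instead.
      	 */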
1792  	if (is_slave_direction(edesc->direction))
1793  		event_reg = SH_ER;
1794  	else
1795  		event_reg = SH_ESR;
1796  
1797  	pos_old = pos;
1798  	while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
1799  		pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1800  		if (pos != pos_old)
1801  			break;
1802  
1803  		if (!--loop_count) {
1804  			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
1805  				"%s: timeout waiting for PaRAM update\n",
1806  				__func__);
1807  			break;
1808  		}
1809  
1810  		cpu_relax();
1811  	}
1812  
1813  	/*
1814  	 * Cyclic is simple. Just subtract pset[0].addr from pos.
1815  	 *
1816  	 * We never update edesc->residue in the cyclic case, so it always
1817  	 * holds the full buffer length and the subtraction yields the
1818  	 * remaining room to the end of the circular buffer.
1819  	 */
1820  	if (edesc->cyclic) {
1821  		done = pos - pset->addr;
1822  		edesc->residue_stat = edesc->residue - done;
1823  		return edesc->residue_stat;
1824  	}
1825  
1826  	/*
1827  	 * If the position is 0, then the EDMA has loaded the closing dummy
1828  	 * slot and the transfer is complete.
1829  	 */
1830  	if (!pos)
1831  		return 0;
1832  	/*
1833  	 * For SG operation we resume the walk from the last
1834  	 * processed pset.
1835  	 */
1836  	pset += edesc->processed_stat;
1837  
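      	/*
      	 * Illustration with hypothetical numbers: for two 512 byte psets
      	 * and residue_stat = 1024, a position 128 bytes into the first
      	 * pset yields 1024 - (pos - pset->addr) = 896 bytes remaining.
      	 */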
1838  	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
1839  		/*
1840  		 * If we are inside this pset address range, we know
1841  		 * this is the active one. Get the current delta and
1842  		 * stop walking the psets.
1843  		 */
1844  		if (pos >= pset->addr && pos < pset->addr + pset->len)
1845  			return edesc->residue_stat - (pos - pset->addr);
1846  
1847  		/* Otherwise mark it done and update residue_stat. */
1848  		edesc->processed_stat++;
1849  		edesc->residue_stat -= pset->len;
1850  	}
1851  	return edesc->residue_stat;
1852  }
1853  
1854  /* Check request completion status */
1855  static enum dma_status edma_tx_status(struct dma_chan *chan,
1856  				      dma_cookie_t cookie,
1857  				      struct dma_tx_state *txstate)
1858  {
1859  	struct edma_chan *echan = to_edma_chan(chan);
1860  	struct dma_tx_state txstate_tmp;
1861  	enum dma_status ret;
1862  	unsigned long flags;
1863  
1864  	ret = dma_cookie_status(chan, cookie, txstate);
1865  
1866  	if (ret == DMA_COMPLETE)
1867  		return ret;
1868  
1869  	/* Provide a dummy dma_tx_state for completion checking */
1870  	if (!txstate)
1871  		txstate = &txstate_tmp;
1872  
1873  	spin_lock_irqsave(&echan->vchan.lock, flags);
1874  	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
1875  		txstate->residue = edma_residue(echan->edesc);
1876  	} else {
1877  		struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
1878  							      cookie);
1879  
1880  		if (vdesc)
1881  			txstate->residue = to_edma_desc(&vdesc->tx)->residue;
1882  		else
1883  			txstate->residue = 0;
1884  	}
1885  
1886  	/*
1887  	 * Mark the cookie completed if the residue is 0 for non-cyclic
1888  	 * transfers
1889  	 */
1890  	if (ret != DMA_COMPLETE && !txstate->residue &&
1891  	    echan->edesc && echan->edesc->polled &&
1892  	    echan->edesc->vdesc.tx.cookie == cookie) {
1893  		edma_stop(echan);
1894  		vchan_cookie_complete(&echan->edesc->vdesc);
1895  		echan->edesc = NULL;
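      		/* Start the next queued descriptor, if any */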
1896  		edma_execute(echan);
1897  		ret = DMA_COMPLETE;
1898  	}
1899  
1900  	spin_unlock_irqrestore(&echan->vchan.lock, flags);
1901  
1902  	return ret;
1903  }
1904  
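      /*
       * The memcpy channel list is -1 terminated, built either from platform
       * data or from DT; e.g. (hypothetical channel numbers):
       *	s32 memcpy_channels[] = { 20, 21, -1 };
       */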
1905  static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
1906  {
1907  	if (!memcpy_channels)
1908  		return false;
1909  	while (*memcpy_channels != -1) {
1910  		if (*memcpy_channels == ch_num)
1911  			return true;
1912  		memcpy_channels++;
1913  	}
1914  	return false;
1915  }
1916  
1917  #define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
1918  				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1919  				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
1920  				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1921  
1922  static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
1923  {
1924  	struct dma_device *s_ddev = &ecc->dma_slave;
1925  	struct dma_device *m_ddev = NULL;
1926  	s32 *memcpy_channels = ecc->info->memcpy_channels;
1927  	int i, j;
1928  
1929  	dma_cap_zero(s_ddev->cap_mask);
1930  	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
1931  	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
1932  	if (ecc->legacy_mode && !memcpy_channels) {
1933  		dev_warn(ecc->dev,
1934  			 "Legacy memcpy is enabled, things might not work\n");
1935  
1936  		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
1937  		dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
1938  		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
1939  		s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
1940  		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
1941  	}
1942  
1943  	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
1944  	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
1945  	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
1946  	s_ddev->device_free_chan_resources = edma_free_chan_resources;
1947  	s_ddev->device_issue_pending = edma_issue_pending;
1948  	s_ddev->device_tx_status = edma_tx_status;
1949  	s_ddev->device_config = edma_slave_config;
1950  	s_ddev->device_pause = edma_dma_pause;
1951  	s_ddev->device_resume = edma_dma_resume;
1952  	s_ddev->device_terminate_all = edma_terminate_all;
1953  	s_ddev->device_synchronize = edma_synchronize;
1954  
1955  	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
1956  	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
1957  	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
1958  	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1959  	s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */
1960  
1961  	s_ddev->dev = ecc->dev;
1962  	INIT_LIST_HEAD(&s_ddev->channels);
1963  
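      	/*
      	 * When dedicated memcpy channels are given, register a second
      	 * dma_device so that the MEM_TO_MEM capabilities are kept separate
      	 * from the slave device.
      	 */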
1964  	if (memcpy_channels) {
1965  		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
1966  		if (!m_ddev) {
1967  			dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
1968  			memcpy_channels = NULL;
1969  			goto ch_setup;
1970  		}
1971  		ecc->dma_memcpy = m_ddev;
1972  
1973  		dma_cap_zero(m_ddev->cap_mask);
1974  		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
1975  		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
1976  
1977  		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
1978  		m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
1979  		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
1980  		m_ddev->device_free_chan_resources = edma_free_chan_resources;
1981  		m_ddev->device_issue_pending = edma_issue_pending;
1982  		m_ddev->device_tx_status = edma_tx_status;
1983  		m_ddev->device_config = edma_slave_config;
1984  		m_ddev->device_pause = edma_dma_pause;
1985  		m_ddev->device_resume = edma_dma_resume;
1986  		m_ddev->device_terminate_all = edma_terminate_all;
1987  		m_ddev->device_synchronize = edma_synchronize;
1988  
1989  		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
1990  		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
1991  		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
1992  		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1993  
1994  		m_ddev->dev = ecc->dev;
1995  		INIT_LIST_HEAD(&m_ddev->channels);
1996  	} else if (!ecc->legacy_mode) {
1997  		dev_info(ecc->dev, "memcpy is disabled\n");
1998  	}
1999  
2000  ch_setup:
2001  	for (i = 0; i < ecc->num_channels; i++) {
2002  		struct edma_chan *echan = &ecc->slave_chans[i];
2003  		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
2004  		echan->ecc = ecc;
2005  		echan->vchan.desc_free = edma_desc_free;
2006  
2007  		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
2008  			vchan_init(&echan->vchan, m_ddev);
2009  		else
2010  			vchan_init(&echan->vchan, s_ddev);
2011  
2012  		INIT_LIST_HEAD(&echan->node);
2013  		for (j = 0; j < EDMA_MAX_SLOTS; j++)
2014  			echan->slot[j] = -1;
2015  	}
2016  }
2017  
2018  static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
2019  			      struct edma_cc *ecc)
2020  {
2021  	int i;
2022  	u32 value, cccfg;
2023  	s8 (*queue_priority_map)[2];
2024  
2025  	/* Decode the eDMA3 configuration from CCCFG register */
2026  	cccfg = edma_read(ecc, EDMA_CCCFG);
2027  
2028  	value = GET_NUM_REGN(cccfg);
2029  	ecc->num_region = BIT(value);
2030  
2031  	value = GET_NUM_DMACH(cccfg);
2032  	ecc->num_channels = BIT(value + 1);
2033  
2034  	value = GET_NUM_QDMACH(cccfg);
2035  	ecc->num_qchannels = value * 2;
2036  
2037  	value = GET_NUM_PAENTRY(cccfg);
2038  	ecc->num_slots = BIT(value + 4);
2039  
2040  	value = GET_NUM_EVQUE(cccfg);
2041  	ecc->num_tc = value + 1;
2042  
2043  	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
2044  
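      	/*
      	 * Illustrative decode with hypothetical field values: NUM_DMACH = 5,
      	 * NUM_QDMACH = 4, NUM_PAENTRY = 4, NUM_EVQUE = 2 and NUM_REGN = 2
      	 * yield 64 channels, 8 QDMA channels, 256 PaRAM slots, 3 TCs/queues
      	 * and 4 regions.
      	 */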
2045  	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
2046  	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
2047  	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
2048  	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
2049  	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
2050  	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
2051  	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
2052  
2053  	/* Nothing needs to be done if a queue priority mapping is provided */
2054  	if (pdata->queue_priority_mapping)
2055  		return 0;
2056  
2057  	/*
2058  	 * Configure TC/queue priority as follows:
2059  	 * Q0 - priority 0
2060  	 * Q1 - priority 1
2061  	 * Q2 - priority 2
2062  	 * ...
2063  	 * The meaning of priority numbers: 0 highest priority, 7 lowest
2064  	 * priority. So Q0 is the highest priority queue and the last queue has
2065  	 * the lowest priority.
2066  	 */
2067  	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
2068  					  sizeof(*queue_priority_map), GFP_KERNEL);
2069  	if (!queue_priority_map)
2070  		return -ENOMEM;
2071  
2072  	for (i = 0; i < ecc->num_tc; i++) {
2073  		queue_priority_map[i][0] = i;
2074  		queue_priority_map[i][1] = i;
2075  	}
2076  	queue_priority_map[i][0] = -1;
2077  	queue_priority_map[i][1] = -1;
2078  
2079  	pdata->queue_priority_mapping = queue_priority_map;
2080  	/* Default queue has the lowest priority */
2081  	pdata->default_queue = i - 1;
2082  
2083  	return 0;
2084  }
2085  
2086  #if IS_ENABLED(CONFIG_OF)
2087  static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
2088  			       size_t sz)
2089  {
2090  	const char pname[] = "ti,edma-xbar-event-map";
2091  	struct resource res;
2092  	void __iomem *xbar;
2093  	s16 (*xbar_chans)[2];
2094  	size_t nelm = sz / sizeof(s16);
2095  	u32 shift, offset, mux;
2096  	int ret, i;
2097  
2098  	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
2099  	if (!xbar_chans)
2100  		return -ENOMEM;
2101  
2102  	ret = of_address_to_resource(dev->of_node, 1, &res);
2103  	if (ret)
2104  		return -ENOMEM;
2105  
2106  	xbar = devm_ioremap(dev, res.start, resource_size(&res));
2107  	if (!xbar)
2108  		return -ENOMEM;
2109  
2110  	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
2111  					 nelm);
2112  	if (ret)
2113  		return -EIO;
2114  
2115  	/* Invalidate last entry for the other user of this mess */
2116  	nelm >>= 1;
2117  	xbar_chans[nelm][0] = -1;
2118  	xbar_chans[nelm][1] = -1;
2119  
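      	/*
      	 * Each 32-bit crossbar register packs four 8-bit event mux fields:
      	 * bits [1:0] of the stored register value select the byte lane
      	 * (shift = lane * 8) and the remaining bits the register offset.
      	 */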
2120  	for (i = 0; i < nelm; i++) {
2121  		shift = (xbar_chans[i][1] & 0x03) << 3;
2122  		offset = xbar_chans[i][1] & 0xfffffffc;
2123  		mux = readl(xbar + offset);
2124  		mux &= ~(0xff << shift);
2125  		mux |= xbar_chans[i][0] << shift;
2126  		writel(mux, (xbar + offset));
2127  	}
2128  
2129  	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
2130  	return 0;
2131  }
2132  
2133  static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2134  						     bool legacy_mode)
2135  {
2136  	struct edma_soc_info *info;
2137  	struct property *prop;
2138  	int sz, ret;
2139  
2140  	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
2141  	if (!info)
2142  		return ERR_PTR(-ENOMEM);
2143  
2144  	if (legacy_mode) {
2145  		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
2146  					&sz);
2147  		if (prop) {
2148  			ret = edma_xbar_event_map(dev, info, sz);
2149  			if (ret)
2150  				return ERR_PTR(ret);
2151  		}
2152  		return info;
2153  	}
2154  
2155  	/* Get the list of channels allocated to be used for memcpy */
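      	/*
      	 * Hypothetical binding example:
      	 *	ti,edma-memcpy-channels = <20 21>;
      	 * which is parsed into the -1 terminated list { 20, 21, -1 }.
      	 */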
2156  	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
2157  	if (prop) {
2158  		const char pname[] = "ti,edma-memcpy-channels";
2159  		size_t nelm = sz / sizeof(s32);
2160  		s32 *memcpy_ch;
2161  
2162  		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
2163  					 GFP_KERNEL);
2164  		if (!memcpy_ch)
2165  			return ERR_PTR(-ENOMEM);
2166  
2167  		ret = of_property_read_u32_array(dev->of_node, pname,
2168  						 (u32 *)memcpy_ch, nelm);
2169  		if (ret)
2170  			return ERR_PTR(ret);
2171  
2172  		memcpy_ch[nelm] = -1;
2173  		info->memcpy_channels = memcpy_ch;
2174  	}
2175  
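      	/*
      	 * Hypothetical binding example, reserving slots 0-1 and 27-29 as
      	 * <offset length> pairs:
      	 *	ti,edma-reserved-slot-ranges = <0 2>, <27 3>;
      	 */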
2176  	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
2177  				&sz);
2178  	if (prop) {
2179  		const char pname[] = "ti,edma-reserved-slot-ranges";
2180  		u32 (*tmp)[2];
2181  		s16 (*rsv_slots)[2];
2182  		size_t nelm = sz / sizeof(*tmp);
2183  		struct edma_rsv_info *rsv_info;
2184  		int i;
2185  
2186  		if (!nelm)
2187  			return info;
2188  
2189  		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
2190  		if (!tmp)
2191  			return ERR_PTR(-ENOMEM);
2192  
2193  		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
2194  		if (!rsv_info) {
2195  			kfree(tmp);
2196  			return ERR_PTR(-ENOMEM);
2197  		}
2198  
2199  		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
2200  					 GFP_KERNEL);
2201  		if (!rsv_slots) {
2202  			kfree(tmp);
2203  			return ERR_PTR(-ENOMEM);
2204  		}
2205  
2206  		ret = of_property_read_u32_array(dev->of_node, pname,
2207  						 (u32 *)tmp, nelm * 2);
2208  		if (ret) {
2209  			kfree(tmp);
2210  			return ERR_PTR(ret);
2211  		}
2212  
2213  		for (i = 0; i < nelm; i++) {
2214  			rsv_slots[i][0] = tmp[i][0];
2215  			rsv_slots[i][1] = tmp[i][1];
2216  		}
2217  		rsv_slots[nelm][0] = -1;
2218  		rsv_slots[nelm][1] = -1;
2219  
2220  		info->rsv = rsv_info;
2221  		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
2222  
2223  		kfree(tmp);
2224  	}
2225  
2226  	return info;
2227  }
2228  
2229  static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
2230  				      struct of_dma *ofdma)
2231  {
2232  	struct edma_cc *ecc = ofdma->of_dma_data;
2233  	struct dma_chan *chan = NULL;
2234  	struct edma_chan *echan;
2235  	int i;
2236  
2237  	if (!ecc || dma_spec->args_count < 1)
2238  		return NULL;
2239  
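      	/*
      	 * Legacy binding: a single cell holding the channel/event number.
      	 * TPCC binding: two cells, the second selecting the TC to use.
      	 */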
2240  	for (i = 0; i < ecc->num_channels; i++) {
2241  		echan = &ecc->slave_chans[i];
2242  		if (echan->ch_num == dma_spec->args[0]) {
2243  			chan = &echan->vchan.chan;
2244  			break;
2245  		}
2246  	}
2247  
2248  	if (!chan)
2249  		return NULL;
2250  
2251  	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
2252  		goto out;
2253  
2254  	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
2255  	    dma_spec->args[1] < echan->ecc->num_tc) {
2256  		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
2257  		goto out;
2258  	}
2259  
2260  	return NULL;
2261  out:
2262  	/* The channel is going to be HW synchronized (HW triggered) */
2263  	echan->hw_triggered = true;
2264  	return dma_get_slave_channel(chan);
2265  }
2266  #else
2267  static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2268  						     bool legacy_mode)
2269  {
2270  	return ERR_PTR(-EINVAL);
2271  }
2272  
2273  static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
2274  				      struct of_dma *ofdma)
2275  {
2276  	return NULL;
2277  }
2278  #endif
2279  
2280  static bool edma_filter_fn(struct dma_chan *chan, void *param);
2281  
2282  static int edma_probe(struct platform_device *pdev)
2283  {
2284  	struct edma_soc_info	*info = pdev->dev.platform_data;
2285  	s8			(*queue_priority_mapping)[2];
2286  	const s16		(*reserved)[2];
2287  	int			i, irq;
2288  	char			*irq_name;
2289  	struct resource		*mem;
2290  	struct device_node	*node = pdev->dev.of_node;
2291  	struct device		*dev = &pdev->dev;
2292  	struct edma_cc		*ecc;
2293  	bool			legacy_mode = true;
2294  	int ret;
2295  
2296  	if (node) {
2297  		const struct of_device_id *match;
2298  
2299  		match = of_match_node(edma_of_ids, node);
2300  		if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
2301  			legacy_mode = false;
2302  
2303  		info = edma_setup_info_from_dt(dev, legacy_mode);
2304  		if (IS_ERR(info)) {
2305  			dev_err(dev, "failed to get DT data\n");
2306  			return PTR_ERR(info);
2307  		}
2308  	}
2309  
2310  	if (!info)
2311  		return -ENODEV;
2312  
2313  	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2314  	if (ret)
2315  		return ret;
2316  
2317  	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
2318  	if (!ecc)
2319  		return -ENOMEM;
2320  
2321  	ecc->dev = dev;
2322  	ecc->id = pdev->id;
2323  	ecc->legacy_mode = legacy_mode;
2324  	/* When booting with DT the pdev->id is -1 */
2325  	if (ecc->id < 0)
2326  		ecc->id = 0;
2327  
2328  	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
2329  	if (!mem) {
2330  		dev_dbg(dev, "mem resource not found, using index 0\n");
2331  		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2332  		if (!mem) {
2333  			dev_err(dev, "no mem resource?\n");
2334  			return -ENODEV;
2335  		}
2336  	}
2337  	ecc->base = devm_ioremap_resource(dev, mem);
2338  	if (IS_ERR(ecc->base))
2339  		return PTR_ERR(ecc->base);
2340  
2341  	platform_set_drvdata(pdev, ecc);
2342  
2343  	pm_runtime_enable(dev);
2344  	ret = pm_runtime_get_sync(dev);
2345  	if (ret < 0) {
2346  		dev_err(dev, "pm_runtime_get_sync() failed\n");
2347  		pm_runtime_disable(dev);
2348  		return ret;
2349  	}
2350  
2351  	/* Get eDMA3 configuration from IP */
2352  	ret = edma_setup_from_hw(dev, info, ecc);
2353  	if (ret)
2354  		goto err_disable_pm;
2355  
2356  	/* Allocate memory based on the information we got from the IP */
2357  	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
2358  					sizeof(*ecc->slave_chans), GFP_KERNEL);
2359  
2360  	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
2361  				       sizeof(unsigned long), GFP_KERNEL);
2362  
2363  	ecc->channels_mask = devm_kcalloc(dev,
2364  					   BITS_TO_LONGS(ecc->num_channels),
2365  					   sizeof(unsigned long), GFP_KERNEL);
2366  	if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
2367  		ret = -ENOMEM;
2368  		goto err_disable_pm;
2369  	}
2370  
2371  	/* Mark all channels available initially */
2372  	bitmap_fill(ecc->channels_mask, ecc->num_channels);
2373  
2374  	ecc->default_queue = info->default_queue;
2375  
2376  	if (info->rsv) {
2377  		/* Set the reserved slots in inuse list */
2378  		reserved = info->rsv->rsv_slots;
2379  		if (reserved) {
2380  			for (i = 0; reserved[i][0] != -1; i++)
2381  				bitmap_set(ecc->slot_inuse, reserved[i][0],
2382  					   reserved[i][1]);
2383  		}
2384  
2385  		/* Clear channels not usable for Linux */
2386  		reserved = info->rsv->rsv_chans;
2387  		if (reserved) {
2388  			for (i = 0; reserved[i][0] != -1; i++)
2389  				bitmap_clear(ecc->channels_mask, reserved[i][0],
2390  					     reserved[i][1]);
2391  		}
2392  	}
2393  
2394  	for (i = 0; i < ecc->num_slots; i++) {
2395  		/* Reset only unused - not reserved - PaRAM slots */
2396  		if (!test_bit(i, ecc->slot_inuse))
2397  			edma_write_slot(ecc, i, &dummy_paramset);
2398  	}
2399  
2400  	irq = platform_get_irq_byname(pdev, "edma3_ccint");
2401  	if (irq < 0 && node)
2402  		irq = irq_of_parse_and_map(node, 0);
2403  
2404  	if (irq > 0) {
2405  		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
2406  					  dev_name(dev));
2407  		if (!irq_name) {
2408  			ret = -ENOMEM;
2409  			goto err_disable_pm;
2410  		}
2411  
2412  		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
2413  				       ecc);
2414  		if (ret) {
2415  			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
2416  			goto err_disable_pm;
2417  		}
2418  		ecc->ccint = irq;
2419  	}
2420  
2421  	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
2422  	if (irq < 0 && node)
2423  		irq = irq_of_parse_and_map(node, 2);
2424  
2425  	if (irq > 0) {
2426  		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
2427  					  dev_name(dev));
2428  		if (!irq_name) {
2429  			ret = -ENOMEM;
2430  			goto err_disable_pm;
2431  		}
2432  
2433  		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
2434  				       ecc);
2435  		if (ret) {
2436  			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
2437  			goto err_disable_pm;
2438  		}
2439  		ecc->ccerrint = irq;
2440  	}
2441  
2442  	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
2443  	if (ecc->dummy_slot < 0) {
2444  		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
2445  		ret = ecc->dummy_slot;
2446  		goto err_disable_pm;
2447  	}
2448  
2449  	queue_priority_mapping = info->queue_priority_mapping;
2450  
2451  	if (!ecc->legacy_mode) {
2452  		int lowest_priority = 0;
2453  		unsigned int array_max;
2454  		struct of_phandle_args tc_args;
2455  
2456  		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
2457  					    sizeof(*ecc->tc_list), GFP_KERNEL);
2458  		if (!ecc->tc_list) {
2459  			ret = -ENOMEM;
2460  			goto err_reg1;
2461  		}
2462  
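      		/*
      		 * Each "ti,tptcs" entry is a <phandle priority> pair; the TC
      		 * carrying the numerically highest priority value (i.e. the
      		 * lowest actual priority) becomes the default queue.
      		 */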
2463  		for (i = 0;; i++) {
2464  			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
2465  							       1, i, &tc_args);
2466  			if (ret || i == ecc->num_tc)
2467  				break;
2468  
2469  			ecc->tc_list[i].node = tc_args.np;
2470  			ecc->tc_list[i].id = i;
2471  			queue_priority_mapping[i][1] = tc_args.args[0];
2472  			if (queue_priority_mapping[i][1] > lowest_priority) {
2473  				lowest_priority = queue_priority_mapping[i][1];
2474  				info->default_queue = i;
2475  			}
2476  		}
2477  
2478  		/* See if we have optional dma-channel-mask array */
2479  		array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
2480  		ret = of_property_read_variable_u32_array(node,
2481  						"dma-channel-mask",
2482  						(u32 *)ecc->channels_mask,
2483  						1, array_max);
2484  		if (ret > 0 && ret != array_max)
2485  			dev_warn(dev, "dma-channel-mask is not complete.\n");
2486  		else if (ret == -EOVERFLOW || ret == -ENODATA)
2487  			dev_warn(dev,
2488  				 "dma-channel-mask is out of range or empty\n");
2489  	}
2490  
2491  	/* Event queue priority mapping */
2492  	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2493  		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2494  					      queue_priority_mapping[i][1]);
2495  
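      	/*
      	 * Start with no events routed to shadow region 0; the DRAE bits are
      	 * set per channel as channels get allocated.
      	 */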
2496  	edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
2497  	edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
2498  	edma_write_array(ecc, EDMA_QRAE, 0, 0x0);
2499  
2500  	ecc->info = info;
2501  
2502  	/* Init the dma device and channels */
2503  	edma_dma_init(ecc, legacy_mode);
2504  
2505  	for (i = 0; i < ecc->num_channels; i++) {
2506  		/* Do not touch reserved channels */
2507  		if (!test_bit(i, ecc->channels_mask))
2508  			continue;
2509  
2510  		/* Assign all channels to the default queue */
2511  		edma_assign_channel_eventq(&ecc->slave_chans[i],
2512  					   info->default_queue);
2513  		/* Set entry slot to the dummy slot */
2514  		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
2515  	}
2516  
2517  	ecc->dma_slave.filter.map = info->slave_map;
2518  	ecc->dma_slave.filter.mapcnt = info->slavecnt;
2519  	ecc->dma_slave.filter.fn = edma_filter_fn;
2520  
2521  	ret = dma_async_device_register(&ecc->dma_slave);
2522  	if (ret) {
2523  		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
2524  		goto err_reg1;
2525  	}
2526  
2527  	if (ecc->dma_memcpy) {
2528  		ret = dma_async_device_register(ecc->dma_memcpy);
2529  		if (ret) {
2530  			dev_err(dev, "memcpy ddev registration failed (%d)\n",
2531  				ret);
2532  			dma_async_device_unregister(&ecc->dma_slave);
2533  			goto err_reg1;
2534  		}
2535  	}
2536  
2537  	if (node)
2538  		of_dma_controller_register(node, of_edma_xlate, ecc);
2539  
2540  	dev_info(dev, "TI EDMA DMA engine driver\n");
2541  
2542  	return 0;
2543  
2544  err_reg1:
2545  	edma_free_slot(ecc, ecc->dummy_slot);
2546  err_disable_pm:
2547  	pm_runtime_put_sync(dev);
2548  	pm_runtime_disable(dev);
2549  	return ret;
2550  }
2551  
2552  static void edma_cleanup_vchan(struct dma_device *dmadev)
2553  {
2554  	struct edma_chan *echan, *_echan;
2555  
2556  	list_for_each_entry_safe(echan, _echan,
2557  			&dmadev->channels, vchan.chan.device_node) {
2558  		list_del(&echan->vchan.chan.device_node);
2559  		tasklet_kill(&echan->vchan.task);
2560  	}
2561  }
2562  
2563  static int edma_remove(struct platform_device *pdev)
2564  {
2565  	struct device *dev = &pdev->dev;
2566  	struct edma_cc *ecc = dev_get_drvdata(dev);
2567  
2568  	devm_free_irq(dev, ecc->ccint, ecc);
2569  	devm_free_irq(dev, ecc->ccerrint, ecc);
2570  
2571  	edma_cleanup_vchan(&ecc->dma_slave);
2572  
2573  	if (dev->of_node)
2574  		of_dma_controller_free(dev->of_node);
2575  	dma_async_device_unregister(&ecc->dma_slave);
2576  	if (ecc->dma_memcpy)
2577  		dma_async_device_unregister(ecc->dma_memcpy);
2578  	edma_free_slot(ecc, ecc->dummy_slot);
2579  	pm_runtime_put_sync(dev);
2580  	pm_runtime_disable(dev);
2581  
2582  	return 0;
2583  }
2584  
2585  #ifdef CONFIG_PM_SLEEP
2586  static int edma_pm_suspend(struct device *dev)
2587  {
2588  	struct edma_cc *ecc = dev_get_drvdata(dev);
2589  	struct edma_chan *echan = ecc->slave_chans;
2590  	int i;
2591  
2592  	for (i = 0; i < ecc->num_channels; i++) {
2593  		if (echan[i].alloced)
2594  			edma_setup_interrupt(&echan[i], false);
2595  	}
2596  
2597  	return 0;
2598  }
2599  
2600  static int edma_pm_resume(struct device *dev)
2601  {
2602  	struct edma_cc *ecc = dev_get_drvdata(dev);
2603  	struct edma_chan *echan = ecc->slave_chans;
2604  	int i;
2605  	s8 (*queue_priority_mapping)[2];
2606  
2607  	/* Re-initialize the dummy slot to the dummy PaRAM set */
2608  	edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
2609  
2610  	queue_priority_mapping = ecc->info->queue_priority_mapping;
2611  
2612  	/* Event queue priority mapping */
2613  	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2614  		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2615  					      queue_priority_mapping[i][1]);
2616  
2617  	for (i = 0; i < ecc->num_channels; i++) {
2618  		if (echan[i].alloced) {
2619  			/* ensure access through shadow region 0 */
2620  			edma_or_array2(ecc, EDMA_DRAE, 0,
2621  				       EDMA_REG_ARRAY_INDEX(i),
2622  				       EDMA_CHANNEL_BIT(i));
2623  
2624  			edma_setup_interrupt(&echan[i], true);
2625  
2626  			/* Set up channel -> slot mapping for the entry slot */
2627  			edma_set_chmap(&echan[i], echan[i].slot[0]);
2628  		}
2629  	}
2630  
2631  	return 0;
2632  }
2633  #endif
2634  
2635  static const struct dev_pm_ops edma_pm_ops = {
2636  	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
2637  };
2638  
2639  static struct platform_driver edma_driver = {
2640  	.probe		= edma_probe,
2641  	.remove		= edma_remove,
2642  	.driver = {
2643  		.name	= "edma",
2644  		.pm	= &edma_pm_ops,
2645  		.of_match_table = edma_of_ids,
2646  	},
2647  };
2648  
2649  static int edma_tptc_probe(struct platform_device *pdev)
2650  {
2651  	pm_runtime_enable(&pdev->dev);
2652  	return pm_runtime_get_sync(&pdev->dev);
2653  }
2654  
2655  static struct platform_driver edma_tptc_driver = {
2656  	.probe		= edma_tptc_probe,
2657  	.driver = {
2658  		.name	= "edma3-tptc",
2659  		.of_match_table = edma_tptc_of_ids,
2660  	},
2661  };
2662  
2663  static bool edma_filter_fn(struct dma_chan *chan, void *param)
2664  {
2665  	bool match = false;
2666  
2667  	if (chan->device->dev->driver == &edma_driver.driver) {
2668  		struct edma_chan *echan = to_edma_chan(chan);
2669  		unsigned ch_req = *(unsigned *)param;
2670  		if (ch_req == echan->ch_num) {
2671  			/* The channel is going to be HW synchronized (HW triggered) */
2672  			echan->hw_triggered = true;
2673  			match = true;
2674  		}
2675  	}
2676  	return match;
2677  }
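      /*
       * Hypothetical client usage of the filter (channel number made up):
       *
       *	struct dma_chan *chan;
       *	dma_cap_mask_t mask;
       *	unsigned int ch = 20;
       *
       *	dma_cap_zero(mask);
       *	dma_cap_set(DMA_SLAVE, mask);
       *	chan = dma_request_channel(mask, edma_filter_fn, &ch);
       */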
2678  
2679  static int edma_init(void)
2680  {
2681  	int ret;
2682  
2683  	ret = platform_driver_register(&edma_tptc_driver);
2684  	if (ret)
2685  		return ret;
2686  
2687  	return platform_driver_register(&edma_driver);
2688  }
2689  subsys_initcall(edma_init);
2690  
2691  static void __exit edma_exit(void)
2692  {
2693  	platform_driver_unregister(&edma_driver);
2694  	platform_driver_unregister(&edma_tptc_driver);
2695  }
2696  module_exit(edma_exit);
2697  
2698  MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
2699  MODULE_DESCRIPTION("TI EDMA DMA engine driver");
2700  MODULE_LICENSE("GPL v2");
2701