xref: /openbmc/linux/drivers/dma/ti/k3-udma.c (revision 6548d543)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4  *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/delay.h>
10 #include <linux/dmaengine.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmapool.h>
13 #include <linux/err.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/list.h>
17 #include <linux/platform_device.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/sys_soc.h>
21 #include <linux/of.h>
22 #include <linux/of_dma.h>
23 #include <linux/of_irq.h>
24 #include <linux/workqueue.h>
25 #include <linux/completion.h>
26 #include <linux/soc/ti/k3-ringacc.h>
27 #include <linux/soc/ti/ti_sci_protocol.h>
28 #include <linux/soc/ti/ti_sci_inta_msi.h>
29 #include <linux/dma/k3-event-router.h>
30 #include <linux/dma/ti-cppi5.h>
31 
32 #include "../virt-dma.h"
33 #include "k3-udma.h"
34 #include "k3-psil-priv.h"
35 
36 struct udma_static_tr {
37 	u8 elsize; /* RPSTR0 */
38 	u16 elcnt; /* RPSTR0 */
39 	u16 bstcnt; /* RPSTR1 */
40 };
41 
42 #define K3_UDMA_MAX_RFLOWS		1024
43 #define K3_UDMA_DEFAULT_RING_SIZE	16
44 
45 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
46 #define UDMA_RFLOW_SRCTAG_NONE		0
47 #define UDMA_RFLOW_SRCTAG_CFG_TAG	1
48 #define UDMA_RFLOW_SRCTAG_FLOW_ID	2
49 #define UDMA_RFLOW_SRCTAG_SRC_TAG	4
50 
51 #define UDMA_RFLOW_DSTTAG_NONE		0
52 #define UDMA_RFLOW_DSTTAG_CFG_TAG	1
53 #define UDMA_RFLOW_DSTTAG_FLOW_ID	2
54 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
55 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5
56 
57 struct udma_chan;
58 
59 enum k3_dma_type {
60 	DMA_TYPE_UDMA = 0,
61 	DMA_TYPE_BCDMA,
62 	DMA_TYPE_PKTDMA,
63 };
64 
65 enum udma_mmr {
66 	MMR_GCFG = 0,
67 	MMR_BCHANRT,
68 	MMR_RCHANRT,
69 	MMR_TCHANRT,
70 	MMR_LAST,
71 };
72 
73 static const char * const mmr_names[] = {
74 	[MMR_GCFG] = "gcfg",
75 	[MMR_BCHANRT] = "bchanrt",
76 	[MMR_RCHANRT] = "rchanrt",
77 	[MMR_TCHANRT] = "tchanrt",
78 };
79 
80 struct udma_tchan {
81 	void __iomem *reg_rt;
82 
83 	int id;
84 	struct k3_ring *t_ring; /* Transmit ring */
85 	struct k3_ring *tc_ring; /* Transmit Completion ring */
86 	int tflow_id; /* applicable only for PKTDMA */
87 
88 };
89 
90 #define udma_bchan udma_tchan
91 
92 struct udma_rflow {
93 	int id;
94 	struct k3_ring *fd_ring; /* Free Descriptor ring */
95 	struct k3_ring *r_ring; /* Receive ring */
96 };
97 
98 struct udma_rchan {
99 	void __iomem *reg_rt;
100 
101 	int id;
102 };
103 
104 struct udma_oes_offsets {
105 	/* K3 UDMA Output Event Offset */
106 	u32 udma_rchan;
107 
108 	/* BCDMA Output Event Offsets */
109 	u32 bcdma_bchan_data;
110 	u32 bcdma_bchan_ring;
111 	u32 bcdma_tchan_data;
112 	u32 bcdma_tchan_ring;
113 	u32 bcdma_rchan_data;
114 	u32 bcdma_rchan_ring;
115 
116 	/* PKTDMA Output Event Offsets */
117 	u32 pktdma_tchan_flow;
118 	u32 pktdma_rchan_flow;
119 };
120 
121 #define UDMA_FLAG_PDMA_ACC32		BIT(0)
122 #define UDMA_FLAG_PDMA_BURST		BIT(1)
123 #define UDMA_FLAG_TDTYPE		BIT(2)
124 #define UDMA_FLAG_BURST_SIZE		BIT(3)
125 #define UDMA_FLAGS_J7_CLASS		(UDMA_FLAG_PDMA_ACC32 | \
126 					 UDMA_FLAG_PDMA_BURST | \
127 					 UDMA_FLAG_TDTYPE | \
128 					 UDMA_FLAG_BURST_SIZE)
129 
130 struct udma_match_data {
131 	enum k3_dma_type type;
132 	u32 psil_base;
133 	bool enable_memcpy_support;
134 	u32 flags;
135 	u32 statictr_z_mask;
136 	u8 burst_size[3];
137 	struct udma_soc_data *soc_data;
138 };
139 
140 struct udma_soc_data {
141 	struct udma_oes_offsets oes;
142 	u32 bcdma_trigger_event_offset;
143 };
144 
145 struct udma_hwdesc {
146 	size_t cppi5_desc_size;
147 	void *cppi5_desc_vaddr;
148 	dma_addr_t cppi5_desc_paddr;
149 
150 	/* TR descriptor internal pointers */
151 	void *tr_req_base;
152 	struct cppi5_tr_resp_t *tr_resp_base;
153 };
154 
155 struct udma_rx_flush {
156 	struct udma_hwdesc hwdescs[2];
157 
158 	size_t buffer_size;
159 	void *buffer_vaddr;
160 	dma_addr_t buffer_paddr;
161 };
162 
163 struct udma_tpl {
164 	u8 levels;
165 	u32 start_idx[3];
166 };
167 
168 struct udma_dev {
169 	struct dma_device ddev;
170 	struct device *dev;
171 	void __iomem *mmrs[MMR_LAST];
172 	const struct udma_match_data *match_data;
173 	const struct udma_soc_data *soc_data;
174 
175 	struct udma_tpl bchan_tpl;
176 	struct udma_tpl tchan_tpl;
177 	struct udma_tpl rchan_tpl;
178 
179 	size_t desc_align; /* alignment to use for descriptors */
180 
181 	struct udma_tisci_rm tisci_rm;
182 
183 	struct k3_ringacc *ringacc;
184 
185 	struct work_struct purge_work;
186 	struct list_head desc_to_purge;
187 	spinlock_t lock;
188 
189 	struct udma_rx_flush rx_flush;
190 
191 	int bchan_cnt;
192 	int tchan_cnt;
193 	int echan_cnt;
194 	int rchan_cnt;
195 	int rflow_cnt;
196 	int tflow_cnt;
197 	unsigned long *bchan_map;
198 	unsigned long *tchan_map;
199 	unsigned long *rchan_map;
200 	unsigned long *rflow_gp_map;
201 	unsigned long *rflow_gp_map_allocated;
202 	unsigned long *rflow_in_use;
203 	unsigned long *tflow_map;
204 
205 	struct udma_bchan *bchans;
206 	struct udma_tchan *tchans;
207 	struct udma_rchan *rchans;
208 	struct udma_rflow *rflows;
209 
210 	struct udma_chan *channels;
211 	u32 psil_base;
212 	u32 atype;
213 	u32 asel;
214 };
215 
216 struct udma_desc {
217 	struct virt_dma_desc vd;
218 
219 	bool terminated;
220 
221 	enum dma_transfer_direction dir;
222 
223 	struct udma_static_tr static_tr;
224 	u32 residue;
225 
226 	unsigned int sglen;
227 	unsigned int desc_idx; /* Only used for cyclic in packet mode */
228 	unsigned int tr_idx;
229 
230 	u32 metadata_size;
231 	void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
232 
233 	unsigned int hwdesc_count;
234 	struct udma_hwdesc hwdesc[];
235 };
236 
237 enum udma_chan_state {
238 	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
239 	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
240 	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
241 };
242 
243 struct udma_tx_drain {
244 	struct delayed_work work;
245 	ktime_t tstamp;
246 	u32 residue;
247 };
248 
249 struct udma_chan_config {
250 	bool pkt_mode; /* TR or packet */
251 	bool needs_epib; /* whether EPIB is needed for the communication */
252 	u32 psd_size; /* size of Protocol Specific Data */
253 	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
254 	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
255 	bool notdpkt; /* Suppress sending TDC packet */
256 	int remote_thread_id;
257 	u32 atype;
258 	u32 asel;
259 	u32 src_thread;
260 	u32 dst_thread;
261 	enum psil_endpoint_type ep_type;
262 	bool enable_acc32;
263 	bool enable_burst;
264 	enum udma_tp_level channel_tpl; /* Channel Throughput Level */
265 
266 	u32 tr_trigger_type;
267 	unsigned long tx_flags;
268 
269 	/* PKTDMA mapped channel */
270 	int mapped_channel_id;
271 	/* PKTDMA default tflow or rflow for mapped channel */
272 	int default_flow_id;
273 
274 	enum dma_transfer_direction dir;
275 };
276 
277 struct udma_chan {
278 	struct virt_dma_chan vc;
279 	struct dma_slave_config	cfg;
280 	struct udma_dev *ud;
281 	struct device *dma_dev;
282 	struct udma_desc *desc;
283 	struct udma_desc *terminated_desc;
284 	struct udma_static_tr static_tr;
285 	char *name;
286 
287 	struct udma_bchan *bchan;
288 	struct udma_tchan *tchan;
289 	struct udma_rchan *rchan;
290 	struct udma_rflow *rflow;
291 
292 	bool psil_paired;
293 
294 	int irq_num_ring;
295 	int irq_num_udma;
296 
297 	bool cyclic;
298 	bool paused;
299 
300 	enum udma_chan_state state;
301 	struct completion teardown_completed;
302 
303 	struct udma_tx_drain tx_drain;
304 
305 	/* Channel configuration parameters */
306 	struct udma_chan_config config;
307 	/* Channel configuration parameters (backup) */
308 	struct udma_chan_config backup_config;
309 
310 	/* dmapool for packet mode descriptors */
311 	bool use_dma_pool;
312 	struct dma_pool *hdesc_pool;
313 
314 	u32 id;
315 };
316 
317 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
318 {
319 	return container_of(d, struct udma_dev, ddev);
320 }
321 
322 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
323 {
324 	return container_of(c, struct udma_chan, vc.chan);
325 }
326 
327 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
328 {
329 	return container_of(t, struct udma_desc, vd.tx);
330 }
331 
332 /* Generic register access functions */
333 static inline u32 udma_read(void __iomem *base, int reg)
334 {
335 	return readl(base + reg);
336 }
337 
338 static inline void udma_write(void __iomem *base, int reg, u32 val)
339 {
340 	writel(val, base + reg);
341 }
342 
343 static inline void udma_update_bits(void __iomem *base, int reg,
344 				    u32 mask, u32 val)
345 {
346 	u32 tmp, orig;
347 
348 	orig = readl(base + reg);
349 	tmp = orig & ~mask;
350 	tmp |= (val & mask);
351 
352 	if (tmp != orig)
353 		writel(tmp, base + reg);
354 }
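
/*
 * Illustrative usage sketch (not part of the original source): the helper
 * above is a read-modify-write that only writes back when the value changes.
 * Assuming a hypothetical 2-bit field at bits [1:0] of some register:
 *
 *	udma_update_bits(base, REG_OFFSET, GENMASK(1, 0), 0x2);
 *
 * reads the register, clears bits [1:0], ORs in 0x2 and performs the write
 * only if the result differs from the original value.
 */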
355 
356 /* TCHANRT */
357 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
358 {
359 	if (!uc->tchan)
360 		return 0;
361 	return udma_read(uc->tchan->reg_rt, reg);
362 }
363 
364 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
365 {
366 	if (!uc->tchan)
367 		return;
368 	udma_write(uc->tchan->reg_rt, reg, val);
369 }
370 
371 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
372 					    u32 mask, u32 val)
373 {
374 	if (!uc->tchan)
375 		return;
376 	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
377 }
378 
379 /* RCHANRT */
380 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
381 {
382 	if (!uc->rchan)
383 		return 0;
384 	return udma_read(uc->rchan->reg_rt, reg);
385 }
386 
387 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
388 {
389 	if (!uc->rchan)
390 		return;
391 	udma_write(uc->rchan->reg_rt, reg, val);
392 }
393 
394 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
395 					    u32 mask, u32 val)
396 {
397 	if (!uc->rchan)
398 		return;
399 	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
400 }
401 
402 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
403 {
404 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
405 
406 	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
407 	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
408 					      tisci_rm->tisci_navss_dev_id,
409 					      src_thread, dst_thread);
410 }
411 
412 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
413 			     u32 dst_thread)
414 {
415 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
416 
417 	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
418 	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
419 						tisci_rm->tisci_navss_dev_id,
420 						src_thread, dst_thread);
421 }
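
/*
 * Usage sketch (illustrative): a PSI-L link is identified by a source and a
 * destination thread ID; the destination side is marked by ORing in
 * K3_PSIL_DST_THREAD_ID_OFFSET before the TI-SCI request, as done above.
 * The channel setup code later in this file pairs the endpoints roughly as:
 *
 *	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
 *
 * and undoes it with navss_psil_unpair() on teardown.
 */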
422 
423 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
424 {
425 	struct device *chan_dev = &chan->dev->device;
426 
427 	if (asel == 0) {
428 		/* No special handling for the channel */
429 		chan->dev->chan_dma_dev = false;
430 
431 		chan_dev->dma_coherent = false;
432 		chan_dev->dma_parms = NULL;
433 	} else if (asel == 14 || asel == 15) {
434 		chan->dev->chan_dma_dev = true;
435 
436 		chan_dev->dma_coherent = true;
437 		dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
438 		chan_dev->dma_parms = chan_dev->parent->dma_parms;
439 	} else {
440 		dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
441 
442 		chan_dev->dma_coherent = false;
443 		chan_dev->dma_parms = NULL;
444 	}
445 }
446 
447 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
448 {
449 	int i;
450 
451 	for (i = 0; i < tpl_map->levels; i++) {
452 		if (chan_id >= tpl_map->start_idx[i])
453 			return i;
454 	}
455 
456 	return 0;
457 }
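
/*
 * Illustrative example (assumed values): the lookup above returns the first
 * level whose start index the channel id reaches. With levels = 3 and
 * start_idx = { 8, 2, 0 } (normal channels from 8, high throughput from 2,
 * ultra-high from 0), channel 10 maps to level 0, channel 4 to level 1 and
 * channel 1 to level 2.
 */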
458 
459 static void udma_reset_uchan(struct udma_chan *uc)
460 {
461 	memset(&uc->config, 0, sizeof(uc->config));
462 	uc->config.remote_thread_id = -1;
463 	uc->config.mapped_channel_id = -1;
464 	uc->config.default_flow_id = -1;
465 	uc->state = UDMA_CHAN_IS_IDLE;
466 }
467 
468 static void udma_dump_chan_stdata(struct udma_chan *uc)
469 {
470 	struct device *dev = uc->ud->dev;
471 	u32 offset;
472 	int i;
473 
474 	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
475 		dev_dbg(dev, "TCHAN State data:\n");
476 		for (i = 0; i < 32; i++) {
477 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
478 			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
479 				udma_tchanrt_read(uc, offset));
480 		}
481 	}
482 
483 	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
484 		dev_dbg(dev, "RCHAN State data:\n");
485 		for (i = 0; i < 32; i++) {
486 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
487 			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
488 				udma_rchanrt_read(uc, offset));
489 		}
490 	}
491 }
492 
493 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
494 						    int idx)
495 {
496 	return d->hwdesc[idx].cppi5_desc_paddr;
497 }
498 
499 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
500 {
501 	return d->hwdesc[idx].cppi5_desc_vaddr;
502 }
503 
504 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
505 						   dma_addr_t paddr)
506 {
507 	struct udma_desc *d = uc->terminated_desc;
508 
509 	if (d) {
510 		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
511 								   d->desc_idx);
512 
513 		if (desc_paddr != paddr)
514 			d = NULL;
515 	}
516 
517 	if (!d) {
518 		d = uc->desc;
519 		if (d) {
520 			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
521 								d->desc_idx);
522 
523 			if (desc_paddr != paddr)
524 				d = NULL;
525 		}
526 	}
527 
528 	return d;
529 }
530 
531 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
532 {
533 	if (uc->use_dma_pool) {
534 		int i;
535 
536 		for (i = 0; i < d->hwdesc_count; i++) {
537 			if (!d->hwdesc[i].cppi5_desc_vaddr)
538 				continue;
539 
540 			dma_pool_free(uc->hdesc_pool,
541 				      d->hwdesc[i].cppi5_desc_vaddr,
542 				      d->hwdesc[i].cppi5_desc_paddr);
543 
544 			d->hwdesc[i].cppi5_desc_vaddr = NULL;
545 		}
546 	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
547 		dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
548 				  d->hwdesc[0].cppi5_desc_vaddr,
549 				  d->hwdesc[0].cppi5_desc_paddr);
550 
551 		d->hwdesc[0].cppi5_desc_vaddr = NULL;
552 	}
553 }
554 
555 static void udma_purge_desc_work(struct work_struct *work)
556 {
557 	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
558 	struct virt_dma_desc *vd, *_vd;
559 	unsigned long flags;
560 	LIST_HEAD(head);
561 
562 	spin_lock_irqsave(&ud->lock, flags);
563 	list_splice_tail_init(&ud->desc_to_purge, &head);
564 	spin_unlock_irqrestore(&ud->lock, flags);
565 
566 	list_for_each_entry_safe(vd, _vd, &head, node) {
567 		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
568 		struct udma_desc *d = to_udma_desc(&vd->tx);
569 
570 		udma_free_hwdesc(uc, d);
571 		list_del(&vd->node);
572 		kfree(d);
573 	}
574 
575 	/* If more to purge, schedule the work again */
576 	if (!list_empty(&ud->desc_to_purge))
577 		schedule_work(&ud->purge_work);
578 }
579 
580 static void udma_desc_free(struct virt_dma_desc *vd)
581 {
582 	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
583 	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
584 	struct udma_desc *d = to_udma_desc(&vd->tx);
585 	unsigned long flags;
586 
587 	if (uc->terminated_desc == d)
588 		uc->terminated_desc = NULL;
589 
590 	if (uc->use_dma_pool) {
591 		udma_free_hwdesc(uc, d);
592 		kfree(d);
593 		return;
594 	}
595 
596 	spin_lock_irqsave(&ud->lock, flags);
597 	list_add_tail(&vd->node, &ud->desc_to_purge);
598 	spin_unlock_irqrestore(&ud->lock, flags);
599 
600 	schedule_work(&ud->purge_work);
601 }
602 
603 static bool udma_is_chan_running(struct udma_chan *uc)
604 {
605 	u32 trt_ctl = 0;
606 	u32 rrt_ctl = 0;
607 
608 	if (uc->tchan)
609 		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
610 	if (uc->rchan)
611 		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
612 
613 	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
614 		return true;
615 
616 	return false;
617 }
618 
619 static bool udma_is_chan_paused(struct udma_chan *uc)
620 {
621 	u32 val, pause_mask;
622 
623 	switch (uc->config.dir) {
624 	case DMA_DEV_TO_MEM:
625 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
626 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
627 		break;
628 	case DMA_MEM_TO_DEV:
629 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
630 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
631 		break;
632 	case DMA_MEM_TO_MEM:
633 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
634 		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
635 		break;
636 	default:
637 		return false;
638 	}
639 
640 	if (val & pause_mask)
641 		return true;
642 
643 	return false;
644 }
645 
646 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
647 {
648 	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
649 }
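
/*
 * Note (derived from the indexing above): rx_flush.hwdescs[] holds two
 * pre-built flush descriptors, index 0 for TR mode and index 1 for packet
 * mode channels, selected by uc->config.pkt_mode.
 */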
650 
651 static int udma_push_to_ring(struct udma_chan *uc, int idx)
652 {
653 	struct udma_desc *d = uc->desc;
654 	struct k3_ring *ring = NULL;
655 	dma_addr_t paddr;
656 
657 	switch (uc->config.dir) {
658 	case DMA_DEV_TO_MEM:
659 		ring = uc->rflow->fd_ring;
660 		break;
661 	case DMA_MEM_TO_DEV:
662 	case DMA_MEM_TO_MEM:
663 		ring = uc->tchan->t_ring;
664 		break;
665 	default:
666 		return -EINVAL;
667 	}
668 
669 	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
670 	if (idx == -1) {
671 		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
672 	} else {
673 		paddr = udma_curr_cppi5_desc_paddr(d, idx);
674 
675 		wmb(); /* Ensure that writes are not moved over this point */
676 	}
677 
678 	return k3_ringacc_ring_push(ring, &paddr);
679 }
680 
681 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
682 {
683 	if (uc->config.dir != DMA_DEV_TO_MEM)
684 		return false;
685 
686 	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
687 		return true;
688 
689 	return false;
690 }
691 
692 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
693 {
694 	struct k3_ring *ring = NULL;
695 	int ret;
696 
697 	switch (uc->config.dir) {
698 	case DMA_DEV_TO_MEM:
699 		ring = uc->rflow->r_ring;
700 		break;
701 	case DMA_MEM_TO_DEV:
702 	case DMA_MEM_TO_MEM:
703 		ring = uc->tchan->tc_ring;
704 		break;
705 	default:
706 		return -ENOENT;
707 	}
708 
709 	ret = k3_ringacc_ring_pop(ring, addr);
710 	if (ret)
711 		return ret;
712 
713 	rmb(); /* Ensure that reads are not moved before this point */
714 
715 	/* Teardown completion */
716 	if (cppi5_desc_is_tdcm(*addr))
717 		return 0;
718 
719 	/* Check for flush descriptor */
720 	if (udma_desc_is_rx_flush(uc, *addr))
721 		return -ENOENT;
722 
723 	return 0;
724 }
725 
726 static void udma_reset_rings(struct udma_chan *uc)
727 {
728 	struct k3_ring *ring1 = NULL;
729 	struct k3_ring *ring2 = NULL;
730 
731 	switch (uc->config.dir) {
732 	case DMA_DEV_TO_MEM:
733 		if (uc->rchan) {
734 			ring1 = uc->rflow->fd_ring;
735 			ring2 = uc->rflow->r_ring;
736 		}
737 		break;
738 	case DMA_MEM_TO_DEV:
739 	case DMA_MEM_TO_MEM:
740 		if (uc->tchan) {
741 			ring1 = uc->tchan->t_ring;
742 			ring2 = uc->tchan->tc_ring;
743 		}
744 		break;
745 	default:
746 		break;
747 	}
748 
749 	if (ring1)
750 		k3_ringacc_ring_reset_dma(ring1,
751 					  k3_ringacc_ring_get_occ(ring1));
752 	if (ring2)
753 		k3_ringacc_ring_reset(ring2);
754 
755 	/* make sure we are not leaking memory due to a stalled descriptor */
756 	if (uc->terminated_desc) {
757 		udma_desc_free(&uc->terminated_desc->vd);
758 		uc->terminated_desc = NULL;
759 	}
760 }
761 
762 static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
763 {
764 	if (uc->desc->dir == DMA_DEV_TO_MEM) {
765 		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
766 		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
767 		if (uc->config.ep_type != PSIL_EP_NATIVE)
768 			udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
769 	} else {
770 		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
771 		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
772 		if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
773 			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
774 	}
775 }
776 
777 static void udma_reset_counters(struct udma_chan *uc)
778 {
779 	u32 val;
780 
781 	if (uc->tchan) {
782 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
783 		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
784 
785 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
786 		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
787 
788 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
789 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
790 
791 		if (!uc->bchan) {
792 			val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
793 			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
794 		}
795 	}
796 
797 	if (uc->rchan) {
798 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
799 		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
800 
801 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
802 		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
803 
804 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
805 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
806 
807 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
808 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
809 	}
810 }
811 
812 static int udma_reset_chan(struct udma_chan *uc, bool hard)
813 {
814 	switch (uc->config.dir) {
815 	case DMA_DEV_TO_MEM:
816 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
817 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
818 		break;
819 	case DMA_MEM_TO_DEV:
820 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
821 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
822 		break;
823 	case DMA_MEM_TO_MEM:
824 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
825 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
826 		break;
827 	default:
828 		return -EINVAL;
829 	}
830 
831 	/* Reset all counters */
832 	udma_reset_counters(uc);
833 
834 	/* Hard reset: free and re-allocate the channel resources to fully reset it */
835 	if (hard) {
836 		struct udma_chan_config ucc_backup;
837 		int ret;
838 
839 		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
840 		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
841 
842 		/* restore the channel configuration */
843 		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
844 		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
845 		if (ret)
846 			return ret;
847 
848 		/*
849 		 * Setting forced teardown after forced reset helps recover
850 		 * the rchan.
851 		 */
852 		if (uc->config.dir == DMA_DEV_TO_MEM)
853 			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
854 					   UDMA_CHAN_RT_CTL_EN |
855 					   UDMA_CHAN_RT_CTL_TDOWN |
856 					   UDMA_CHAN_RT_CTL_FTDOWN);
857 	}
858 	uc->state = UDMA_CHAN_IS_IDLE;
859 
860 	return 0;
861 }
862 
863 static void udma_start_desc(struct udma_chan *uc)
864 {
865 	struct udma_chan_config *ucc = &uc->config;
866 
867 	if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
868 	    (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
869 		int i;
870 
871 		/*
872 		 * UDMA only: push all descriptors to the ring for packet mode
873 		 * cyclic or RX transfers.
874 		 * PKTDMA supports pre-linked descriptors and does not support
875 		 * cyclic mode.
876 		 */
877 		for (i = 0; i < uc->desc->sglen; i++)
878 			udma_push_to_ring(uc, i);
879 	} else {
880 		udma_push_to_ring(uc, 0);
881 	}
882 }
883 
884 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
885 {
886 	/* Only PDMAs have staticTR */
887 	if (uc->config.ep_type == PSIL_EP_NATIVE)
888 		return false;
889 
890 	/* Check if the staticTR configuration has changed for TX */
891 	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
892 		return true;
893 
894 	return false;
895 }
896 
897 static int udma_start(struct udma_chan *uc)
898 {
899 	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
900 
901 	if (!vd) {
902 		uc->desc = NULL;
903 		return -ENOENT;
904 	}
905 
906 	list_del(&vd->node);
907 
908 	uc->desc = to_udma_desc(&vd->tx);
909 
910 	/* Channel is already running and does not need reconfiguration */
911 	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
912 		udma_start_desc(uc);
913 		goto out;
914 	}
915 
916 	/* Make sure that we clear the teardown bit, if it is set */
917 	udma_reset_chan(uc, false);
918 
919 	/* Push descriptors before we start the channel */
920 	udma_start_desc(uc);
921 
922 	switch (uc->desc->dir) {
923 	case DMA_DEV_TO_MEM:
924 		/* Config remote TR */
925 		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
926 			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
927 				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
928 			const struct udma_match_data *match_data =
929 							uc->ud->match_data;
930 
931 			if (uc->config.enable_acc32)
932 				val |= PDMA_STATIC_TR_XY_ACC32;
933 			if (uc->config.enable_burst)
934 				val |= PDMA_STATIC_TR_XY_BURST;
935 
936 			udma_rchanrt_write(uc,
937 					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
938 					   val);
939 
940 			udma_rchanrt_write(uc,
941 				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
942 				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
943 						 match_data->statictr_z_mask));
944 
945 			/* save the current staticTR configuration */
946 			memcpy(&uc->static_tr, &uc->desc->static_tr,
947 			       sizeof(uc->static_tr));
948 		}
949 
950 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
951 				   UDMA_CHAN_RT_CTL_EN);
952 
953 		/* Enable remote */
954 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
955 				   UDMA_PEER_RT_EN_ENABLE);
956 
957 		break;
958 	case DMA_MEM_TO_DEV:
959 		/* Config remote TR */
960 		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
961 			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
962 				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
963 
964 			if (uc->config.enable_acc32)
965 				val |= PDMA_STATIC_TR_XY_ACC32;
966 			if (uc->config.enable_burst)
967 				val |= PDMA_STATIC_TR_XY_BURST;
968 
969 			udma_tchanrt_write(uc,
970 					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
971 					   val);
972 
973 			/* save the current staticTR configuration */
974 			memcpy(&uc->static_tr, &uc->desc->static_tr,
975 			       sizeof(uc->static_tr));
976 		}
977 
978 		/* Enable remote */
979 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
980 				   UDMA_PEER_RT_EN_ENABLE);
981 
982 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
983 				   UDMA_CHAN_RT_CTL_EN);
984 
985 		break;
986 	case DMA_MEM_TO_MEM:
987 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
988 				   UDMA_CHAN_RT_CTL_EN);
989 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
990 				   UDMA_CHAN_RT_CTL_EN);
991 
992 		break;
993 	default:
994 		return -EINVAL;
995 	}
996 
997 	uc->state = UDMA_CHAN_IS_ACTIVE;
998 out:
999 
1000 	return 0;
1001 }
1002 
1003 static int udma_stop(struct udma_chan *uc)
1004 {
1005 	enum udma_chan_state old_state = uc->state;
1006 
1007 	uc->state = UDMA_CHAN_IS_TERMINATING;
1008 	reinit_completion(&uc->teardown_completed);
1009 
1010 	switch (uc->config.dir) {
1011 	case DMA_DEV_TO_MEM:
1012 		if (!uc->cyclic && !uc->desc)
1013 			udma_push_to_ring(uc, -1);
1014 
1015 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1016 				   UDMA_PEER_RT_EN_ENABLE |
1017 				   UDMA_PEER_RT_EN_TEARDOWN);
1018 		break;
1019 	case DMA_MEM_TO_DEV:
1020 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1021 				   UDMA_PEER_RT_EN_ENABLE |
1022 				   UDMA_PEER_RT_EN_FLUSH);
1023 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1024 				   UDMA_CHAN_RT_CTL_EN |
1025 				   UDMA_CHAN_RT_CTL_TDOWN);
1026 		break;
1027 	case DMA_MEM_TO_MEM:
1028 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1029 				   UDMA_CHAN_RT_CTL_EN |
1030 				   UDMA_CHAN_RT_CTL_TDOWN);
1031 		break;
1032 	default:
1033 		uc->state = old_state;
1034 		complete_all(&uc->teardown_completed);
1035 		return -EINVAL;
1036 	}
1037 
1038 	return 0;
1039 }
1040 
1041 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
1042 {
1043 	struct udma_desc *d = uc->desc;
1044 	struct cppi5_host_desc_t *h_desc;
1045 
1046 	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
1047 	cppi5_hdesc_reset_to_original(h_desc);
1048 	udma_push_to_ring(uc, d->desc_idx);
1049 	d->desc_idx = (d->desc_idx + 1) % d->sglen;
1050 }
1051 
1052 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
1053 {
1054 	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
1055 
1056 	memcpy(d->metadata, h_desc->epib, d->metadata_size);
1057 }
1058 
1059 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
1060 {
1061 	u32 peer_bcnt, bcnt;
1062 
1063 	/*
1064 	 * Only TX towards PDMA is affected.
1065 	 * If DMA_PREP_INTERRUPT is not set by the consumer, skip the transfer
1066 	 * completion calculation; the consumer must ensure that there is no
1067 	 * stale data in the DMA fabric in this case.
1068 	 */
1069 	if (uc->config.ep_type == PSIL_EP_NATIVE ||
1070 	    uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
1071 		return true;
1072 
1073 	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
1074 	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
1075 
1076 	/* Transfer is incomplete, store current residue and time stamp */
1077 	if (peer_bcnt < bcnt) {
1078 		uc->tx_drain.residue = bcnt - peer_bcnt;
1079 		uc->tx_drain.tstamp = ktime_get();
1080 		return false;
1081 	}
1082 
1083 	return true;
1084 }
1085 
1086 static void udma_check_tx_completion(struct work_struct *work)
1087 {
1088 	struct udma_chan *uc = container_of(work, typeof(*uc),
1089 					    tx_drain.work.work);
1090 	bool desc_done = true;
1091 	u32 residue_diff;
1092 	ktime_t time_diff;
1093 	unsigned long delay;
1094 
1095 	while (1) {
1096 		if (uc->desc) {
1097 			/* Get previous residue and time stamp */
1098 			residue_diff = uc->tx_drain.residue;
1099 			time_diff = uc->tx_drain.tstamp;
1100 			/*
1101 			 * Get current residue and time stamp or see if
1102 			 * transfer is complete
1103 			 */
1104 			desc_done = udma_is_desc_really_done(uc, uc->desc);
1105 		}
1106 
1107 		if (!desc_done) {
1108 			/*
1109 			 * Find the time delta and residue delta w.r.t
1110 			 * previous poll
1111 			 */
1112 			time_diff = ktime_sub(uc->tx_drain.tstamp,
1113 					      time_diff) + 1;
1114 			residue_diff -= uc->tx_drain.residue;
1115 			if (residue_diff) {
1116 				/*
1117 				 * Try to guess when we should check
1118 				 * next time by calculating the rate at
1119 				 * which data is being drained at the
1120 				 * peer device
1121 				 */
1122 				delay = (time_diff / residue_diff) *
1123 					uc->tx_drain.residue;
1124 			} else {
1125 				/* No progress, check again in 1 second  */
1126 				schedule_delayed_work(&uc->tx_drain.work, HZ);
1127 				break;
1128 			}
1129 
1130 			usleep_range(ktime_to_us(delay),
1131 				     ktime_to_us(delay) + 10);
1132 			continue;
1133 		}
1134 
1135 		if (uc->desc) {
1136 			struct udma_desc *d = uc->desc;
1137 
1138 			udma_decrement_byte_counters(uc, d->residue);
1139 			udma_start(uc);
1140 			vchan_cookie_complete(&d->vd);
1141 			break;
1142 		}
1143 
1144 		break;
1145 	}
1146 }
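
/*
 * Worked example of the delay estimate above (illustrative numbers): if the
 * previous poll recorded 5000 bytes of residue and the current one sees
 * 4000, then residue_diff is 1000 bytes drained over time_diff (say 2 ms).
 * The worker then sleeps for roughly (2 ms / 1000) * 4000 = 8 ms, i.e. about
 * the time the peer needs to drain the remaining data at the observed rate,
 * before polling again.
 */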
1147 
1148 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1149 {
1150 	struct udma_chan *uc = data;
1151 	struct udma_desc *d;
1152 	dma_addr_t paddr = 0;
1153 
1154 	if (udma_pop_from_ring(uc, &paddr) || !paddr)
1155 		return IRQ_HANDLED;
1156 
1157 	spin_lock(&uc->vc.lock);
1158 
1159 	/* Teardown completion message */
1160 	if (cppi5_desc_is_tdcm(paddr)) {
1161 		complete_all(&uc->teardown_completed);
1162 
1163 		if (uc->terminated_desc) {
1164 			udma_desc_free(&uc->terminated_desc->vd);
1165 			uc->terminated_desc = NULL;
1166 		}
1167 
1168 		if (!uc->desc)
1169 			udma_start(uc);
1170 
1171 		goto out;
1172 	}
1173 
1174 	d = udma_udma_desc_from_paddr(uc, paddr);
1175 
1176 	if (d) {
1177 		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1178 								   d->desc_idx);
1179 		if (desc_paddr != paddr) {
1180 			dev_err(uc->ud->dev, "not matching descriptors!\n");
1181 			goto out;
1182 		}
1183 
1184 		if (d == uc->desc) {
1185 			/* active descriptor */
1186 			if (uc->cyclic) {
1187 				udma_cyclic_packet_elapsed(uc);
1188 				vchan_cyclic_callback(&d->vd);
1189 			} else {
1190 				if (udma_is_desc_really_done(uc, d)) {
1191 					udma_decrement_byte_counters(uc, d->residue);
1192 					udma_start(uc);
1193 					vchan_cookie_complete(&d->vd);
1194 				} else {
1195 					schedule_delayed_work(&uc->tx_drain.work,
1196 							      0);
1197 				}
1198 			}
1199 		} else {
1200 			/*
1201 			 * terminated descriptor, mark the descriptor as
1202 			 * completed to update the channel's cookie marker
1203 			 */
1204 			dma_cookie_complete(&d->vd.tx);
1205 		}
1206 	}
1207 out:
1208 	spin_unlock(&uc->vc.lock);
1209 
1210 	return IRQ_HANDLED;
1211 }
1212 
1213 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1214 {
1215 	struct udma_chan *uc = data;
1216 	struct udma_desc *d;
1217 
1218 	spin_lock(&uc->vc.lock);
1219 	d = uc->desc;
1220 	if (d) {
1221 		d->tr_idx = (d->tr_idx + 1) % d->sglen;
1222 
1223 		if (uc->cyclic) {
1224 			vchan_cyclic_callback(&d->vd);
1225 		} else {
1226 			/* TODO: figure out the real amount of data */
1227 			udma_decrement_byte_counters(uc, d->residue);
1228 			udma_start(uc);
1229 			vchan_cookie_complete(&d->vd);
1230 		}
1231 	}
1232 
1233 	spin_unlock(&uc->vc.lock);
1234 
1235 	return IRQ_HANDLED;
1236 }
1237 
1238 /**
1239  * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1240  * @ud: UDMA device
1241  * @from: Start the search from this flow id number
1242  * @cnt: Number of consecutive flow ids to allocate
1243  *
1244  * Allocate a range of RX flow ids for future use. Those flows can be requested
1245  * only by explicit flow id number. If @from is set to -1 it will try to find the
1246  * first free range. If @from is a positive value it will force allocation only
1247  * of the specified range of flows.
1248  *
1249  * Returns -ENOMEM if a free range can't be found,
1250  * -EEXIST if the requested range is busy,
1251  * -EINVAL if wrong input values are passed.
1252  * Returns the flow id on success.
1253  */
1254 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1255 {
1256 	int start, tmp_from;
1257 	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1258 
1259 	tmp_from = from;
1260 	if (tmp_from < 0)
1261 		tmp_from = ud->rchan_cnt;
1262 	/* default flows can't be allocated; they are accessible only by id */
1263 	if (tmp_from < ud->rchan_cnt)
1264 		return -EINVAL;
1265 
1266 	if (tmp_from + cnt > ud->rflow_cnt)
1267 		return -EINVAL;
1268 
1269 	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1270 		  ud->rflow_cnt);
1271 
1272 	start = bitmap_find_next_zero_area(tmp,
1273 					   ud->rflow_cnt,
1274 					   tmp_from, cnt, 0);
1275 	if (start >= ud->rflow_cnt)
1276 		return -ENOMEM;
1277 
1278 	if (from >= 0 && start != from)
1279 		return -EEXIST;
1280 
1281 	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1282 	return start;
1283 }
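
/*
 * Illustrative example (hypothetical counts): with rchan_cnt = 32 and
 * rflow_cnt = 64, __udma_alloc_gp_rflow_range(ud, -1, 4) looks for the first
 * four consecutive unused GP flows starting at id 32 and returns the start
 * id, while __udma_alloc_gp_rflow_range(ud, 40, 4) succeeds only if flows
 * 40..43 are all free (returning -EEXIST if that exact range is busy).
 */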
1284 
1285 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1286 {
1287 	if (from < ud->rchan_cnt)
1288 		return -EINVAL;
1289 	if (from + cnt > ud->rflow_cnt)
1290 		return -EINVAL;
1291 
1292 	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1293 	return 0;
1294 }
1295 
1296 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1297 {
1298 	/*
1299 	 * A request for an rflow by ID can be made for any rflow that is
1300 	 * not in use, with the assumption that the caller knows what it is
1301 	 * doing. TI-SCI FW will perform an additional permission check
1302 	 * anyway, so it's safe.
1303 	 */
1304 
1305 	if (id < 0 || id >= ud->rflow_cnt)
1306 		return ERR_PTR(-ENOENT);
1307 
1308 	if (test_bit(id, ud->rflow_in_use))
1309 		return ERR_PTR(-ENOENT);
1310 
1311 	if (ud->rflow_gp_map) {
1312 		/* GP rflow has to be allocated first */
1313 		if (!test_bit(id, ud->rflow_gp_map) &&
1314 		    !test_bit(id, ud->rflow_gp_map_allocated))
1315 			return ERR_PTR(-EINVAL);
1316 	}
1317 
1318 	dev_dbg(ud->dev, "get rflow%d\n", id);
1319 	set_bit(id, ud->rflow_in_use);
1320 	return &ud->rflows[id];
1321 }
1322 
1323 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1324 {
1325 	if (!test_bit(rflow->id, ud->rflow_in_use)) {
1326 		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1327 		return;
1328 	}
1329 
1330 	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1331 	clear_bit(rflow->id, ud->rflow_in_use);
1332 }
1333 
1334 #define UDMA_RESERVE_RESOURCE(res)					\
1335 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
1336 					       enum udma_tp_level tpl,	\
1337 					       int id)			\
1338 {									\
1339 	if (id >= 0) {							\
1340 		if (test_bit(id, ud->res##_map)) {			\
1341 			dev_err(ud->dev, #res "%d is in use\n", id);	\
1342 			return ERR_PTR(-ENOENT);			\
1343 		}							\
1344 	} else {							\
1345 		int start;						\
1346 									\
1347 		if (tpl >= ud->res##_tpl.levels)			\
1348 			tpl = ud->res##_tpl.levels - 1;			\
1349 									\
1350 		start = ud->res##_tpl.start_idx[tpl];			\
1351 									\
1352 		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
1353 					start);				\
1354 		if (id == ud->res##_cnt) {				\
1355 			return ERR_PTR(-ENOENT);			\
1356 		}							\
1357 	}								\
1358 									\
1359 	set_bit(id, ud->res##_map);					\
1360 	return &ud->res##s[id];						\
1361 }
1362 
1363 UDMA_RESERVE_RESOURCE(bchan);
1364 UDMA_RESERVE_RESOURCE(tchan);
1365 UDMA_RESERVE_RESOURCE(rchan);
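
/*
 * For reference (derived from the macro above): each invocation generates a
 * reservation helper named after the resource; UDMA_RESERVE_RESOURCE(tchan)
 * expands to
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       enum udma_tp_level tpl,
 *						       int id)
 *
 * operating on ud->tchan_map, ud->tchan_tpl and ud->tchans. The bchan and
 * rchan helpers are generated the same way.
 */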
1366 
1367 static int bcdma_get_bchan(struct udma_chan *uc)
1368 {
1369 	struct udma_dev *ud = uc->ud;
1370 	enum udma_tp_level tpl;
1371 	int ret;
1372 
1373 	if (uc->bchan) {
1374 		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
1375 			uc->id, uc->bchan->id);
1376 		return 0;
1377 	}
1378 
1379 	/*
1380 	 * Use normal channels for peripherals, and highest TPL channel for
1381 	 * mem2mem
1382 	 */
1383 	if (uc->config.tr_trigger_type)
1384 		tpl = 0;
1385 	else
1386 		tpl = ud->bchan_tpl.levels - 1;
1387 
1388 	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
1389 	if (IS_ERR(uc->bchan)) {
1390 		ret = PTR_ERR(uc->bchan);
1391 		uc->bchan = NULL;
1392 		return ret;
1393 	}
1394 
1395 	uc->tchan = uc->bchan;
1396 
1397 	return 0;
1398 }
1399 
1400 static int udma_get_tchan(struct udma_chan *uc)
1401 {
1402 	struct udma_dev *ud = uc->ud;
1403 	int ret;
1404 
1405 	if (uc->tchan) {
1406 		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1407 			uc->id, uc->tchan->id);
1408 		return 0;
1409 	}
1410 
1411 	/*
1412 	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1413 	 * For PKTDMA mapped channels it is configured to a channel which must
1414 	 * be used to service the peripheral.
1415 	 */
1416 	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1417 					 uc->config.mapped_channel_id);
1418 	if (IS_ERR(uc->tchan)) {
1419 		ret = PTR_ERR(uc->tchan);
1420 		uc->tchan = NULL;
1421 		return ret;
1422 	}
1423 
1424 	if (ud->tflow_cnt) {
1425 		int tflow_id;
1426 
1427 		/* Only PKTDMA has support for tx flows */
1428 		if (uc->config.default_flow_id >= 0)
1429 			tflow_id = uc->config.default_flow_id;
1430 		else
1431 			tflow_id = uc->tchan->id;
1432 
1433 		if (test_bit(tflow_id, ud->tflow_map)) {
1434 			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1435 			clear_bit(uc->tchan->id, ud->tchan_map);
1436 			uc->tchan = NULL;
1437 			return -ENOENT;
1438 		}
1439 
1440 		uc->tchan->tflow_id = tflow_id;
1441 		set_bit(tflow_id, ud->tflow_map);
1442 	} else {
1443 		uc->tchan->tflow_id = -1;
1444 	}
1445 
1446 	return 0;
1447 }
1448 
1449 static int udma_get_rchan(struct udma_chan *uc)
1450 {
1451 	struct udma_dev *ud = uc->ud;
1452 	int ret;
1453 
1454 	if (uc->rchan) {
1455 		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1456 			uc->id, uc->rchan->id);
1457 		return 0;
1458 	}
1459 
1460 	/*
1461 	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1462 	 * For PKTDMA mapped channels it is configured to a channel which must
1463 	 * be used to service the peripheral.
1464 	 */
1465 	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1466 					 uc->config.mapped_channel_id);
1467 	if (IS_ERR(uc->rchan)) {
1468 		ret = PTR_ERR(uc->rchan);
1469 		uc->rchan = NULL;
1470 		return ret;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
1476 static int udma_get_chan_pair(struct udma_chan *uc)
1477 {
1478 	struct udma_dev *ud = uc->ud;
1479 	int chan_id, end;
1480 
1481 	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1482 		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1483 			 uc->id, uc->tchan->id);
1484 		return 0;
1485 	}
1486 
1487 	if (uc->tchan) {
1488 		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1489 			uc->id, uc->tchan->id);
1490 		return -EBUSY;
1491 	} else if (uc->rchan) {
1492 		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1493 			uc->id, uc->rchan->id);
1494 		return -EBUSY;
1495 	}
1496 
1497 	/* Can be optimized, but let's have it like this for now */
1498 	end = min(ud->tchan_cnt, ud->rchan_cnt);
1499 	/*
1500 	 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1501 	 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1502 	 */
1503 	chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1504 	for (; chan_id < end; chan_id++) {
1505 		if (!test_bit(chan_id, ud->tchan_map) &&
1506 		    !test_bit(chan_id, ud->rchan_map))
1507 			break;
1508 	}
1509 
1510 	if (chan_id == end)
1511 		return -ENOENT;
1512 
1513 	set_bit(chan_id, ud->tchan_map);
1514 	set_bit(chan_id, ud->rchan_map);
1515 	uc->tchan = &ud->tchans[chan_id];
1516 	uc->rchan = &ud->rchans[chan_id];
1517 
1518 	/* UDMA does not use tx flows */
1519 	uc->tchan->tflow_id = -1;
1520 
1521 	return 0;
1522 }
1523 
1524 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1525 {
1526 	struct udma_dev *ud = uc->ud;
1527 	int ret;
1528 
1529 	if (!uc->rchan) {
1530 		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1531 		return -EINVAL;
1532 	}
1533 
1534 	if (uc->rflow) {
1535 		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1536 			uc->id, uc->rflow->id);
1537 		return 0;
1538 	}
1539 
1540 	uc->rflow = __udma_get_rflow(ud, flow_id);
1541 	if (IS_ERR(uc->rflow)) {
1542 		ret = PTR_ERR(uc->rflow);
1543 		uc->rflow = NULL;
1544 		return ret;
1545 	}
1546 
1547 	return 0;
1548 }
1549 
1550 static void bcdma_put_bchan(struct udma_chan *uc)
1551 {
1552 	struct udma_dev *ud = uc->ud;
1553 
1554 	if (uc->bchan) {
1555 		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1556 			uc->bchan->id);
1557 		clear_bit(uc->bchan->id, ud->bchan_map);
1558 		uc->bchan = NULL;
1559 		uc->tchan = NULL;
1560 	}
1561 }
1562 
1563 static void udma_put_rchan(struct udma_chan *uc)
1564 {
1565 	struct udma_dev *ud = uc->ud;
1566 
1567 	if (uc->rchan) {
1568 		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1569 			uc->rchan->id);
1570 		clear_bit(uc->rchan->id, ud->rchan_map);
1571 		uc->rchan = NULL;
1572 	}
1573 }
1574 
1575 static void udma_put_tchan(struct udma_chan *uc)
1576 {
1577 	struct udma_dev *ud = uc->ud;
1578 
1579 	if (uc->tchan) {
1580 		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1581 			uc->tchan->id);
1582 		clear_bit(uc->tchan->id, ud->tchan_map);
1583 
1584 		if (uc->tchan->tflow_id >= 0)
1585 			clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1586 
1587 		uc->tchan = NULL;
1588 	}
1589 }
1590 
1591 static void udma_put_rflow(struct udma_chan *uc)
1592 {
1593 	struct udma_dev *ud = uc->ud;
1594 
1595 	if (uc->rflow) {
1596 		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1597 			uc->rflow->id);
1598 		__udma_put_rflow(ud, uc->rflow);
1599 		uc->rflow = NULL;
1600 	}
1601 }
1602 
1603 static void bcdma_free_bchan_resources(struct udma_chan *uc)
1604 {
1605 	if (!uc->bchan)
1606 		return;
1607 
1608 	k3_ringacc_ring_free(uc->bchan->tc_ring);
1609 	k3_ringacc_ring_free(uc->bchan->t_ring);
1610 	uc->bchan->tc_ring = NULL;
1611 	uc->bchan->t_ring = NULL;
1612 	k3_configure_chan_coherency(&uc->vc.chan, 0);
1613 
1614 	bcdma_put_bchan(uc);
1615 }
1616 
1617 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1618 {
1619 	struct k3_ring_cfg ring_cfg;
1620 	struct udma_dev *ud = uc->ud;
1621 	int ret;
1622 
1623 	ret = bcdma_get_bchan(uc);
1624 	if (ret)
1625 		return ret;
1626 
1627 	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1628 					    &uc->bchan->t_ring,
1629 					    &uc->bchan->tc_ring);
1630 	if (ret) {
1631 		ret = -EBUSY;
1632 		goto err_ring;
1633 	}
1634 
1635 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1636 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1637 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1638 	ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1639 
1640 	k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1641 	ring_cfg.asel = ud->asel;
1642 	ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1643 
1644 	ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1645 	if (ret)
1646 		goto err_ringcfg;
1647 
1648 	return 0;
1649 
1650 err_ringcfg:
1651 	k3_ringacc_ring_free(uc->bchan->tc_ring);
1652 	uc->bchan->tc_ring = NULL;
1653 	k3_ringacc_ring_free(uc->bchan->t_ring);
1654 	uc->bchan->t_ring = NULL;
1655 	k3_configure_chan_coherency(&uc->vc.chan, 0);
1656 err_ring:
1657 	bcdma_put_bchan(uc);
1658 
1659 	return ret;
1660 }
1661 
1662 static void udma_free_tx_resources(struct udma_chan *uc)
1663 {
1664 	if (!uc->tchan)
1665 		return;
1666 
1667 	k3_ringacc_ring_free(uc->tchan->t_ring);
1668 	k3_ringacc_ring_free(uc->tchan->tc_ring);
1669 	uc->tchan->t_ring = NULL;
1670 	uc->tchan->tc_ring = NULL;
1671 
1672 	udma_put_tchan(uc);
1673 }
1674 
1675 static int udma_alloc_tx_resources(struct udma_chan *uc)
1676 {
1677 	struct k3_ring_cfg ring_cfg;
1678 	struct udma_dev *ud = uc->ud;
1679 	struct udma_tchan *tchan;
1680 	int ring_idx, ret;
1681 
1682 	ret = udma_get_tchan(uc);
1683 	if (ret)
1684 		return ret;
1685 
1686 	tchan = uc->tchan;
1687 	if (tchan->tflow_id >= 0)
1688 		ring_idx = tchan->tflow_id;
1689 	else
1690 		ring_idx = ud->bchan_cnt + tchan->id;
1691 
1692 	ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1693 					    &tchan->t_ring,
1694 					    &tchan->tc_ring);
1695 	if (ret) {
1696 		ret = -EBUSY;
1697 		goto err_ring;
1698 	}
1699 
1700 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1701 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1702 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1703 	if (ud->match_data->type == DMA_TYPE_UDMA) {
1704 		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1705 	} else {
1706 		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1707 
1708 		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1709 		ring_cfg.asel = uc->config.asel;
1710 		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1711 	}
1712 
1713 	ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
1714 	ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1715 
1716 	if (ret)
1717 		goto err_ringcfg;
1718 
1719 	return 0;
1720 
1721 err_ringcfg:
1722 	k3_ringacc_ring_free(uc->tchan->tc_ring);
1723 	uc->tchan->tc_ring = NULL;
1724 	k3_ringacc_ring_free(uc->tchan->t_ring);
1725 	uc->tchan->t_ring = NULL;
1726 err_ring:
1727 	udma_put_tchan(uc);
1728 
1729 	return ret;
1730 }
1731 
1732 static void udma_free_rx_resources(struct udma_chan *uc)
1733 {
1734 	if (!uc->rchan)
1735 		return;
1736 
1737 	if (uc->rflow) {
1738 		struct udma_rflow *rflow = uc->rflow;
1739 
1740 		k3_ringacc_ring_free(rflow->fd_ring);
1741 		k3_ringacc_ring_free(rflow->r_ring);
1742 		rflow->fd_ring = NULL;
1743 		rflow->r_ring = NULL;
1744 
1745 		udma_put_rflow(uc);
1746 	}
1747 
1748 	udma_put_rchan(uc);
1749 }
1750 
1751 static int udma_alloc_rx_resources(struct udma_chan *uc)
1752 {
1753 	struct udma_dev *ud = uc->ud;
1754 	struct k3_ring_cfg ring_cfg;
1755 	struct udma_rflow *rflow;
1756 	int fd_ring_id;
1757 	int ret;
1758 
1759 	ret = udma_get_rchan(uc);
1760 	if (ret)
1761 		return ret;
1762 
1763 	/* For MEM_TO_MEM we don't need rflow or rings */
1764 	if (uc->config.dir == DMA_MEM_TO_MEM)
1765 		return 0;
1766 
1767 	if (uc->config.default_flow_id >= 0)
1768 		ret = udma_get_rflow(uc, uc->config.default_flow_id);
1769 	else
1770 		ret = udma_get_rflow(uc, uc->rchan->id);
1771 
1772 	if (ret) {
1773 		ret = -EBUSY;
1774 		goto err_rflow;
1775 	}
1776 
1777 	rflow = uc->rflow;
1778 	if (ud->tflow_cnt)
1779 		fd_ring_id = ud->tflow_cnt + rflow->id;
1780 	else
1781 		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1782 			     uc->rchan->id;
1783 
1784 	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1785 					    &rflow->fd_ring, &rflow->r_ring);
1786 	if (ret) {
1787 		ret = -EBUSY;
1788 		goto err_ring;
1789 	}
1790 
1791 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1792 
1793 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1794 	if (ud->match_data->type == DMA_TYPE_UDMA) {
1795 		if (uc->config.pkt_mode)
1796 			ring_cfg.size = SG_MAX_SEGMENTS;
1797 		else
1798 			ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1799 
1800 		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1801 	} else {
1802 		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1803 		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1804 
1805 		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1806 		ring_cfg.asel = uc->config.asel;
1807 		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1808 	}
1809 
1810 	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1811 
1812 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1813 	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1814 
1815 	if (ret)
1816 		goto err_ringcfg;
1817 
1818 	return 0;
1819 
1820 err_ringcfg:
1821 	k3_ringacc_ring_free(rflow->r_ring);
1822 	rflow->r_ring = NULL;
1823 	k3_ringacc_ring_free(rflow->fd_ring);
1824 	rflow->fd_ring = NULL;
1825 err_ring:
1826 	udma_put_rflow(uc);
1827 err_rflow:
1828 	udma_put_rchan(uc);
1829 
1830 	return ret;
1831 }
1832 
1833 #define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
1834 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1835 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1836 
1837 #define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
1838 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1839 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1840 
1841 #define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
1842 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1843 
1844 #define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
1845 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1846 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
1847 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
1848 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
1849 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
1850 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
1851 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
1852 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1853 
1854 #define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
1855 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1856 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
1857 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
1858 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
1859 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
1860 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
1861 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
1862 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
1863 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1864 
1865 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1866 {
1867 	struct udma_dev *ud = uc->ud;
1868 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1869 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1870 	struct udma_tchan *tchan = uc->tchan;
1871 	struct udma_rchan *rchan = uc->rchan;
1872 	u8 burst_size = 0;
1873 	int ret;
1874 	u8 tpl;
1875 
1876 	/* Non synchronized - mem to mem type of transfer */
1877 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1878 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1879 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1880 
1881 	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1882 		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1883 
1884 		burst_size = ud->match_data->burst_size[tpl];
1885 	}
1886 
1887 	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1888 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1889 	req_tx.index = tchan->id;
1890 	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1891 	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1892 	req_tx.txcq_qnum = tc_ring;
1893 	req_tx.tx_atype = ud->atype;
1894 	if (burst_size) {
1895 		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1896 		req_tx.tx_burst_size = burst_size;
1897 	}
1898 
1899 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1900 	if (ret) {
1901 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1902 		return ret;
1903 	}
1904 
1905 	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1906 	req_rx.nav_id = tisci_rm->tisci_dev_id;
1907 	req_rx.index = rchan->id;
1908 	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1909 	req_rx.rxcq_qnum = tc_ring;
1910 	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1911 	req_rx.rx_atype = ud->atype;
1912 	if (burst_size) {
1913 		req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1914 		req_rx.rx_burst_size = burst_size;
1915 	}
1916 
1917 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1918 	if (ret)
1919 		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1920 
1921 	return ret;
1922 }
1923 
1924 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1925 {
1926 	struct udma_dev *ud = uc->ud;
1927 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1928 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1929 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1930 	struct udma_bchan *bchan = uc->bchan;
1931 	u8 burst_size = 0;
1932 	int ret;
1933 	u8 tpl;
1934 
1935 	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1936 		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1937 
1938 		burst_size = ud->match_data->burst_size[tpl];
1939 	}
1940 
1941 	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1942 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1943 	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1944 	req_tx.index = bchan->id;
1945 	if (burst_size) {
1946 		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1947 		req_tx.tx_burst_size = burst_size;
1948 	}
1949 
1950 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1951 	if (ret)
1952 		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1953 
1954 	return ret;
1955 }
1956 
1957 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1958 {
1959 	struct udma_dev *ud = uc->ud;
1960 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1961 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1962 	struct udma_tchan *tchan = uc->tchan;
1963 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1964 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1965 	u32 mode, fetch_size;
1966 	int ret;
1967 
1968 	if (uc->config.pkt_mode) {
1969 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1970 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1971 						   uc->config.psd_size, 0);
1972 	} else {
1973 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1974 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
1975 	}
1976 
1977 	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1978 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1979 	req_tx.index = tchan->id;
1980 	req_tx.tx_chan_type = mode;
1981 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1982 	req_tx.tx_fetch_size = fetch_size >> 2;
1983 	req_tx.txcq_qnum = tc_ring;
1984 	req_tx.tx_atype = uc->config.atype;
1985 	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1986 	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1987 		/* wait for peer to complete the teardown for PDMAs */
1988 		req_tx.valid_params |=
1989 				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1990 		req_tx.tx_tdtype = 1;
1991 	}
1992 
1993 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1994 	if (ret)
1995 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1996 
1997 	return ret;
1998 }
1999 
2000 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
2001 {
2002 	struct udma_dev *ud = uc->ud;
2003 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2004 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2005 	struct udma_tchan *tchan = uc->tchan;
2006 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2007 	int ret;
2008 
2009 	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2010 	req_tx.nav_id = tisci_rm->tisci_dev_id;
2011 	req_tx.index = tchan->id;
2012 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2013 	if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2014 		/* wait for peer to complete the teardown for PDMAs */
2015 		req_tx.valid_params |=
2016 				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2017 		req_tx.tx_tdtype = 1;
2018 	}
2019 
2020 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2021 	if (ret)
2022 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2023 
2024 	return ret;
2025 }
2026 
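/* PKTDMA tchans use the same TISCI TX channel configuration as BCDMA tchans */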
2027 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2028 
2029 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
2030 {
2031 	struct udma_dev *ud = uc->ud;
2032 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2033 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2034 	struct udma_rchan *rchan = uc->rchan;
2035 	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
2036 	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2037 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2038 	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2039 	u32 mode, fetch_size;
2040 	int ret;
2041 
2042 	if (uc->config.pkt_mode) {
2043 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
2044 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
2045 						   uc->config.psd_size, 0);
2046 	} else {
2047 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
2048 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
2049 	}
2050 
2051 	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
2052 	req_rx.nav_id = tisci_rm->tisci_dev_id;
2053 	req_rx.index = rchan->id;
2054 	req_rx.rx_fetch_size = fetch_size >> 2;
2055 	req_rx.rxcq_qnum = rx_ring;
2056 	req_rx.rx_chan_type = mode;
2057 	req_rx.rx_atype = uc->config.atype;
2058 
2059 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2060 	if (ret) {
2061 		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2062 		return ret;
2063 	}
2064 
2065 	flow_req.valid_params =
2066 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2067 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2068 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
2069 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
2070 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
2071 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
2072 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
2073 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
2074 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
2075 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
2076 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
2077 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
2078 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
2079 
2080 	flow_req.nav_id = tisci_rm->tisci_dev_id;
2081 	flow_req.flow_index = rchan->id;
2082 
2083 	if (uc->config.needs_epib)
2084 		flow_req.rx_einfo_present = 1;
2085 	else
2086 		flow_req.rx_einfo_present = 0;
2087 	if (uc->config.psd_size)
2088 		flow_req.rx_psinfo_present = 1;
2089 	else
2090 		flow_req.rx_psinfo_present = 0;
2091 	flow_req.rx_error_handling = 1;
2092 	flow_req.rx_dest_qnum = rx_ring;
2093 	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2094 	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2095 	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2096 	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2097 	flow_req.rx_fdq0_sz0_qnum = fd_ring;
2098 	flow_req.rx_fdq1_qnum = fd_ring;
2099 	flow_req.rx_fdq2_qnum = fd_ring;
2100 	flow_req.rx_fdq3_qnum = fd_ring;
2101 
2102 	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2103 
2104 	if (ret)
2105 		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2106 
2107 	return ret;
2108 }
2109 
2110 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2111 {
2112 	struct udma_dev *ud = uc->ud;
2113 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2114 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2115 	struct udma_rchan *rchan = uc->rchan;
2116 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2117 	int ret;
2118 
2119 	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2120 	req_rx.nav_id = tisci_rm->tisci_dev_id;
2121 	req_rx.index = rchan->id;
2122 
2123 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2124 	if (ret)
2125 		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2126 
2127 	return ret;
2128 }
2129 
2130 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2131 {
2132 	struct udma_dev *ud = uc->ud;
2133 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2134 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2135 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2136 	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2137 	int ret;
2138 
2139 	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2140 	req_rx.nav_id = tisci_rm->tisci_dev_id;
2141 	req_rx.index = uc->rchan->id;
2142 
2143 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2144 	if (ret) {
2145 		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2146 		return ret;
2147 	}
2148 
2149 	flow_req.valid_params =
2150 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2151 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2152 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2153 
2154 	flow_req.nav_id = tisci_rm->tisci_dev_id;
2155 	flow_req.flow_index = uc->rflow->id;
2156 
2157 	if (uc->config.needs_epib)
2158 		flow_req.rx_einfo_present = 1;
2159 	else
2160 		flow_req.rx_einfo_present = 0;
2161 	if (uc->config.psd_size)
2162 		flow_req.rx_psinfo_present = 1;
2163 	else
2164 		flow_req.rx_psinfo_present = 0;
2165 	flow_req.rx_error_handling = 1;
2166 
2167 	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2168 
2169 	if (ret)
2170 		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2171 			ret);
2172 
2173 	return ret;
2174 }
2175 
2176 static int udma_alloc_chan_resources(struct dma_chan *chan)
2177 {
2178 	struct udma_chan *uc = to_udma_chan(chan);
2179 	struct udma_dev *ud = to_udma_dev(chan->device);
2180 	const struct udma_soc_data *soc_data = ud->soc_data;
2181 	struct k3_ring *irq_ring;
2182 	u32 irq_udma_idx;
2183 	int ret;
2184 
2185 	uc->dma_dev = ud->dev;
2186 
2187 	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2188 		uc->use_dma_pool = true;
2189 		/* in case of MEM_TO_MEM we have a maximum of two TRs */
2190 		if (uc->config.dir == DMA_MEM_TO_MEM) {
2191 			uc->config.hdesc_size = cppi5_trdesc_calc_size(
2192 					sizeof(struct cppi5_tr_type15_t), 2);
2193 			uc->config.pkt_mode = false;
2194 		}
2195 	}
2196 
2197 	if (uc->use_dma_pool) {
2198 		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2199 						 uc->config.hdesc_size,
2200 						 ud->desc_align,
2201 						 0);
2202 		if (!uc->hdesc_pool) {
2203 			dev_err(ud->ddev.dev,
2204 				"Descriptor pool allocation failed\n");
2205 			uc->use_dma_pool = false;
2206 			ret = -ENOMEM;
2207 			goto err_cleanup;
2208 		}
2209 	}
2210 
2211 	/*
2212 	 * Make sure that the completion is in a known state:
2213 	 * No teardown, the channel is idle
2214 	 */
2215 	reinit_completion(&uc->teardown_completed);
2216 	complete_all(&uc->teardown_completed);
2217 	uc->state = UDMA_CHAN_IS_IDLE;
2218 
2219 	switch (uc->config.dir) {
2220 	case DMA_MEM_TO_MEM:
2221 		/* Non synchronized - mem to mem type of transfer */
2222 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2223 			uc->id);
2224 
2225 		ret = udma_get_chan_pair(uc);
2226 		if (ret)
2227 			goto err_cleanup;
2228 
2229 		ret = udma_alloc_tx_resources(uc);
2230 		if (ret) {
2231 			udma_put_rchan(uc);
2232 			goto err_cleanup;
2233 		}
2234 
2235 		ret = udma_alloc_rx_resources(uc);
2236 		if (ret) {
2237 			udma_free_tx_resources(uc);
2238 			goto err_cleanup;
2239 		}
2240 
2241 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2242 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2243 					K3_PSIL_DST_THREAD_ID_OFFSET;
2244 
2245 		irq_ring = uc->tchan->tc_ring;
2246 		irq_udma_idx = uc->tchan->id;
2247 
2248 		ret = udma_tisci_m2m_channel_config(uc);
2249 		break;
2250 	case DMA_MEM_TO_DEV:
2251 		/* Slave transfer synchronized - mem to dev (TX) transfer */
2252 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2253 			uc->id);
2254 
2255 		ret = udma_alloc_tx_resources(uc);
2256 		if (ret)
2257 			goto err_cleanup;
2258 
2259 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2260 		uc->config.dst_thread = uc->config.remote_thread_id;
2261 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2262 
2263 		irq_ring = uc->tchan->tc_ring;
2264 		irq_udma_idx = uc->tchan->id;
2265 
2266 		ret = udma_tisci_tx_channel_config(uc);
2267 		break;
2268 	case DMA_DEV_TO_MEM:
2269 		/* Slave transfer synchronized - dev to mem (RX) transfer */
2270 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2271 			uc->id);
2272 
2273 		ret = udma_alloc_rx_resources(uc);
2274 		if (ret)
2275 			goto err_cleanup;
2276 
2277 		uc->config.src_thread = uc->config.remote_thread_id;
2278 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2279 					K3_PSIL_DST_THREAD_ID_OFFSET;
2280 
2281 		irq_ring = uc->rflow->r_ring;
2282 		irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2283 
2284 		ret = udma_tisci_rx_channel_config(uc);
2285 		break;
2286 	default:
2287 		/* Cannot happen */
2288 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2289 			__func__, uc->id, uc->config.dir);
2290 		ret = -EINVAL;
2291 		goto err_cleanup;
2292 
2293 	}
2294 
2295 	/* check if the channel configuration was successful */
2296 	if (ret)
2297 		goto err_res_free;
2298 
2299 	if (udma_is_chan_running(uc)) {
2300 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2301 		udma_reset_chan(uc, false);
2302 		if (udma_is_chan_running(uc)) {
2303 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2304 			ret = -EBUSY;
2305 			goto err_res_free;
2306 		}
2307 	}
2308 
2309 	/* PSI-L pairing */
2310 	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2311 	if (ret) {
2312 		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2313 			uc->config.src_thread, uc->config.dst_thread);
2314 		goto err_res_free;
2315 	}
2316 
2317 	uc->psil_paired = true;
2318 
2319 	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2320 	if (uc->irq_num_ring <= 0) {
2321 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2322 			k3_ringacc_get_ring_id(irq_ring));
2323 		ret = -EINVAL;
2324 		goto err_psi_free;
2325 	}
2326 
2327 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2328 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2329 	if (ret) {
2330 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2331 		goto err_irq_free;
2332 	}
2333 
2334 	/* Event from UDMA (TR events) only needed for slave TR mode channels */
2335 	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2336 		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2337 		if (uc->irq_num_udma <= 0) {
2338 			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2339 				irq_udma_idx);
2340 			free_irq(uc->irq_num_ring, uc);
2341 			ret = -EINVAL;
2342 			goto err_irq_free;
2343 		}
2344 
2345 		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2346 				  uc->name, uc);
2347 		if (ret) {
2348 			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2349 				uc->id);
2350 			free_irq(uc->irq_num_ring, uc);
2351 			goto err_irq_free;
2352 		}
2353 	} else {
2354 		uc->irq_num_udma = 0;
2355 	}
2356 
2357 	udma_reset_rings(uc);
2358 
2359 	return 0;
2360 
2361 err_irq_free:
2362 	uc->irq_num_ring = 0;
2363 	uc->irq_num_udma = 0;
2364 err_psi_free:
2365 	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2366 	uc->psil_paired = false;
2367 err_res_free:
2368 	udma_free_tx_resources(uc);
2369 	udma_free_rx_resources(uc);
2370 err_cleanup:
2371 	udma_reset_uchan(uc);
2372 
2373 	if (uc->use_dma_pool) {
2374 		dma_pool_destroy(uc->hdesc_pool);
2375 		uc->use_dma_pool = false;
2376 	}
2377 
2378 	return ret;
2379 }
2380 
2381 static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2382 {
2383 	struct udma_chan *uc = to_udma_chan(chan);
2384 	struct udma_dev *ud = to_udma_dev(chan->device);
2385 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2386 	u32 irq_udma_idx, irq_ring_idx;
2387 	int ret;
2388 
2389 	/* Only TR mode is supported */
2390 	uc->config.pkt_mode = false;
2391 
2392 	/*
2393 	 * Make sure that the completion is in a known state:
2394 	 * No teardown, the channel is idle
2395 	 */
2396 	reinit_completion(&uc->teardown_completed);
2397 	complete_all(&uc->teardown_completed);
2398 	uc->state = UDMA_CHAN_IS_IDLE;
2399 
2400 	switch (uc->config.dir) {
2401 	case DMA_MEM_TO_MEM:
2402 		/* Non synchronized - mem to mem type of transfer */
2403 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2404 			uc->id);
2405 
2406 		ret = bcdma_alloc_bchan_resources(uc);
2407 		if (ret)
2408 			return ret;
2409 
2410 		irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2411 		irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2412 
2413 		ret = bcdma_tisci_m2m_channel_config(uc);
2414 		break;
2415 	case DMA_MEM_TO_DEV:
2416 		/* Slave transfer synchronized - mem to dev (TX) transfer */
2417 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2418 			uc->id);
2419 
2420 		ret = udma_alloc_tx_resources(uc);
2421 		if (ret) {
2422 			uc->config.remote_thread_id = -1;
2423 			return ret;
2424 		}
2425 
2426 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2427 		uc->config.dst_thread = uc->config.remote_thread_id;
2428 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2429 
2430 		irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2431 		irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2432 
2433 		ret = bcdma_tisci_tx_channel_config(uc);
2434 		break;
2435 	case DMA_DEV_TO_MEM:
2436 		/* Slave transfer synchronized - dev to mem (RX) transfer */
2437 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2438 			uc->id);
2439 
2440 		ret = udma_alloc_rx_resources(uc);
2441 		if (ret) {
2442 			uc->config.remote_thread_id = -1;
2443 			return ret;
2444 		}
2445 
2446 		uc->config.src_thread = uc->config.remote_thread_id;
2447 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2448 					K3_PSIL_DST_THREAD_ID_OFFSET;
2449 
2450 		irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2451 		irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2452 
2453 		ret = bcdma_tisci_rx_channel_config(uc);
2454 		break;
2455 	default:
2456 		/* Cannot happen */
2457 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2458 			__func__, uc->id, uc->config.dir);
2459 		return -EINVAL;
2460 	}
2461 
2462 	/* check if the channel configuration was successful */
2463 	if (ret)
2464 		goto err_res_free;
2465 
2466 	if (udma_is_chan_running(uc)) {
2467 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2468 		udma_reset_chan(uc, false);
2469 		if (udma_is_chan_running(uc)) {
2470 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2471 			ret = -EBUSY;
2472 			goto err_res_free;
2473 		}
2474 	}
2475 
2476 	uc->dma_dev = dmaengine_get_dma_device(chan);
2477 	if (uc->config.dir == DMA_MEM_TO_MEM  && !uc->config.tr_trigger_type) {
2478 		uc->config.hdesc_size = cppi5_trdesc_calc_size(
2479 					sizeof(struct cppi5_tr_type15_t), 2);
2480 
2481 		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2482 						 uc->config.hdesc_size,
2483 						 ud->desc_align,
2484 						 0);
2485 		if (!uc->hdesc_pool) {
2486 			dev_err(ud->ddev.dev,
2487 				"Descriptor pool allocation failed\n");
2488 			uc->use_dma_pool = false;
2489 			ret = -ENOMEM;
2490 			goto err_res_free;
2491 		}
2492 
2493 		uc->use_dma_pool = true;
2494 	} else if (uc->config.dir != DMA_MEM_TO_MEM) {
2495 		/* PSI-L pairing */
2496 		ret = navss_psil_pair(ud, uc->config.src_thread,
2497 				      uc->config.dst_thread);
2498 		if (ret) {
2499 			dev_err(ud->dev,
2500 				"PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2501 				uc->config.src_thread, uc->config.dst_thread);
2502 			goto err_res_free;
2503 		}
2504 
2505 		uc->psil_paired = true;
2506 	}
2507 
2508 	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2509 	if (uc->irq_num_ring <= 0) {
2510 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2511 			irq_ring_idx);
2512 		ret = -EINVAL;
2513 		goto err_psi_free;
2514 	}
2515 
2516 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2517 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2518 	if (ret) {
2519 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2520 		goto err_irq_free;
2521 	}
2522 
2523 	/* Event from BCDMA (TR events) only needed for slave channels */
2524 	if (is_slave_direction(uc->config.dir)) {
2525 		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2526 		if (uc->irq_num_udma <= 0) {
2527 			dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2528 				irq_udma_idx);
2529 			free_irq(uc->irq_num_ring, uc);
2530 			ret = -EINVAL;
2531 			goto err_irq_free;
2532 		}
2533 
2534 		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2535 				  uc->name, uc);
2536 		if (ret) {
2537 			dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2538 				uc->id);
2539 			free_irq(uc->irq_num_ring, uc);
2540 			goto err_irq_free;
2541 		}
2542 	} else {
2543 		uc->irq_num_udma = 0;
2544 	}
2545 
2546 	udma_reset_rings(uc);
2547 
2548 	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2549 				  udma_check_tx_completion);
2550 	return 0;
2551 
2552 err_irq_free:
2553 	uc->irq_num_ring = 0;
2554 	uc->irq_num_udma = 0;
2555 err_psi_free:
2556 	if (uc->psil_paired)
2557 		navss_psil_unpair(ud, uc->config.src_thread,
2558 				  uc->config.dst_thread);
2559 	uc->psil_paired = false;
2560 err_res_free:
2561 	bcdma_free_bchan_resources(uc);
2562 	udma_free_tx_resources(uc);
2563 	udma_free_rx_resources(uc);
2564 
2565 	udma_reset_uchan(uc);
2566 
2567 	if (uc->use_dma_pool) {
2568 		dma_pool_destroy(uc->hdesc_pool);
2569 		uc->use_dma_pool = false;
2570 	}
2571 
2572 	return ret;
2573 }
2574 
2575 static int bcdma_router_config(struct dma_chan *chan)
2576 {
2577 	struct k3_event_route_data *router_data = chan->route_data;
2578 	struct udma_chan *uc = to_udma_chan(chan);
2579 	u32 trigger_event;
2580 
2581 	if (!uc->bchan)
2582 		return -EINVAL;
2583 
2584 	if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2585 		return -EINVAL;
2586 
2587 	trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
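	/*
	 * Each bchan has two consecutive global trigger events; pick the one
	 * matching the configured trigger type (1 or 2).
	 */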
2588 	trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2589 
2590 	return router_data->set_event(router_data->priv, trigger_event);
2591 }
2592 
2593 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2594 {
2595 	struct udma_chan *uc = to_udma_chan(chan);
2596 	struct udma_dev *ud = to_udma_dev(chan->device);
2597 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2598 	u32 irq_ring_idx;
2599 	int ret;
2600 
2601 	/*
2602 	 * Make sure that the completion is in a known state:
2603 	 * No teardown, the channel is idle
2604 	 */
2605 	reinit_completion(&uc->teardown_completed);
2606 	complete_all(&uc->teardown_completed);
2607 	uc->state = UDMA_CHAN_IS_IDLE;
2608 
2609 	switch (uc->config.dir) {
2610 	case DMA_MEM_TO_DEV:
2611 		/* Slave transfer synchronized - mem to dev (TX) transfer */
2612 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2613 			uc->id);
2614 
2615 		ret = udma_alloc_tx_resources(uc);
2616 		if (ret) {
2617 			uc->config.remote_thread_id = -1;
2618 			return ret;
2619 		}
2620 
2621 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2622 		uc->config.dst_thread = uc->config.remote_thread_id;
2623 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2624 
2625 		irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2626 
2627 		ret = pktdma_tisci_tx_channel_config(uc);
2628 		break;
2629 	case DMA_DEV_TO_MEM:
2630 		/* Slave transfer synchronized - dev to mem (RX) transfer */
2631 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2632 			uc->id);
2633 
2634 		ret = udma_alloc_rx_resources(uc);
2635 		if (ret) {
2636 			uc->config.remote_thread_id = -1;
2637 			return ret;
2638 		}
2639 
2640 		uc->config.src_thread = uc->config.remote_thread_id;
2641 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2642 					K3_PSIL_DST_THREAD_ID_OFFSET;
2643 
2644 		irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2645 
2646 		ret = pktdma_tisci_rx_channel_config(uc);
2647 		break;
2648 	default:
2649 		/* Cannot happen */
2650 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2651 			__func__, uc->id, uc->config.dir);
2652 		return -EINVAL;
2653 	}
2654 
2655 	/* check if the channel configuration was successful */
2656 	if (ret)
2657 		goto err_res_free;
2658 
2659 	if (udma_is_chan_running(uc)) {
2660 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2661 		udma_reset_chan(uc, false);
2662 		if (udma_is_chan_running(uc)) {
2663 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2664 			ret = -EBUSY;
2665 			goto err_res_free;
2666 		}
2667 	}
2668 
2669 	uc->dma_dev = dmaengine_get_dma_device(chan);
2670 	uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2671 					 uc->config.hdesc_size, ud->desc_align,
2672 					 0);
2673 	if (!uc->hdesc_pool) {
2674 		dev_err(ud->ddev.dev,
2675 			"Descriptor pool allocation failed\n");
2676 		uc->use_dma_pool = false;
2677 		ret = -ENOMEM;
2678 		goto err_res_free;
2679 	}
2680 
2681 	uc->use_dma_pool = true;
2682 
2683 	/* PSI-L pairing */
2684 	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2685 	if (ret) {
2686 		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2687 			uc->config.src_thread, uc->config.dst_thread);
2688 		goto err_res_free;
2689 	}
2690 
2691 	uc->psil_paired = true;
2692 
2693 	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2694 	if (uc->irq_num_ring <= 0) {
2695 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2696 			irq_ring_idx);
2697 		ret = -EINVAL;
2698 		goto err_psi_free;
2699 	}
2700 
2701 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2702 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2703 	if (ret) {
2704 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2705 		goto err_irq_free;
2706 	}
2707 
2708 	uc->irq_num_udma = 0;
2709 
2710 	udma_reset_rings(uc);
2711 
2712 	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2713 				  udma_check_tx_completion);
2714 
2715 	if (uc->tchan)
2716 		dev_dbg(ud->dev,
2717 			"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2718 			uc->id, uc->tchan->id, uc->tchan->tflow_id,
2719 			uc->config.remote_thread_id);
2720 	else if (uc->rchan)
2721 		dev_dbg(ud->dev,
2722 			"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2723 			uc->id, uc->rchan->id, uc->rflow->id,
2724 			uc->config.remote_thread_id);
2725 	return 0;
2726 
2727 err_irq_free:
2728 	uc->irq_num_ring = 0;
2729 err_psi_free:
2730 	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2731 	uc->psil_paired = false;
2732 err_res_free:
2733 	udma_free_tx_resources(uc);
2734 	udma_free_rx_resources(uc);
2735 
2736 	udma_reset_uchan(uc);
2737 
2738 	dma_pool_destroy(uc->hdesc_pool);
2739 	uc->use_dma_pool = false;
2740 
2741 	return ret;
2742 }
2743 
2744 static int udma_slave_config(struct dma_chan *chan,
2745 			     struct dma_slave_config *cfg)
2746 {
2747 	struct udma_chan *uc = to_udma_chan(chan);
2748 
2749 	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2750 
2751 	return 0;
2752 }
2753 
2754 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2755 					    size_t tr_size, int tr_count,
2756 					    enum dma_transfer_direction dir)
2757 {
2758 	struct udma_hwdesc *hwdesc;
2759 	struct cppi5_desc_hdr_t *tr_desc;
2760 	struct udma_desc *d;
2761 	u32 reload_count = 0;
2762 	u32 ring_id;
2763 
2764 	switch (tr_size) {
2765 	case 16:
2766 	case 32:
2767 	case 64:
2768 	case 128:
2769 		break;
2770 	default:
2771 		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2772 		return NULL;
2773 	}
2774 
2775 	/* We have only one descriptor containing multiple TRs */
2776 	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2777 	if (!d)
2778 		return NULL;
2779 
2780 	d->sglen = tr_count;
2781 
2782 	d->hwdesc_count = 1;
2783 	hwdesc = &d->hwdesc[0];
2784 
2785 	/* Allocate memory for DMA ring descriptor */
2786 	if (uc->use_dma_pool) {
2787 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2788 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2789 						GFP_NOWAIT,
2790 						&hwdesc->cppi5_desc_paddr);
2791 	} else {
2792 		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2793 								 tr_count);
2794 		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2795 						uc->ud->desc_align);
2796 		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2797 						hwdesc->cppi5_desc_size,
2798 						&hwdesc->cppi5_desc_paddr,
2799 						GFP_NOWAIT);
2800 	}
2801 
2802 	if (!hwdesc->cppi5_desc_vaddr) {
2803 		kfree(d);
2804 		return NULL;
2805 	}
2806 
2807 	/* Start of the TR req records */
2808 	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2809 	/* Start address of the TR response array */
2810 	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2811 
2812 	tr_desc = hwdesc->cppi5_desc_vaddr;
2813 
2814 	if (uc->cyclic)
2815 		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2816 
2817 	if (dir == DMA_DEV_TO_MEM)
2818 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2819 	else
2820 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2821 
2822 	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2823 	cppi5_desc_set_pktids(tr_desc, uc->id,
2824 			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2825 	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2826 
2827 	return d;
2828 }
2829 
2830 /**
2831  * udma_get_tr_counters - calculate TR counters for a given length
2832  * @len: Length of the transfer
2833  * @align_to: Preferred alignment
2834  * @tr0_cnt0: First TR icnt0
2835  * @tr0_cnt1: First TR icnt1
2836  * @tr1_cnt0: Second (if used) TR icnt0
2837  *
2838  * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2839  * For len >= SZ_64K two TRs are used in a simple way:
2840  * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2841  * Second TR: the remaining length (tr1_cnt0)
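 * e.g. len = 0x10010 with align_to = 2 gives tr0_cnt0 = 0xfffc, tr0_cnt1 = 1,
 * tr1_cnt0 = 0x14 (two TRs)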
2842  *
2843  * Returns the number of TRs the length needs (1 or 2), or
2844  * -EINVAL if the length cannot be supported
2845  */
2846 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2847 				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2848 {
2849 	if (len < SZ_64K) {
2850 		*tr0_cnt0 = len;
2851 		*tr0_cnt1 = 1;
2852 
2853 		return 1;
2854 	}
2855 
2856 	if (align_to > 3)
2857 		align_to = 3;
2858 
2859 realign:
2860 	*tr0_cnt0 = SZ_64K - BIT(align_to);
2861 	if (len / *tr0_cnt0 >= SZ_64K) {
2862 		if (align_to) {
2863 			align_to--;
2864 			goto realign;
2865 		}
2866 		return -EINVAL;
2867 	}
2868 
2869 	*tr0_cnt1 = len / *tr0_cnt0;
2870 	*tr1_cnt0 = len % *tr0_cnt0;
2871 
2872 	return 2;
2873 }
2874 
2875 static struct udma_desc *
2876 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2877 		      unsigned int sglen, enum dma_transfer_direction dir,
2878 		      unsigned long tx_flags, void *context)
2879 {
2880 	struct scatterlist *sgent;
2881 	struct udma_desc *d;
2882 	struct cppi5_tr_type1_t *tr_req = NULL;
2883 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2884 	unsigned int i;
2885 	size_t tr_size;
2886 	int num_tr = 0;
2887 	int tr_idx = 0;
2888 	u64 asel;
2889 
2890 	/* estimate the number of TRs we will need */
2891 	for_each_sg(sgl, sgent, sglen, i) {
2892 		if (sg_dma_len(sgent) < SZ_64K)
2893 			num_tr++;
2894 		else
2895 			num_tr += 2;
2896 	}
2897 
2898 	/* Now allocate and set up the descriptor. */
2899 	tr_size = sizeof(struct cppi5_tr_type1_t);
2900 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2901 	if (!d)
2902 		return NULL;
2903 
2904 	d->sglen = sglen;
2905 
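	/* For BCDMA/PKTDMA the ASEL value is carried in the upper address bits */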
2906 	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2907 		asel = 0;
2908 	else
2909 		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2910 
2911 	tr_req = d->hwdesc[0].tr_req_base;
2912 	for_each_sg(sgl, sgent, sglen, i) {
2913 		dma_addr_t sg_addr = sg_dma_address(sgent);
2914 
2915 		num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2916 					      &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2917 		if (num_tr < 0) {
2918 			dev_err(uc->ud->dev, "size %u is not supported\n",
2919 				sg_dma_len(sgent));
2920 			udma_free_hwdesc(uc, d);
2921 			kfree(d);
2922 			return NULL;
2923 		}
2924 
2925 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2926 			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2927 		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2928 
2929 		sg_addr |= asel;
2930 		tr_req[tr_idx].addr = sg_addr;
2931 		tr_req[tr_idx].icnt0 = tr0_cnt0;
2932 		tr_req[tr_idx].icnt1 = tr0_cnt1;
2933 		tr_req[tr_idx].dim1 = tr0_cnt0;
2934 		tr_idx++;
2935 
2936 		if (num_tr == 2) {
2937 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2938 				      false, false,
2939 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2940 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2941 					 CPPI5_TR_CSF_SUPR_EVT);
2942 
2943 			tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2944 			tr_req[tr_idx].icnt0 = tr1_cnt0;
2945 			tr_req[tr_idx].icnt1 = 1;
2946 			tr_req[tr_idx].dim1 = tr1_cnt0;
2947 			tr_idx++;
2948 		}
2949 
2950 		d->residue += sg_dma_len(sgent);
2951 	}
2952 
2953 	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2954 			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2955 
2956 	return d;
2957 }
2958 
2959 static struct udma_desc *
2960 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2961 				unsigned int sglen,
2962 				enum dma_transfer_direction dir,
2963 				unsigned long tx_flags, void *context)
2964 {
2965 	struct scatterlist *sgent;
2966 	struct cppi5_tr_type15_t *tr_req = NULL;
2967 	enum dma_slave_buswidth dev_width;
2968 	u32 csf = CPPI5_TR_CSF_SUPR_EVT;
2969 	u16 tr_cnt0, tr_cnt1;
2970 	dma_addr_t dev_addr;
2971 	struct udma_desc *d;
2972 	unsigned int i;
2973 	size_t tr_size, sg_len;
2974 	int num_tr = 0;
2975 	int tr_idx = 0;
2976 	u32 burst, trigger_size, port_window;
2977 	u64 asel;
2978 
2979 	if (dir == DMA_DEV_TO_MEM) {
2980 		dev_addr = uc->cfg.src_addr;
2981 		dev_width = uc->cfg.src_addr_width;
2982 		burst = uc->cfg.src_maxburst;
2983 		port_window = uc->cfg.src_port_window_size;
2984 	} else if (dir == DMA_MEM_TO_DEV) {
2985 		dev_addr = uc->cfg.dst_addr;
2986 		dev_width = uc->cfg.dst_addr_width;
2987 		burst = uc->cfg.dst_maxburst;
2988 		port_window = uc->cfg.dst_port_window_size;
2989 	} else {
2990 		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2991 		return NULL;
2992 	}
2993 
2994 	if (!burst)
2995 		burst = 1;
2996 
2997 	if (port_window) {
2998 		if (port_window != burst) {
2999 			dev_err(uc->ud->dev,
3000 				"The burst must be equal to port_window\n");
3001 			return NULL;
3002 		}
3003 
3004 		tr_cnt0 = dev_width * port_window;
3005 		tr_cnt1 = 1;
3006 	} else {
3007 		tr_cnt0 = dev_width;
3008 		tr_cnt1 = burst;
3009 	}
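	/* Number of bytes moved by one trigger event */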
3010 	trigger_size = tr_cnt0 * tr_cnt1;
3011 
3012 	/* estimate the number of TRs we will need */
3013 	for_each_sg(sgl, sgent, sglen, i) {
3014 		sg_len = sg_dma_len(sgent);
3015 
3016 		if (sg_len % trigger_size) {
3017 			dev_err(uc->ud->dev,
3018 				"Not aligned SG entry (%zu for %u)\n", sg_len,
3019 				trigger_size);
3020 			return NULL;
3021 		}
3022 
3023 		if (sg_len / trigger_size < SZ_64K)
3024 			num_tr++;
3025 		else
3026 			num_tr += 2;
3027 	}
3028 
3029 	/* Now allocate and set up the descriptor. */
3030 	tr_size = sizeof(struct cppi5_tr_type15_t);
3031 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
3032 	if (!d)
3033 		return NULL;
3034 
3035 	d->sglen = sglen;
3036 
3037 	if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3038 		asel = 0;
3039 		csf |= CPPI5_TR_CSF_EOL_ICNT0;
3040 	} else {
3041 		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3042 		dev_addr |= asel;
3043 	}
3044 
3045 	tr_req = d->hwdesc[0].tr_req_base;
3046 	for_each_sg(sgl, sgent, sglen, i) {
3047 		u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3048 		dma_addr_t sg_addr = sg_dma_address(sgent);
3049 
3050 		sg_len = sg_dma_len(sgent);
3051 		num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3052 					      &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3053 		if (num_tr < 0) {
3054 			dev_err(uc->ud->dev, "size %zu is not supported\n",
3055 				sg_len);
3056 			udma_free_hwdesc(uc, d);
3057 			kfree(d);
3058 			return NULL;
3059 		}
3060 
3061 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3062 			      true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3063 		cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
3064 		cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3065 				     uc->config.tr_trigger_type,
3066 				     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3067 
3068 		sg_addr |= asel;
3069 		if (dir == DMA_DEV_TO_MEM) {
3070 			tr_req[tr_idx].addr = dev_addr;
3071 			tr_req[tr_idx].icnt0 = tr_cnt0;
3072 			tr_req[tr_idx].icnt1 = tr_cnt1;
3073 			tr_req[tr_idx].icnt2 = tr0_cnt2;
3074 			tr_req[tr_idx].icnt3 = tr0_cnt3;
3075 			tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3076 
3077 			tr_req[tr_idx].daddr = sg_addr;
3078 			tr_req[tr_idx].dicnt0 = tr_cnt0;
3079 			tr_req[tr_idx].dicnt1 = tr_cnt1;
3080 			tr_req[tr_idx].dicnt2 = tr0_cnt2;
3081 			tr_req[tr_idx].dicnt3 = tr0_cnt3;
3082 			tr_req[tr_idx].ddim1 = tr_cnt0;
3083 			tr_req[tr_idx].ddim2 = trigger_size;
3084 			tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
3085 		} else {
3086 			tr_req[tr_idx].addr = sg_addr;
3087 			tr_req[tr_idx].icnt0 = tr_cnt0;
3088 			tr_req[tr_idx].icnt1 = tr_cnt1;
3089 			tr_req[tr_idx].icnt2 = tr0_cnt2;
3090 			tr_req[tr_idx].icnt3 = tr0_cnt3;
3091 			tr_req[tr_idx].dim1 = tr_cnt0;
3092 			tr_req[tr_idx].dim2 = trigger_size;
3093 			tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3094 
3095 			tr_req[tr_idx].daddr = dev_addr;
3096 			tr_req[tr_idx].dicnt0 = tr_cnt0;
3097 			tr_req[tr_idx].dicnt1 = tr_cnt1;
3098 			tr_req[tr_idx].dicnt2 = tr0_cnt2;
3099 			tr_req[tr_idx].dicnt3 = tr0_cnt3;
3100 			tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3101 		}
3102 
3103 		tr_idx++;
3104 
3105 		if (num_tr == 2) {
3106 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3107 				      false, true,
3108 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3109 			cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
3110 			cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3111 					     uc->config.tr_trigger_type,
3112 					     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3113 					     0, 0);
3114 
3115 			sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3116 			if (dir == DMA_DEV_TO_MEM) {
3117 				tr_req[tr_idx].addr = dev_addr;
3118 				tr_req[tr_idx].icnt0 = tr_cnt0;
3119 				tr_req[tr_idx].icnt1 = tr_cnt1;
3120 				tr_req[tr_idx].icnt2 = tr1_cnt2;
3121 				tr_req[tr_idx].icnt3 = 1;
3122 				tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3123 
3124 				tr_req[tr_idx].daddr = sg_addr;
3125 				tr_req[tr_idx].dicnt0 = tr_cnt0;
3126 				tr_req[tr_idx].dicnt1 = tr_cnt1;
3127 				tr_req[tr_idx].dicnt2 = tr1_cnt2;
3128 				tr_req[tr_idx].dicnt3 = 1;
3129 				tr_req[tr_idx].ddim1 = tr_cnt0;
3130 				tr_req[tr_idx].ddim2 = trigger_size;
3131 			} else {
3132 				tr_req[tr_idx].addr = sg_addr;
3133 				tr_req[tr_idx].icnt0 = tr_cnt0;
3134 				tr_req[tr_idx].icnt1 = tr_cnt1;
3135 				tr_req[tr_idx].icnt2 = tr1_cnt2;
3136 				tr_req[tr_idx].icnt3 = 1;
3137 				tr_req[tr_idx].dim1 = tr_cnt0;
3138 				tr_req[tr_idx].dim2 = trigger_size;
3139 
3140 				tr_req[tr_idx].daddr = dev_addr;
3141 				tr_req[tr_idx].dicnt0 = tr_cnt0;
3142 				tr_req[tr_idx].dicnt1 = tr_cnt1;
3143 				tr_req[tr_idx].dicnt2 = tr1_cnt2;
3144 				tr_req[tr_idx].dicnt3 = 1;
3145 				tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3146 			}
3147 			tr_idx++;
3148 		}
3149 
3150 		d->residue += sg_len;
3151 	}
3152 
3153 	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP);
3154 
3155 	return d;
3156 }
3157 
3158 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3159 				   enum dma_slave_buswidth dev_width,
3160 				   u16 elcnt)
3161 {
3162 	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3163 		return 0;
3164 
3165 	/* Bus width translates to the element size (ES) */
3166 	switch (dev_width) {
3167 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
3168 		d->static_tr.elsize = 0;
3169 		break;
3170 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
3171 		d->static_tr.elsize = 1;
3172 		break;
3173 	case DMA_SLAVE_BUSWIDTH_3_BYTES:
3174 		d->static_tr.elsize = 2;
3175 		break;
3176 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
3177 		d->static_tr.elsize = 3;
3178 		break;
3179 	case DMA_SLAVE_BUSWIDTH_8_BYTES:
3180 		d->static_tr.elsize = 4;
3181 		break;
3182 	default: /* not reached */
3183 		return -EINVAL;
3184 	}
3185 
3186 	d->static_tr.elcnt = elcnt;
3187 
3188 	/*
3189 	 * PDMA must close the packet when the channel is in packet mode.
3190 	 * For TR mode, when the channel is not cyclic, we also need PDMA to close
3191 	 * the packet; otherwise the transfer will stall because PDMA holds on to
3192 	 * the data it has received from the peripheral.
3193 	 */
3194 	if (uc->config.pkt_mode || !uc->cyclic) {
3195 		unsigned int div = dev_width * elcnt;
3196 
3197 		if (uc->cyclic)
3198 			d->static_tr.bstcnt = d->residue / d->sglen / div;
3199 		else
3200 			d->static_tr.bstcnt = d->residue / div;
3201 
3202 		if (uc->config.dir == DMA_DEV_TO_MEM &&
3203 		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3204 			return -EINVAL;
3205 	} else {
3206 		d->static_tr.bstcnt = 0;
3207 	}
3208 
3209 	return 0;
3210 }
3211 
3212 static struct udma_desc *
3213 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3214 		       unsigned int sglen, enum dma_transfer_direction dir,
3215 		       unsigned long tx_flags, void *context)
3216 {
3217 	struct scatterlist *sgent;
3218 	struct cppi5_host_desc_t *h_desc = NULL;
3219 	struct udma_desc *d;
3220 	u32 ring_id;
3221 	unsigned int i;
3222 	u64 asel;
3223 
3224 	d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3225 	if (!d)
3226 		return NULL;
3227 
3228 	d->sglen = sglen;
3229 	d->hwdesc_count = sglen;
3230 
3231 	if (dir == DMA_DEV_TO_MEM)
3232 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3233 	else
3234 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3235 
3236 	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3237 		asel = 0;
3238 	else
3239 		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3240 
3241 	for_each_sg(sgl, sgent, sglen, i) {
3242 		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3243 		dma_addr_t sg_addr = sg_dma_address(sgent);
3244 		struct cppi5_host_desc_t *desc;
3245 		size_t sg_len = sg_dma_len(sgent);
3246 
3247 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3248 						GFP_NOWAIT,
3249 						&hwdesc->cppi5_desc_paddr);
3250 		if (!hwdesc->cppi5_desc_vaddr) {
3251 			dev_err(uc->ud->dev,
3252 				"descriptor%d allocation failed\n", i);
3253 
3254 			udma_free_hwdesc(uc, d);
3255 			kfree(d);
3256 			return NULL;
3257 		}
3258 
3259 		d->residue += sg_len;
3260 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3261 		desc = hwdesc->cppi5_desc_vaddr;
3262 
3263 		if (i == 0) {
3264 			cppi5_hdesc_init(desc, 0, 0);
3265 			/* Flow and Packet ID */
3266 			cppi5_desc_set_pktids(&desc->hdr, uc->id,
3267 					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3268 			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3269 		} else {
3270 			cppi5_hdesc_reset_hbdesc(desc);
3271 			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3272 		}
3273 
3274 		/* attach the sg buffer to the descriptor */
3275 		sg_addr |= asel;
3276 		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3277 
3278 		/* Attach link as host buffer descriptor */
3279 		if (h_desc)
3280 			cppi5_hdesc_link_hbdesc(h_desc,
3281 						hwdesc->cppi5_desc_paddr | asel);
3282 
3283 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3284 		    dir == DMA_MEM_TO_DEV)
3285 			h_desc = desc;
3286 	}
3287 
3288 	if (d->residue >= SZ_4M) {
3289 		dev_err(uc->ud->dev,
3290 			"%s: Transfer size %u is over the supported 4M range\n",
3291 			__func__, d->residue);
3292 		udma_free_hwdesc(uc, d);
3293 		kfree(d);
3294 		return NULL;
3295 	}
3296 
3297 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3298 	cppi5_hdesc_set_pktlen(h_desc, d->residue);
3299 
3300 	return d;
3301 }
3302 
3303 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3304 				void *data, size_t len)
3305 {
3306 	struct udma_desc *d = to_udma_desc(desc);
3307 	struct udma_chan *uc = to_udma_chan(desc->chan);
3308 	struct cppi5_host_desc_t *h_desc;
3309 	u32 psd_size = len;
3310 	u32 flags = 0;
3311 
3312 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
3313 		return -ENOTSUPP;
3314 
3315 	if (!data || len > uc->config.metadata_size)
3316 		return -EINVAL;
3317 
3318 	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3319 		return -EINVAL;
3320 
3321 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3322 	if (d->dir == DMA_MEM_TO_DEV)
3323 		memcpy(h_desc->epib, data, len);
3324 
3325 	if (uc->config.needs_epib)
3326 		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3327 
3328 	d->metadata = data;
3329 	d->metadata_size = len;
3330 	if (uc->config.needs_epib)
3331 		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3332 
3333 	cppi5_hdesc_update_flags(h_desc, flags);
3334 	cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3335 
3336 	return 0;
3337 }
3338 
3339 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3340 				   size_t *payload_len, size_t *max_len)
3341 {
3342 	struct udma_desc *d = to_udma_desc(desc);
3343 	struct udma_chan *uc = to_udma_chan(desc->chan);
3344 	struct cppi5_host_desc_t *h_desc;
3345 
3346 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
3347 		return ERR_PTR(-ENOTSUPP);
3348 
3349 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3350 
3351 	*max_len = uc->config.metadata_size;
3352 
3353 	*payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3354 		       CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3355 	*payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3356 
3357 	return h_desc->epib;
3358 }
3359 
3360 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3361 				 size_t payload_len)
3362 {
3363 	struct udma_desc *d = to_udma_desc(desc);
3364 	struct udma_chan *uc = to_udma_chan(desc->chan);
3365 	struct cppi5_host_desc_t *h_desc;
3366 	u32 psd_size = payload_len;
3367 	u32 flags = 0;
3368 
3369 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
3370 		return -ENOTSUPP;
3371 
3372 	if (payload_len > uc->config.metadata_size)
3373 		return -EINVAL;
3374 
3375 	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3376 		return -EINVAL;
3377 
3378 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3379 
3380 	if (uc->config.needs_epib) {
3381 		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3382 		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3383 	}
3384 
3385 	cppi5_hdesc_update_flags(h_desc, flags);
3386 	cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3387 
3388 	return 0;
3389 }
3390 
3391 static struct dma_descriptor_metadata_ops metadata_ops = {
3392 	.attach = udma_attach_metadata,
3393 	.get_ptr = udma_get_metadata_ptr,
3394 	.set_len = udma_set_metadata_len,
3395 };
3396 
3397 static struct dma_async_tx_descriptor *
3398 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3399 		   unsigned int sglen, enum dma_transfer_direction dir,
3400 		   unsigned long tx_flags, void *context)
3401 {
3402 	struct udma_chan *uc = to_udma_chan(chan);
3403 	enum dma_slave_buswidth dev_width;
3404 	struct udma_desc *d;
3405 	u32 burst;
3406 
3407 	if (dir != uc->config.dir &&
3408 	    (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3409 		dev_err(chan->device->dev,
3410 			"%s: chan%d is for %s, not supporting %s\n",
3411 			__func__, uc->id,
3412 			dmaengine_get_direction_text(uc->config.dir),
3413 			dmaengine_get_direction_text(dir));
3414 		return NULL;
3415 	}
3416 
3417 	if (dir == DMA_DEV_TO_MEM) {
3418 		dev_width = uc->cfg.src_addr_width;
3419 		burst = uc->cfg.src_maxburst;
3420 	} else if (dir == DMA_MEM_TO_DEV) {
3421 		dev_width = uc->cfg.dst_addr_width;
3422 		burst = uc->cfg.dst_maxburst;
3423 	} else {
3424 		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3425 		return NULL;
3426 	}
3427 
3428 	if (!burst)
3429 		burst = 1;
3430 
3431 	uc->config.tx_flags = tx_flags;
3432 
3433 	if (uc->config.pkt_mode)
3434 		d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3435 					   context);
3436 	else if (is_slave_direction(uc->config.dir))
3437 		d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3438 					  context);
3439 	else
3440 		d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3441 						    tx_flags, context);
3442 
3443 	if (!d)
3444 		return NULL;
3445 
3446 	d->dir = dir;
3447 	d->desc_idx = 0;
3448 	d->tr_idx = 0;
3449 
3450 	/* static TR for remote PDMA */
3451 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
3452 		dev_err(uc->ud->dev,
3453 			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3454 			__func__, d->static_tr.bstcnt);
3455 
3456 		udma_free_hwdesc(uc, d);
3457 		kfree(d);
3458 		return NULL;
3459 	}
3460 
3461 	if (uc->config.metadata_size)
3462 		d->vd.tx.metadata_ops = &metadata_ops;
3463 
3464 	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3465 }
3466 
3467 static struct udma_desc *
3468 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3469 			size_t buf_len, size_t period_len,
3470 			enum dma_transfer_direction dir, unsigned long flags)
3471 {
3472 	struct udma_desc *d;
3473 	size_t tr_size, period_addr;
3474 	struct cppi5_tr_type1_t *tr_req;
3475 	unsigned int periods = buf_len / period_len;
3476 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3477 	unsigned int i;
3478 	int num_tr;
3479 
3480 	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3481 				      &tr0_cnt1, &tr1_cnt0);
3482 	if (num_tr < 0) {
3483 		dev_err(uc->ud->dev, "size %zu is not supported\n",
3484 			period_len);
3485 		return NULL;
3486 	}
3487 
3488 	/* Now allocate and set up the descriptor. */
3489 	tr_size = sizeof(struct cppi5_tr_type1_t);
3490 	d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3491 	if (!d)
3492 		return NULL;
3493 
3494 	tr_req = d->hwdesc[0].tr_req_base;
3495 	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3496 		period_addr = buf_addr;
3497 	else
3498 		period_addr = buf_addr |
3499 			((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3500 
3501 	for (i = 0; i < periods; i++) {
3502 		int tr_idx = i * num_tr;
3503 
3504 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3505 			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3506 
3507 		tr_req[tr_idx].addr = period_addr;
3508 		tr_req[tr_idx].icnt0 = tr0_cnt0;
3509 		tr_req[tr_idx].icnt1 = tr0_cnt1;
3510 		tr_req[tr_idx].dim1 = tr0_cnt0;
3511 
3512 		if (num_tr == 2) {
3513 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3514 					 CPPI5_TR_CSF_SUPR_EVT);
3515 			tr_idx++;
3516 
3517 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3518 				      false, false,
3519 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3520 
3521 			tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3522 			tr_req[tr_idx].icnt0 = tr1_cnt0;
3523 			tr_req[tr_idx].icnt1 = 1;
3524 			tr_req[tr_idx].dim1 = tr1_cnt0;
3525 		}
3526 
3527 		if (!(flags & DMA_PREP_INTERRUPT))
3528 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3529 					 CPPI5_TR_CSF_SUPR_EVT);
3530 
3531 		period_addr += period_len;
3532 	}
3533 
3534 	return d;
3535 }
3536 
3537 static struct udma_desc *
3538 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3539 			 size_t buf_len, size_t period_len,
3540 			 enum dma_transfer_direction dir, unsigned long flags)
3541 {
3542 	struct udma_desc *d;
3543 	u32 ring_id;
3544 	int i;
3545 	int periods = buf_len / period_len;
3546 
3547 	if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3548 		return NULL;
3549 
3550 	if (period_len >= SZ_4M)
3551 		return NULL;
3552 
3553 	d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3554 	if (!d)
3555 		return NULL;
3556 
3557 	d->hwdesc_count = periods;
3558 
3559 	/* TODO: re-check this... */
3560 	if (dir == DMA_DEV_TO_MEM)
3561 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3562 	else
3563 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3564 
3565 	if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3566 		buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3567 
3568 	for (i = 0; i < periods; i++) {
3569 		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3570 		dma_addr_t period_addr = buf_addr + (period_len * i);
3571 		struct cppi5_host_desc_t *h_desc;
3572 
3573 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3574 						GFP_NOWAIT,
3575 						&hwdesc->cppi5_desc_paddr);
3576 		if (!hwdesc->cppi5_desc_vaddr) {
3577 			dev_err(uc->ud->dev,
3578 				"descriptor%d allocation failed\n", i);
3579 
3580 			udma_free_hwdesc(uc, d);
3581 			kfree(d);
3582 			return NULL;
3583 		}
3584 
3585 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3586 		h_desc = hwdesc->cppi5_desc_vaddr;
3587 
3588 		cppi5_hdesc_init(h_desc, 0, 0);
3589 		cppi5_hdesc_set_pktlen(h_desc, period_len);
3590 
3591 		/* Flow and Packed ID */
3592 		/* Flow and Packet ID */
3593 				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3594 		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3595 
3596 		/* attach each period to a new descriptor */
3597 		cppi5_hdesc_attach_buf(h_desc,
3598 				       period_addr, period_len,
3599 				       period_addr, period_len);
3600 	}
3601 
3602 	return d;
3603 }
3604 
3605 static struct dma_async_tx_descriptor *
3606 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3607 		     size_t period_len, enum dma_transfer_direction dir,
3608 		     unsigned long flags)
3609 {
3610 	struct udma_chan *uc = to_udma_chan(chan);
3611 	enum dma_slave_buswidth dev_width;
3612 	struct udma_desc *d;
3613 	u32 burst;
3614 
3615 	if (dir != uc->config.dir) {
3616 		dev_err(chan->device->dev,
3617 			"%s: chan%d is for %s, not supporting %s\n",
3618 			__func__, uc->id,
3619 			dmaengine_get_direction_text(uc->config.dir),
3620 			dmaengine_get_direction_text(dir));
3621 		return NULL;
3622 	}
3623 
3624 	uc->cyclic = true;
3625 
3626 	if (dir == DMA_DEV_TO_MEM) {
3627 		dev_width = uc->cfg.src_addr_width;
3628 		burst = uc->cfg.src_maxburst;
3629 	} else if (dir == DMA_MEM_TO_DEV) {
3630 		dev_width = uc->cfg.dst_addr_width;
3631 		burst = uc->cfg.dst_maxburst;
3632 	} else {
3633 		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3634 		return NULL;
3635 	}
3636 
3637 	if (!burst)
3638 		burst = 1;
3639 
3640 	if (uc->config.pkt_mode)
3641 		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3642 					     dir, flags);
3643 	else
3644 		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3645 					    dir, flags);
3646 
3647 	if (!d)
3648 		return NULL;
3649 
3650 	d->sglen = buf_len / period_len;
3651 
3652 	d->dir = dir;
3653 	d->residue = buf_len;
3654 
3655 	/* static TR for remote PDMA */
3656 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
3657 		dev_err(uc->ud->dev,
3658 			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3659 			__func__, d->static_tr.bstcnt);
3660 
3661 		udma_free_hwdesc(uc, d);
3662 		kfree(d);
3663 		return NULL;
3664 	}
3665 
3666 	if (uc->config.metadata_size)
3667 		d->vd.tx.metadata_ops = &metadata_ops;
3668 
3669 	return vchan_tx_prep(&uc->vc, &d->vd, flags);
3670 }
3671 
3672 static struct dma_async_tx_descriptor *
3673 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3674 		     size_t len, unsigned long tx_flags)
3675 {
3676 	struct udma_chan *uc = to_udma_chan(chan);
3677 	struct udma_desc *d;
3678 	struct cppi5_tr_type15_t *tr_req;
3679 	int num_tr;
3680 	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3681 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3682 	u32 csf = CPPI5_TR_CSF_SUPR_EVT;
3683 
3684 	if (uc->config.dir != DMA_MEM_TO_MEM) {
3685 		dev_err(chan->device->dev,
3686 			"%s: chan%d is for %s, not supporting %s\n",
3687 			__func__, uc->id,
3688 			dmaengine_get_direction_text(uc->config.dir),
3689 			dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3690 		return NULL;
3691 	}
3692 
3693 	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3694 				      &tr0_cnt1, &tr1_cnt0);
3695 	if (num_tr < 0) {
3696 		dev_err(uc->ud->dev, "size %zu is not supported\n",
3697 			len);
3698 		return NULL;
3699 	}
3700 
3701 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3702 	if (!d)
3703 		return NULL;
3704 
3705 	d->dir = DMA_MEM_TO_MEM;
3706 	d->desc_idx = 0;
3707 	d->tr_idx = 0;
3708 	d->residue = len;
3709 
3710 	if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3711 		src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3712 		dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3713 	} else {
3714 		csf |= CPPI5_TR_CSF_EOL_ICNT0;
3715 	}
3716 
3717 	tr_req = d->hwdesc[0].tr_req_base;
3718 
3719 	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3720 		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3721 	cppi5_tr_csf_set(&tr_req[0].flags, csf);
3722 
3723 	tr_req[0].addr = src;
3724 	tr_req[0].icnt0 = tr0_cnt0;
3725 	tr_req[0].icnt1 = tr0_cnt1;
3726 	tr_req[0].icnt2 = 1;
3727 	tr_req[0].icnt3 = 1;
3728 	tr_req[0].dim1 = tr0_cnt0;
3729 
3730 	tr_req[0].daddr = dest;
3731 	tr_req[0].dicnt0 = tr0_cnt0;
3732 	tr_req[0].dicnt1 = tr0_cnt1;
3733 	tr_req[0].dicnt2 = 1;
3734 	tr_req[0].dicnt3 = 1;
3735 	tr_req[0].ddim1 = tr0_cnt0;
3736 
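	/* The second TR covers the tail that did not fit into the first TR */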
3737 	if (num_tr == 2) {
3738 		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3739 			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3740 		cppi5_tr_csf_set(&tr_req[1].flags, csf);
3741 
3742 		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3743 		tr_req[1].icnt0 = tr1_cnt0;
3744 		tr_req[1].icnt1 = 1;
3745 		tr_req[1].icnt2 = 1;
3746 		tr_req[1].icnt3 = 1;
3747 
3748 		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3749 		tr_req[1].dicnt0 = tr1_cnt0;
3750 		tr_req[1].dicnt1 = 1;
3751 		tr_req[1].dicnt2 = 1;
3752 		tr_req[1].dicnt3 = 1;
3753 	}
3754 
3755 	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP);
3756 
3757 	if (uc->config.metadata_size)
3758 		d->vd.tx.metadata_ops = &metadata_ops;
3759 
3760 	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3761 }
3762 
3763 static void udma_issue_pending(struct dma_chan *chan)
3764 {
3765 	struct udma_chan *uc = to_udma_chan(chan);
3766 	unsigned long flags;
3767 
3768 	spin_lock_irqsave(&uc->vc.lock, flags);
3769 
3770 	/* If we have something pending and no active descriptor, then */
3771 	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3772 		/*
3773 		 * start a descriptor if the channel is NOT [marked as
3774 		 * terminating _and_ it is still running (teardown has not
3775 		 * completed yet)].
3776 		 */
3777 		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3778 		      udma_is_chan_running(uc)))
3779 			udma_start(uc);
3780 	}
3781 
3782 	spin_unlock_irqrestore(&uc->vc.lock, flags);
3783 }
3784 
3785 static enum dma_status udma_tx_status(struct dma_chan *chan,
3786 				      dma_cookie_t cookie,
3787 				      struct dma_tx_state *txstate)
3788 {
3789 	struct udma_chan *uc = to_udma_chan(chan);
3790 	enum dma_status ret;
3791 	unsigned long flags;
3792 
3793 	spin_lock_irqsave(&uc->vc.lock, flags);
3794 
3795 	ret = dma_cookie_status(chan, cookie, txstate);
3796 
3797 	if (!udma_is_chan_running(uc))
3798 		ret = DMA_COMPLETE;
3799 
3800 	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3801 		ret = DMA_PAUSED;
3802 
3803 	if (ret == DMA_COMPLETE || !txstate)
3804 		goto out;
3805 
3806 	if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3807 		u32 peer_bcnt = 0;
3808 		u32 bcnt = 0;
3809 		u32 residue = uc->desc->residue;
3810 		u32 delay = 0;
3811 
3812 		if (uc->desc->dir == DMA_MEM_TO_DEV) {
3813 			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3814 
3815 			if (uc->config.ep_type != PSIL_EP_NATIVE) {
3816 				peer_bcnt = udma_tchanrt_read(uc,
3817 						UDMA_CHAN_RT_PEER_BCNT_REG);
3818 
3819 				if (bcnt > peer_bcnt)
3820 					delay = bcnt - peer_bcnt;
3821 			}
3822 		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3823 			bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3824 
3825 			if (uc->config.ep_type != PSIL_EP_NATIVE) {
3826 				peer_bcnt = udma_rchanrt_read(uc,
3827 						UDMA_CHAN_RT_PEER_BCNT_REG);
3828 
3829 				if (peer_bcnt > bcnt)
3830 					delay = peer_bcnt - bcnt;
3831 			}
3832 		} else {
3833 			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3834 		}
3835 
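		/*
		 * The RT byte counter is free running (it is not reset when a
		 * descriptor is reused), so reduce it modulo the descriptor
		 * length to get the progress within the current transfer.
		 */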
3836 		if (bcnt && !(bcnt % uc->desc->residue))
3837 			residue = 0;
3838 		else
3839 			residue -= bcnt % uc->desc->residue;
3840 
3841 		if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3842 			ret = DMA_COMPLETE;
3843 			delay = 0;
3844 		}
3845 
3846 		dma_set_residue(txstate, residue);
3847 		dma_set_in_flight_bytes(txstate, delay);
3848 
3849 	} else {
3850 		ret = DMA_COMPLETE;
3851 	}
3852 
3853 out:
3854 	spin_unlock_irqrestore(&uc->vc.lock, flags);
3855 	return ret;
3856 }
3857 
3858 static int udma_pause(struct dma_chan *chan)
3859 {
3860 	struct udma_chan *uc = to_udma_chan(chan);
3861 
3862 	/* pause the channel */
3863 	switch (uc->config.dir) {
3864 	case DMA_DEV_TO_MEM:
3865 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3866 					 UDMA_PEER_RT_EN_PAUSE,
3867 					 UDMA_PEER_RT_EN_PAUSE);
3868 		break;
3869 	case DMA_MEM_TO_DEV:
3870 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3871 					 UDMA_PEER_RT_EN_PAUSE,
3872 					 UDMA_PEER_RT_EN_PAUSE);
3873 		break;
3874 	case DMA_MEM_TO_MEM:
3875 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3876 					 UDMA_CHAN_RT_CTL_PAUSE,
3877 					 UDMA_CHAN_RT_CTL_PAUSE);
3878 		break;
3879 	default:
3880 		return -EINVAL;
3881 	}
3882 
3883 	return 0;
3884 }
3885 
3886 static int udma_resume(struct dma_chan *chan)
3887 {
3888 	struct udma_chan *uc = to_udma_chan(chan);
3889 
3890 	/* resume the channel */
3891 	switch (uc->config.dir) {
3892 	case DMA_DEV_TO_MEM:
3893 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3894 					 UDMA_PEER_RT_EN_PAUSE, 0);
3895 
3896 		break;
3897 	case DMA_MEM_TO_DEV:
3898 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3899 					 UDMA_PEER_RT_EN_PAUSE, 0);
3900 		break;
3901 	case DMA_MEM_TO_MEM:
3902 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3903 					 UDMA_CHAN_RT_CTL_PAUSE, 0);
3904 		break;
3905 	default:
3906 		return -EINVAL;
3907 	}
3908 
3909 	return 0;
3910 }
3911 
3912 static int udma_terminate_all(struct dma_chan *chan)
3913 {
3914 	struct udma_chan *uc = to_udma_chan(chan);
3915 	unsigned long flags;
3916 	LIST_HEAD(head);
3917 
3918 	spin_lock_irqsave(&uc->vc.lock, flags);
3919 
3920 	if (udma_is_chan_running(uc))
3921 		udma_stop(uc);
3922 
3923 	if (uc->desc) {
3924 		uc->terminated_desc = uc->desc;
3925 		uc->desc = NULL;
3926 		uc->terminated_desc->terminated = true;
3927 		cancel_delayed_work(&uc->tx_drain.work);
3928 	}
3929 
3930 	uc->paused = false;
3931 
3932 	vchan_get_all_descriptors(&uc->vc, &head);
3933 	spin_unlock_irqrestore(&uc->vc.lock, flags);
3934 	vchan_dma_desc_free_list(&uc->vc, &head);
3935 
3936 	return 0;
3937 }
3938 
3939 static void udma_synchronize(struct dma_chan *chan)
3940 {
3941 	struct udma_chan *uc = to_udma_chan(chan);
3942 	unsigned long timeout = msecs_to_jiffies(1000);
3943 
3944 	vchan_synchronize(&uc->vc);
3945 
3946 	if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3947 		timeout = wait_for_completion_timeout(&uc->teardown_completed,
3948 						      timeout);
3949 		if (!timeout) {
3950 			dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3951 				 uc->id);
3952 			udma_dump_chan_stdata(uc);
3953 			udma_reset_chan(uc, true);
3954 		}
3955 	}
3956 
3957 	udma_reset_chan(uc, false);
3958 	if (udma_is_chan_running(uc))
3959 		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3960 
3961 	cancel_delayed_work_sync(&uc->tx_drain.work);
3962 	udma_reset_rings(uc);
3963 }
3964 
3965 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3966 				   struct virt_dma_desc *vd,
3967 				   struct dmaengine_result *result)
3968 {
3969 	struct udma_chan *uc = to_udma_chan(&vc->chan);
3970 	struct udma_desc *d;
3971 	u8 status;
3972 
3973 	if (!vd)
3974 		return;
3975 
3976 	d = to_udma_desc(&vd->tx);
3977 
3978 	if (d->metadata_size)
3979 		udma_fetch_epib(uc, d);
3980 
3981 	if (result) {
3982 		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3983 
3984 		if (cppi5_desc_get_type(desc_vaddr) ==
3985 		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3986 			/* Provide residue information for the client */
3987 			result->residue = d->residue -
3988 					  cppi5_hdesc_get_pktlen(desc_vaddr);
3989 			if (result->residue)
3990 				result->result = DMA_TRANS_ABORTED;
3991 			else
3992 				result->result = DMA_TRANS_NOERROR;
3993 		} else {
3994 			result->residue = 0;
3995 			/* Propagate TR Response errors to the client */
3996 			status = d->hwdesc[0].tr_resp_base->status;
3997 			if (status)
3998 				result->result = DMA_TRANS_ABORTED;
3999 			else
4000 				result->result = DMA_TRANS_NOERROR;
4001 		}
4002 	}
4003 }
4004 
4005 /*
4006  * This tasklet handles the completion of a DMA descriptor by
4007  * calling its callback and freeing it.
4008  */
4009 static void udma_vchan_complete(struct tasklet_struct *t)
4010 {
4011 	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
4012 	struct virt_dma_desc *vd, *_vd;
4013 	struct dmaengine_desc_callback cb;
4014 	LIST_HEAD(head);
4015 
4016 	spin_lock_irq(&vc->lock);
4017 	list_splice_tail_init(&vc->desc_completed, &head);
4018 	vd = vc->cyclic;
4019 	if (vd) {
4020 		vc->cyclic = NULL;
4021 		dmaengine_desc_get_callback(&vd->tx, &cb);
4022 	} else {
4023 		memset(&cb, 0, sizeof(cb));
4024 	}
4025 	spin_unlock_irq(&vc->lock);
4026 
4027 	udma_desc_pre_callback(vc, vd, NULL);
4028 	dmaengine_desc_callback_invoke(&cb, NULL);
4029 
4030 	list_for_each_entry_safe(vd, _vd, &head, node) {
4031 		struct dmaengine_result result;
4032 
4033 		dmaengine_desc_get_callback(&vd->tx, &cb);
4034 
4035 		list_del(&vd->node);
4036 
4037 		udma_desc_pre_callback(vc, vd, &result);
4038 		dmaengine_desc_callback_invoke(&cb, &result);
4039 
4040 		vchan_vdesc_fini(vd);
4041 	}
4042 }
4043 
4044 static void udma_free_chan_resources(struct dma_chan *chan)
4045 {
4046 	struct udma_chan *uc = to_udma_chan(chan);
4047 	struct udma_dev *ud = to_udma_dev(chan->device);
4048 
4049 	udma_terminate_all(chan);
4050 	if (uc->terminated_desc) {
4051 		udma_reset_chan(uc, false);
4052 		udma_reset_rings(uc);
4053 	}
4054 
4055 	cancel_delayed_work_sync(&uc->tx_drain.work);
4056 
4057 	if (uc->irq_num_ring > 0) {
4058 		free_irq(uc->irq_num_ring, uc);
4059 
4060 		uc->irq_num_ring = 0;
4061 	}
4062 	if (uc->irq_num_udma > 0) {
4063 		free_irq(uc->irq_num_udma, uc);
4064 
4065 		uc->irq_num_udma = 0;
4066 	}
4067 
4068 	/* Release PSI-L pairing */
4069 	if (uc->psil_paired) {
4070 		navss_psil_unpair(ud, uc->config.src_thread,
4071 				  uc->config.dst_thread);
4072 		uc->psil_paired = false;
4073 	}
4074 
4075 	vchan_free_chan_resources(&uc->vc);
4076 	tasklet_kill(&uc->vc.task);
4077 
4078 	bcdma_free_bchan_resources(uc);
4079 	udma_free_tx_resources(uc);
4080 	udma_free_rx_resources(uc);
4081 	udma_reset_uchan(uc);
4082 
4083 	if (uc->use_dma_pool) {
4084 		dma_pool_destroy(uc->hdesc_pool);
4085 		uc->use_dma_pool = false;
4086 	}
4087 }
4088 
4089 static struct platform_driver udma_driver;
4090 static struct platform_driver bcdma_driver;
4091 static struct platform_driver pktdma_driver;
4092 
4093 struct udma_filter_param {
4094 	int remote_thread_id;
4095 	u32 atype;
4096 	u32 asel;
4097 	u32 tr_trigger_type;
4098 };
4099 
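/*
 * dmaengine filter callback: validate the parameters coming from the
 * dma-spec and pre-configure the channel (direction and PSI-L endpoint
 * settings) before it is handed to the requesting client.
 */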
4100 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4101 {
4102 	struct udma_chan_config *ucc;
4103 	struct psil_endpoint_config *ep_config;
4104 	struct udma_filter_param *filter_param;
4105 	struct udma_chan *uc;
4106 	struct udma_dev *ud;
4107 
4108 	if (chan->device->dev->driver != &udma_driver.driver &&
4109 	    chan->device->dev->driver != &bcdma_driver.driver &&
4110 	    chan->device->dev->driver != &pktdma_driver.driver)
4111 		return false;
4112 
4113 	uc = to_udma_chan(chan);
4114 	ucc = &uc->config;
4115 	ud = uc->ud;
4116 	filter_param = param;
4117 
4118 	if (filter_param->atype > 2) {
4119 		dev_err(ud->dev, "Invalid channel atype: %u\n",
4120 			filter_param->atype);
4121 		return false;
4122 	}
4123 
4124 	if (filter_param->asel > 15) {
4125 		dev_err(ud->dev, "Invalid channel asel: %u\n",
4126 			filter_param->asel);
4127 		return false;
4128 	}
4129 
4130 	ucc->remote_thread_id = filter_param->remote_thread_id;
4131 	ucc->atype = filter_param->atype;
4132 	ucc->asel = filter_param->asel;
4133 	ucc->tr_trigger_type = filter_param->tr_trigger_type;
4134 
4135 	if (ucc->tr_trigger_type) {
4136 		ucc->dir = DMA_MEM_TO_MEM;
4137 		goto triggered_bchan;
4138 	} else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4139 		ucc->dir = DMA_MEM_TO_DEV;
4140 	} else {
4141 		ucc->dir = DMA_DEV_TO_MEM;
4142 	}
4143 
4144 	ep_config = psil_get_ep_config(ucc->remote_thread_id);
4145 	if (IS_ERR(ep_config)) {
4146 		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4147 			ucc->remote_thread_id);
4148 		ucc->dir = DMA_MEM_TO_MEM;
4149 		ucc->remote_thread_id = -1;
4150 		ucc->atype = 0;
4151 		ucc->asel = 0;
4152 		return false;
4153 	}
4154 
4155 	if (ud->match_data->type == DMA_TYPE_BCDMA &&
4156 	    ep_config->pkt_mode) {
4157 		dev_err(ud->dev,
4158 			"Only TR mode is supported (psi-l thread 0x%04x)\n",
4159 			ucc->remote_thread_id);
4160 		ucc->dir = DMA_MEM_TO_MEM;
4161 		ucc->remote_thread_id = -1;
4162 		ucc->atype = 0;
4163 		ucc->asel = 0;
4164 		return false;
4165 	}
4166 
4167 	ucc->pkt_mode = ep_config->pkt_mode;
4168 	ucc->channel_tpl = ep_config->channel_tpl;
4169 	ucc->notdpkt = ep_config->notdpkt;
4170 	ucc->ep_type = ep_config->ep_type;
4171 
4172 	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4173 	    ep_config->mapped_channel_id >= 0) {
4174 		ucc->mapped_channel_id = ep_config->mapped_channel_id;
4175 		ucc->default_flow_id = ep_config->default_flow_id;
4176 	} else {
4177 		ucc->mapped_channel_id = -1;
4178 		ucc->default_flow_id = -1;
4179 	}
4180 
4181 	if (ucc->ep_type != PSIL_EP_NATIVE) {
4182 		const struct udma_match_data *match_data = ud->match_data;
4183 
4184 		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4185 			ucc->enable_acc32 = ep_config->pdma_acc32;
4186 		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4187 			ucc->enable_burst = ep_config->pdma_burst;
4188 	}
4189 
4190 	ucc->needs_epib = ep_config->needs_epib;
4191 	ucc->psd_size = ep_config->psd_size;
4192 	ucc->metadata_size =
4193 			(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4194 			ucc->psd_size;
4195 
4196 	if (ucc->pkt_mode)
4197 		ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4198 				 ucc->metadata_size, ud->desc_align);
4199 
4200 	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4201 		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4202 
4203 	return true;
4204 
4205 triggered_bchan:
4206 	dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4207 		ucc->tr_trigger_type);
4208 
4209 	return true;
4210 
4211 }
4212 
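/*
 * DT xlate: BCDMA uses three cells (trigger type, PSI-L thread ID, asel),
 * while UDMA and PKTDMA use one or two cells (thread ID plus an optional
 * atype or asel).
 */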
4213 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4214 				      struct of_dma *ofdma)
4215 {
4216 	struct udma_dev *ud = ofdma->of_dma_data;
4217 	dma_cap_mask_t mask = ud->ddev.cap_mask;
4218 	struct udma_filter_param filter_param;
4219 	struct dma_chan *chan;
4220 
4221 	if (ud->match_data->type == DMA_TYPE_BCDMA) {
4222 		if (dma_spec->args_count != 3)
4223 			return NULL;
4224 
4225 		filter_param.tr_trigger_type = dma_spec->args[0];
4226 		filter_param.remote_thread_id = dma_spec->args[1];
4227 		filter_param.asel = dma_spec->args[2];
4228 		filter_param.atype = 0;
4229 	} else {
4230 		if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4231 			return NULL;
4232 
4233 		filter_param.remote_thread_id = dma_spec->args[0];
4234 		filter_param.tr_trigger_type = 0;
4235 		if (dma_spec->args_count == 2) {
4236 			if (ud->match_data->type == DMA_TYPE_UDMA) {
4237 				filter_param.atype = dma_spec->args[1];
4238 				filter_param.asel = 0;
4239 			} else {
4240 				filter_param.atype = 0;
4241 				filter_param.asel = dma_spec->args[1];
4242 			}
4243 		} else {
4244 			filter_param.atype = 0;
4245 			filter_param.asel = 0;
4246 		}
4247 	}
4248 
4249 	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4250 				     ofdma->of_node);
4251 	if (!chan) {
4252 		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
4253 		return ERR_PTR(-EINVAL);
4254 	}
4255 
4256 	return chan;
4257 }
4258 
4259 static struct udma_match_data am654_main_data = {
4260 	.type = DMA_TYPE_UDMA,
4261 	.psil_base = 0x1000,
4262 	.enable_memcpy_support = true,
4263 	.statictr_z_mask = GENMASK(11, 0),
4264 	.burst_size = {
4265 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4266 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4267 		0, /* No UH Channels */
4268 	},
4269 };
4270 
4271 static struct udma_match_data am654_mcu_data = {
4272 	.type = DMA_TYPE_UDMA,
4273 	.psil_base = 0x6000,
4274 	.enable_memcpy_support = false,
4275 	.statictr_z_mask = GENMASK(11, 0),
4276 	.burst_size = {
4277 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4278 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4279 		0, /* No UH Channels */
4280 	},
4281 };
4282 
4283 static struct udma_match_data j721e_main_data = {
4284 	.type = DMA_TYPE_UDMA,
4285 	.psil_base = 0x1000,
4286 	.enable_memcpy_support = true,
4287 	.flags = UDMA_FLAGS_J7_CLASS,
4288 	.statictr_z_mask = GENMASK(23, 0),
4289 	.burst_size = {
4290 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4291 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
4292 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
4293 	},
4294 };
4295 
4296 static struct udma_match_data j721e_mcu_data = {
4297 	.type = DMA_TYPE_UDMA,
4298 	.psil_base = 0x6000,
4299 	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4300 	.flags = UDMA_FLAGS_J7_CLASS,
4301 	.statictr_z_mask = GENMASK(23, 0),
4302 	.burst_size = {
4303 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4304 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
4305 		0, /* No UH Channels */
4306 	},
4307 };
4308 
4309 static struct udma_soc_data am62a_dmss_csi_soc_data = {
4310 	.oes = {
4311 		.bcdma_rchan_data = 0xe00,
4312 		.bcdma_rchan_ring = 0x1000,
4313 	},
4314 };
4315 
4316 static struct udma_soc_data j721s2_bcdma_csi_soc_data = {
4317 	.oes = {
4318 		.bcdma_tchan_data = 0x800,
4319 		.bcdma_tchan_ring = 0xa00,
4320 		.bcdma_rchan_data = 0xe00,
4321 		.bcdma_rchan_ring = 0x1000,
4322 	},
4323 };
4324 
4325 static struct udma_match_data am62a_bcdma_csirx_data = {
4326 	.type = DMA_TYPE_BCDMA,
4327 	.psil_base = 0x3100,
4328 	.enable_memcpy_support = false,
4329 	.burst_size = {
4330 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4331 		0, /* No H Channels */
4332 		0, /* No UH Channels */
4333 	},
4334 	.soc_data = &am62a_dmss_csi_soc_data,
4335 };
4336 
4337 static struct udma_match_data am64_bcdma_data = {
4338 	.type = DMA_TYPE_BCDMA,
4339 	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4340 	.enable_memcpy_support = true, /* Supported via bchan */
4341 	.flags = UDMA_FLAGS_J7_CLASS,
4342 	.statictr_z_mask = GENMASK(23, 0),
4343 	.burst_size = {
4344 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4345 		0, /* No H Channels */
4346 		0, /* No UH Channels */
4347 	},
4348 };
4349 
4350 static struct udma_match_data am64_pktdma_data = {
4351 	.type = DMA_TYPE_PKTDMA,
4352 	.psil_base = 0x1000,
4353 	.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4354 	.flags = UDMA_FLAGS_J7_CLASS,
4355 	.statictr_z_mask = GENMASK(23, 0),
4356 	.burst_size = {
4357 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4358 		0, /* No H Channels */
4359 		0, /* No UH Channels */
4360 	},
4361 };
4362 
4363 static struct udma_match_data j721s2_bcdma_csi_data = {
4364 	.type = DMA_TYPE_BCDMA,
4365 	.psil_base = 0x2000,
4366 	.enable_memcpy_support = false,
4367 	.burst_size = {
4368 		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4369 		0, /* No H Channels */
4370 		0, /* No UH Channels */
4371 	},
4372 	.soc_data = &j721s2_bcdma_csi_soc_data,
4373 };
4374 
4375 static const struct of_device_id udma_of_match[] = {
4376 	{
4377 		.compatible = "ti,am654-navss-main-udmap",
4378 		.data = &am654_main_data,
4379 	},
4380 	{
4381 		.compatible = "ti,am654-navss-mcu-udmap",
4382 		.data = &am654_mcu_data,
4383 	}, {
4384 		.compatible = "ti,j721e-navss-main-udmap",
4385 		.data = &j721e_main_data,
4386 	}, {
4387 		.compatible = "ti,j721e-navss-mcu-udmap",
4388 		.data = &j721e_mcu_data,
4389 	},
4390 	{
4391 		.compatible = "ti,am64-dmss-bcdma",
4392 		.data = &am64_bcdma_data,
4393 	},
4394 	{
4395 		.compatible = "ti,am64-dmss-pktdma",
4396 		.data = &am64_pktdma_data,
4397 	},
4398 	{
4399 		.compatible = "ti,am62a-dmss-bcdma-csirx",
4400 		.data = &am62a_bcdma_csirx_data,
4401 	},
4402 	{
4403 		.compatible = "ti,j721s2-dmss-bcdma-csi",
4404 		.data = &j721s2_bcdma_csi_data,
4405 	},
4406 	{ /* Sentinel */ },
4407 };
4408 
4409 static struct udma_soc_data am654_soc_data = {
4410 	.oes = {
4411 		.udma_rchan = 0x200,
4412 	},
4413 };
4414 
4415 static struct udma_soc_data j721e_soc_data = {
4416 	.oes = {
4417 		.udma_rchan = 0x400,
4418 	},
4419 };
4420 
4421 static struct udma_soc_data j7200_soc_data = {
4422 	.oes = {
4423 		.udma_rchan = 0x80,
4424 	},
4425 };
4426 
4427 static struct udma_soc_data am64_soc_data = {
4428 	.oes = {
4429 		.bcdma_bchan_data = 0x2200,
4430 		.bcdma_bchan_ring = 0x2400,
4431 		.bcdma_tchan_data = 0x2800,
4432 		.bcdma_tchan_ring = 0x2a00,
4433 		.bcdma_rchan_data = 0x2e00,
4434 		.bcdma_rchan_ring = 0x3000,
4435 		.pktdma_tchan_flow = 0x1200,
4436 		.pktdma_rchan_flow = 0x1600,
4437 	},
4438 	.bcdma_trigger_event_offset = 0xc400,
4439 };
4440 
4441 static const struct soc_device_attribute k3_soc_devices[] = {
4442 	{ .family = "AM65X", .data = &am654_soc_data },
4443 	{ .family = "J721E", .data = &j721e_soc_data },
4444 	{ .family = "J7200", .data = &j7200_soc_data },
4445 	{ .family = "AM64X", .data = &am64_soc_data },
4446 	{ .family = "J721S2", .data = &j721e_soc_data},
4447 	{ .family = "AM62X", .data = &am64_soc_data },
4448 	{ .family = "AM62AX", .data = &am64_soc_data },
4449 	{ .family = "J784S4", .data = &j721e_soc_data },
4450 	{ /* sentinel */ }
4451 };
4452 
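/*
 * Map the MMIO regions and read the channel/flow counts from the GCFG
 * capability registers. The per-channel realtime regions are only mapped
 * for resource types the instance actually provides.
 */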
4453 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4454 {
4455 	u32 cap2, cap3, cap4;
4456 	int i;
4457 
4458 	ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4459 	if (IS_ERR(ud->mmrs[MMR_GCFG]))
4460 		return PTR_ERR(ud->mmrs[MMR_GCFG]);
4461 
4462 	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4463 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4464 
4465 	switch (ud->match_data->type) {
4466 	case DMA_TYPE_UDMA:
4467 		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4468 		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4469 		ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4470 		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4471 		break;
4472 	case DMA_TYPE_BCDMA:
4473 		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4474 		ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4475 		ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4476 		ud->rflow_cnt = ud->rchan_cnt;
4477 		break;
4478 	case DMA_TYPE_PKTDMA:
4479 		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4480 		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4481 		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4482 		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4483 		ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4484 		break;
4485 	default:
4486 		return -EINVAL;
4487 	}
4488 
4489 	for (i = 1; i < MMR_LAST; i++) {
4490 		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4491 			continue;
4492 		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4493 			continue;
4494 		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4495 			continue;
4496 
4497 		ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4498 		if (IS_ERR(ud->mmrs[i]))
4499 			return PTR_ERR(ud->mmrs[i]);
4500 	}
4501 
4502 	return 0;
4503 }
4504 
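/*
 * Clear the bits of the ranges that TISCI assigned to this host, i.e. mark
 * those channels/flows as usable by this driver instance.
 */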
4505 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4506 				      struct ti_sci_resource_desc *rm_desc,
4507 				      char *name)
4508 {
4509 	bitmap_clear(map, rm_desc->start, rm_desc->num);
4510 	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4511 	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4512 		rm_desc->start, rm_desc->num, rm_desc->start_sec,
4513 		rm_desc->num_sec);
4514 }
4515 
4516 static const char * const range_names[] = {
4517 	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4518 	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4519 	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4520 	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4521 	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4522 };
4523 
4524 static int udma_setup_resources(struct udma_dev *ud)
4525 {
4526 	int ret, i, j;
4527 	struct device *dev = ud->dev;
4528 	struct ti_sci_resource *rm_res, irq_res;
4529 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4530 	u32 cap3;
4531 
4532 	/* Set up the throughput level start indexes */
4533 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4534 	if (of_device_is_compatible(dev->of_node,
4535 				    "ti,am654-navss-main-udmap")) {
4536 		ud->tchan_tpl.levels = 2;
4537 		ud->tchan_tpl.start_idx[0] = 8;
4538 	} else if (of_device_is_compatible(dev->of_node,
4539 					   "ti,am654-navss-mcu-udmap")) {
4540 		ud->tchan_tpl.levels = 2;
4541 		ud->tchan_tpl.start_idx[0] = 2;
4542 	} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4543 		ud->tchan_tpl.levels = 3;
4544 		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4545 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4546 	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4547 		ud->tchan_tpl.levels = 2;
4548 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4549 	} else {
4550 		ud->tchan_tpl.levels = 1;
4551 	}
4552 
4553 	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4554 	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4555 	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4556 
4557 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4558 					   sizeof(unsigned long), GFP_KERNEL);
4559 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4560 				  GFP_KERNEL);
4561 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4562 					   sizeof(unsigned long), GFP_KERNEL);
4563 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4564 				  GFP_KERNEL);
4565 	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4566 					      sizeof(unsigned long),
4567 					      GFP_KERNEL);
4568 	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4569 						  BITS_TO_LONGS(ud->rflow_cnt),
4570 						  sizeof(unsigned long),
4571 						  GFP_KERNEL);
4572 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4573 					sizeof(unsigned long),
4574 					GFP_KERNEL);
4575 	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4576 				  GFP_KERNEL);
4577 
4578 	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4579 	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4580 	    !ud->rflows || !ud->rflow_in_use)
4581 		return -ENOMEM;
4582 
4583 	/*
4584 	 * RX flows with the same IDs as RX channels are reserved for use as
4585 	 * default flows when the remote HW cannot generate flow IDs. Those
4586 	 * RX flows can only be requested explicitly, by ID.
4587 	 */
4588 	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4589 
4590 	/* by default no GP rflows are assigned to Linux */
4591 	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4592 
4593 	/* Get resource ranges from tisci */
4594 	for (i = 0; i < RM_RANGE_LAST; i++) {
4595 		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4596 			continue;
4597 
4598 		tisci_rm->rm_ranges[i] =
4599 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4600 						    tisci_rm->tisci_dev_id,
4601 						    (char *)range_names[i]);
4602 	}
4603 
4604 	/* tchan ranges */
4605 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4606 	if (IS_ERR(rm_res)) {
4607 		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4608 		irq_res.sets = 1;
4609 	} else {
4610 		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4611 		for (i = 0; i < rm_res->sets; i++)
4612 			udma_mark_resource_ranges(ud, ud->tchan_map,
4613 						  &rm_res->desc[i], "tchan");
4614 		irq_res.sets = rm_res->sets;
4615 	}
4616 
4617 	/* rchan and matching default flow ranges */
4618 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4619 	if (IS_ERR(rm_res)) {
4620 		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4621 		irq_res.sets++;
4622 	} else {
4623 		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4624 		for (i = 0; i < rm_res->sets; i++)
4625 			udma_mark_resource_ranges(ud, ud->rchan_map,
4626 						  &rm_res->desc[i], "rchan");
4627 		irq_res.sets += rm_res->sets;
4628 	}
4629 
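	/*
	 * Build the event ranges (tchan events first, then rchan events
	 * shifted by the rchan OES offset) for the INTA MSI domain.
	 */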
4630 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4631 	if (!irq_res.desc)
4632 		return -ENOMEM;
4633 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4634 	if (IS_ERR(rm_res)) {
4635 		irq_res.desc[0].start = 0;
4636 		irq_res.desc[0].num = ud->tchan_cnt;
4637 		i = 1;
4638 	} else {
4639 		for (i = 0; i < rm_res->sets; i++) {
4640 			irq_res.desc[i].start = rm_res->desc[i].start;
4641 			irq_res.desc[i].num = rm_res->desc[i].num;
4642 			irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
4643 			irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
4644 		}
4645 	}
4646 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4647 	if (IS_ERR(rm_res)) {
4648 		irq_res.desc[i].start = 0;
4649 		irq_res.desc[i].num = ud->rchan_cnt;
4650 	} else {
4651 		for (j = 0; j < rm_res->sets; j++, i++) {
4652 			if (rm_res->desc[j].num) {
4653 				irq_res.desc[i].start = rm_res->desc[j].start +
4654 						ud->soc_data->oes.udma_rchan;
4655 				irq_res.desc[i].num = rm_res->desc[j].num;
4656 			}
4657 			if (rm_res->desc[j].num_sec) {
4658 				irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
4659 						ud->soc_data->oes.udma_rchan;
4660 				irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
4661 			}
4662 		}
4663 	}
4664 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4665 	kfree(irq_res.desc);
4666 	if (ret) {
4667 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4668 		return ret;
4669 	}
4670 
4671 	/* GP rflow ranges */
4672 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4673 	if (IS_ERR(rm_res)) {
4674 		/* all gp flows are assigned exclusively to Linux */
4675 		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4676 			     ud->rflow_cnt - ud->rchan_cnt);
4677 	} else {
4678 		for (i = 0; i < rm_res->sets; i++)
4679 			udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4680 						  &rm_res->desc[i], "gp-rflow");
4681 	}
4682 
4683 	return 0;
4684 }
4685 
4686 static int bcdma_setup_resources(struct udma_dev *ud)
4687 {
4688 	int ret, i, j;
4689 	struct device *dev = ud->dev;
4690 	struct ti_sci_resource *rm_res, irq_res;
4691 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4692 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4693 	u32 cap;
4694 
4695 	/* Set up the throughput level start indexes */
4696 	cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4697 	if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4698 		ud->bchan_tpl.levels = 3;
4699 		ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4700 		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4701 	} else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4702 		ud->bchan_tpl.levels = 2;
4703 		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4704 	} else {
4705 		ud->bchan_tpl.levels = 1;
4706 	}
4707 
4708 	cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4709 	if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4710 		ud->rchan_tpl.levels = 3;
4711 		ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4712 		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4713 	} else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4714 		ud->rchan_tpl.levels = 2;
4715 		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4716 	} else {
4717 		ud->rchan_tpl.levels = 1;
4718 	}
4719 
4720 	if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4721 		ud->tchan_tpl.levels = 3;
4722 		ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4723 		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4724 	} else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4725 		ud->tchan_tpl.levels = 2;
4726 		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4727 	} else {
4728 		ud->tchan_tpl.levels = 1;
4729 	}
4730 
4731 	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4732 					   sizeof(unsigned long), GFP_KERNEL);
4733 	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4734 				  GFP_KERNEL);
4735 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4736 					   sizeof(unsigned long), GFP_KERNEL);
4737 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4738 				  GFP_KERNEL);
4739 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4740 					   sizeof(unsigned long), GFP_KERNEL);
4741 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4742 				  GFP_KERNEL);
4743 	/* BCDMA does not really have flows, but the driver expects them */
4744 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4745 					sizeof(unsigned long),
4746 					GFP_KERNEL);
4747 	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4748 				  GFP_KERNEL);
4749 
4750 	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4751 	    !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4752 	    !ud->rflows)
4753 		return -ENOMEM;
4754 
4755 	/* Get resource ranges from tisci */
4756 	for (i = 0; i < RM_RANGE_LAST; i++) {
4757 		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4758 			continue;
4759 		if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4760 			continue;
4761 		if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4762 			continue;
4763 		if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4764 			continue;
4765 
4766 		tisci_rm->rm_ranges[i] =
4767 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4768 						    tisci_rm->tisci_dev_id,
4769 						    (char *)range_names[i]);
4770 	}
4771 
4772 	irq_res.sets = 0;
4773 
4774 	/* bchan ranges */
4775 	if (ud->bchan_cnt) {
4776 		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4777 		if (IS_ERR(rm_res)) {
4778 			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4779 			irq_res.sets++;
4780 		} else {
4781 			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4782 			for (i = 0; i < rm_res->sets; i++)
4783 				udma_mark_resource_ranges(ud, ud->bchan_map,
4784 							  &rm_res->desc[i],
4785 							  "bchan");
4786 			irq_res.sets += rm_res->sets;
4787 		}
4788 	}
4789 
4790 	/* tchan ranges */
4791 	if (ud->tchan_cnt) {
4792 		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4793 		if (IS_ERR(rm_res)) {
4794 			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4795 			irq_res.sets += 2;
4796 		} else {
4797 			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4798 			for (i = 0; i < rm_res->sets; i++)
4799 				udma_mark_resource_ranges(ud, ud->tchan_map,
4800 							  &rm_res->desc[i],
4801 							  "tchan");
4802 			irq_res.sets += rm_res->sets * 2;
4803 		}
4804 	}
4805 
4806 	/* rchan ranges */
4807 	if (ud->rchan_cnt) {
4808 		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4809 		if (IS_ERR(rm_res)) {
4810 			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4811 			irq_res.sets += 2;
4812 		} else {
4813 			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4814 			for (i = 0; i < rm_res->sets; i++)
4815 				udma_mark_resource_ranges(ud, ud->rchan_map,
4816 							  &rm_res->desc[i],
4817 							  "rchan");
4818 			irq_res.sets += rm_res->sets * 2;
4819 		}
4820 	}
4821 
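	/*
	 * Lay out the event ranges in bchan, tchan, rchan order; tchans and
	 * rchans each contribute two ranges (data and ring completion
	 * events), bchans contribute one (ring completion only).
	 */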
4822 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4823 	if (!irq_res.desc)
4824 		return -ENOMEM;
4825 	if (ud->bchan_cnt) {
4826 		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4827 		if (IS_ERR(rm_res)) {
4828 			irq_res.desc[0].start = oes->bcdma_bchan_ring;
4829 			irq_res.desc[0].num = ud->bchan_cnt;
4830 			i = 1;
4831 		} else {
4832 			for (i = 0; i < rm_res->sets; i++) {
4833 				irq_res.desc[i].start = rm_res->desc[i].start +
4834 							oes->bcdma_bchan_ring;
4835 				irq_res.desc[i].num = rm_res->desc[i].num;
4836 			}
4837 		}
4838 	} else {
4839 		i = 0;
4840 	}
4841 
4842 	if (ud->tchan_cnt) {
4843 		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4844 		if (IS_ERR(rm_res)) {
4845 			irq_res.desc[i].start = oes->bcdma_tchan_data;
4846 			irq_res.desc[i].num = ud->tchan_cnt;
4847 			irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
4848 			irq_res.desc[i + 1].num = ud->tchan_cnt;
4849 			i += 2;
4850 		} else {
4851 			for (j = 0; j < rm_res->sets; j++, i += 2) {
4852 				irq_res.desc[i].start = rm_res->desc[j].start +
4853 							oes->bcdma_tchan_data;
4854 				irq_res.desc[i].num = rm_res->desc[j].num;
4855 
4856 				irq_res.desc[i + 1].start = rm_res->desc[j].start +
4857 							oes->bcdma_tchan_ring;
4858 				irq_res.desc[i + 1].num = rm_res->desc[j].num;
4859 			}
4860 		}
4861 	}
4862 	if (ud->rchan_cnt) {
4863 		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4864 		if (IS_ERR(rm_res)) {
4865 			irq_res.desc[i].start = oes->bcdma_rchan_data;
4866 			irq_res.desc[i].num = ud->rchan_cnt;
4867 			irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
4868 			irq_res.desc[i + 1].num = ud->rchan_cnt;
4869 			i += 2;
4870 		} else {
4871 			for (j = 0; j < rm_res->sets; j++, i += 2) {
4872 				irq_res.desc[i].start = rm_res->desc[j].start +
4873 							oes->bcdma_rchan_data;
4874 				irq_res.desc[i].num = rm_res->desc[j].num;
4875 
4876 				irq_res.desc[i + 1].start = rm_res->desc[j].start +
4877 							oes->bcdma_rchan_ring;
4878 				irq_res.desc[i + 1].num = rm_res->desc[j].num;
4879 			}
4880 		}
4881 	}
4882 
4883 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4884 	kfree(irq_res.desc);
4885 	if (ret) {
4886 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4887 		return ret;
4888 	}
4889 
4890 	return 0;
4891 }
4892 
4893 static int pktdma_setup_resources(struct udma_dev *ud)
4894 {
4895 	int ret, i, j;
4896 	struct device *dev = ud->dev;
4897 	struct ti_sci_resource *rm_res, irq_res;
4898 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4899 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4900 	u32 cap3;
4901 
4902 	/* Set up the throughput level start indexes */
4903 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4904 	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4905 		ud->tchan_tpl.levels = 3;
4906 		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4907 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4908 	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4909 		ud->tchan_tpl.levels = 2;
4910 		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4911 	} else {
4912 		ud->tchan_tpl.levels = 1;
4913 	}
4914 
4915 	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4916 	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4917 	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4918 
4919 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4920 					   sizeof(unsigned long), GFP_KERNEL);
4921 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4922 				  GFP_KERNEL);
4923 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4924 					   sizeof(unsigned long), GFP_KERNEL);
4925 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4926 				  GFP_KERNEL);
4927 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4928 					sizeof(unsigned long),
4929 					GFP_KERNEL);
4930 	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4931 				  GFP_KERNEL);
4932 	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
4933 					   sizeof(unsigned long), GFP_KERNEL);
4934 
4935 	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
4936 	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
4937 		return -ENOMEM;
4938 
4939 	/* Get resource ranges from tisci */
4940 	for (i = 0; i < RM_RANGE_LAST; i++) {
4941 		if (i == RM_RANGE_BCHAN)
4942 			continue;
4943 
4944 		tisci_rm->rm_ranges[i] =
4945 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4946 						    tisci_rm->tisci_dev_id,
4947 						    (char *)range_names[i]);
4948 	}
4949 
4950 	/* tchan ranges */
4951 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4952 	if (IS_ERR(rm_res)) {
4953 		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4954 	} else {
4955 		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4956 		for (i = 0; i < rm_res->sets; i++)
4957 			udma_mark_resource_ranges(ud, ud->tchan_map,
4958 						  &rm_res->desc[i], "tchan");
4959 	}
4960 
4961 	/* rchan ranges */
4962 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4963 	if (IS_ERR(rm_res)) {
4964 		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4965 	} else {
4966 		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4967 		for (i = 0; i < rm_res->sets; i++)
4968 			udma_mark_resource_ranges(ud, ud->rchan_map,
4969 						  &rm_res->desc[i], "rchan");
4970 	}
4971 
4972 	/* rflow ranges */
4973 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4974 	if (IS_ERR(rm_res)) {
4975 		/* all rflows are assigned exclusively to Linux */
4976 		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
4977 		irq_res.sets = 1;
4978 	} else {
4979 		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
4980 		for (i = 0; i < rm_res->sets; i++)
4981 			udma_mark_resource_ranges(ud, ud->rflow_in_use,
4982 						  &rm_res->desc[i], "rflow");
4983 		irq_res.sets = rm_res->sets;
4984 	}
4985 
4986 	/* tflow ranges */
4987 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
4988 	if (IS_ERR(rm_res)) {
4989 		/* all tflows are assigned exclusively to Linux */
4990 		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
4991 		irq_res.sets++;
4992 	} else {
4993 		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
4994 		for (i = 0; i < rm_res->sets; i++)
4995 			udma_mark_resource_ranges(ud, ud->tflow_map,
4996 						  &rm_res->desc[i], "tflow");
4997 		irq_res.sets += rm_res->sets;
4998 	}
4999 
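	/* PKTDMA generates events per flow: tflow ranges first, then rflows */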
5000 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
5001 	if (!irq_res.desc)
5002 		return -ENOMEM;
5003 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
5004 	if (IS_ERR(rm_res)) {
5005 		irq_res.desc[0].start = oes->pktdma_tchan_flow;
5006 		irq_res.desc[0].num = ud->tflow_cnt;
5007 		i = 1;
5008 	} else {
5009 		for (i = 0; i < rm_res->sets; i++) {
5010 			irq_res.desc[i].start = rm_res->desc[i].start +
5011 						oes->pktdma_tchan_flow;
5012 			irq_res.desc[i].num = rm_res->desc[i].num;
5013 		}
5014 	}
5015 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
5016 	if (IS_ERR(rm_res)) {
5017 		irq_res.desc[i].start = oes->pktdma_rchan_flow;
5018 		irq_res.desc[i].num = ud->rflow_cnt;
5019 	} else {
5020 		for (j = 0; j < rm_res->sets; j++, i++) {
5021 			irq_res.desc[i].start = rm_res->desc[j].start +
5022 						oes->pktdma_rchan_flow;
5023 			irq_res.desc[i].num = rm_res->desc[j].num;
5024 		}
5025 	}
5026 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
5027 	kfree(irq_res.desc);
5028 	if (ret) {
5029 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
5030 		return ret;
5031 	}
5032 
5033 	return 0;
5034 }
5035 
5036 static int setup_resources(struct udma_dev *ud)
5037 {
5038 	struct device *dev = ud->dev;
5039 	int ch_count, ret;
5040 
5041 	switch (ud->match_data->type) {
5042 	case DMA_TYPE_UDMA:
5043 		ret = udma_setup_resources(ud);
5044 		break;
5045 	case DMA_TYPE_BCDMA:
5046 		ret = bcdma_setup_resources(ud);
5047 		break;
5048 	case DMA_TYPE_PKTDMA:
5049 		ret = pktdma_setup_resources(ud);
5050 		break;
5051 	default:
5052 		return -EINVAL;
5053 	}
5054 
5055 	if (ret)
5056 		return ret;
5057 
5058 	ch_count  = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
5059 	if (ud->bchan_cnt)
5060 		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
5061 	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
5062 	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
5063 	if (!ch_count)
5064 		return -ENODEV;
5065 
5066 	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
5067 				    GFP_KERNEL);
5068 	if (!ud->channels)
5069 		return -ENOMEM;
5070 
5071 	switch (ud->match_data->type) {
5072 	case DMA_TYPE_UDMA:
5073 		dev_info(dev,
5074 			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
5075 			 ch_count,
5076 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5077 						       ud->tchan_cnt),
5078 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5079 						       ud->rchan_cnt),
5080 			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
5081 						       ud->rflow_cnt));
5082 		break;
5083 	case DMA_TYPE_BCDMA:
5084 		dev_info(dev,
5085 			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
5086 			 ch_count,
5087 			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
5088 						       ud->bchan_cnt),
5089 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5090 						       ud->tchan_cnt),
5091 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5092 						       ud->rchan_cnt));
5093 		break;
5094 	case DMA_TYPE_PKTDMA:
5095 		dev_info(dev,
5096 			 "Channels: %d (tchan: %u, rchan: %u)\n",
5097 			 ch_count,
5098 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5099 						       ud->tchan_cnt),
5100 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5101 						       ud->rchan_cnt));
5102 		break;
5103 	default:
5104 		break;
5105 	}
5106 
5107 	return ch_count;
5108 }
5109 
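/*
 * Prepare the dummy TR and packet mode descriptors that are used to flush
 * (discard) data still in flight on an RX channel during teardown.
 */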
5110 static int udma_setup_rx_flush(struct udma_dev *ud)
5111 {
5112 	struct udma_rx_flush *rx_flush = &ud->rx_flush;
5113 	struct cppi5_desc_hdr_t *tr_desc;
5114 	struct cppi5_tr_type1_t *tr_req;
5115 	struct cppi5_host_desc_t *desc;
5116 	struct device *dev = ud->dev;
5117 	struct udma_hwdesc *hwdesc;
5118 	size_t tr_size;
5119 
5120 	/* Allocate 1K buffer for discarded data on RX channel teardown */
5121 	rx_flush->buffer_size = SZ_1K;
5122 	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
5123 					      GFP_KERNEL);
5124 	if (!rx_flush->buffer_vaddr)
5125 		return -ENOMEM;
5126 
5127 	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
5128 						rx_flush->buffer_size,
5129 						DMA_TO_DEVICE);
5130 	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
5131 		return -ENOMEM;
5132 
5133 	/* Set up descriptor to be used for TR mode */
5134 	hwdesc = &rx_flush->hwdescs[0];
5135 	tr_size = sizeof(struct cppi5_tr_type1_t);
5136 	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
5137 	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
5138 					ud->desc_align);
5139 
5140 	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5141 						GFP_KERNEL);
5142 	if (!hwdesc->cppi5_desc_vaddr)
5143 		return -ENOMEM;
5144 
5145 	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5146 						  hwdesc->cppi5_desc_size,
5147 						  DMA_TO_DEVICE);
5148 	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5149 		return -ENOMEM;
5150 
5151 	/* Start of the TR req records */
5152 	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
5153 	/* Start address of the TR response array */
5154 	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
5155 
5156 	tr_desc = hwdesc->cppi5_desc_vaddr;
5157 	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
5158 	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5159 	cppi5_desc_set_retpolicy(tr_desc, 0, 0);
5160 
5161 	tr_req = hwdesc->tr_req_base;
5162 	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
5163 		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
5164 	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
5165 
5166 	tr_req->addr = rx_flush->buffer_paddr;
5167 	tr_req->icnt0 = rx_flush->buffer_size;
5168 	tr_req->icnt1 = 1;
5169 
5170 	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5171 				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5172 
5173 	/* Set up descriptor to be used for packet mode */
5174 	hwdesc = &rx_flush->hwdescs[1];
5175 	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
5176 					CPPI5_INFO0_HDESC_EPIB_SIZE +
5177 					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
5178 					ud->desc_align);
5179 
5180 	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5181 						GFP_KERNEL);
5182 	if (!hwdesc->cppi5_desc_vaddr)
5183 		return -ENOMEM;
5184 
5185 	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5186 						  hwdesc->cppi5_desc_size,
5187 						  DMA_TO_DEVICE);
5188 	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5189 		return -ENOMEM;
5190 
5191 	desc = hwdesc->cppi5_desc_vaddr;
5192 	cppi5_hdesc_init(desc, 0, 0);
5193 	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5194 	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
5195 
5196 	cppi5_hdesc_attach_buf(desc,
5197 			       rx_flush->buffer_paddr, rx_flush->buffer_size,
5198 			       rx_flush->buffer_paddr, rx_flush->buffer_size);
5199 
5200 	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5201 				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5202 	return 0;
5203 }
5204 
5205 #ifdef CONFIG_DEBUG_FS
5206 static void udma_dbg_summary_show_chan(struct seq_file *s,
5207 				       struct dma_chan *chan)
5208 {
5209 	struct udma_chan *uc = to_udma_chan(chan);
5210 	struct udma_chan_config *ucc = &uc->config;
5211 
5212 	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
5213 		   chan->dbg_client_name ?: "in-use");
5214 	if (ucc->tr_trigger_type)
5215 		seq_puts(s, " (triggered, ");
5216 	else
5217 		seq_printf(s, " (%s, ",
5218 			   dmaengine_get_direction_text(uc->config.dir));
5219 
5220 	switch (uc->config.dir) {
5221 	case DMA_MEM_TO_MEM:
5222 		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
5223 			seq_printf(s, "bchan%d)\n", uc->bchan->id);
5224 			return;
5225 		}
5226 
5227 		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
5228 			   ucc->src_thread, ucc->dst_thread);
5229 		break;
5230 	case DMA_DEV_TO_MEM:
5231 		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
5232 			   ucc->src_thread, ucc->dst_thread);
5233 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5234 			seq_printf(s, "rflow%d, ", uc->rflow->id);
5235 		break;
5236 	case DMA_MEM_TO_DEV:
5237 		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
5238 			   ucc->src_thread, ucc->dst_thread);
5239 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5240 			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
5241 		break;
5242 	default:
5243 		seq_puts(s, ")\n");
5244 		return;
5245 	}
5246 
5247 	if (ucc->ep_type == PSIL_EP_NATIVE) {
5248 		seq_puts(s, "PSI-L Native");
5249 		if (ucc->metadata_size) {
5250 			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
5251 			if (ucc->psd_size)
5252 				seq_printf(s, " PSDsize:%u", ucc->psd_size);
5253 			seq_puts(s, " ]");
5254 		}
5255 	} else {
5256 		seq_puts(s, "PDMA");
5257 		if (ucc->enable_acc32 || ucc->enable_burst)
5258 			seq_printf(s, "[%s%s ]",
5259 				   ucc->enable_acc32 ? " ACC32" : "",
5260 				   ucc->enable_burst ? " BURST" : "");
5261 	}
5262 
5263 	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
5264 }
5265 
5266 static void udma_dbg_summary_show(struct seq_file *s,
5267 				  struct dma_device *dma_dev)
5268 {
5269 	struct dma_chan *chan;
5270 
5271 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
5272 		if (chan->client_count)
5273 			udma_dbg_summary_show_chan(s, chan);
5274 	}
5275 }
5276 #endif /* CONFIG_DEBUG_FS */
5277 
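/*
 * The copy alignment advertised to dmaengine clients follows the maximum
 * burst size of the channel type that will service MEM_TO_MEM transfers.
 */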
5278 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
5279 {
5280 	const struct udma_match_data *match_data = ud->match_data;
5281 	u8 tpl;
5282 
5283 	if (!match_data->enable_memcpy_support)
5284 		return DMAENGINE_ALIGN_8_BYTES;
5285 
5286 	/* Get the highest TPL level the device supports for memcpy */
5287 	if (ud->bchan_cnt)
5288 		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
5289 	else if (ud->tchan_cnt)
5290 		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
5291 	else
5292 		return DMAENGINE_ALIGN_8_BYTES;
5293 
5294 	switch (match_data->burst_size[tpl]) {
5295 	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
5296 		return DMAENGINE_ALIGN_256_BYTES;
5297 	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
5298 		return DMAENGINE_ALIGN_128_BYTES;
5299 	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
5300 	fallthrough;
5301 	default:
5302 		return DMAENGINE_ALIGN_64_BYTES;
5303 	}
5304 }
5305 
5306 #define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
5307 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
5308 				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
5309 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
5310 				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
5311 
5312 static int udma_probe(struct platform_device *pdev)
5313 {
5314 	struct device_node *navss_node = pdev->dev.parent->of_node;
5315 	const struct soc_device_attribute *soc;
5316 	struct device *dev = &pdev->dev;
5317 	struct udma_dev *ud;
5318 	const struct of_device_id *match;
5319 	int i, ret;
5320 	int ch_count;
5321 
5322 	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
5323 	if (ret)
5324 		dev_err(dev, "failed to set the DMA mask\n");
5325 
5326 	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
5327 	if (!ud)
5328 		return -ENOMEM;
5329 
5330 	match = of_match_node(udma_of_match, dev->of_node);
5331 	if (!match) {
5332 		dev_err(dev, "No compatible match found\n");
5333 		return -ENODEV;
5334 	}
5335 	ud->match_data = match->data;
5336 
5337 	ud->soc_data = ud->match_data->soc_data;
5338 	if (!ud->soc_data) {
5339 		soc = soc_device_match(k3_soc_devices);
5340 		if (!soc) {
5341 			dev_err(dev, "No compatible SoC found\n");
5342 			return -ENODEV;
5343 		}
5344 		ud->soc_data = soc->data;
5345 	}
5346 
5347 	ret = udma_get_mmrs(pdev, ud);
5348 	if (ret)
5349 		return ret;
5350 
5351 	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
5352 	if (IS_ERR(ud->tisci_rm.tisci))
5353 		return PTR_ERR(ud->tisci_rm.tisci);
5354 
5355 	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
5356 				   &ud->tisci_rm.tisci_dev_id);
5357 	if (ret) {
5358 		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
5359 		return ret;
5360 	}
5361 	pdev->id = ud->tisci_rm.tisci_dev_id;
5362 
5363 	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
5364 				   &ud->tisci_rm.tisci_navss_dev_id);
5365 	if (ret) {
5366 		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
5367 		return ret;
5368 	}
5369 
5370 	if (ud->match_data->type == DMA_TYPE_UDMA) {
5371 		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
5372 					   &ud->atype);
5373 		if (!ret && ud->atype > 2) {
5374 			dev_err(dev, "Invalid atype: %u\n", ud->atype);
5375 			return -EINVAL;
5376 		}
5377 	} else {
5378 		ret = of_property_read_u32(dev->of_node, "ti,asel",
5379 					   &ud->asel);
5380 		if (!ret && ud->asel > 15) {
5381 			dev_err(dev, "Invalid asel: %u\n", ud->asel);
5382 			return -EINVAL;
5383 		}
5384 	}
5385 
5386 	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
5387 	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
5388 
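	/*
	 * UDMA uses an external ring accelerator referenced from DT; BCDMA
	 * and PKTDMA have their rings in the DMSS and initialize them
	 * directly.
	 */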
5389 	if (ud->match_data->type == DMA_TYPE_UDMA) {
5390 		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
5391 	} else {
5392 		struct k3_ringacc_init_data ring_init_data;
5393 
5394 		ring_init_data.tisci = ud->tisci_rm.tisci;
5395 		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
5396 		if (ud->match_data->type == DMA_TYPE_BCDMA) {
5397 			ring_init_data.num_rings = ud->bchan_cnt +
5398 						   ud->tchan_cnt +
5399 						   ud->rchan_cnt;
5400 		} else {
5401 			ring_init_data.num_rings = ud->rflow_cnt +
5402 						   ud->tflow_cnt;
5403 		}
5404 
5405 		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
5406 	}
5407 
5408 	if (IS_ERR(ud->ringacc))
5409 		return PTR_ERR(ud->ringacc);
5410 
5411 	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
5412 					    DOMAIN_BUS_TI_SCI_INTA_MSI);
5413 	if (!dev->msi.domain) {
5414 		return -EPROBE_DEFER;
5415 	}
5416 
5417 	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
5418 	/* cyclic operation is not supported via PKTDMA */
5419 	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
5420 		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
5421 		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
5422 	}
5423 
5424 	ud->ddev.device_config = udma_slave_config;
5425 	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
5426 	ud->ddev.device_issue_pending = udma_issue_pending;
5427 	ud->ddev.device_tx_status = udma_tx_status;
5428 	ud->ddev.device_pause = udma_pause;
5429 	ud->ddev.device_resume = udma_resume;
5430 	ud->ddev.device_terminate_all = udma_terminate_all;
5431 	ud->ddev.device_synchronize = udma_synchronize;
5432 #ifdef CONFIG_DEBUG_FS
5433 	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
5434 #endif
5435 
5436 	switch (ud->match_data->type) {
5437 	case DMA_TYPE_UDMA:
5438 		ud->ddev.device_alloc_chan_resources =
5439 					udma_alloc_chan_resources;
5440 		break;
5441 	case DMA_TYPE_BCDMA:
5442 		ud->ddev.device_alloc_chan_resources =
5443 					bcdma_alloc_chan_resources;
5444 		ud->ddev.device_router_config = bcdma_router_config;
5445 		break;
5446 	case DMA_TYPE_PKTDMA:
5447 		ud->ddev.device_alloc_chan_resources =
5448 					pktdma_alloc_chan_resources;
5449 		break;
5450 	default:
5451 		return -EINVAL;
5452 	}
5453 	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
5454 
5455 	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
5456 	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
5457 	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
5458 	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
5459 	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
5460 				       DESC_METADATA_ENGINE;
5461 	if (ud->match_data->enable_memcpy_support &&
5462 	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
5463 		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
5464 		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
5465 		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
5466 	}
5467 
5468 	ud->ddev.dev = dev;
5469 	ud->dev = dev;
5470 	ud->psil_base = ud->match_data->psil_base;
5471 
5472 	INIT_LIST_HEAD(&ud->ddev.channels);
5473 	INIT_LIST_HEAD(&ud->desc_to_purge);
5474 
5475 	ch_count = setup_resources(ud);
5476 	if (ch_count <= 0)
5477 		return ch_count;
5478 
5479 	spin_lock_init(&ud->lock);
5480 	INIT_WORK(&ud->purge_work, udma_purge_desc_work);
5481 
5482 	ud->desc_align = 64;
5483 	if (ud->desc_align < dma_get_cache_alignment())
5484 		ud->desc_align = dma_get_cache_alignment();
5485 
5486 	ret = udma_setup_rx_flush(ud);
5487 	if (ret)
5488 		return ret;
5489 
5490 	for (i = 0; i < ud->bchan_cnt; i++) {
5491 		struct udma_bchan *bchan = &ud->bchans[i];
5492 
5493 		bchan->id = i;
5494 		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
5495 	}
5496 
5497 	for (i = 0; i < ud->tchan_cnt; i++) {
5498 		struct udma_tchan *tchan = &ud->tchans[i];
5499 
5500 		tchan->id = i;
5501 		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
5502 	}
5503 
5504 	for (i = 0; i < ud->rchan_cnt; i++) {
5505 		struct udma_rchan *rchan = &ud->rchans[i];
5506 
5507 		rchan->id = i;
5508 		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
5509 	}
5510 
5511 	for (i = 0; i < ud->rflow_cnt; i++) {
5512 		struct udma_rflow *rflow = &ud->rflows[i];
5513 
5514 		rflow->id = i;
5515 	}
5516 
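	/* Set up the virtual channel backing each usable hardware channel */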
5517 	for (i = 0; i < ch_count; i++) {
5518 		struct udma_chan *uc = &ud->channels[i];
5519 
5520 		uc->ud = ud;
5521 		uc->vc.desc_free = udma_desc_free;
5522 		uc->id = i;
5523 		uc->bchan = NULL;
5524 		uc->tchan = NULL;
5525 		uc->rchan = NULL;
5526 		uc->config.remote_thread_id = -1;
5527 		uc->config.mapped_channel_id = -1;
5528 		uc->config.default_flow_id = -1;
5529 		uc->config.dir = DMA_MEM_TO_MEM;
5530 		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
5531 					  dev_name(dev), i);
5532 
5533 		vchan_init(&uc->vc, &ud->ddev);
5534 		/* Use custom vchan completion handling */
5535 		tasklet_setup(&uc->vc.task, udma_vchan_complete);
5536 		init_completion(&uc->teardown_completed);
5537 		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
5538 	}
5539 
5540 	/* Configure the copy_align to the maximum burst size the device supports */
5541 	ud->ddev.copy_align = udma_get_copy_align(ud);
5542 
5543 	ret = dma_async_device_register(&ud->ddev);
5544 	if (ret) {
5545 		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
5546 		return ret;
5547 	}
5548 
5549 	platform_set_drvdata(pdev, ud);
5550 
5551 	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
5552 	if (ret) {
5553 		dev_err(dev, "failed to register of_dma controller\n");
5554 		dma_async_device_unregister(&ud->ddev);
5555 	}
5556 
5557 	return ret;
5558 }
5559 
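/*
 * System suspend: save the configuration of every in-use channel and release
 * its hardware resources so udma_pm_resume() can re-allocate them.
 */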
5560 static int __maybe_unused udma_pm_suspend(struct device *dev)
5561 {
5562 	struct udma_dev *ud = dev_get_drvdata(dev);
5563 	struct dma_device *dma_dev = &ud->ddev;
5564 	struct dma_chan *chan;
5565 	struct udma_chan *uc;
5566 
5567 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
5568 		if (chan->client_count) {
5569 			uc = to_udma_chan(chan);
5570 			/* backup the channel configuration */
5571 			memcpy(&uc->backup_config, &uc->config,
5572 			       sizeof(struct udma_chan_config));
5573 			dev_dbg(dev, "Suspending channel %s\n",
5574 				dma_chan_name(chan));
5575 			ud->ddev.device_free_chan_resources(chan);
5576 		}
5577 	}
5578 
5579 	return 0;
5580 }
5581 
5582 static int __maybe_unused udma_pm_resume(struct device *dev)
5583 {
5584 	struct udma_dev *ud = dev_get_drvdata(dev);
5585 	struct dma_device *dma_dev = &ud->ddev;
5586 	struct dma_chan *chan;
5587 	struct udma_chan *uc;
5588 	int ret;
5589 
5590 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
5591 		if (chan->client_count) {
5592 			uc = to_udma_chan(chan);
5593 			/* restore the channel configuration */
5594 			memcpy(&uc->config, &uc->backup_config,
5595 			       sizeof(struct udma_chan_config));
5596 			dev_dbg(dev, "Resuming channel %s\n",
5597 				dma_chan_name(chan));
5598 			ret = ud->ddev.device_alloc_chan_resources(chan);
5599 			if (ret)
5600 				return ret;
5601 		}
5602 	}
5603 
5604 	return 0;
5605 }
5606 
5607 static const struct dev_pm_ops udma_pm_ops = {
5608 	SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
5609 };
5610 
5611 static struct platform_driver udma_driver = {
5612 	.driver = {
5613 		.name	= "ti-udma",
5614 		.of_match_table = udma_of_match,
5615 		.suppress_bind_attrs = true,
5616 		.pm = &udma_pm_ops,
5617 	},
5618 	.probe		= udma_probe,
5619 };
5620 
5621 module_platform_driver(udma_driver);
5622 MODULE_LICENSE("GPL v2");
5623 
5624 /* Private interfaces to UDMA */
5625 #include "k3-udma-private.c"
5626