xref: /openbmc/linux/drivers/dma/ti/k3-udma.c (revision fc772314)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4  *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/delay.h>
9 #include <linux/dmaengine.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/list.h>
16 #include <linux/platform_device.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/of.h>
20 #include <linux/of_dma.h>
21 #include <linux/of_device.h>
22 #include <linux/of_irq.h>
23 #include <linux/workqueue.h>
24 #include <linux/completion.h>
25 #include <linux/soc/ti/k3-ringacc.h>
26 #include <linux/soc/ti/ti_sci_protocol.h>
27 #include <linux/soc/ti/ti_sci_inta_msi.h>
28 #include <linux/dma/ti-cppi5.h>
29 
30 #include "../virt-dma.h"
31 #include "k3-udma.h"
32 #include "k3-psil-priv.h"
33 
34 struct udma_static_tr {
35 	u8 elsize; /* RPSTR0 */
36 	u16 elcnt; /* RPSTR0 */
37 	u16 bstcnt; /* RPSTR1 */
38 };
39 
40 #define K3_UDMA_MAX_RFLOWS		1024
41 #define K3_UDMA_DEFAULT_RING_SIZE	16
42 
43 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
44 #define UDMA_RFLOW_SRCTAG_NONE		0
45 #define UDMA_RFLOW_SRCTAG_CFG_TAG	1
46 #define UDMA_RFLOW_SRCTAG_FLOW_ID	2
47 #define UDMA_RFLOW_SRCTAG_SRC_TAG	4
48 
49 #define UDMA_RFLOW_DSTTAG_NONE		0
50 #define UDMA_RFLOW_DSTTAG_CFG_TAG	1
51 #define UDMA_RFLOW_DSTTAG_FLOW_ID	2
52 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
53 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5
54 
55 struct udma_chan;
56 
57 enum udma_mmr {
58 	MMR_GCFG = 0,
59 	MMR_RCHANRT,
60 	MMR_TCHANRT,
61 	MMR_LAST,
62 };
63 
64 static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
65 
66 struct udma_tchan {
67 	void __iomem *reg_rt;
68 
69 	int id;
70 	struct k3_ring *t_ring; /* Transmit ring */
71 	struct k3_ring *tc_ring; /* Transmit Completion ring */
72 };
73 
74 struct udma_rflow {
75 	int id;
76 	struct k3_ring *fd_ring; /* Free Descriptor ring */
77 	struct k3_ring *r_ring; /* Receive ring */
78 };
79 
80 struct udma_rchan {
81 	void __iomem *reg_rt;
82 
83 	int id;
84 };
85 
86 #define UDMA_FLAG_PDMA_ACC32		BIT(0)
87 #define UDMA_FLAG_PDMA_BURST		BIT(1)
88 
89 struct udma_match_data {
90 	u32 psil_base;
91 	bool enable_memcpy_support;
92 	u32 flags;
93 	u32 statictr_z_mask;
94 	u32 rchan_oes_offset;
95 };
96 
97 struct udma_hwdesc {
98 	size_t cppi5_desc_size;
99 	void *cppi5_desc_vaddr;
100 	dma_addr_t cppi5_desc_paddr;
101 
102 	/* TR descriptor internal pointers */
103 	void *tr_req_base;
104 	struct cppi5_tr_resp_t *tr_resp_base;
105 };
106 
107 struct udma_rx_flush {
108 	struct udma_hwdesc hwdescs[2];
109 
110 	size_t buffer_size;
111 	void *buffer_vaddr;
112 	dma_addr_t buffer_paddr;
113 };
114 
115 struct udma_dev {
116 	struct dma_device ddev;
117 	struct device *dev;
118 	void __iomem *mmrs[MMR_LAST];
119 	const struct udma_match_data *match_data;
120 
121 	u8 tpl_levels;
122 	u32 tpl_start_idx[3];
123 
124 	size_t desc_align; /* alignment to use for descriptors */
125 
126 	struct udma_tisci_rm tisci_rm;
127 
128 	struct k3_ringacc *ringacc;
129 
130 	struct work_struct purge_work;
131 	struct list_head desc_to_purge;
132 	spinlock_t lock;
133 
134 	struct udma_rx_flush rx_flush;
135 
136 	int tchan_cnt;
137 	int echan_cnt;
138 	int rchan_cnt;
139 	int rflow_cnt;
140 	unsigned long *tchan_map;
141 	unsigned long *rchan_map;
142 	unsigned long *rflow_gp_map;
143 	unsigned long *rflow_gp_map_allocated;
144 	unsigned long *rflow_in_use;
145 
146 	struct udma_tchan *tchans;
147 	struct udma_rchan *rchans;
148 	struct udma_rflow *rflows;
149 
150 	struct udma_chan *channels;
151 	u32 psil_base;
152 	u32 atype;
153 };
154 
155 struct udma_desc {
156 	struct virt_dma_desc vd;
157 
158 	bool terminated;
159 
160 	enum dma_transfer_direction dir;
161 
162 	struct udma_static_tr static_tr;
163 	u32 residue;
164 
165 	unsigned int sglen;
166 	unsigned int desc_idx; /* Only used for cyclic in packet mode */
167 	unsigned int tr_idx;
168 
169 	u32 metadata_size;
170 	void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
171 
172 	unsigned int hwdesc_count;
173 	struct udma_hwdesc hwdesc[];
174 };
175 
176 enum udma_chan_state {
177 	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
178 	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
179 	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
180 };
181 
182 struct udma_tx_drain {
183 	struct delayed_work work;
184 	ktime_t tstamp;
185 	u32 residue;
186 };
187 
188 struct udma_chan_config {
189 	bool pkt_mode; /* TR or packet */
190 	bool needs_epib; /* EPIB is needed for the communication or not */
191 	u32 psd_size; /* size of Protocol Specific Data */
192 	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
193 	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
194 	bool notdpkt; /* Suppress sending TDC packet */
195 	int remote_thread_id;
196 	u32 atype;
197 	u32 src_thread;
198 	u32 dst_thread;
199 	enum psil_endpoint_type ep_type;
200 	bool enable_acc32;
201 	bool enable_burst;
202 	enum udma_tp_level channel_tpl; /* Channel Throughput Level */
203 
204 	enum dma_transfer_direction dir;
205 };
206 
207 struct udma_chan {
208 	struct virt_dma_chan vc;
209 	struct dma_slave_config	cfg;
210 	struct udma_dev *ud;
211 	struct udma_desc *desc;
212 	struct udma_desc *terminated_desc;
213 	struct udma_static_tr static_tr;
214 	char *name;
215 
216 	struct udma_tchan *tchan;
217 	struct udma_rchan *rchan;
218 	struct udma_rflow *rflow;
219 
220 	bool psil_paired;
221 
222 	int irq_num_ring;
223 	int irq_num_udma;
224 
225 	bool cyclic;
226 	bool paused;
227 
228 	enum udma_chan_state state;
229 	struct completion teardown_completed;
230 
231 	struct udma_tx_drain tx_drain;
232 
233 	u32 bcnt; /* number of bytes completed since the start of the channel */
234 
235 	/* Channel configuration parameters */
236 	struct udma_chan_config config;
237 
238 	/* dmapool for packet mode descriptors */
239 	bool use_dma_pool;
240 	struct dma_pool *hdesc_pool;
241 
242 	u32 id;
243 };
244 
245 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
246 {
247 	return container_of(d, struct udma_dev, ddev);
248 }
249 
250 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
251 {
252 	return container_of(c, struct udma_chan, vc.chan);
253 }
254 
255 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
256 {
257 	return container_of(t, struct udma_desc, vd.tx);
258 }
259 
260 /* Generic register access functions */
261 static inline u32 udma_read(void __iomem *base, int reg)
262 {
263 	return readl(base + reg);
264 }
265 
266 static inline void udma_write(void __iomem *base, int reg, u32 val)
267 {
268 	writel(val, base + reg);
269 }
270 
271 static inline void udma_update_bits(void __iomem *base, int reg,
272 				    u32 mask, u32 val)
273 {
274 	u32 tmp, orig;
275 
276 	orig = readl(base + reg);
277 	tmp = orig & ~mask;
278 	tmp |= (val & mask);
279 
280 	if (tmp != orig)
281 		writel(tmp, base + reg);
282 }
283 
284 /* TCHANRT */
285 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
286 {
287 	if (!uc->tchan)
288 		return 0;
289 	return udma_read(uc->tchan->reg_rt, reg);
290 }
291 
292 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
293 {
294 	if (!uc->tchan)
295 		return;
296 	udma_write(uc->tchan->reg_rt, reg, val);
297 }
298 
299 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
300 					    u32 mask, u32 val)
301 {
302 	if (!uc->tchan)
303 		return;
304 	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
305 }
306 
307 /* RCHANRT */
308 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
309 {
310 	if (!uc->rchan)
311 		return 0;
312 	return udma_read(uc->rchan->reg_rt, reg);
313 }
314 
315 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
316 {
317 	if (!uc->rchan)
318 		return;
319 	udma_write(uc->rchan->reg_rt, reg, val);
320 }
321 
322 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
323 					    u32 mask, u32 val)
324 {
325 	if (!uc->rchan)
326 		return;
327 	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
328 }
329 
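/*
 * PSI-L thread pairing is managed through the TI-SCI resource manager. The
 * destination thread must carry the destination side offset
 * (K3_PSIL_DST_THREAD_ID_OFFSET) before the request is sent to the firmware;
 * the two helpers below take care of that for pairing and unpairing.
 */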
330 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
331 {
332 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
333 
334 	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
335 	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
336 					      tisci_rm->tisci_navss_dev_id,
337 					      src_thread, dst_thread);
338 }
339 
340 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
341 			     u32 dst_thread)
342 {
343 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
344 
345 	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
346 	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
347 						tisci_rm->tisci_navss_dev_id,
348 						src_thread, dst_thread);
349 }
350 
351 static void udma_reset_uchan(struct udma_chan *uc)
352 {
353 	memset(&uc->config, 0, sizeof(uc->config));
354 	uc->config.remote_thread_id = -1;
355 	uc->state = UDMA_CHAN_IS_IDLE;
356 }
357 
358 static void udma_dump_chan_stdata(struct udma_chan *uc)
359 {
360 	struct device *dev = uc->ud->dev;
361 	u32 offset;
362 	int i;
363 
364 	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
365 		dev_dbg(dev, "TCHAN State data:\n");
366 		for (i = 0; i < 32; i++) {
367 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
368 			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
369 				udma_tchanrt_read(uc, offset));
370 		}
371 	}
372 
373 	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
374 		dev_dbg(dev, "RCHAN State data:\n");
375 		for (i = 0; i < 32; i++) {
376 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
377 			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
378 				udma_rchanrt_read(uc, offset));
379 		}
380 	}
381 }
382 
383 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
384 						    int idx)
385 {
386 	return d->hwdesc[idx].cppi5_desc_paddr;
387 }
388 
389 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
390 {
391 	return d->hwdesc[idx].cppi5_desc_vaddr;
392 }
393 
394 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
395 						   dma_addr_t paddr)
396 {
397 	struct udma_desc *d = uc->terminated_desc;
398 
399 	if (d) {
400 		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
401 								   d->desc_idx);
402 
403 		if (desc_paddr != paddr)
404 			d = NULL;
405 	}
406 
407 	if (!d) {
408 		d = uc->desc;
409 		if (d) {
410 			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
411 								d->desc_idx);
412 
413 			if (desc_paddr != paddr)
414 				d = NULL;
415 		}
416 	}
417 
418 	return d;
419 }
420 
421 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
422 {
423 	if (uc->use_dma_pool) {
424 		int i;
425 
426 		for (i = 0; i < d->hwdesc_count; i++) {
427 			if (!d->hwdesc[i].cppi5_desc_vaddr)
428 				continue;
429 
430 			dma_pool_free(uc->hdesc_pool,
431 				      d->hwdesc[i].cppi5_desc_vaddr,
432 				      d->hwdesc[i].cppi5_desc_paddr);
433 
434 			d->hwdesc[i].cppi5_desc_vaddr = NULL;
435 		}
436 	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
437 		struct udma_dev *ud = uc->ud;
438 
439 		dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
440 				  d->hwdesc[0].cppi5_desc_vaddr,
441 				  d->hwdesc[0].cppi5_desc_paddr);
442 
443 		d->hwdesc[0].cppi5_desc_vaddr = NULL;
444 	}
445 }
446 
447 static void udma_purge_desc_work(struct work_struct *work)
448 {
449 	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
450 	struct virt_dma_desc *vd, *_vd;
451 	unsigned long flags;
452 	LIST_HEAD(head);
453 
454 	spin_lock_irqsave(&ud->lock, flags);
455 	list_splice_tail_init(&ud->desc_to_purge, &head);
456 	spin_unlock_irqrestore(&ud->lock, flags);
457 
458 	list_for_each_entry_safe(vd, _vd, &head, node) {
459 		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
460 		struct udma_desc *d = to_udma_desc(&vd->tx);
461 
462 		udma_free_hwdesc(uc, d);
463 		list_del(&vd->node);
464 		kfree(d);
465 	}
466 
467 	/* If more to purge, schedule the work again */
468 	if (!list_empty(&ud->desc_to_purge))
469 		schedule_work(&ud->purge_work);
470 }
471 
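/*
 * virt-dma descriptor free callback. Descriptors allocated from the channel's
 * dma_pool are released directly. Descriptors backed by dma_alloc_coherent()
 * are queued to desc_to_purge and released from udma_purge_desc_work()
 * instead, as dma_free_coherent() must not be called from the atomic context
 * this callback may run in.
 */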
472 static void udma_desc_free(struct virt_dma_desc *vd)
473 {
474 	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
475 	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
476 	struct udma_desc *d = to_udma_desc(&vd->tx);
477 	unsigned long flags;
478 
479 	if (uc->terminated_desc == d)
480 		uc->terminated_desc = NULL;
481 
482 	if (uc->use_dma_pool) {
483 		udma_free_hwdesc(uc, d);
484 		kfree(d);
485 		return;
486 	}
487 
488 	spin_lock_irqsave(&ud->lock, flags);
489 	list_add_tail(&vd->node, &ud->desc_to_purge);
490 	spin_unlock_irqrestore(&ud->lock, flags);
491 
492 	schedule_work(&ud->purge_work);
493 }
494 
495 static bool udma_is_chan_running(struct udma_chan *uc)
496 {
497 	u32 trt_ctl = 0;
498 	u32 rrt_ctl = 0;
499 
500 	if (uc->tchan)
501 		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
502 	if (uc->rchan)
503 		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
504 
505 	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
506 		return true;
507 
508 	return false;
509 }
510 
511 static bool udma_is_chan_paused(struct udma_chan *uc)
512 {
513 	u32 val, pause_mask;
514 
515 	switch (uc->config.dir) {
516 	case DMA_DEV_TO_MEM:
517 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
518 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
519 		break;
520 	case DMA_MEM_TO_DEV:
521 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
522 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
523 		break;
524 	case DMA_MEM_TO_MEM:
525 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
526 		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
527 		break;
528 	default:
529 		return false;
530 	}
531 
532 	if (val & pause_mask)
533 		return true;
534 
535 	return false;
536 }
537 
538 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
539 {
540 	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
541 }
542 
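/*
 * Hand the descriptor at @idx to the hardware: the free descriptor ring is
 * used for DEV_TO_MEM, the transmit ring otherwise. idx == -1 selects the RX
 * flush descriptor. The wmb() orders the descriptor memory writes before the
 * ring push.
 */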
543 static int udma_push_to_ring(struct udma_chan *uc, int idx)
544 {
545 	struct udma_desc *d = uc->desc;
546 	struct k3_ring *ring = NULL;
547 	dma_addr_t paddr;
548 
549 	switch (uc->config.dir) {
550 	case DMA_DEV_TO_MEM:
551 		ring = uc->rflow->fd_ring;
552 		break;
553 	case DMA_MEM_TO_DEV:
554 	case DMA_MEM_TO_MEM:
555 		ring = uc->tchan->t_ring;
556 		break;
557 	default:
558 		return -EINVAL;
559 	}
560 
561 	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
562 	if (idx == -1) {
563 		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
564 	} else {
565 		paddr = udma_curr_cppi5_desc_paddr(d, idx);
566 
567 		wmb(); /* Ensure that writes are not moved over this point */
568 	}
569 
570 	return k3_ringacc_ring_push(ring, &paddr);
571 }
572 
573 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
574 {
575 	if (uc->config.dir != DMA_DEV_TO_MEM)
576 		return false;
577 
578 	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
579 		return true;
580 
581 	return false;
582 }
583 
584 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
585 {
586 	struct k3_ring *ring = NULL;
587 	int ret;
588 
589 	switch (uc->config.dir) {
590 	case DMA_DEV_TO_MEM:
591 		ring = uc->rflow->r_ring;
592 		break;
593 	case DMA_MEM_TO_DEV:
594 	case DMA_MEM_TO_MEM:
595 		ring = uc->tchan->tc_ring;
596 		break;
597 	default:
598 		return -ENOENT;
599 	}
600 
601 	ret = k3_ringacc_ring_pop(ring, addr);
602 	if (ret)
603 		return ret;
604 
605 	rmb(); /* Ensure that reads are not moved before this point */
606 
607 	/* Teardown completion */
608 	if (cppi5_desc_is_tdcm(*addr))
609 		return 0;
610 
611 	/* Check for flush descriptor */
612 	if (udma_desc_is_rx_flush(uc, *addr))
613 		return -ENOENT;
614 
615 	return 0;
616 }
617 
618 static void udma_reset_rings(struct udma_chan *uc)
619 {
620 	struct k3_ring *ring1 = NULL;
621 	struct k3_ring *ring2 = NULL;
622 
623 	switch (uc->config.dir) {
624 	case DMA_DEV_TO_MEM:
625 		if (uc->rchan) {
626 			ring1 = uc->rflow->fd_ring;
627 			ring2 = uc->rflow->r_ring;
628 		}
629 		break;
630 	case DMA_MEM_TO_DEV:
631 	case DMA_MEM_TO_MEM:
632 		if (uc->tchan) {
633 			ring1 = uc->tchan->t_ring;
634 			ring2 = uc->tchan->tc_ring;
635 		}
636 		break;
637 	default:
638 		break;
639 	}
640 
641 	if (ring1)
642 		k3_ringacc_ring_reset_dma(ring1,
643 					  k3_ringacc_ring_get_occ(ring1));
644 	if (ring2)
645 		k3_ringacc_ring_reset(ring2);
646 
647 	/* make sure we are not leaking memory due to a stalled descriptor */
648 	if (uc->terminated_desc) {
649 		udma_desc_free(&uc->terminated_desc->vd);
650 		uc->terminated_desc = NULL;
651 	}
652 }
653 
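/*
 * Clear the channel's real-time byte, peer byte and packet counters by
 * writing back the values just read (the RT counter registers subtract the
 * written value), then reset the locally accumulated byte count.
 */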
654 static void udma_reset_counters(struct udma_chan *uc)
655 {
656 	u32 val;
657 
658 	if (uc->tchan) {
659 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
660 		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
661 
662 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
663 		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
664 
665 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
666 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
667 
668 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
669 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
670 	}
671 
672 	if (uc->rchan) {
673 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
674 		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
675 
676 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
677 		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
678 
679 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
680 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
681 
682 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
683 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
684 	}
685 
686 	uc->bcnt = 0;
687 }
688 
689 static int udma_reset_chan(struct udma_chan *uc, bool hard)
690 {
691 	switch (uc->config.dir) {
692 	case DMA_DEV_TO_MEM:
693 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
694 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
695 		break;
696 	case DMA_MEM_TO_DEV:
697 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
698 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
699 		break;
700 	case DMA_MEM_TO_MEM:
701 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
702 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
703 		break;
704 	default:
705 		return -EINVAL;
706 	}
707 
708 	/* Reset all counters */
709 	udma_reset_counters(uc);
710 
711 	/* Hard reset: re-initialize the channel to reset */
712 	if (hard) {
713 		struct udma_chan_config ucc_backup;
714 		int ret;
715 
716 		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
717 		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
718 
719 		/* restore the channel configuration */
720 		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
721 		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
722 		if (ret)
723 			return ret;
724 
725 		/*
726 		 * Setting forced teardown after a forced reset helps to
727 		 * recover the rchan.
728 		 */
729 		if (uc->config.dir == DMA_DEV_TO_MEM)
730 			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
731 					   UDMA_CHAN_RT_CTL_EN |
732 					   UDMA_CHAN_RT_CTL_TDOWN |
733 					   UDMA_CHAN_RT_CTL_FTDOWN);
734 	}
735 	uc->state = UDMA_CHAN_IS_IDLE;
736 
737 	return 0;
738 }
739 
740 static void udma_start_desc(struct udma_chan *uc)
741 {
742 	struct udma_chan_config *ucc = &uc->config;
743 
744 	if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
745 		int i;
746 
747 		/* Push all descriptors to ring for packet mode cyclic or RX */
748 		for (i = 0; i < uc->desc->sglen; i++)
749 			udma_push_to_ring(uc, i);
750 	} else {
751 		udma_push_to_ring(uc, 0);
752 	}
753 }
754 
755 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
756 {
757 	/* Only PDMAs have staticTR */
758 	if (uc->config.ep_type == PSIL_EP_NATIVE)
759 		return false;
760 
761 	/* Check if the staticTR configuration has changed for TX */
762 	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
763 		return true;
764 
765 	return false;
766 }
767 
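/*
 * Take the next queued descriptor, push its hardware descriptor(s) to the
 * ring and, unless the channel is already running with an unchanged staticTR,
 * (re)configure and enable it: for PDMA endpoints the peer staticTR X/Y (and
 * Z for RX) registers are programmed first, then the UDMA channel and the
 * remote peer are enabled in the order required by the direction.
 */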
768 static int udma_start(struct udma_chan *uc)
769 {
770 	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
771 
772 	if (!vd) {
773 		uc->desc = NULL;
774 		return -ENOENT;
775 	}
776 
777 	list_del(&vd->node);
778 
779 	uc->desc = to_udma_desc(&vd->tx);
780 
781 	/* Channel is already running and does not need reconfiguration */
782 	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
783 		udma_start_desc(uc);
784 		goto out;
785 	}
786 
787 	/* Make sure that we clear the teardown bit, if it is set */
788 	udma_reset_chan(uc, false);
789 
790 	/* Push descriptors before we start the channel */
791 	udma_start_desc(uc);
792 
793 	switch (uc->desc->dir) {
794 	case DMA_DEV_TO_MEM:
795 		/* Config remote TR */
796 		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
797 			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
798 				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
799 			const struct udma_match_data *match_data =
800 							uc->ud->match_data;
801 
802 			if (uc->config.enable_acc32)
803 				val |= PDMA_STATIC_TR_XY_ACC32;
804 			if (uc->config.enable_burst)
805 				val |= PDMA_STATIC_TR_XY_BURST;
806 
807 			udma_rchanrt_write(uc,
808 					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
809 					   val);
810 
811 			udma_rchanrt_write(uc,
812 				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
813 				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
814 						 match_data->statictr_z_mask));
815 
816 			/* save the current staticTR configuration */
817 			memcpy(&uc->static_tr, &uc->desc->static_tr,
818 			       sizeof(uc->static_tr));
819 		}
820 
821 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
822 				   UDMA_CHAN_RT_CTL_EN);
823 
824 		/* Enable remote */
825 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
826 				   UDMA_PEER_RT_EN_ENABLE);
827 
828 		break;
829 	case DMA_MEM_TO_DEV:
830 		/* Config remote TR */
831 		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
832 			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
833 				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
834 
835 			if (uc->config.enable_acc32)
836 				val |= PDMA_STATIC_TR_XY_ACC32;
837 			if (uc->config.enable_burst)
838 				val |= PDMA_STATIC_TR_XY_BURST;
839 
840 			udma_tchanrt_write(uc,
841 					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
842 					   val);
843 
844 			/* save the current staticTR configuration */
845 			memcpy(&uc->static_tr, &uc->desc->static_tr,
846 			       sizeof(uc->static_tr));
847 		}
848 
849 		/* Enable remote */
850 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
851 				   UDMA_PEER_RT_EN_ENABLE);
852 
853 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
854 				   UDMA_CHAN_RT_CTL_EN);
855 
856 		break;
857 	case DMA_MEM_TO_MEM:
858 		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
859 				   UDMA_CHAN_RT_CTL_EN);
860 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
861 				   UDMA_CHAN_RT_CTL_EN);
862 
863 		break;
864 	default:
865 		return -EINVAL;
866 	}
867 
868 	uc->state = UDMA_CHAN_IS_ACTIVE;
869 out:
870 
871 	return 0;
872 }
873 
874 static int udma_stop(struct udma_chan *uc)
875 {
876 	enum udma_chan_state old_state = uc->state;
877 
878 	uc->state = UDMA_CHAN_IS_TERMINATING;
879 	reinit_completion(&uc->teardown_completed);
880 
881 	switch (uc->config.dir) {
882 	case DMA_DEV_TO_MEM:
883 		if (!uc->cyclic && !uc->desc)
884 			udma_push_to_ring(uc, -1);
885 
886 		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
887 				   UDMA_PEER_RT_EN_ENABLE |
888 				   UDMA_PEER_RT_EN_TEARDOWN);
889 		break;
890 	case DMA_MEM_TO_DEV:
891 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
892 				   UDMA_PEER_RT_EN_ENABLE |
893 				   UDMA_PEER_RT_EN_FLUSH);
894 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
895 				   UDMA_CHAN_RT_CTL_EN |
896 				   UDMA_CHAN_RT_CTL_TDOWN);
897 		break;
898 	case DMA_MEM_TO_MEM:
899 		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
900 				   UDMA_CHAN_RT_CTL_EN |
901 				   UDMA_CHAN_RT_CTL_TDOWN);
902 		break;
903 	default:
904 		uc->state = old_state;
905 		complete_all(&uc->teardown_completed);
906 		return -EINVAL;
907 	}
908 
909 	return 0;
910 }
911 
912 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
913 {
914 	struct udma_desc *d = uc->desc;
915 	struct cppi5_host_desc_t *h_desc;
916 
917 	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
918 	cppi5_hdesc_reset_to_original(h_desc);
919 	udma_push_to_ring(uc, d->desc_idx);
920 	d->desc_idx = (d->desc_idx + 1) % d->sglen;
921 }
922 
923 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
924 {
925 	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
926 
927 	memcpy(d->metadata, h_desc->epib, d->metadata_size);
928 }
929 
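/*
 * TX towards a PDMA peer can return the descriptor before the peer has
 * drained the data. Compare the peer byte count with the channel byte count;
 * if the peer is behind, record the residue and a time stamp so that
 * udma_check_tx_completion() can poll for the real completion.
 */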
930 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
931 {
932 	u32 peer_bcnt, bcnt;
933 
934 	/* Only TX towards PDMA is affected */
935 	if (uc->config.ep_type == PSIL_EP_NATIVE ||
936 	    uc->config.dir != DMA_MEM_TO_DEV)
937 		return true;
938 
939 	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
940 	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
941 
942 	/* Transfer is incomplete, store current residue and time stamp */
943 	if (peer_bcnt < bcnt) {
944 		uc->tx_drain.residue = bcnt - peer_bcnt;
945 		uc->tx_drain.tstamp = ktime_get();
946 		return false;
947 	}
948 
949 	return true;
950 }
951 
952 static void udma_check_tx_completion(struct work_struct *work)
953 {
954 	struct udma_chan *uc = container_of(work, typeof(*uc),
955 					    tx_drain.work.work);
956 	bool desc_done = true;
957 	u32 residue_diff;
958 	ktime_t time_diff;
959 	unsigned long delay;
960 
961 	while (1) {
962 		if (uc->desc) {
963 			/* Get previous residue and time stamp */
964 			residue_diff = uc->tx_drain.residue;
965 			time_diff = uc->tx_drain.tstamp;
966 			/*
967 			 * Get current residue and time stamp or see if
968 			 * transfer is complete
969 			 */
970 			desc_done = udma_is_desc_really_done(uc, uc->desc);
971 		}
972 
973 		if (!desc_done) {
974 			/*
975 			 * Find the time delta and residue delta w.r.t
976 			 * previous poll
977 			 */
978 			time_diff = ktime_sub(uc->tx_drain.tstamp,
979 					      time_diff) + 1;
980 			residue_diff -= uc->tx_drain.residue;
981 			if (residue_diff) {
982 				/*
983 				 * Try to guess when we should check
984 				 * next time by calculating rate at
985 				 * which data is being drained at the
986 				 * peer device
987 				 */
988 				delay = (time_diff / residue_diff) *
989 					uc->tx_drain.residue;
990 			} else {
991 				/* No progress, check again in 1 second  */
992 				schedule_delayed_work(&uc->tx_drain.work, HZ);
993 				break;
994 			}
995 
996 			usleep_range(ktime_to_us(delay),
997 				     ktime_to_us(delay) + 10);
998 			continue;
999 		}
1000 
1001 		if (uc->desc) {
1002 			struct udma_desc *d = uc->desc;
1003 
1004 			uc->bcnt += d->residue;
1005 			udma_start(uc);
1006 			vchan_cookie_complete(&d->vd);
1007 			break;
1008 		}
1009 
1010 		break;
1011 	}
1012 }
1013 
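/*
 * Ring completion interrupt: pop the completed descriptor address from the
 * ring. A teardown completion marker completes the teardown and restarts the
 * channel if more work is queued; otherwise the address is matched against
 * the active or terminated descriptor and the transfer is completed, cycled
 * (cyclic mode) or deferred to the TX drain work.
 */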
1014 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1015 {
1016 	struct udma_chan *uc = data;
1017 	struct udma_desc *d;
1018 	unsigned long flags;
1019 	dma_addr_t paddr = 0;
1020 
1021 	if (udma_pop_from_ring(uc, &paddr) || !paddr)
1022 		return IRQ_HANDLED;
1023 
1024 	spin_lock_irqsave(&uc->vc.lock, flags);
1025 
1026 	/* Teardown completion message */
1027 	if (cppi5_desc_is_tdcm(paddr)) {
1028 		complete_all(&uc->teardown_completed);
1029 
1030 		if (uc->terminated_desc) {
1031 			udma_desc_free(&uc->terminated_desc->vd);
1032 			uc->terminated_desc = NULL;
1033 		}
1034 
1035 		if (!uc->desc)
1036 			udma_start(uc);
1037 
1038 		goto out;
1039 	}
1040 
1041 	d = udma_udma_desc_from_paddr(uc, paddr);
1042 
1043 	if (d) {
1044 		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1045 								   d->desc_idx);
1046 		if (desc_paddr != paddr) {
1047 			dev_err(uc->ud->dev, "not matching descriptors!\n");
1048 			goto out;
1049 		}
1050 
1051 		if (d == uc->desc) {
1052 			/* active descriptor */
1053 			if (uc->cyclic) {
1054 				udma_cyclic_packet_elapsed(uc);
1055 				vchan_cyclic_callback(&d->vd);
1056 			} else {
1057 				if (udma_is_desc_really_done(uc, d)) {
1058 					uc->bcnt += d->residue;
1059 					udma_start(uc);
1060 					vchan_cookie_complete(&d->vd);
1061 				} else {
1062 					schedule_delayed_work(&uc->tx_drain.work,
1063 							      0);
1064 				}
1065 			}
1066 		} else {
1067 			/*
1068 			 * terminated descriptor, mark the descriptor as
1069 			 * completed to update the channel's cookie marker
1070 			 */
1071 			dma_cookie_complete(&d->vd.tx);
1072 		}
1073 	}
1074 out:
1075 	spin_unlock_irqrestore(&uc->vc.lock, flags);
1076 
1077 	return IRQ_HANDLED;
1078 }
1079 
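/*
 * UDMA (TR event) interrupt, used by slave TR mode channels: advance the TR
 * index and either run the cyclic callback or complete the descriptor and
 * start the next queued one.
 */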
1080 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1081 {
1082 	struct udma_chan *uc = data;
1083 	struct udma_desc *d;
1084 	unsigned long flags;
1085 
1086 	spin_lock_irqsave(&uc->vc.lock, flags);
1087 	d = uc->desc;
1088 	if (d) {
1089 		d->tr_idx = (d->tr_idx + 1) % d->sglen;
1090 
1091 		if (uc->cyclic) {
1092 			vchan_cyclic_callback(&d->vd);
1093 		} else {
1094 			/* TODO: figure out the real amount of data */
1095 			uc->bcnt += d->residue;
1096 			udma_start(uc);
1097 			vchan_cookie_complete(&d->vd);
1098 		}
1099 	}
1100 
1101 	spin_unlock_irqrestore(&uc->vc.lock, flags);
1102 
1103 	return IRQ_HANDLED;
1104 }
1105 
1106 /**
1107  * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1108  * @ud: UDMA device
1109  * @from: Start the search from this flow id number
1110  * @cnt: Number of consecutive flow ids to allocate
1111  *
1112  * Allocate a range of RX flow ids for future use. Those flows can be requested
1113  * only by explicit flow id number. If @from is set to -1 it will try to find
1114  * the first free range. If @from is a positive value it will force allocation
1115  * only of the specified range of flows.
1116  *
1117  * Returns -ENOMEM if a free range can't be found,
1118  * -EEXIST if the requested range is busy,
1119  * -EINVAL if wrong input values are passed.
1120  * Returns the first flow id of the range on success.
1121  */
1122 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1123 {
1124 	int start, tmp_from;
1125 	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1126 
1127 	tmp_from = from;
1128 	if (tmp_from < 0)
1129 		tmp_from = ud->rchan_cnt;
1130 	/* default flows can't be allocated and are accessible only by id */
1131 	if (tmp_from < ud->rchan_cnt)
1132 		return -EINVAL;
1133 
1134 	if (tmp_from + cnt > ud->rflow_cnt)
1135 		return -EINVAL;
1136 
1137 	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1138 		  ud->rflow_cnt);
1139 
1140 	start = bitmap_find_next_zero_area(tmp,
1141 					   ud->rflow_cnt,
1142 					   tmp_from, cnt, 0);
1143 	if (start >= ud->rflow_cnt)
1144 		return -ENOMEM;
1145 
1146 	if (from >= 0 && start != from)
1147 		return -EEXIST;
1148 
1149 	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1150 	return start;
1151 }
1152 
1153 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1154 {
1155 	if (from < ud->rchan_cnt)
1156 		return -EINVAL;
1157 	if (from + cnt > ud->rflow_cnt)
1158 		return -EINVAL;
1159 
1160 	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1161 	return 0;
1162 }
1163 
1164 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1165 {
1166 	/*
1167 	 * An attempt to request an rflow by ID can be made for any rflow that
1168 	 * is not in use, with the assumption that the caller knows what it is
1169 	 * doing. The TI-SCI firmware will perform an additional permission
1170 	 * check anyway, so it is safe.
1171 	 */
1172 
1173 	if (id < 0 || id >= ud->rflow_cnt)
1174 		return ERR_PTR(-ENOENT);
1175 
1176 	if (test_bit(id, ud->rflow_in_use))
1177 		return ERR_PTR(-ENOENT);
1178 
1179 	/* GP rflow has to be allocated first */
1180 	if (!test_bit(id, ud->rflow_gp_map) &&
1181 	    !test_bit(id, ud->rflow_gp_map_allocated))
1182 		return ERR_PTR(-EINVAL);
1183 
1184 	dev_dbg(ud->dev, "get rflow%d\n", id);
1185 	set_bit(id, ud->rflow_in_use);
1186 	return &ud->rflows[id];
1187 }
1188 
1189 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1190 {
1191 	if (!test_bit(rflow->id, ud->rflow_in_use)) {
1192 		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1193 		return;
1194 	}
1195 
1196 	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1197 	clear_bit(rflow->id, ud->rflow_in_use);
1198 }
1199 
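/*
 * Generate __udma_reserve_tchan() and __udma_reserve_rchan(). A non-negative
 * @id reserves that exact channel (failing if it is already in use); id < 0
 * searches the bitmap starting from the first channel of the requested
 * throughput level, clamped to the highest level the device supports.
 */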
1200 #define UDMA_RESERVE_RESOURCE(res)					\
1201 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
1202 					       enum udma_tp_level tpl,	\
1203 					       int id)			\
1204 {									\
1205 	if (id >= 0) {							\
1206 		if (test_bit(id, ud->res##_map)) {			\
1207 			dev_err(ud->dev, "%s%d is in use\n", #res, id);	\
1208 			return ERR_PTR(-ENOENT);			\
1209 		}							\
1210 	} else {							\
1211 		int start;						\
1212 									\
1213 		if (tpl >= ud->tpl_levels)				\
1214 			tpl = ud->tpl_levels - 1;			\
1215 									\
1216 		start = ud->tpl_start_idx[tpl];				\
1217 									\
1218 		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
1219 					start);				\
1220 		if (id == ud->res##_cnt) {				\
1221 			return ERR_PTR(-ENOENT);			\
1222 		}							\
1223 	}								\
1224 									\
1225 	set_bit(id, ud->res##_map);					\
1226 	return &ud->res##s[id];						\
1227 }
1228 
1229 UDMA_RESERVE_RESOURCE(tchan);
1230 UDMA_RESERVE_RESOURCE(rchan);
1231 
1232 static int udma_get_tchan(struct udma_chan *uc)
1233 {
1234 	struct udma_dev *ud = uc->ud;
1235 
1236 	if (uc->tchan) {
1237 		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1238 			uc->id, uc->tchan->id);
1239 		return 0;
1240 	}
1241 
1242 	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1243 
1244 	return PTR_ERR_OR_ZERO(uc->tchan);
1245 }
1246 
1247 static int udma_get_rchan(struct udma_chan *uc)
1248 {
1249 	struct udma_dev *ud = uc->ud;
1250 
1251 	if (uc->rchan) {
1252 		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1253 			uc->id, uc->rchan->id);
1254 		return 0;
1255 	}
1256 
1257 	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1258 
1259 	return PTR_ERR_OR_ZERO(uc->rchan);
1260 }
1261 
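/*
 * MEM_TO_MEM transfers use a tchan/rchan pair with the same index, looped
 * back through PSI-L, so a channel id which is free in both the tchan and
 * rchan maps is needed.
 */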
1262 static int udma_get_chan_pair(struct udma_chan *uc)
1263 {
1264 	struct udma_dev *ud = uc->ud;
1265 	int chan_id, end;
1266 
1267 	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1268 		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1269 			 uc->id, uc->tchan->id);
1270 		return 0;
1271 	}
1272 
1273 	if (uc->tchan) {
1274 		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1275 			uc->id, uc->tchan->id);
1276 		return -EBUSY;
1277 	} else if (uc->rchan) {
1278 		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1279 			uc->id, uc->rchan->id);
1280 		return -EBUSY;
1281 	}
1282 
1283 	/* Can be optimized, but let's have it like this for now */
1284 	end = min(ud->tchan_cnt, ud->rchan_cnt);
1285 	/* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
1286 	chan_id = ud->tpl_start_idx[ud->tpl_levels - 1];
1287 	for (; chan_id < end; chan_id++) {
1288 		if (!test_bit(chan_id, ud->tchan_map) &&
1289 		    !test_bit(chan_id, ud->rchan_map))
1290 			break;
1291 	}
1292 
1293 	if (chan_id == end)
1294 		return -ENOENT;
1295 
1296 	set_bit(chan_id, ud->tchan_map);
1297 	set_bit(chan_id, ud->rchan_map);
1298 	uc->tchan = &ud->tchans[chan_id];
1299 	uc->rchan = &ud->rchans[chan_id];
1300 
1301 	return 0;
1302 }
1303 
1304 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1305 {
1306 	struct udma_dev *ud = uc->ud;
1307 
1308 	if (!uc->rchan) {
1309 		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1310 		return -EINVAL;
1311 	}
1312 
1313 	if (uc->rflow) {
1314 		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1315 			uc->id, uc->rflow->id);
1316 		return 0;
1317 	}
1318 
1319 	uc->rflow = __udma_get_rflow(ud, flow_id);
1320 
1321 	return PTR_ERR_OR_ZERO(uc->rflow);
1322 }
1323 
1324 static void udma_put_rchan(struct udma_chan *uc)
1325 {
1326 	struct udma_dev *ud = uc->ud;
1327 
1328 	if (uc->rchan) {
1329 		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1330 			uc->rchan->id);
1331 		clear_bit(uc->rchan->id, ud->rchan_map);
1332 		uc->rchan = NULL;
1333 	}
1334 }
1335 
1336 static void udma_put_tchan(struct udma_chan *uc)
1337 {
1338 	struct udma_dev *ud = uc->ud;
1339 
1340 	if (uc->tchan) {
1341 		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1342 			uc->tchan->id);
1343 		clear_bit(uc->tchan->id, ud->tchan_map);
1344 		uc->tchan = NULL;
1345 	}
1346 }
1347 
1348 static void udma_put_rflow(struct udma_chan *uc)
1349 {
1350 	struct udma_dev *ud = uc->ud;
1351 
1352 	if (uc->rflow) {
1353 		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1354 			uc->rflow->id);
1355 		__udma_put_rflow(ud, uc->rflow);
1356 		uc->rflow = NULL;
1357 	}
1358 }
1359 
1360 static void udma_free_tx_resources(struct udma_chan *uc)
1361 {
1362 	if (!uc->tchan)
1363 		return;
1364 
1365 	k3_ringacc_ring_free(uc->tchan->t_ring);
1366 	k3_ringacc_ring_free(uc->tchan->tc_ring);
1367 	uc->tchan->t_ring = NULL;
1368 	uc->tchan->tc_ring = NULL;
1369 
1370 	udma_put_tchan(uc);
1371 }
1372 
1373 static int udma_alloc_tx_resources(struct udma_chan *uc)
1374 {
1375 	struct k3_ring_cfg ring_cfg;
1376 	struct udma_dev *ud = uc->ud;
1377 	int ret;
1378 
1379 	ret = udma_get_tchan(uc);
1380 	if (ret)
1381 		return ret;
1382 
1383 	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
1384 					    &uc->tchan->t_ring,
1385 					    &uc->tchan->tc_ring);
1386 	if (ret) {
1387 		ret = -EBUSY;
1388 		goto err_ring;
1389 	}
1390 
1391 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1392 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1393 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1394 	ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1395 
1396 	ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
1397 	ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
1398 
1399 	if (ret)
1400 		goto err_ringcfg;
1401 
1402 	return 0;
1403 
1404 err_ringcfg:
1405 	k3_ringacc_ring_free(uc->tchan->tc_ring);
1406 	uc->tchan->tc_ring = NULL;
1407 	k3_ringacc_ring_free(uc->tchan->t_ring);
1408 	uc->tchan->t_ring = NULL;
1409 err_ring:
1410 	udma_put_tchan(uc);
1411 
1412 	return ret;
1413 }
1414 
1415 static void udma_free_rx_resources(struct udma_chan *uc)
1416 {
1417 	if (!uc->rchan)
1418 		return;
1419 
1420 	if (uc->rflow) {
1421 		struct udma_rflow *rflow = uc->rflow;
1422 
1423 		k3_ringacc_ring_free(rflow->fd_ring);
1424 		k3_ringacc_ring_free(rflow->r_ring);
1425 		rflow->fd_ring = NULL;
1426 		rflow->r_ring = NULL;
1427 
1428 		udma_put_rflow(uc);
1429 	}
1430 
1431 	udma_put_rchan(uc);
1432 }
1433 
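/*
 * Reserve an rchan and, for slave directions, the matching default rflow with
 * its free descriptor and receive rings. The free descriptor ring index
 * follows the tchan and echan rings in the ring numbering, hence
 * tchan_cnt + echan_cnt + rchan->id.
 */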
1434 static int udma_alloc_rx_resources(struct udma_chan *uc)
1435 {
1436 	struct udma_dev *ud = uc->ud;
1437 	struct k3_ring_cfg ring_cfg;
1438 	struct udma_rflow *rflow;
1439 	int fd_ring_id;
1440 	int ret;
1441 
1442 	ret = udma_get_rchan(uc);
1443 	if (ret)
1444 		return ret;
1445 
1446 	/* For MEM_TO_MEM we don't need rflow or rings */
1447 	if (uc->config.dir == DMA_MEM_TO_MEM)
1448 		return 0;
1449 
1450 	ret = udma_get_rflow(uc, uc->rchan->id);
1451 	if (ret) {
1452 		ret = -EBUSY;
1453 		goto err_rflow;
1454 	}
1455 
1456 	rflow = uc->rflow;
1457 	fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1458 	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1459 					    &rflow->fd_ring, &rflow->r_ring);
1460 	if (ret) {
1461 		ret = -EBUSY;
1462 		goto err_ring;
1463 	}
1464 
1465 	memset(&ring_cfg, 0, sizeof(ring_cfg));
1466 
1467 	if (uc->config.pkt_mode)
1468 		ring_cfg.size = SG_MAX_SEGMENTS;
1469 	else
1470 		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1471 
1472 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1473 	ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1474 
1475 	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1476 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1477 	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1478 
1479 	if (ret)
1480 		goto err_ringcfg;
1481 
1482 	return 0;
1483 
1484 err_ringcfg:
1485 	k3_ringacc_ring_free(rflow->r_ring);
1486 	rflow->r_ring = NULL;
1487 	k3_ringacc_ring_free(rflow->fd_ring);
1488 	rflow->fd_ring = NULL;
1489 err_ring:
1490 	udma_put_rflow(uc);
1491 err_rflow:
1492 	udma_put_rchan(uc);
1493 
1494 	return ret;
1495 }
1496 
1497 #define TISCI_TCHAN_VALID_PARAMS (				\
1498 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1499 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
1500 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
1501 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
1502 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
1503 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
1504 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
1505 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1506 
1507 #define TISCI_RCHAN_VALID_PARAMS (				\
1508 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
1509 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
1510 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
1511 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
1512 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
1513 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
1514 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
1515 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
1516 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1517 
1518 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1519 {
1520 	struct udma_dev *ud = uc->ud;
1521 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1522 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1523 	struct udma_tchan *tchan = uc->tchan;
1524 	struct udma_rchan *rchan = uc->rchan;
1525 	int ret = 0;
1526 
1527 	/* Non synchronized - mem to mem type of transfer */
1528 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1529 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1530 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1531 
1532 	req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1533 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1534 	req_tx.index = tchan->id;
1535 	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1536 	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1537 	req_tx.txcq_qnum = tc_ring;
1538 	req_tx.tx_atype = ud->atype;
1539 
1540 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1541 	if (ret) {
1542 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1543 		return ret;
1544 	}
1545 
1546 	req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1547 	req_rx.nav_id = tisci_rm->tisci_dev_id;
1548 	req_rx.index = rchan->id;
1549 	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1550 	req_rx.rxcq_qnum = tc_ring;
1551 	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1552 	req_rx.rx_atype = ud->atype;
1553 
1554 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1555 	if (ret)
1556 		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1557 
1558 	return ret;
1559 }
1560 
1561 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1562 {
1563 	struct udma_dev *ud = uc->ud;
1564 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1565 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1566 	struct udma_tchan *tchan = uc->tchan;
1567 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1568 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1569 	u32 mode, fetch_size;
1570 	int ret = 0;
1571 
1572 	if (uc->config.pkt_mode) {
1573 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1574 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1575 						   uc->config.psd_size, 0);
1576 	} else {
1577 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1578 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
1579 	}
1580 
1581 	req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1582 	req_tx.nav_id = tisci_rm->tisci_dev_id;
1583 	req_tx.index = tchan->id;
1584 	req_tx.tx_chan_type = mode;
1585 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1586 	req_tx.tx_fetch_size = fetch_size >> 2;
1587 	req_tx.txcq_qnum = tc_ring;
1588 	req_tx.tx_atype = uc->config.atype;
1589 
1590 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1591 	if (ret)
1592 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1593 
1594 	return ret;
1595 }
1596 
1597 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1598 {
1599 	struct udma_dev *ud = uc->ud;
1600 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1601 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1602 	struct udma_rchan *rchan = uc->rchan;
1603 	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1604 	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1605 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1606 	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1607 	u32 mode, fetch_size;
1608 	int ret = 0;
1609 
1610 	if (uc->config.pkt_mode) {
1611 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1612 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1613 						   uc->config.psd_size, 0);
1614 	} else {
1615 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1616 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
1617 	}
1618 
1619 	req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1620 	req_rx.nav_id = tisci_rm->tisci_dev_id;
1621 	req_rx.index = rchan->id;
1622 	req_rx.rx_fetch_size =  fetch_size >> 2;
1623 	req_rx.rxcq_qnum = rx_ring;
1624 	req_rx.rx_chan_type = mode;
1625 	req_rx.rx_atype = uc->config.atype;
1626 
1627 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1628 	if (ret) {
1629 		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1630 		return ret;
1631 	}
1632 
1633 	flow_req.valid_params =
1634 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1635 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1636 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1637 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1638 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1639 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1640 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1641 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1642 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1643 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1644 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1645 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1646 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1647 
1648 	flow_req.nav_id = tisci_rm->tisci_dev_id;
1649 	flow_req.flow_index = rchan->id;
1650 
1651 	if (uc->config.needs_epib)
1652 		flow_req.rx_einfo_present = 1;
1653 	else
1654 		flow_req.rx_einfo_present = 0;
1655 	if (uc->config.psd_size)
1656 		flow_req.rx_psinfo_present = 1;
1657 	else
1658 		flow_req.rx_psinfo_present = 0;
1659 	flow_req.rx_error_handling = 1;
1660 	flow_req.rx_dest_qnum = rx_ring;
1661 	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
1662 	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
1663 	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
1664 	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
1665 	flow_req.rx_fdq0_sz0_qnum = fd_ring;
1666 	flow_req.rx_fdq1_qnum = fd_ring;
1667 	flow_req.rx_fdq2_qnum = fd_ring;
1668 	flow_req.rx_fdq3_qnum = fd_ring;
1669 
1670 	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
1671 
1672 	if (ret)
1673 		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1674 
1675 	return 0;
1676 }
1677 
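/*
 * dmaengine device_alloc_chan_resources callback: reserve the channel(s),
 * rings and (for RX) the rflow needed for the configured direction, configure
 * them through TI-SCI, pair the PSI-L threads and request the ring completion
 * interrupt plus, for slave TR mode channels, the UDMA TR event interrupt.
 */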
1678 static int udma_alloc_chan_resources(struct dma_chan *chan)
1679 {
1680 	struct udma_chan *uc = to_udma_chan(chan);
1681 	struct udma_dev *ud = to_udma_dev(chan->device);
1682 	const struct udma_match_data *match_data = ud->match_data;
1683 	struct k3_ring *irq_ring;
1684 	u32 irq_udma_idx;
1685 	int ret;
1686 
1687 	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1688 		uc->use_dma_pool = true;
1689 		/* in case of MEM_TO_MEM we have a maximum of two TRs */
1690 		if (uc->config.dir == DMA_MEM_TO_MEM) {
1691 			uc->config.hdesc_size = cppi5_trdesc_calc_size(
1692 					sizeof(struct cppi5_tr_type15_t), 2);
1693 			uc->config.pkt_mode = false;
1694 		}
1695 	}
1696 
1697 	if (uc->use_dma_pool) {
1698 		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1699 						 uc->config.hdesc_size,
1700 						 ud->desc_align,
1701 						 0);
1702 		if (!uc->hdesc_pool) {
1703 			dev_err(ud->ddev.dev,
1704 				"Descriptor pool allocation failed\n");
1705 			uc->use_dma_pool = false;
1706 			ret = -ENOMEM;
1707 			goto err_cleanup;
1708 		}
1709 	}
1710 
1711 	/*
1712 	 * Make sure that the completion is in a known state:
1713 	 * No teardown, the channel is idle
1714 	 */
1715 	reinit_completion(&uc->teardown_completed);
1716 	complete_all(&uc->teardown_completed);
1717 	uc->state = UDMA_CHAN_IS_IDLE;
1718 
1719 	switch (uc->config.dir) {
1720 	case DMA_MEM_TO_MEM:
1721 		/* Non synchronized - mem to mem type of transfer */
1722 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1723 			uc->id);
1724 
1725 		ret = udma_get_chan_pair(uc);
1726 		if (ret)
1727 			goto err_cleanup;
1728 
1729 		ret = udma_alloc_tx_resources(uc);
1730 		if (ret) {
1731 			udma_put_rchan(uc);
1732 			goto err_cleanup;
1733 		}
1734 
1735 		ret = udma_alloc_rx_resources(uc);
1736 		if (ret) {
1737 			udma_free_tx_resources(uc);
1738 			goto err_cleanup;
1739 		}
1740 
1741 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
1742 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1743 					K3_PSIL_DST_THREAD_ID_OFFSET;
1744 
1745 		irq_ring = uc->tchan->tc_ring;
1746 		irq_udma_idx = uc->tchan->id;
1747 
1748 		ret = udma_tisci_m2m_channel_config(uc);
1749 		break;
1750 	case DMA_MEM_TO_DEV:
1751 		/* Slave transfer synchronized - mem to dev (TX) transfer */
1752 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1753 			uc->id);
1754 
1755 		ret = udma_alloc_tx_resources(uc);
1756 		if (ret)
1757 			goto err_cleanup;
1758 
1759 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
1760 		uc->config.dst_thread = uc->config.remote_thread_id;
1761 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1762 
1763 		irq_ring = uc->tchan->tc_ring;
1764 		irq_udma_idx = uc->tchan->id;
1765 
1766 		ret = udma_tisci_tx_channel_config(uc);
1767 		break;
1768 	case DMA_DEV_TO_MEM:
1769 		/* Slave transfer synchronized - dev to mem (RX) transfer */
1770 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1771 			uc->id);
1772 
1773 		ret = udma_alloc_rx_resources(uc);
1774 		if (ret)
1775 			goto err_cleanup;
1776 
1777 		uc->config.src_thread = uc->config.remote_thread_id;
1778 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1779 					K3_PSIL_DST_THREAD_ID_OFFSET;
1780 
1781 		irq_ring = uc->rflow->r_ring;
1782 		irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
1783 
1784 		ret = udma_tisci_rx_channel_config(uc);
1785 		break;
1786 	default:
1787 		/* Can not happen */
1788 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1789 			__func__, uc->id, uc->config.dir);
1790 		ret = -EINVAL;
1791 		goto err_cleanup;
1792 
1793 	}
1794 
1795 	/* check if the channel configuration was successful */
1796 	if (ret)
1797 		goto err_res_free;
1798 
1799 	if (udma_is_chan_running(uc)) {
1800 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1801 		udma_reset_chan(uc, false);
1802 		if (udma_is_chan_running(uc)) {
1803 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1804 			ret = -EBUSY;
1805 			goto err_res_free;
1806 		}
1807 	}
1808 
1809 	/* PSI-L pairing */
1810 	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1811 	if (ret) {
1812 		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1813 			uc->config.src_thread, uc->config.dst_thread);
1814 		goto err_res_free;
1815 	}
1816 
1817 	uc->psil_paired = true;
1818 
1819 	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1820 	if (uc->irq_num_ring <= 0) {
1821 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1822 			k3_ringacc_get_ring_id(irq_ring));
1823 		ret = -EINVAL;
1824 		goto err_psi_free;
1825 	}
1826 
1827 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1828 			  IRQF_TRIGGER_HIGH, uc->name, uc);
1829 	if (ret) {
1830 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1831 		goto err_irq_free;
1832 	}
1833 
1834 	/* Event from UDMA (TR events) only needed for slave TR mode channels */
1835 	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1836 		uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1837 							    irq_udma_idx);
1838 		if (uc->irq_num_udma <= 0) {
1839 			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1840 				irq_udma_idx);
1841 			free_irq(uc->irq_num_ring, uc);
1842 			ret = -EINVAL;
1843 			goto err_irq_free;
1844 		}
1845 
1846 		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1847 				  uc->name, uc);
1848 		if (ret) {
1849 			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1850 				uc->id);
1851 			free_irq(uc->irq_num_ring, uc);
1852 			goto err_irq_free;
1853 		}
1854 	} else {
1855 		uc->irq_num_udma = 0;
1856 	}
1857 
1858 	udma_reset_rings(uc);
1859 
1860 	return 0;
1861 
1862 err_irq_free:
1863 	uc->irq_num_ring = 0;
1864 	uc->irq_num_udma = 0;
1865 err_psi_free:
1866 	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1867 	uc->psil_paired = false;
1868 err_res_free:
1869 	udma_free_tx_resources(uc);
1870 	udma_free_rx_resources(uc);
1871 err_cleanup:
1872 	udma_reset_uchan(uc);
1873 
1874 	if (uc->use_dma_pool) {
1875 		dma_pool_destroy(uc->hdesc_pool);
1876 		uc->use_dma_pool = false;
1877 	}
1878 
1879 	return ret;
1880 }
1881 
1882 static int udma_slave_config(struct dma_chan *chan,
1883 			     struct dma_slave_config *cfg)
1884 {
1885 	struct udma_chan *uc = to_udma_chan(chan);
1886 
1887 	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1888 
1889 	return 0;
1890 }
1891 
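/*
 * Allocate a single CPPI5 TR descriptor holding @tr_count TRs of @tr_size
 * bytes. The layout is the descriptor header followed by the TR request
 * records and then the TR response records; the return ring is the completion
 * ring of the direction the descriptor is prepared for.
 */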
1892 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1893 					    size_t tr_size, int tr_count,
1894 					    enum dma_transfer_direction dir)
1895 {
1896 	struct udma_hwdesc *hwdesc;
1897 	struct cppi5_desc_hdr_t *tr_desc;
1898 	struct udma_desc *d;
1899 	u32 reload_count = 0;
1900 	u32 ring_id;
1901 
1902 	switch (tr_size) {
1903 	case 16:
1904 	case 32:
1905 	case 64:
1906 	case 128:
1907 		break;
1908 	default:
1909 		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1910 		return NULL;
1911 	}
1912 
1913 	/* We have only one descriptor containing multiple TRs */
1914 	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1915 	if (!d)
1916 		return NULL;
1917 
1918 	d->sglen = tr_count;
1919 
1920 	d->hwdesc_count = 1;
1921 	hwdesc = &d->hwdesc[0];
1922 
1923 	/* Allocate memory for DMA ring descriptor */
1924 	if (uc->use_dma_pool) {
1925 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1926 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1927 						GFP_NOWAIT,
1928 						&hwdesc->cppi5_desc_paddr);
1929 	} else {
1930 		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1931 								 tr_count);
1932 		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1933 						uc->ud->desc_align);
1934 		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1935 						hwdesc->cppi5_desc_size,
1936 						&hwdesc->cppi5_desc_paddr,
1937 						GFP_NOWAIT);
1938 	}
1939 
1940 	if (!hwdesc->cppi5_desc_vaddr) {
1941 		kfree(d);
1942 		return NULL;
1943 	}
1944 
1945 	/* Start of the TR req records */
1946 	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
1947 	/* Start address of the TR response array */
1948 	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
1949 
1950 	tr_desc = hwdesc->cppi5_desc_vaddr;
1951 
1952 	if (uc->cyclic)
1953 		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
1954 
1955 	if (dir == DMA_DEV_TO_MEM)
1956 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1957 	else
1958 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
1959 
1960 	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
1961 	cppi5_desc_set_pktids(tr_desc, uc->id,
1962 			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
1963 	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
1964 
1965 	return d;
1966 }
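The pointer arithmetic above implies a fixed layout inside the single coherent buffer; a sketch of that layout, derived from the code (offsets in multiples of tr_size):

/*
 * cppi5_desc_vaddr
 *   +0                        CPPI5 TR descriptor header      (tr_size bytes)
 *   +tr_size                  TR request 0 .. tr_count-1      (tr_size each)
 *                             = hwdesc->tr_req_base
 *   +tr_size * (1 + tr_count) TR response 0 .. tr_count-1
 *                             = hwdesc->tr_resp_base
 */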
1967 
1968 /**
1969  * udma_get_tr_counters - calculate TR counters for a given length
1970  * @len: Length of the transfer
1971  * @align_to: Preferred alignment
1972  * @tr0_cnt0: First TR icnt0
1973  * @tr0_cnt1: First TR icnt1
1974  * @tr1_cnt0: Second (if used) TR icnt0
1975  *
1976  * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
1977  * For len >= SZ_64K two TRs are used in a simple way:
1978  * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
1979  * Second TR: the remaining length (tr1_cnt0)
1980  *
1981  * Returns the number of TRs the length needs (1 or 2), or
1982  * -EINVAL if the length cannot be supported
1983  */
1984 static int udma_get_tr_counters(size_t len, unsigned long align_to,
1985 				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
1986 {
1987 	if (len < SZ_64K) {
1988 		*tr0_cnt0 = len;
1989 		*tr0_cnt1 = 1;
1990 
1991 		return 1;
1992 	}
1993 
1994 	if (align_to > 3)
1995 		align_to = 3;
1996 
1997 realign:
1998 	*tr0_cnt0 = SZ_64K - BIT(align_to);
1999 	if (len / *tr0_cnt0 >= SZ_64K) {
2000 		if (align_to) {
2001 			align_to--;
2002 			goto realign;
2003 		}
2004 		return -EINVAL;
2005 	}
2006 
2007 	*tr0_cnt1 = len / *tr0_cnt0;
2008 	*tr1_cnt0 = len % *tr0_cnt0;
2009 
2010 	return 2;
2011 }
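A worked example of the splitting above (editor's illustration, numbers chosen for clarity): a 300000-byte, 4-byte-aligned transfer (align_to = 2) clamps tr0_cnt0 to SZ_64K - 4 = 65532, so tr0_cnt1 = 300000 / 65532 = 4 and tr1_cnt0 = 300000 % 65532 = 37872, i.e. two TRs are needed; anything below 64 KiB fits in a single TR with icnt1 = 1.

	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	int num_tr;

	num_tr = udma_get_tr_counters(300000, 2, &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
	/* num_tr == 2, tr0_cnt0 == 65532, tr0_cnt1 == 4, tr1_cnt0 == 37872 */

	num_tr = udma_get_tr_counters(4096, 2, &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
	/* num_tr == 1, tr0_cnt0 == 4096, tr0_cnt1 == 1, tr1_cnt0 untouched */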
2012 
2013 static struct udma_desc *
2014 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2015 		      unsigned int sglen, enum dma_transfer_direction dir,
2016 		      unsigned long tx_flags, void *context)
2017 {
2018 	struct scatterlist *sgent;
2019 	struct udma_desc *d;
2020 	struct cppi5_tr_type1_t *tr_req = NULL;
2021 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2022 	unsigned int i;
2023 	size_t tr_size;
2024 	int num_tr = 0;
2025 	int tr_idx = 0;
2026 
2027 	if (!is_slave_direction(dir)) {
2028 		dev_err(uc->ud->dev, "Only slave DMA transfers are supported\n");
2029 		return NULL;
2030 	}
2031 
2032 	/* estimate the number of TRs we will need */
2033 	for_each_sg(sgl, sgent, sglen, i) {
2034 		if (sg_dma_len(sgent) < SZ_64K)
2035 			num_tr++;
2036 		else
2037 			num_tr += 2;
2038 	}
2039 
2040 	/* Now allocate and setup the descriptor. */
2041 	tr_size = sizeof(struct cppi5_tr_type1_t);
2042 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2043 	if (!d)
2044 		return NULL;
2045 
2046 	d->sglen = sglen;
2047 
2048 	tr_req = d->hwdesc[0].tr_req_base;
2049 	for_each_sg(sgl, sgent, sglen, i) {
2050 		dma_addr_t sg_addr = sg_dma_address(sgent);
2051 
2052 		num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2053 					      &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2054 		if (num_tr < 0) {
2055 			dev_err(uc->ud->dev, "size %u is not supported\n",
2056 				sg_dma_len(sgent));
2057 			udma_free_hwdesc(uc, d);
2058 			kfree(d);
2059 			return NULL;
2060 		}
2061 
2062 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2063 			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2064 		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2065 
2066 		tr_req[tr_idx].addr = sg_addr;
2067 		tr_req[tr_idx].icnt0 = tr0_cnt0;
2068 		tr_req[tr_idx].icnt1 = tr0_cnt1;
2069 		tr_req[tr_idx].dim1 = tr0_cnt0;
2070 		tr_idx++;
2071 
2072 		if (num_tr == 2) {
2073 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2074 				      false, false,
2075 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2076 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2077 					 CPPI5_TR_CSF_SUPR_EVT);
2078 
2079 			tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2080 			tr_req[tr_idx].icnt0 = tr1_cnt0;
2081 			tr_req[tr_idx].icnt1 = 1;
2082 			tr_req[tr_idx].dim1 = tr1_cnt0;
2083 			tr_idx++;
2084 		}
2085 
2086 		d->residue += sg_dma_len(sgent);
2087 	}
2088 
2089 	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2090 			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2091 
2092 	return d;
2093 }
2094 
2095 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2096 				   enum dma_slave_buswidth dev_width,
2097 				   u16 elcnt)
2098 {
2099 	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2100 		return 0;
2101 
2102 	/* Bus width translates to the element size (ES) */
2103 	switch (dev_width) {
2104 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
2105 		d->static_tr.elsize = 0;
2106 		break;
2107 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
2108 		d->static_tr.elsize = 1;
2109 		break;
2110 	case DMA_SLAVE_BUSWIDTH_3_BYTES:
2111 		d->static_tr.elsize = 2;
2112 		break;
2113 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
2114 		d->static_tr.elsize = 3;
2115 		break;
2116 	case DMA_SLAVE_BUSWIDTH_8_BYTES:
2117 		d->static_tr.elsize = 4;
2118 		break;
2119 	default: /* not reached */
2120 		return -EINVAL;
2121 	}
2122 
2123 	d->static_tr.elcnt = elcnt;
2124 
2125 	/*
2126 	 * PDMA must close the packet when the channel is in packet mode.
2127 	 * In TR mode, when the channel is not cyclic, PDMA must also close the
2128 	 * packet; otherwise the transfer stalls because PDMA holds on to the
2129 	 * data it has received from the peripheral.
2130 	 */
2131 	if (uc->config.pkt_mode || !uc->cyclic) {
2132 		unsigned int div = dev_width * elcnt;
2133 
2134 		if (uc->cyclic)
2135 			d->static_tr.bstcnt = d->residue / d->sglen / div;
2136 		else
2137 			d->static_tr.bstcnt = d->residue / div;
2138 
2139 		if (uc->config.dir == DMA_DEV_TO_MEM &&
2140 		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2141 			return -EINVAL;
2142 	} else {
2143 		d->static_tr.bstcnt = 0;
2144 	}
2145 
2146 	return 0;
2147 }
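To make the bstcnt computation concrete, a worked example with hypothetical values: a non-cyclic DEV_TO_MEM transfer through a PDMA endpoint with dev_width = 4 bytes and elcnt (burst) = 8 gives div = 32, so a 4096-byte descriptor programs bstcnt = 4096 / 32 = 128, well inside the Z limit encoded by statictr_z_mask (0xFFF on AM654, 0xFFFFFF on J721E). For cyclic transfers the residue is first divided by the number of periods, so bstcnt describes a single period.

/*
 * Worked example (editor's illustration):
 *   dev_width = 4, elcnt = 8       ->  div    = 4 * 8 = 32
 *   d->residue = 4096, non-cyclic  ->  bstcnt = 4096 / 32 = 128
 *   128 <= statictr_z_mask         ->  accepted
 */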
2148 
2149 static struct udma_desc *
2150 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2151 		       unsigned int sglen, enum dma_transfer_direction dir,
2152 		       unsigned long tx_flags, void *context)
2153 {
2154 	struct scatterlist *sgent;
2155 	struct cppi5_host_desc_t *h_desc = NULL;
2156 	struct udma_desc *d;
2157 	u32 ring_id;
2158 	unsigned int i;
2159 
2160 	d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
2161 	if (!d)
2162 		return NULL;
2163 
2164 	d->sglen = sglen;
2165 	d->hwdesc_count = sglen;
2166 
2167 	if (dir == DMA_DEV_TO_MEM)
2168 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2169 	else
2170 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2171 
2172 	for_each_sg(sgl, sgent, sglen, i) {
2173 		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2174 		dma_addr_t sg_addr = sg_dma_address(sgent);
2175 		struct cppi5_host_desc_t *desc;
2176 		size_t sg_len = sg_dma_len(sgent);
2177 
2178 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2179 						GFP_NOWAIT,
2180 						&hwdesc->cppi5_desc_paddr);
2181 		if (!hwdesc->cppi5_desc_vaddr) {
2182 			dev_err(uc->ud->dev,
2183 				"descriptor%d allocation failed\n", i);
2184 
2185 			udma_free_hwdesc(uc, d);
2186 			kfree(d);
2187 			return NULL;
2188 		}
2189 
2190 		d->residue += sg_len;
2191 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2192 		desc = hwdesc->cppi5_desc_vaddr;
2193 
2194 		if (i == 0) {
2195 			cppi5_hdesc_init(desc, 0, 0);
2196 			/* Flow and Packet ID */
2197 			cppi5_desc_set_pktids(&desc->hdr, uc->id,
2198 					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2199 			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2200 		} else {
2201 			cppi5_hdesc_reset_hbdesc(desc);
2202 			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2203 		}
2204 
2205 		/* attach the sg buffer to the descriptor */
2206 		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2207 
2208 		/* Link this descriptor to the previous host buffer descriptor */
2209 		if (h_desc)
2210 			cppi5_hdesc_link_hbdesc(h_desc,
2211 						hwdesc->cppi5_desc_paddr);
2212 
2213 		if (dir == DMA_MEM_TO_DEV)
2214 			h_desc = desc;
2215 	}
2216 
2217 	if (d->residue >= SZ_4M) {
2218 		dev_err(uc->ud->dev,
2219 			"%s: Transfer size %u is over the supported 4M range\n",
2220 			__func__, d->residue);
2221 		udma_free_hwdesc(uc, d);
2222 		kfree(d);
2223 		return NULL;
2224 	}
2225 
2226 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2227 	cppi5_hdesc_set_pktlen(h_desc, d->residue);
2228 
2229 	return d;
2230 }
2231 
2232 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2233 				void *data, size_t len)
2234 {
2235 	struct udma_desc *d = to_udma_desc(desc);
2236 	struct udma_chan *uc = to_udma_chan(desc->chan);
2237 	struct cppi5_host_desc_t *h_desc;
2238 	u32 psd_size = len;
2239 	u32 flags = 0;
2240 
2241 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
2242 		return -ENOTSUPP;
2243 
2244 	if (!data || len > uc->config.metadata_size)
2245 		return -EINVAL;
2246 
2247 	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2248 		return -EINVAL;
2249 
2250 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2251 	if (d->dir == DMA_MEM_TO_DEV)
2252 		memcpy(h_desc->epib, data, len);
2253 
2254 	if (uc->config.needs_epib)
2255 		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2256 
2257 	d->metadata = data;
2258 	d->metadata_size = len;
2259 	if (uc->config.needs_epib)
2260 		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2261 
2262 	cppi5_hdesc_update_flags(h_desc, flags);
2263 	cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2264 
2265 	return 0;
2266 }
2267 
2268 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2269 				   size_t *payload_len, size_t *max_len)
2270 {
2271 	struct udma_desc *d = to_udma_desc(desc);
2272 	struct udma_chan *uc = to_udma_chan(desc->chan);
2273 	struct cppi5_host_desc_t *h_desc;
2274 
2275 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
2276 		return ERR_PTR(-ENOTSUPP);
2277 
2278 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2279 
2280 	*max_len = uc->config.metadata_size;
2281 
2282 	*payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2283 		       CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2284 	*payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2285 
2286 	return h_desc->epib;
2287 }
2288 
2289 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2290 				 size_t payload_len)
2291 {
2292 	struct udma_desc *d = to_udma_desc(desc);
2293 	struct udma_chan *uc = to_udma_chan(desc->chan);
2294 	struct cppi5_host_desc_t *h_desc;
2295 	u32 psd_size = payload_len;
2296 	u32 flags = 0;
2297 
2298 	if (!uc->config.pkt_mode || !uc->config.metadata_size)
2299 		return -ENOTSUPP;
2300 
2301 	if (payload_len > uc->config.metadata_size)
2302 		return -EINVAL;
2303 
2304 	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2305 		return -EINVAL;
2306 
2307 	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2308 
2309 	if (uc->config.needs_epib) {
2310 		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2311 		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2312 	}
2313 
2314 	cppi5_hdesc_update_flags(h_desc, flags);
2315 	cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2316 
2317 	return 0;
2318 }
2319 
2320 static struct dma_descriptor_metadata_ops metadata_ops = {
2321 	.attach = udma_attach_metadata,
2322 	.get_ptr = udma_get_metadata_ptr,
2323 	.set_len = udma_set_metadata_len,
2324 };
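Clients never call these ops directly; they go through the generic dmaengine metadata wrappers, which dispatch to the table above. A hedged sketch of the DESC_METADATA_CLIENT path for a TX packet (descriptor and buffer names are hypothetical; the wrappers are the real dmaengine API):

/* Editor's illustrative sketch -- not part of the driver. */
#include <linux/dmaengine.h>

static int example_attach_tx_metadata(struct dma_async_tx_descriptor *txd,
				       void *meta, size_t meta_len)
{
	/* Dispatches to udma_attach_metadata() via txd->metadata_ops */
	return dmaengine_desc_attach_metadata(txd, meta, meta_len);
}

For the engine-allocated flavour, dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() reach udma_get_metadata_ptr() and udma_set_metadata_len() in the same way.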
2325 
2326 static struct dma_async_tx_descriptor *
2327 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2328 		   unsigned int sglen, enum dma_transfer_direction dir,
2329 		   unsigned long tx_flags, void *context)
2330 {
2331 	struct udma_chan *uc = to_udma_chan(chan);
2332 	enum dma_slave_buswidth dev_width;
2333 	struct udma_desc *d;
2334 	u32 burst;
2335 
2336 	if (dir != uc->config.dir) {
2337 		dev_err(chan->device->dev,
2338 			"%s: chan%d is for %s, not supporting %s\n",
2339 			__func__, uc->id,
2340 			dmaengine_get_direction_text(uc->config.dir),
2341 			dmaengine_get_direction_text(dir));
2342 		return NULL;
2343 	}
2344 
2345 	if (dir == DMA_DEV_TO_MEM) {
2346 		dev_width = uc->cfg.src_addr_width;
2347 		burst = uc->cfg.src_maxburst;
2348 	} else if (dir == DMA_MEM_TO_DEV) {
2349 		dev_width = uc->cfg.dst_addr_width;
2350 		burst = uc->cfg.dst_maxburst;
2351 	} else {
2352 		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2353 		return NULL;
2354 	}
2355 
2356 	if (!burst)
2357 		burst = 1;
2358 
2359 	if (uc->config.pkt_mode)
2360 		d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2361 					   context);
2362 	else
2363 		d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2364 					  context);
2365 
2366 	if (!d)
2367 		return NULL;
2368 
2369 	d->dir = dir;
2370 	d->desc_idx = 0;
2371 	d->tr_idx = 0;
2372 
2373 	/* static TR for remote PDMA */
2374 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
2375 		dev_err(uc->ud->dev,
2376 			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2377 			__func__, d->static_tr.bstcnt);
2378 
2379 		udma_free_hwdesc(uc, d);
2380 		kfree(d);
2381 		return NULL;
2382 	}
2383 
2384 	if (uc->config.metadata_size)
2385 		d->vd.tx.metadata_ops = &metadata_ops;
2386 
2387 	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2388 }
2389 
2390 static struct udma_desc *
2391 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2392 			size_t buf_len, size_t period_len,
2393 			enum dma_transfer_direction dir, unsigned long flags)
2394 {
2395 	struct udma_desc *d;
2396 	size_t tr_size, period_addr;
2397 	struct cppi5_tr_type1_t *tr_req;
2398 	unsigned int periods = buf_len / period_len;
2399 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2400 	unsigned int i;
2401 	int num_tr;
2402 
2403 	if (!is_slave_direction(dir)) {
2404 		dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
2405 		return NULL;
2406 	}
2407 
2408 	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
2409 				      &tr0_cnt1, &tr1_cnt0);
2410 	if (num_tr < 0) {
2411 		dev_err(uc->ud->dev, "size %zu is not supported\n",
2412 			period_len);
2413 		return NULL;
2414 	}
2415 
2416 	/* Now allocate and setup the descriptor. */
2417 	tr_size = sizeof(struct cppi5_tr_type1_t);
2418 	d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
2419 	if (!d)
2420 		return NULL;
2421 
2422 	tr_req = d->hwdesc[0].tr_req_base;
2423 	period_addr = buf_addr;
2424 	for (i = 0; i < periods; i++) {
2425 		int tr_idx = i * num_tr;
2426 
2427 		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2428 			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2429 
2430 		tr_req[tr_idx].addr = period_addr;
2431 		tr_req[tr_idx].icnt0 = tr0_cnt0;
2432 		tr_req[tr_idx].icnt1 = tr0_cnt1;
2433 		tr_req[tr_idx].dim1 = tr0_cnt0;
2434 
2435 		if (num_tr == 2) {
2436 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2437 					 CPPI5_TR_CSF_SUPR_EVT);
2438 			tr_idx++;
2439 
2440 			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2441 				      false, false,
2442 				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2443 
2444 			tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
2445 			tr_req[tr_idx].icnt0 = tr1_cnt0;
2446 			tr_req[tr_idx].icnt1 = 1;
2447 			tr_req[tr_idx].dim1 = tr1_cnt0;
2448 		}
2449 
2450 		if (!(flags & DMA_PREP_INTERRUPT))
2451 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2452 					 CPPI5_TR_CSF_SUPR_EVT);
2453 
2454 		period_addr += period_len;
2455 	}
2456 
2457 	return d;
2458 }
2459 
2460 static struct udma_desc *
2461 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2462 			 size_t buf_len, size_t period_len,
2463 			 enum dma_transfer_direction dir, unsigned long flags)
2464 {
2465 	struct udma_desc *d;
2466 	u32 ring_id;
2467 	int i;
2468 	int periods = buf_len / period_len;
2469 
2470 	if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2471 		return NULL;
2472 
2473 	if (period_len >= SZ_4M)
2474 		return NULL;
2475 
2476 	d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
2477 	if (!d)
2478 		return NULL;
2479 
2480 	d->hwdesc_count = periods;
2481 
2482 	/* TODO: re-check this... */
2483 	if (dir == DMA_DEV_TO_MEM)
2484 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2485 	else
2486 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2487 
2488 	for (i = 0; i < periods; i++) {
2489 		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2490 		dma_addr_t period_addr = buf_addr + (period_len * i);
2491 		struct cppi5_host_desc_t *h_desc;
2492 
2493 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2494 						GFP_NOWAIT,
2495 						&hwdesc->cppi5_desc_paddr);
2496 		if (!hwdesc->cppi5_desc_vaddr) {
2497 			dev_err(uc->ud->dev,
2498 				"descriptor%d allocation failed\n", i);
2499 
2500 			udma_free_hwdesc(uc, d);
2501 			kfree(d);
2502 			return NULL;
2503 		}
2504 
2505 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2506 		h_desc = hwdesc->cppi5_desc_vaddr;
2507 
2508 		cppi5_hdesc_init(h_desc, 0, 0);
2509 		cppi5_hdesc_set_pktlen(h_desc, period_len);
2510 
2511 		/* Flow and Packet ID */
2512 		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
2513 				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2514 		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2515 
2516 		/* attach each period to a new descriptor */
2517 		cppi5_hdesc_attach_buf(h_desc,
2518 				       period_addr, period_len,
2519 				       period_addr, period_len);
2520 	}
2521 
2522 	return d;
2523 }
2524 
2525 static struct dma_async_tx_descriptor *
2526 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2527 		     size_t period_len, enum dma_transfer_direction dir,
2528 		     unsigned long flags)
2529 {
2530 	struct udma_chan *uc = to_udma_chan(chan);
2531 	enum dma_slave_buswidth dev_width;
2532 	struct udma_desc *d;
2533 	u32 burst;
2534 
2535 	if (dir != uc->config.dir) {
2536 		dev_err(chan->device->dev,
2537 			"%s: chan%d is for %s, not supporting %s\n",
2538 			__func__, uc->id,
2539 			dmaengine_get_direction_text(uc->config.dir),
2540 			dmaengine_get_direction_text(dir));
2541 		return NULL;
2542 	}
2543 
2544 	uc->cyclic = true;
2545 
2546 	if (dir == DMA_DEV_TO_MEM) {
2547 		dev_width = uc->cfg.src_addr_width;
2548 		burst = uc->cfg.src_maxburst;
2549 	} else if (dir == DMA_MEM_TO_DEV) {
2550 		dev_width = uc->cfg.dst_addr_width;
2551 		burst = uc->cfg.dst_maxburst;
2552 	} else {
2553 		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2554 		return NULL;
2555 	}
2556 
2557 	if (!burst)
2558 		burst = 1;
2559 
2560 	if (uc->config.pkt_mode)
2561 		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2562 					     dir, flags);
2563 	else
2564 		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2565 					    dir, flags);
2566 
2567 	if (!d)
2568 		return NULL;
2569 
2570 	d->sglen = buf_len / period_len;
2571 
2572 	d->dir = dir;
2573 	d->residue = buf_len;
2574 
2575 	/* static TR for remote PDMA */
2576 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
2577 		dev_err(uc->ud->dev,
2578 			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2579 			__func__, d->static_tr.bstcnt);
2580 
2581 		udma_free_hwdesc(uc, d);
2582 		kfree(d);
2583 		return NULL;
2584 	}
2585 
2586 	if (uc->config.metadata_size)
2587 		d->vd.tx.metadata_ops = &metadata_ops;
2588 
2589 	return vchan_tx_prep(&uc->vc, &d->vd, flags);
2590 }
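A hedged client-side sketch of how a cyclic (e.g. audio-style) transfer reaches this callback, using only the standard dmaengine wrappers; the buffer, period and callback names are hypothetical:

/* Editor's illustrative sketch -- not part of the driver. */
#include <linux/dmaengine.h>

static int example_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *txd;

	/* Calls udma_prep_dma_cyclic() via the dmaengine core */
	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!txd)
		return -EINVAL;

	txd->callback = period_done;
	txd->callback_param = arg;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	return 0;
}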
2591 
2592 static struct dma_async_tx_descriptor *
2593 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2594 		     size_t len, unsigned long tx_flags)
2595 {
2596 	struct udma_chan *uc = to_udma_chan(chan);
2597 	struct udma_desc *d;
2598 	struct cppi5_tr_type15_t *tr_req;
2599 	int num_tr;
2600 	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2601 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2602 
2603 	if (uc->config.dir != DMA_MEM_TO_MEM) {
2604 		dev_err(chan->device->dev,
2605 			"%s: chan%d is for %s, not supporting %s\n",
2606 			__func__, uc->id,
2607 			dmaengine_get_direction_text(uc->config.dir),
2608 			dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2609 		return NULL;
2610 	}
2611 
2612 	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
2613 				      &tr0_cnt1, &tr1_cnt0);
2614 	if (num_tr < 0) {
2615 		dev_err(uc->ud->dev, "size %zu is not supported\n",
2616 			len);
2617 		return NULL;
2618 	}
2619 
2620 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2621 	if (!d)
2622 		return NULL;
2623 
2624 	d->dir = DMA_MEM_TO_MEM;
2625 	d->desc_idx = 0;
2626 	d->tr_idx = 0;
2627 	d->residue = len;
2628 
2629 	tr_req = d->hwdesc[0].tr_req_base;
2630 
2631 	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2632 		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2633 	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2634 
2635 	tr_req[0].addr = src;
2636 	tr_req[0].icnt0 = tr0_cnt0;
2637 	tr_req[0].icnt1 = tr0_cnt1;
2638 	tr_req[0].icnt2 = 1;
2639 	tr_req[0].icnt3 = 1;
2640 	tr_req[0].dim1 = tr0_cnt0;
2641 
2642 	tr_req[0].daddr = dest;
2643 	tr_req[0].dicnt0 = tr0_cnt0;
2644 	tr_req[0].dicnt1 = tr0_cnt1;
2645 	tr_req[0].dicnt2 = 1;
2646 	tr_req[0].dicnt3 = 1;
2647 	tr_req[0].ddim1 = tr0_cnt0;
2648 
2649 	if (num_tr == 2) {
2650 		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2651 			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2652 		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2653 
2654 		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2655 		tr_req[1].icnt0 = tr1_cnt0;
2656 		tr_req[1].icnt1 = 1;
2657 		tr_req[1].icnt2 = 1;
2658 		tr_req[1].icnt3 = 1;
2659 
2660 		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2661 		tr_req[1].dicnt0 = tr1_cnt0;
2662 		tr_req[1].dicnt1 = 1;
2663 		tr_req[1].dicnt2 = 1;
2664 		tr_req[1].dicnt3 = 1;
2665 	}
2666 
2667 	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
2668 			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2669 
2670 	if (uc->config.metadata_size)
2671 		d->vd.tx.metadata_ops = &metadata_ops;
2672 
2673 	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2674 }
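memcpy channels are only exposed when enable_memcpy_support is set in the match data (the main NAVSS instances); a hedged sketch of a MEM_TO_MEM copy through the generic API follows, with hypothetical addresses and a blocking wait purely for illustration:

/* Editor's illustrative sketch -- not part of the driver. */
#include <linux/dmaengine.h>

static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* Calls udma_prep_dma_memcpy() via the dmaengine core */
	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -EINVAL;

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	/* Blocking wait is for illustration only */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}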
2675 
2676 static void udma_issue_pending(struct dma_chan *chan)
2677 {
2678 	struct udma_chan *uc = to_udma_chan(chan);
2679 	unsigned long flags;
2680 
2681 	spin_lock_irqsave(&uc->vc.lock, flags);
2682 
2683 	/* If we have something pending and no active descriptor, then */
2684 	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2685 		/*
2686 		 * start a descriptor if the channel is NOT [marked as
2687 		 * terminating _and_ it is still running (teardown has not
2688 		 * completed yet)].
2689 		 */
2690 		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2691 		      udma_is_chan_running(uc)))
2692 			udma_start(uc);
2693 	}
2694 
2695 	spin_unlock_irqrestore(&uc->vc.lock, flags);
2696 }
2697 
2698 static enum dma_status udma_tx_status(struct dma_chan *chan,
2699 				      dma_cookie_t cookie,
2700 				      struct dma_tx_state *txstate)
2701 {
2702 	struct udma_chan *uc = to_udma_chan(chan);
2703 	enum dma_status ret;
2704 	unsigned long flags;
2705 
2706 	spin_lock_irqsave(&uc->vc.lock, flags);
2707 
2708 	ret = dma_cookie_status(chan, cookie, txstate);
2709 
2710 	if (!udma_is_chan_running(uc))
2711 		ret = DMA_COMPLETE;
2712 
2713 	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2714 		ret = DMA_PAUSED;
2715 
2716 	if (ret == DMA_COMPLETE || !txstate)
2717 		goto out;
2718 
2719 	if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2720 		u32 peer_bcnt = 0;
2721 		u32 bcnt = 0;
2722 		u32 residue = uc->desc->residue;
2723 		u32 delay = 0;
2724 
2725 		if (uc->desc->dir == DMA_MEM_TO_DEV) {
2726 			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
2727 
2728 			if (uc->config.ep_type != PSIL_EP_NATIVE) {
2729 				peer_bcnt = udma_tchanrt_read(uc,
2730 						UDMA_CHAN_RT_PEER_BCNT_REG);
2731 
2732 				if (bcnt > peer_bcnt)
2733 					delay = bcnt - peer_bcnt;
2734 			}
2735 		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
2736 			bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
2737 
2738 			if (uc->config.ep_type != PSIL_EP_NATIVE) {
2739 				peer_bcnt = udma_rchanrt_read(uc,
2740 						UDMA_CHAN_RT_PEER_BCNT_REG);
2741 
2742 				if (peer_bcnt > bcnt)
2743 					delay = peer_bcnt - bcnt;
2744 			}
2745 		} else {
2746 			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
2747 		}
2748 
2749 		bcnt -= uc->bcnt;
2750 		if (bcnt && !(bcnt % uc->desc->residue))
2751 			residue = 0;
2752 		else
2753 			residue -= bcnt % uc->desc->residue;
2754 
2755 		if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2756 			ret = DMA_COMPLETE;
2757 			delay = 0;
2758 		}
2759 
2760 		dma_set_residue(txstate, residue);
2761 		dma_set_in_flight_bytes(txstate, delay);
2762 
2763 	} else {
2764 		ret = DMA_COMPLETE;
2765 	}
2766 
2767 out:
2768 	spin_unlock_irqrestore(&uc->vc.lock, flags);
2769 	return ret;
2770 }
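A worked reading of the residue math above (editor's illustration, counter values hypothetical): for a MEM_TO_DEV descriptor with residue = 4096, a byte-counter delta of 1024 reports residue = 4096 - 1024 = 3072; once the delta is a whole non-zero multiple of 4096 the residue is reported as 0, and any lag between the local and peer counters is exposed as in-flight bytes.

/*
 * Worked example (editor's illustration):
 *   uc->desc->residue = 4096, MEM_TO_DEV
 *   SBCNT delta = 1024             -> residue = 4096 - 1024 = 3072
 *   SBCNT delta = n * 4096 (n > 0) -> residue = 0
 *   SBCNT - PEER_BCNT = 512        -> dma_set_in_flight_bytes(txstate, 512)
 */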
2771 
2772 static int udma_pause(struct dma_chan *chan)
2773 {
2774 	struct udma_chan *uc = to_udma_chan(chan);
2775 
2776 	/* pause the channel */
2777 	switch (uc->config.dir) {
2778 	case DMA_DEV_TO_MEM:
2779 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
2780 					 UDMA_PEER_RT_EN_PAUSE,
2781 					 UDMA_PEER_RT_EN_PAUSE);
2782 		break;
2783 	case DMA_MEM_TO_DEV:
2784 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
2785 					 UDMA_PEER_RT_EN_PAUSE,
2786 					 UDMA_PEER_RT_EN_PAUSE);
2787 		break;
2788 	case DMA_MEM_TO_MEM:
2789 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
2790 					 UDMA_CHAN_RT_CTL_PAUSE,
2791 					 UDMA_CHAN_RT_CTL_PAUSE);
2792 		break;
2793 	default:
2794 		return -EINVAL;
2795 	}
2796 
2797 	return 0;
2798 }
2799 
2800 static int udma_resume(struct dma_chan *chan)
2801 {
2802 	struct udma_chan *uc = to_udma_chan(chan);
2803 
2804 	/* resume the channel */
2805 	switch (uc->config.dir) {
2806 	case DMA_DEV_TO_MEM:
2807 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
2808 					 UDMA_PEER_RT_EN_PAUSE, 0);
2809 
2810 		break;
2811 	case DMA_MEM_TO_DEV:
2812 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
2813 					 UDMA_PEER_RT_EN_PAUSE, 0);
2814 		break;
2815 	case DMA_MEM_TO_MEM:
2816 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
2817 					 UDMA_CHAN_RT_CTL_PAUSE, 0);
2818 		break;
2819 	default:
2820 		return -EINVAL;
2821 	}
2822 
2823 	return 0;
2824 }
2825 
2826 static int udma_terminate_all(struct dma_chan *chan)
2827 {
2828 	struct udma_chan *uc = to_udma_chan(chan);
2829 	unsigned long flags;
2830 	LIST_HEAD(head);
2831 
2832 	spin_lock_irqsave(&uc->vc.lock, flags);
2833 
2834 	if (udma_is_chan_running(uc))
2835 		udma_stop(uc);
2836 
2837 	if (uc->desc) {
2838 		uc->terminated_desc = uc->desc;
2839 		uc->desc = NULL;
2840 		uc->terminated_desc->terminated = true;
2841 		cancel_delayed_work(&uc->tx_drain.work);
2842 	}
2843 
2844 	uc->paused = false;
2845 
2846 	vchan_get_all_descriptors(&uc->vc, &head);
2847 	spin_unlock_irqrestore(&uc->vc.lock, flags);
2848 	vchan_dma_desc_free_list(&uc->vc, &head);
2849 
2850 	return 0;
2851 }
2852 
2853 static void udma_synchronize(struct dma_chan *chan)
2854 {
2855 	struct udma_chan *uc = to_udma_chan(chan);
2856 	unsigned long timeout = msecs_to_jiffies(1000);
2857 
2858 	vchan_synchronize(&uc->vc);
2859 
2860 	if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2861 		timeout = wait_for_completion_timeout(&uc->teardown_completed,
2862 						      timeout);
2863 		if (!timeout) {
2864 			dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2865 				 uc->id);
2866 			udma_dump_chan_stdata(uc);
2867 			udma_reset_chan(uc, true);
2868 		}
2869 	}
2870 
2871 	udma_reset_chan(uc, false);
2872 	if (udma_is_chan_running(uc))
2873 		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2874 
2875 	cancel_delayed_work_sync(&uc->tx_drain.work);
2876 	udma_reset_rings(uc);
2877 }
2878 
2879 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2880 				   struct virt_dma_desc *vd,
2881 				   struct dmaengine_result *result)
2882 {
2883 	struct udma_chan *uc = to_udma_chan(&vc->chan);
2884 	struct udma_desc *d;
2885 
2886 	if (!vd)
2887 		return;
2888 
2889 	d = to_udma_desc(&vd->tx);
2890 
2891 	if (d->metadata_size)
2892 		udma_fetch_epib(uc, d);
2893 
2894 	/* Provide residue information for the client */
2895 	if (result) {
2896 		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2897 
2898 		if (cppi5_desc_get_type(desc_vaddr) ==
2899 		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2900 			result->residue = d->residue -
2901 					  cppi5_hdesc_get_pktlen(desc_vaddr);
2902 			if (result->residue)
2903 				result->result = DMA_TRANS_ABORTED;
2904 			else
2905 				result->result = DMA_TRANS_NOERROR;
2906 		} else {
2907 			result->residue = 0;
2908 			result->result = DMA_TRANS_NOERROR;
2909 		}
2910 	}
2911 }
2912 
2913 /*
2914  * This tasklet handles the completion of a DMA descriptor by
2915  * calling its callback and freeing it.
2916  */
2917 static void udma_vchan_complete(unsigned long arg)
2918 {
2919 	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
2920 	struct virt_dma_desc *vd, *_vd;
2921 	struct dmaengine_desc_callback cb;
2922 	LIST_HEAD(head);
2923 
2924 	spin_lock_irq(&vc->lock);
2925 	list_splice_tail_init(&vc->desc_completed, &head);
2926 	vd = vc->cyclic;
2927 	if (vd) {
2928 		vc->cyclic = NULL;
2929 		dmaengine_desc_get_callback(&vd->tx, &cb);
2930 	} else {
2931 		memset(&cb, 0, sizeof(cb));
2932 	}
2933 	spin_unlock_irq(&vc->lock);
2934 
2935 	udma_desc_pre_callback(vc, vd, NULL);
2936 	dmaengine_desc_callback_invoke(&cb, NULL);
2937 
2938 	list_for_each_entry_safe(vd, _vd, &head, node) {
2939 		struct dmaengine_result result;
2940 
2941 		dmaengine_desc_get_callback(&vd->tx, &cb);
2942 
2943 		list_del(&vd->node);
2944 
2945 		udma_desc_pre_callback(vc, vd, &result);
2946 		dmaengine_desc_callback_invoke(&cb, &result);
2947 
2948 		vchan_vdesc_fini(vd);
2949 	}
2950 }
2951 
2952 static void udma_free_chan_resources(struct dma_chan *chan)
2953 {
2954 	struct udma_chan *uc = to_udma_chan(chan);
2955 	struct udma_dev *ud = to_udma_dev(chan->device);
2956 
2957 	udma_terminate_all(chan);
2958 	if (uc->terminated_desc) {
2959 		udma_reset_chan(uc, false);
2960 		udma_reset_rings(uc);
2961 	}
2962 
2963 	cancel_delayed_work_sync(&uc->tx_drain.work);
2964 
2965 	if (uc->irq_num_ring > 0) {
2966 		free_irq(uc->irq_num_ring, uc);
2967 
2968 		uc->irq_num_ring = 0;
2969 	}
2970 	if (uc->irq_num_udma > 0) {
2971 		free_irq(uc->irq_num_udma, uc);
2972 
2973 		uc->irq_num_udma = 0;
2974 	}
2975 
2976 	/* Release PSI-L pairing */
2977 	if (uc->psil_paired) {
2978 		navss_psil_unpair(ud, uc->config.src_thread,
2979 				  uc->config.dst_thread);
2980 		uc->psil_paired = false;
2981 	}
2982 
2983 	vchan_free_chan_resources(&uc->vc);
2984 	tasklet_kill(&uc->vc.task);
2985 
2986 	udma_free_tx_resources(uc);
2987 	udma_free_rx_resources(uc);
2988 	udma_reset_uchan(uc);
2989 
2990 	if (uc->use_dma_pool) {
2991 		dma_pool_destroy(uc->hdesc_pool);
2992 		uc->use_dma_pool = false;
2993 	}
2994 }
2995 
2996 static struct platform_driver udma_driver;
2997 
2998 struct udma_filter_param {
2999 	int remote_thread_id;
3000 	u32 atype;
3001 };
3002 
3003 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
3004 {
3005 	struct udma_chan_config *ucc;
3006 	struct psil_endpoint_config *ep_config;
3007 	struct udma_filter_param *filter_param;
3008 	struct udma_chan *uc;
3009 	struct udma_dev *ud;
3010 
3011 	if (chan->device->dev->driver != &udma_driver.driver)
3012 		return false;
3013 
3014 	uc = to_udma_chan(chan);
3015 	ucc = &uc->config;
3016 	ud = uc->ud;
3017 	filter_param = param;
3018 
3019 	if (filter_param->atype > 2) {
3020 		dev_err(ud->dev, "Invalid channel atype: %u\n",
3021 			filter_param->atype);
3022 		return false;
3023 	}
3024 
3025 	ucc->remote_thread_id = filter_param->remote_thread_id;
3026 	ucc->atype = filter_param->atype;
3027 
3028 	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
3029 		ucc->dir = DMA_MEM_TO_DEV;
3030 	else
3031 		ucc->dir = DMA_DEV_TO_MEM;
3032 
3033 	ep_config = psil_get_ep_config(ucc->remote_thread_id);
3034 	if (IS_ERR(ep_config)) {
3035 		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
3036 			ucc->remote_thread_id);
3037 		ucc->dir = DMA_MEM_TO_MEM;
3038 		ucc->remote_thread_id = -1;
3039 		ucc->atype = 0;
3040 		return false;
3041 	}
3042 
3043 	ucc->pkt_mode = ep_config->pkt_mode;
3044 	ucc->channel_tpl = ep_config->channel_tpl;
3045 	ucc->notdpkt = ep_config->notdpkt;
3046 	ucc->ep_type = ep_config->ep_type;
3047 
3048 	if (ucc->ep_type != PSIL_EP_NATIVE) {
3049 		const struct udma_match_data *match_data = ud->match_data;
3050 
3051 		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
3052 			ucc->enable_acc32 = ep_config->pdma_acc32;
3053 		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
3054 			ucc->enable_burst = ep_config->pdma_burst;
3055 	}
3056 
3057 	ucc->needs_epib = ep_config->needs_epib;
3058 	ucc->psd_size = ep_config->psd_size;
3059 	ucc->metadata_size =
3060 			(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3061 			ucc->psd_size;
3062 
3063 	if (ucc->pkt_mode)
3064 		ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3065 				 ucc->metadata_size, ud->desc_align);
3066 
3067 	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3068 		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3069 
3070 	return true;
3071 }
3072 
3073 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3074 				      struct of_dma *ofdma)
3075 {
3076 	struct udma_dev *ud = ofdma->of_dma_data;
3077 	dma_cap_mask_t mask = ud->ddev.cap_mask;
3078 	struct udma_filter_param filter_param;
3079 	struct dma_chan *chan;
3080 
3081 	if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
3082 		return NULL;
3083 
3084 	filter_param.remote_thread_id = dma_spec->args[0];
3085 	if (dma_spec->args_count == 2)
3086 		filter_param.atype = dma_spec->args[1];
3087 	else
3088 		filter_param.atype = 0;
3089 
3090 	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
3091 				     ofdma->of_node);
3092 	if (!chan) {
3093 		dev_err(ud->dev, "failed to get channel in %s\n", __func__);
3094 		return ERR_PTR(-EINVAL);
3095 	}
3096 
3097 	return chan;
3098 }
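As the xlate above shows, dma_spec->args[0] carries the remote PSI-L thread ID and the optional args[1] an atype; consumers normally resolve a channel through the standard OF helper rather than calling the filter directly. A hedged sketch with a hypothetical "rx" dma-names entry:

/* Editor's illustrative sketch -- not part of the driver. */
#include <linux/dmaengine.h>

static struct dma_chan *example_get_rx_chan(struct device *dev)
{
	/* Goes through udma_of_xlate() -> udma_dma_filter_fn();
	 * returns an ERR_PTR on failure.
	 */
	return dma_request_chan(dev, "rx");
}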
3099 
3100 static struct udma_match_data am654_main_data = {
3101 	.psil_base = 0x1000,
3102 	.enable_memcpy_support = true,
3103 	.statictr_z_mask = GENMASK(11, 0),
3104 	.rchan_oes_offset = 0x2000,
3105 };
3106 
3107 static struct udma_match_data am654_mcu_data = {
3108 	.psil_base = 0x6000,
3109 	.enable_memcpy_support = false,
3110 	.statictr_z_mask = GENMASK(11, 0),
3111 	.rchan_oes_offset = 0x2000,
3112 };
3113 
3114 static struct udma_match_data j721e_main_data = {
3115 	.psil_base = 0x1000,
3116 	.enable_memcpy_support = true,
3117 	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3118 	.statictr_z_mask = GENMASK(23, 0),
3119 	.rchan_oes_offset = 0x400,
3120 };
3121 
3122 static struct udma_match_data j721e_mcu_data = {
3123 	.psil_base = 0x6000,
3124 	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
3125 	.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3126 	.statictr_z_mask = GENMASK(23, 0),
3127 	.rchan_oes_offset = 0x400,
3128 };
3129 
3130 static const struct of_device_id udma_of_match[] = {
3131 	{
3132 		.compatible = "ti,am654-navss-main-udmap",
3133 		.data = &am654_main_data,
3134 	},
3135 	{
3136 		.compatible = "ti,am654-navss-mcu-udmap",
3137 		.data = &am654_mcu_data,
3138 	}, {
3139 		.compatible = "ti,j721e-navss-main-udmap",
3140 		.data = &j721e_main_data,
3141 	}, {
3142 		.compatible = "ti,j721e-navss-mcu-udmap",
3143 		.data = &j721e_mcu_data,
3144 	},
3145 	{ /* Sentinel */ },
3146 };
3147 
3148 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3149 {
3150 	struct resource *res;
3151 	int i;
3152 
3153 	for (i = 0; i < MMR_LAST; i++) {
3154 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3155 						   mmr_names[i]);
3156 		ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
3157 		if (IS_ERR(ud->mmrs[i]))
3158 			return PTR_ERR(ud->mmrs[i]);
3159 	}
3160 
3161 	return 0;
3162 }
3163 
3164 static int udma_setup_resources(struct udma_dev *ud)
3165 {
3166 	struct device *dev = ud->dev;
3167 	int ch_count, ret, i, j;
3168 	u32 cap2, cap3;
3169 	struct ti_sci_resource_desc *rm_desc;
3170 	struct ti_sci_resource *rm_res, irq_res;
3171 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3172 	static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3173 						    "ti,sci-rm-range-rchan",
3174 						    "ti,sci-rm-range-rflow" };
3175 
3176 	cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2));
3177 	cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3));
3178 
3179 	ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
3180 	ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
3181 	ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
3182 	ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
3183 	ch_count  = ud->tchan_cnt + ud->rchan_cnt;
3184 
3185 	/* Set up the throughput level start indexes */
3186 	if (of_device_is_compatible(dev->of_node,
3187 				    "ti,am654-navss-main-udmap")) {
3188 		ud->tpl_levels = 2;
3189 		ud->tpl_start_idx[0] = 8;
3190 	} else if (of_device_is_compatible(dev->of_node,
3191 					   "ti,am654-navss-mcu-udmap")) {
3192 		ud->tpl_levels = 2;
3193 		ud->tpl_start_idx[0] = 2;
3194 	} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
3195 		ud->tpl_levels = 3;
3196 		ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
3197 		ud->tpl_start_idx[0] = ud->tpl_start_idx[1] +
3198 				       UDMA_CAP3_HCHAN_CNT(cap3);
3199 	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
3200 		ud->tpl_levels = 2;
3201 		ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
3202 	} else {
3203 		ud->tpl_levels = 1;
3204 	}
3205 
3206 	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3207 					   sizeof(unsigned long), GFP_KERNEL);
3208 	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3209 				  GFP_KERNEL);
3210 	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3211 					   sizeof(unsigned long), GFP_KERNEL);
3212 	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3213 				  GFP_KERNEL);
3214 	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3215 					      sizeof(unsigned long),
3216 					      GFP_KERNEL);
3217 	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3218 						  BITS_TO_LONGS(ud->rflow_cnt),
3219 						  sizeof(unsigned long),
3220 						  GFP_KERNEL);
3221 	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3222 					sizeof(unsigned long),
3223 					GFP_KERNEL);
3224 	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3225 				  GFP_KERNEL);
3226 
3227 	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3228 	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3229 	    !ud->rflows || !ud->rflow_in_use)
3230 		return -ENOMEM;
3231 
3232 	/*
3233 	 * RX flows with the same Ids as RX channels are reserved to be used
3234 	 * as default flows if remote HW can't generate flow_ids. Those
3235 	 * RX flows can be requested only explicitly by id.
3236 	 */
3237 	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3238 
3239 	/* by default no GP rflows are assigned to Linux */
3240 	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3241 
3242 	/* Get resource ranges from tisci */
3243 	for (i = 0; i < RM_RANGE_LAST; i++)
3244 		tisci_rm->rm_ranges[i] =
3245 			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3246 						    tisci_rm->tisci_dev_id,
3247 						    (char *)range_names[i]);
3248 
3249 	/* tchan ranges */
3250 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3251 	if (IS_ERR(rm_res)) {
3252 		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3253 	} else {
3254 		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3255 		for (i = 0; i < rm_res->sets; i++) {
3256 			rm_desc = &rm_res->desc[i];
3257 			bitmap_clear(ud->tchan_map, rm_desc->start,
3258 				     rm_desc->num);
3259 			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3260 				rm_desc->start, rm_desc->num);
3261 		}
3262 	}
3263 	irq_res.sets = rm_res->sets;
3264 
3265 	/* rchan and matching default flow ranges */
3266 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3267 	if (IS_ERR(rm_res)) {
3268 		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3269 	} else {
3270 		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3271 		for (i = 0; i < rm_res->sets; i++) {
3272 			rm_desc = &rm_res->desc[i];
3273 			bitmap_clear(ud->rchan_map, rm_desc->start,
3274 				     rm_desc->num);
3275 			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3276 				rm_desc->start, rm_desc->num);
3277 		}
3278 	}
3279 
3280 	irq_res.sets += rm_res->sets;
3281 	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3282 	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3283 	for (i = 0; i < rm_res->sets; i++) {
3284 		irq_res.desc[i].start = rm_res->desc[i].start;
3285 		irq_res.desc[i].num = rm_res->desc[i].num;
3286 	}
3287 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3288 	for (j = 0; j < rm_res->sets; j++, i++) {
3289 		irq_res.desc[i].start = rm_res->desc[j].start +
3290 					ud->match_data->rchan_oes_offset;
3291 		irq_res.desc[i].num = rm_res->desc[j].num;
3292 	}
3293 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3294 	kfree(irq_res.desc);
3295 	if (ret) {
3296 		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3297 		return ret;
3298 	}
3299 
3300 	/* GP rflow ranges */
3301 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3302 	if (IS_ERR(rm_res)) {
3303 		/* all gp flows are assigned exclusively to Linux */
3304 		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3305 			     ud->rflow_cnt - ud->rchan_cnt);
3306 	} else {
3307 		for (i = 0; i < rm_res->sets; i++) {
3308 			rm_desc = &rm_res->desc[i];
3309 			bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3310 				     rm_desc->num);
3311 			dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3312 				rm_desc->start, rm_desc->num);
3313 		}
3314 	}
3315 
3316 	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3317 	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3318 	if (!ch_count)
3319 		return -ENODEV;
3320 
3321 	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3322 				    GFP_KERNEL);
3323 	if (!ud->channels)
3324 		return -ENOMEM;
3325 
3326 	dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3327 		 ch_count,
3328 		 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3329 		 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3330 		 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3331 					       ud->rflow_cnt));
3332 
3333 	return ch_count;
3334 }
3335 
3336 static int udma_setup_rx_flush(struct udma_dev *ud)
3337 {
3338 	struct udma_rx_flush *rx_flush = &ud->rx_flush;
3339 	struct cppi5_desc_hdr_t *tr_desc;
3340 	struct cppi5_tr_type1_t *tr_req;
3341 	struct cppi5_host_desc_t *desc;
3342 	struct device *dev = ud->dev;
3343 	struct udma_hwdesc *hwdesc;
3344 	size_t tr_size;
3345 
3346 	/* Allocate 1K buffer for discarded data on RX channel teardown */
3347 	rx_flush->buffer_size = SZ_1K;
3348 	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
3349 					      GFP_KERNEL);
3350 	if (!rx_flush->buffer_vaddr)
3351 		return -ENOMEM;
3352 
3353 	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
3354 						rx_flush->buffer_size,
3355 						DMA_TO_DEVICE);
3356 	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
3357 		return -ENOMEM;
3358 
3359 	/* Set up descriptor to be used for TR mode */
3360 	hwdesc = &rx_flush->hwdescs[0];
3361 	tr_size = sizeof(struct cppi5_tr_type1_t);
3362 	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
3363 	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
3364 					ud->desc_align);
3365 
3366 	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3367 						GFP_KERNEL);
3368 	if (!hwdesc->cppi5_desc_vaddr)
3369 		return -ENOMEM;
3370 
3371 	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3372 						  hwdesc->cppi5_desc_size,
3373 						  DMA_TO_DEVICE);
3374 	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3375 		return -ENOMEM;
3376 
3377 	/* Start of the TR req records */
3378 	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
3379 	/* Start address of the TR response array */
3380 	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
3381 
3382 	tr_desc = hwdesc->cppi5_desc_vaddr;
3383 	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
3384 	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3385 	cppi5_desc_set_retpolicy(tr_desc, 0, 0);
3386 
3387 	tr_req = hwdesc->tr_req_base;
3388 	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
3389 		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3390 	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
3391 
3392 	tr_req->addr = rx_flush->buffer_paddr;
3393 	tr_req->icnt0 = rx_flush->buffer_size;
3394 	tr_req->icnt1 = 1;
3395 
3396 	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3397 				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3398 
3399 	/* Set up descriptor to be used for packet mode */
3400 	hwdesc = &rx_flush->hwdescs[1];
3401 	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3402 					CPPI5_INFO0_HDESC_EPIB_SIZE +
3403 					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
3404 					ud->desc_align);
3405 
3406 	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3407 						GFP_KERNEL);
3408 	if (!hwdesc->cppi5_desc_vaddr)
3409 		return -ENOMEM;
3410 
3411 	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3412 						  hwdesc->cppi5_desc_size,
3413 						  DMA_TO_DEVICE);
3414 	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3415 		return -ENOMEM;
3416 
3417 	desc = hwdesc->cppi5_desc_vaddr;
3418 	cppi5_hdesc_init(desc, 0, 0);
3419 	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3420 	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
3421 
3422 	cppi5_hdesc_attach_buf(desc,
3423 			       rx_flush->buffer_paddr, rx_flush->buffer_size,
3424 			       rx_flush->buffer_paddr, rx_flush->buffer_size);
3425 
3426 	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3427 				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3428 	return 0;
3429 }
3430 
3431 #ifdef CONFIG_DEBUG_FS
3432 static void udma_dbg_summary_show_chan(struct seq_file *s,
3433 				       struct dma_chan *chan)
3434 {
3435 	struct udma_chan *uc = to_udma_chan(chan);
3436 	struct udma_chan_config *ucc = &uc->config;
3437 
3438 	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
3439 		   chan->dbg_client_name ?: "in-use");
3440 	seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
3441 
3442 	switch (uc->config.dir) {
3443 	case DMA_MEM_TO_MEM:
3444 		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
3445 			   ucc->src_thread, ucc->dst_thread);
3446 		break;
3447 	case DMA_DEV_TO_MEM:
3448 		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
3449 			   ucc->src_thread, ucc->dst_thread);
3450 		break;
3451 	case DMA_MEM_TO_DEV:
3452 		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
3453 			   ucc->src_thread, ucc->dst_thread);
3454 		break;
3455 	default:
3456 		seq_printf(s, ")\n");
3457 		return;
3458 	}
3459 
3460 	if (ucc->ep_type == PSIL_EP_NATIVE) {
3461 		seq_printf(s, "PSI-L Native");
3462 		if (ucc->metadata_size) {
3463 			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
3464 			if (ucc->psd_size)
3465 				seq_printf(s, " PSDsize:%u", ucc->psd_size);
3466 			seq_printf(s, " ]");
3467 		}
3468 	} else {
3469 		seq_printf(s, "PDMA");
3470 		if (ucc->enable_acc32 || ucc->enable_burst)
3471 			seq_printf(s, "[%s%s ]",
3472 				   ucc->enable_acc32 ? " ACC32" : "",
3473 				   ucc->enable_burst ? " BURST" : "");
3474 	}
3475 
3476 	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
3477 }
3478 
3479 static void udma_dbg_summary_show(struct seq_file *s,
3480 				  struct dma_device *dma_dev)
3481 {
3482 	struct dma_chan *chan;
3483 
3484 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
3485 		if (chan->client_count)
3486 			udma_dbg_summary_show_chan(s, chan);
3487 	}
3488 }
3489 #endif /* CONFIG_DEBUG_FS */
3490 
3491 #define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3492 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3493 				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3494 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3495 				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
3496 
3497 static int udma_probe(struct platform_device *pdev)
3498 {
3499 	struct device_node *navss_node = pdev->dev.parent->of_node;
3500 	struct device *dev = &pdev->dev;
3501 	struct udma_dev *ud;
3502 	const struct of_device_id *match;
3503 	int i, ret;
3504 	int ch_count;
3505 
3506 	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
3507 	if (ret)
3508 		dev_err(dev, "failed to set DMA mask\n");
3509 
3510 	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3511 	if (!ud)
3512 		return -ENOMEM;
3513 
3514 	ret = udma_get_mmrs(pdev, ud);
3515 	if (ret)
3516 		return ret;
3517 
3518 	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3519 	if (IS_ERR(ud->tisci_rm.tisci))
3520 		return PTR_ERR(ud->tisci_rm.tisci);
3521 
3522 	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
3523 				   &ud->tisci_rm.tisci_dev_id);
3524 	if (ret) {
3525 		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
3526 		return ret;
3527 	}
3528 	pdev->id = ud->tisci_rm.tisci_dev_id;
3529 
3530 	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
3531 				   &ud->tisci_rm.tisci_navss_dev_id);
3532 	if (ret) {
3533 		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
3534 		return ret;
3535 	}
3536 
3537 	ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
3538 	if (!ret && ud->atype > 2) {
3539 		dev_err(dev, "Invalid atype: %u\n", ud->atype);
3540 		return -EINVAL;
3541 	}
3542 
3543 	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3544 	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3545 
3546 	ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3547 	if (IS_ERR(ud->ringacc))
3548 		return PTR_ERR(ud->ringacc);
3549 
3550 	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
3551 					    DOMAIN_BUS_TI_SCI_INTA_MSI);
3552 	if (!dev->msi_domain) {
3553 		dev_err(dev, "Failed to get MSI domain\n");
3554 		return -EPROBE_DEFER;
3555 	}
3556 
3557 	match = of_match_node(udma_of_match, dev->of_node);
3558 	if (!match) {
3559 		dev_err(dev, "No compatible match found\n");
3560 		return -ENODEV;
3561 	}
3562 	ud->match_data = match->data;
3563 
3564 	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
3565 	dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
3566 
3567 	ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
3568 	ud->ddev.device_config = udma_slave_config;
3569 	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
3570 	ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
3571 	ud->ddev.device_issue_pending = udma_issue_pending;
3572 	ud->ddev.device_tx_status = udma_tx_status;
3573 	ud->ddev.device_pause = udma_pause;
3574 	ud->ddev.device_resume = udma_resume;
3575 	ud->ddev.device_terminate_all = udma_terminate_all;
3576 	ud->ddev.device_synchronize = udma_synchronize;
3577 #ifdef CONFIG_DEBUG_FS
3578 	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
3579 #endif
3580 
3581 	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
3582 	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
3583 	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
3584 	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3585 	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3586 	ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
3587 	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
3588 				       DESC_METADATA_ENGINE;
3589 	if (ud->match_data->enable_memcpy_support) {
3590 		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
3591 		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
3592 		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
3593 	}
3594 
3595 	ud->ddev.dev = dev;
3596 	ud->dev = dev;
3597 	ud->psil_base = ud->match_data->psil_base;
3598 
3599 	INIT_LIST_HEAD(&ud->ddev.channels);
3600 	INIT_LIST_HEAD(&ud->desc_to_purge);
3601 
3602 	ch_count = udma_setup_resources(ud);
3603 	if (ch_count <= 0)
3604 		return ch_count;
3605 
3606 	spin_lock_init(&ud->lock);
3607 	INIT_WORK(&ud->purge_work, udma_purge_desc_work);
3608 
3609 	ud->desc_align = 64;
3610 	if (ud->desc_align < dma_get_cache_alignment())
3611 		ud->desc_align = dma_get_cache_alignment();
3612 
3613 	ret = udma_setup_rx_flush(ud);
3614 	if (ret)
3615 		return ret;
3616 
3617 	for (i = 0; i < ud->tchan_cnt; i++) {
3618 		struct udma_tchan *tchan = &ud->tchans[i];
3619 
3620 		tchan->id = i;
3621 		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
3622 	}
3623 
3624 	for (i = 0; i < ud->rchan_cnt; i++) {
3625 		struct udma_rchan *rchan = &ud->rchans[i];
3626 
3627 		rchan->id = i;
3628 		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
3629 	}
3630 
3631 	for (i = 0; i < ud->rflow_cnt; i++) {
3632 		struct udma_rflow *rflow = &ud->rflows[i];
3633 
3634 		rflow->id = i;
3635 	}
3636 
3637 	for (i = 0; i < ch_count; i++) {
3638 		struct udma_chan *uc = &ud->channels[i];
3639 
3640 		uc->ud = ud;
3641 		uc->vc.desc_free = udma_desc_free;
3642 		uc->id = i;
3643 		uc->tchan = NULL;
3644 		uc->rchan = NULL;
3645 		uc->config.remote_thread_id = -1;
3646 		uc->config.dir = DMA_MEM_TO_MEM;
3647 		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
3648 					  dev_name(dev), i);
3649 
3650 		vchan_init(&uc->vc, &ud->ddev);
3651 		/* Use custom vchan completion handling */
3652 		tasklet_init(&uc->vc.task, udma_vchan_complete,
3653 			     (unsigned long)&uc->vc);
3654 		init_completion(&uc->teardown_completed);
3655 		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
3656 	}
3657 
3658 	ret = dma_async_device_register(&ud->ddev);
3659 	if (ret) {
3660 		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
3661 		return ret;
3662 	}
3663 
3664 	platform_set_drvdata(pdev, ud);
3665 
3666 	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
3667 	if (ret) {
3668 		dev_err(dev, "failed to register of_dma controller\n");
3669 		dma_async_device_unregister(&ud->ddev);
3670 	}
3671 
3672 	return ret;
3673 }
3674 
3675 static struct platform_driver udma_driver = {
3676 	.driver = {
3677 		.name	= "ti-udma",
3678 		.of_match_table = udma_of_match,
3679 		.suppress_bind_attrs = true,
3680 	},
3681 	.probe		= udma_probe,
3682 };
3683 builtin_platform_driver(udma_driver);
3684 
3685 /* Private interfaces to UDMA */
3686 #include "k3-udma-private.c"
3687