xref: /openbmc/linux/drivers/dma/ti/k3-udma.c (revision 7e24a55b2122746c2eef192296fc84624354f895)
125dcb5ddSPeter Ujfalusi // SPDX-License-Identifier: GPL-2.0
225dcb5ddSPeter Ujfalusi /*
325dcb5ddSPeter Ujfalusi  *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
425dcb5ddSPeter Ujfalusi  *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
525dcb5ddSPeter Ujfalusi  */
625dcb5ddSPeter Ujfalusi 
725dcb5ddSPeter Ujfalusi #include <linux/kernel.h>
856b0a668SKevin Hilman #include <linux/module.h>
91c83767cSVignesh Raghavendra #include <linux/delay.h>
1025dcb5ddSPeter Ujfalusi #include <linux/dmaengine.h>
1125dcb5ddSPeter Ujfalusi #include <linux/dma-mapping.h>
1225dcb5ddSPeter Ujfalusi #include <linux/dmapool.h>
1325dcb5ddSPeter Ujfalusi #include <linux/err.h>
1425dcb5ddSPeter Ujfalusi #include <linux/init.h>
1525dcb5ddSPeter Ujfalusi #include <linux/interrupt.h>
1625dcb5ddSPeter Ujfalusi #include <linux/list.h>
1725dcb5ddSPeter Ujfalusi #include <linux/platform_device.h>
1825dcb5ddSPeter Ujfalusi #include <linux/slab.h>
1925dcb5ddSPeter Ujfalusi #include <linux/spinlock.h>
20f9b0366fSPeter Ujfalusi #include <linux/sys_soc.h>
2125dcb5ddSPeter Ujfalusi #include <linux/of.h>
2225dcb5ddSPeter Ujfalusi #include <linux/of_dma.h>
2325dcb5ddSPeter Ujfalusi #include <linux/of_irq.h>
2425dcb5ddSPeter Ujfalusi #include <linux/workqueue.h>
2525dcb5ddSPeter Ujfalusi #include <linux/completion.h>
2625dcb5ddSPeter Ujfalusi #include <linux/soc/ti/k3-ringacc.h>
2725dcb5ddSPeter Ujfalusi #include <linux/soc/ti/ti_sci_protocol.h>
2825dcb5ddSPeter Ujfalusi #include <linux/soc/ti/ti_sci_inta_msi.h>
2901779473SPeter Ujfalusi #include <linux/dma/k3-event-router.h>
3025dcb5ddSPeter Ujfalusi #include <linux/dma/ti-cppi5.h>
3125dcb5ddSPeter Ujfalusi 
3225dcb5ddSPeter Ujfalusi #include "../virt-dma.h"
3325dcb5ddSPeter Ujfalusi #include "k3-udma.h"
3425dcb5ddSPeter Ujfalusi #include "k3-psil-priv.h"
3525dcb5ddSPeter Ujfalusi 
/*
 * Static TR parameters; each field maps to the real-time register word
 * noted next to it (RPSTR0/RPSTR1).
 */
struct udma_static_tr {
	u8 elsize; /* RPSTR0 */
	u16 elcnt; /* RPSTR0 */
	u16 bstcnt; /* RPSTR1 */
};
4125dcb5ddSPeter Ujfalusi 
4225dcb5ddSPeter Ujfalusi #define K3_UDMA_MAX_RFLOWS		1024
4325dcb5ddSPeter Ujfalusi #define K3_UDMA_DEFAULT_RING_SIZE	16
4425dcb5ddSPeter Ujfalusi 
4525dcb5ddSPeter Ujfalusi /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
4625dcb5ddSPeter Ujfalusi #define UDMA_RFLOW_SRCTAG_NONE		0
4725dcb5ddSPeter Ujfalusi #define UDMA_RFLOW_SRCTAG_CFG_TAG	1
4825dcb5ddSPeter Ujfalusi #define UDMA_RFLOW_SRCTAG_FLOW_ID	2
4925dcb5ddSPeter Ujfalusi #define UDMA_RFLOW_SRCTAG_SRC_TAG	4
5025dcb5ddSPeter Ujfalusi 
5125dcb5ddSPeter Ujfalusi #define UDMA_RFLOW_DSTTAG_NONE		0
5225dcb5ddSPeter Ujfalusi #define UDMA_RFLOW_DSTTAG_CFG_TAG	1
5325dcb5ddSPeter Ujfalusi #define UDMA_RFLOW_DSTTAG_FLOW_ID	2
5425dcb5ddSPeter Ujfalusi #define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
5525dcb5ddSPeter Ujfalusi #define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5
5625dcb5ddSPeter Ujfalusi 
5725dcb5ddSPeter Ujfalusi struct udma_chan;
5825dcb5ddSPeter Ujfalusi 
/* Flavours of K3 DMA hardware handled by this driver */
enum k3_dma_type {
	DMA_TYPE_UDMA = 0,
	DMA_TYPE_BCDMA,
	DMA_TYPE_PKTDMA,
};
6401779473SPeter Ujfalusi 
/* Indices into udma_dev->mmrs[] and mmr_names[] */
enum udma_mmr {
	MMR_GCFG = 0,	/* global configuration region */
	MMR_BCHANRT,	/* block-copy channel real-time registers (BCDMA) */
	MMR_RCHANRT,	/* RX channel real-time registers */
	MMR_TCHANRT,	/* TX channel real-time registers */
	MMR_LAST,	/* number of register regions */
};
7225dcb5ddSPeter Ujfalusi 
/*
 * MMIO region names, indexed by enum udma_mmr.
 * NOTE(review): presumably matched against the device-tree "reg-names"
 * when mapping the regions — confirm in the probe/setup code.
 */
static const char * const mmr_names[] = {
	[MMR_GCFG] = "gcfg",
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
};
7925dcb5ddSPeter Ujfalusi 
/* Transmit channel resources (also aliased as udma_bchan for BCDMA) */
struct udma_tchan {
	void __iomem *reg_rt;	/* channel real-time register region */

	int id;			/* channel index within the DMA instance */
	struct k3_ring *t_ring; /* Transmit ring */
	struct k3_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */

};
8925dcb5ddSPeter Ujfalusi 
9001779473SPeter Ujfalusi #define udma_bchan udma_tchan
9101779473SPeter Ujfalusi 
/* Receive flow resources: the ring pair feeding/draining an RX flow */
struct udma_rflow {
	int id;
	struct k3_ring *fd_ring; /* Free Descriptor ring */
	struct k3_ring *r_ring; /* Receive ring */
};
9725dcb5ddSPeter Ujfalusi 
/* Receive channel resources */
struct udma_rchan {
	void __iomem *reg_rt;	/* channel real-time register region */

	int id;			/* channel index within the DMA instance */
};
10325dcb5ddSPeter Ujfalusi 
/*
 * Output event source (OES) offsets for the different DMA flavours;
 * stored in udma_soc_data as they vary per SoC.
 */
struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};
12001779473SPeter Ujfalusi 
12125dcb5ddSPeter Ujfalusi #define UDMA_FLAG_PDMA_ACC32		BIT(0)
12225dcb5ddSPeter Ujfalusi #define UDMA_FLAG_PDMA_BURST		BIT(1)
1235e1cb1cbSPeter Ujfalusi #define UDMA_FLAG_TDTYPE		BIT(2)
124046d679bSPeter Ujfalusi #define UDMA_FLAG_BURST_SIZE		BIT(3)
125046d679bSPeter Ujfalusi #define UDMA_FLAGS_J7_CLASS		(UDMA_FLAG_PDMA_ACC32 | \
126046d679bSPeter Ujfalusi 					 UDMA_FLAG_PDMA_BURST | \
127046d679bSPeter Ujfalusi 					 UDMA_FLAG_TDTYPE | \
128046d679bSPeter Ujfalusi 					 UDMA_FLAG_BURST_SIZE)
12925dcb5ddSPeter Ujfalusi 
/* Compile-time (of_match) configuration for one DMA instance type */
struct udma_match_data {
	enum k3_dma_type type;		/* UDMA / BCDMA / PKTDMA */
	u32 psil_base;			/* base PSI-L thread ID of this instance */
	bool enable_memcpy_support;	/* expose DMA_MEMCPY capability */
	u32 flags;			/* UDMA_FLAG_* bits */
	u32 statictr_z_mask;		/* valid bits of the static TR Z count */
	u8 burst_size[3];		/* per-TPL burst size (see UDMA_FLAG_BURST_SIZE) */
	struct udma_soc_data *soc_data;
};
139f9b0366fSPeter Ujfalusi 
/* Per-SoC data that cannot be derived from the match data alone */
struct udma_soc_data {
	struct udma_oes_offsets oes;
	u32 bcdma_trigger_event_offset;	/* global event offset for BCDMA triggers */
};
14425dcb5ddSPeter Ujfalusi 
/* One hardware (CPPI5) descriptor allocation and its internal layout */
struct udma_hwdesc {
	size_t cppi5_desc_size;
	void *cppi5_desc_vaddr;		/* CPU address of the descriptor memory */
	dma_addr_t cppi5_desc_paddr;	/* bus address pushed to the rings */

	/* TR descriptor internal pointers */
	void *tr_req_base;
	struct cppi5_tr_resp_t *tr_resp_base;
};
15416cd3c67SPeter Ujfalusi 
/*
 * Dummy descriptors + bounce buffer used to flush an RX channel;
 * hwdescs[] is indexed by pkt_mode (0 = TR mode, 1 = packet mode),
 * see udma_get_rx_flush_hwdesc_paddr().
 */
struct udma_rx_flush {
	struct udma_hwdesc hwdescs[2];

	size_t buffer_size;
	void *buffer_vaddr;
	dma_addr_t buffer_paddr;
};
16216cd3c67SPeter Ujfalusi 
/*
 * Throughput-level map for a channel type: 'levels' entries of
 * start_idx[] give the first channel ID of each level
 * (see udma_get_chan_tpl_index()).
 */
struct udma_tpl {
	u8 levels;
	u32 start_idx[3];
};
16788448980SPeter Ujfalusi 
/* Per-instance driver state for one UDMA/BCDMA/PKTDMA DMA controller */
struct udma_dev {
	struct dma_device ddev;		/* dmaengine provider */
	struct device *dev;
	void __iomem *mmrs[MMR_LAST];	/* mapped regions, see enum udma_mmr */
	const struct udma_match_data *match_data;
	const struct udma_soc_data *soc_data;

	struct udma_tpl bchan_tpl;
	struct udma_tpl tchan_tpl;
	struct udma_tpl rchan_tpl;

	size_t desc_align; /* alignment to use for descriptors */

	struct udma_tisci_rm tisci_rm;	/* TISCI resource-management handles */

	struct k3_ringacc *ringacc;

	/* Deferred descriptor freeing (see udma_desc_free()) */
	struct work_struct purge_work;
	struct list_head desc_to_purge;	/* protected by 'lock' */
	spinlock_t lock;

	struct udma_rx_flush rx_flush;

	/* Resource counts and allocation bitmaps per resource type */
	int bchan_cnt;
	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	int tflow_cnt;
	unsigned long *bchan_map;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_gp_map;
	unsigned long *rflow_gp_map_allocated;
	unsigned long *rflow_in_use;
	unsigned long *tflow_map;

	struct udma_bchan *bchans;
	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;
	u32 atype;
	u32 asel;
};
21525dcb5ddSPeter Ujfalusi 
/*
 * Driver-private transfer descriptor: embeds the virt-dma descriptor and
 * carries a variable number of hardware (CPPI5) descriptors at the tail.
 */
struct udma_desc {
	struct virt_dma_desc vd;

	bool terminated;	/* transfer was terminated before completion */

	enum dma_transfer_direction dir;

	struct udma_static_tr static_tr;
	u32 residue;		/* remaining byte count */

	unsigned int sglen;
	unsigned int desc_idx; /* Only used for cyclic in packet mode */
	unsigned int tr_idx;

	u32 metadata_size;
	void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */

	unsigned int hwdesc_count;
	struct udma_hwdesc hwdesc[];	/* flexible array, hwdesc_count entries */
};
23625dcb5ddSPeter Ujfalusi 
/* Lifecycle state of a channel, tracked in udma_chan->state */
enum udma_chan_state {
	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};
24225dcb5ddSPeter Ujfalusi 
/* State for monitoring TX completion drain via delayed work */
struct udma_tx_drain {
	struct delayed_work work;
	ktime_t tstamp;		/* timestamp of the last progress observation */
	u32 residue;		/* residue seen at that timestamp */
};
24825dcb5ddSPeter Ujfalusi 
/* Aggregated per-channel configuration parameters */
struct udma_chan_config {
	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	bool notdpkt; /* Suppress sending TDC packet */
	int remote_thread_id;	/* PSI-L thread of the remote peer, -1 if unset */
	u32 atype;
	u32 asel;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	bool enable_acc32;
	bool enable_burst;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	u32 tr_trigger_type;
	unsigned long tx_flags;

	/* PKDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_transfer_direction dir;
};
27625dcb5ddSPeter Ujfalusi 
/* Per-channel driver state, embedding the virt-dma channel */
struct udma_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config	cfg;
	struct udma_dev *ud;		/* owning DMA instance */
	struct device *dma_dev;		/* device used for DMA API allocations */
	struct udma_desc *desc;		/* currently active descriptor */
	struct udma_desc *terminated_desc;
	struct udma_static_tr static_tr;
	char *name;

	/* Allocated hardware resources; unused ones stay NULL */
	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	bool psil_paired;		/* PSI-L threads are currently paired */

	int irq_num_ring;
	int irq_num_udma;

	bool cyclic;
	bool paused;

	enum udma_chan_state state;
	struct completion teardown_completed;

	struct udma_tx_drain tx_drain;

	/* Channel configuration parameters */
	struct udma_chan_config config;
	/* Channel configuration parameters (backup) */
	struct udma_chan_config backup_config;

	/* dmapool for packet mode descriptors */
	bool use_dma_pool;
	struct dma_pool *hdesc_pool;

	u32 id;
};
31625dcb5ddSPeter Ujfalusi 
to_udma_dev(struct dma_device * d)31725dcb5ddSPeter Ujfalusi static inline struct udma_dev *to_udma_dev(struct dma_device *d)
31825dcb5ddSPeter Ujfalusi {
31925dcb5ddSPeter Ujfalusi 	return container_of(d, struct udma_dev, ddev);
32025dcb5ddSPeter Ujfalusi }
32125dcb5ddSPeter Ujfalusi 
to_udma_chan(struct dma_chan * c)32225dcb5ddSPeter Ujfalusi static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
32325dcb5ddSPeter Ujfalusi {
32425dcb5ddSPeter Ujfalusi 	return container_of(c, struct udma_chan, vc.chan);
32525dcb5ddSPeter Ujfalusi }
32625dcb5ddSPeter Ujfalusi 
to_udma_desc(struct dma_async_tx_descriptor * t)32725dcb5ddSPeter Ujfalusi static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
32825dcb5ddSPeter Ujfalusi {
32925dcb5ddSPeter Ujfalusi 	return container_of(t, struct udma_desc, vd.tx);
33025dcb5ddSPeter Ujfalusi }
33125dcb5ddSPeter Ujfalusi 
33225dcb5ddSPeter Ujfalusi /* Generic register access functions */
udma_read(void __iomem * base,int reg)33325dcb5ddSPeter Ujfalusi static inline u32 udma_read(void __iomem *base, int reg)
33425dcb5ddSPeter Ujfalusi {
33525dcb5ddSPeter Ujfalusi 	return readl(base + reg);
33625dcb5ddSPeter Ujfalusi }
33725dcb5ddSPeter Ujfalusi 
/* 32-bit MMIO write of val at base + reg */
static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	void __iomem *addr = base + reg;

	writel(val, addr);
}
34225dcb5ddSPeter Ujfalusi 
/*
 * Read-modify-write of the bits selected by @mask at base + reg.
 * The register is only written back if the value actually changes.
 */
static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 cur, next;

	cur = readl(base + reg);
	next = (cur & ~mask) | (val & mask);

	/* Skip the MMIO write when nothing would change */
	if (next != cur)
		writel(next, base + reg);
}
35525dcb5ddSPeter Ujfalusi 
35625dcb5ddSPeter Ujfalusi /* TCHANRT */
udma_tchanrt_read(struct udma_chan * uc,int reg)357db375dcbSPeter Ujfalusi static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
35825dcb5ddSPeter Ujfalusi {
359db375dcbSPeter Ujfalusi 	if (!uc->tchan)
36025dcb5ddSPeter Ujfalusi 		return 0;
361db375dcbSPeter Ujfalusi 	return udma_read(uc->tchan->reg_rt, reg);
36225dcb5ddSPeter Ujfalusi }
36325dcb5ddSPeter Ujfalusi 
/* Write a TX channel real-time register; no-op without a tchan */
static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	struct udma_tchan *tchan = uc->tchan;

	if (tchan)
		udma_write(tchan->reg_rt, reg, val);
}
37025dcb5ddSPeter Ujfalusi 
/* Update bits in a TX channel real-time register; no-op without a tchan */
static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	struct udma_tchan *tchan = uc->tchan;

	if (tchan)
		udma_update_bits(tchan->reg_rt, reg, mask, val);
}
37825dcb5ddSPeter Ujfalusi 
37925dcb5ddSPeter Ujfalusi /* RCHANRT */
udma_rchanrt_read(struct udma_chan * uc,int reg)380db375dcbSPeter Ujfalusi static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
38125dcb5ddSPeter Ujfalusi {
382db375dcbSPeter Ujfalusi 	if (!uc->rchan)
38325dcb5ddSPeter Ujfalusi 		return 0;
384db375dcbSPeter Ujfalusi 	return udma_read(uc->rchan->reg_rt, reg);
38525dcb5ddSPeter Ujfalusi }
38625dcb5ddSPeter Ujfalusi 
/* Write an RX channel real-time register; no-op without an rchan */
static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	struct udma_rchan *rchan = uc->rchan;

	if (rchan)
		udma_write(rchan->reg_rt, reg, val);
}
39325dcb5ddSPeter Ujfalusi 
/* Update bits in an RX channel real-time register; no-op without an rchan */
static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	struct udma_rchan *rchan = uc->rchan;

	if (rchan)
		udma_update_bits(rchan->reg_rt, reg, mask, val);
}
40125dcb5ddSPeter Ujfalusi 
/*
 * Pair two PSI-L threads through TISCI.
 * Returns 0 on success or a negative error code from the TISCI call.
 */
static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	struct udma_tisci_rm *rm = &ud->tisci_rm;

	/* Destination thread IDs carry the PSI-L destination offset bit */
	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

	return rm->tisci_psil_ops->pair(rm->tisci, rm->tisci_navss_dev_id,
					src_thread, dst_thread);
}
41125dcb5ddSPeter Ujfalusi 
/*
 * Unpair two previously paired PSI-L threads through TISCI.
 * Returns 0 on success or a negative error code from the TISCI call.
 */
static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
			     u32 dst_thread)
{
	struct udma_tisci_rm *rm = &ud->tisci_rm;

	/* Destination thread IDs carry the PSI-L destination offset bit */
	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

	return rm->tisci_psil_ops->unpair(rm->tisci, rm->tisci_navss_dev_id,
					  src_thread, dst_thread);
}
42225dcb5ddSPeter Ujfalusi 
/*
 * Configure the channel's device for DMA coherency based on the ASEL value.
 * For ASEL 14/15 the per-channel device is used for mappings with a 48-bit
 * DMA mask and coherency enabled; any other non-zero value is rejected.
 * NOTE(review): the special meaning of ASEL 14/15 is inferred from this
 * mapping — confirm against the SoC documentation.
 */
static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
{
	struct device *chan_dev = &chan->dev->device;

	if (asel == 0) {
		/* No special handling for the channel */
		chan->dev->chan_dma_dev = false;

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	} else if (asel == 14 || asel == 15) {
		/* Clients must map via the channel device from now on */
		chan->dev->chan_dma_dev = true;

		chan_dev->dma_coherent = true;
		dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
		/* Inherit the DMA parameters of the parent (DMA) device */
		chan_dev->dma_parms = chan_dev->parent->dma_parms;
	} else {
		dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	}
}
44601779473SPeter Ujfalusi 
udma_get_chan_tpl_index(struct udma_tpl * tpl_map,int chan_id)447046d679bSPeter Ujfalusi static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
448046d679bSPeter Ujfalusi {
449046d679bSPeter Ujfalusi 	int i;
450046d679bSPeter Ujfalusi 
451046d679bSPeter Ujfalusi 	for (i = 0; i < tpl_map->levels; i++) {
452046d679bSPeter Ujfalusi 		if (chan_id >= tpl_map->start_idx[i])
453046d679bSPeter Ujfalusi 			return i;
454046d679bSPeter Ujfalusi 	}
455046d679bSPeter Ujfalusi 
456046d679bSPeter Ujfalusi 	return 0;
457046d679bSPeter Ujfalusi }
458046d679bSPeter Ujfalusi 
udma_reset_uchan(struct udma_chan * uc)45925dcb5ddSPeter Ujfalusi static void udma_reset_uchan(struct udma_chan *uc)
46025dcb5ddSPeter Ujfalusi {
46125dcb5ddSPeter Ujfalusi 	memset(&uc->config, 0, sizeof(uc->config));
46225dcb5ddSPeter Ujfalusi 	uc->config.remote_thread_id = -1;
463d2abc982SPeter Ujfalusi 	uc->config.mapped_channel_id = -1;
464d2abc982SPeter Ujfalusi 	uc->config.default_flow_id = -1;
46525dcb5ddSPeter Ujfalusi 	uc->state = UDMA_CHAN_IS_IDLE;
46625dcb5ddSPeter Ujfalusi }
46725dcb5ddSPeter Ujfalusi 
udma_dump_chan_stdata(struct udma_chan * uc)46825dcb5ddSPeter Ujfalusi static void udma_dump_chan_stdata(struct udma_chan *uc)
46925dcb5ddSPeter Ujfalusi {
47025dcb5ddSPeter Ujfalusi 	struct device *dev = uc->ud->dev;
47125dcb5ddSPeter Ujfalusi 	u32 offset;
47225dcb5ddSPeter Ujfalusi 	int i;
47325dcb5ddSPeter Ujfalusi 
47425dcb5ddSPeter Ujfalusi 	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
47525dcb5ddSPeter Ujfalusi 		dev_dbg(dev, "TCHAN State data:\n");
47625dcb5ddSPeter Ujfalusi 		for (i = 0; i < 32; i++) {
477bc7e5523SPeter Ujfalusi 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
47825dcb5ddSPeter Ujfalusi 			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
479db375dcbSPeter Ujfalusi 				udma_tchanrt_read(uc, offset));
48025dcb5ddSPeter Ujfalusi 		}
48125dcb5ddSPeter Ujfalusi 	}
48225dcb5ddSPeter Ujfalusi 
48325dcb5ddSPeter Ujfalusi 	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
48425dcb5ddSPeter Ujfalusi 		dev_dbg(dev, "RCHAN State data:\n");
48525dcb5ddSPeter Ujfalusi 		for (i = 0; i < 32; i++) {
486bc7e5523SPeter Ujfalusi 			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
48725dcb5ddSPeter Ujfalusi 			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
488db375dcbSPeter Ujfalusi 				udma_rchanrt_read(uc, offset));
48925dcb5ddSPeter Ujfalusi 		}
49025dcb5ddSPeter Ujfalusi 	}
49125dcb5ddSPeter Ujfalusi }
49225dcb5ddSPeter Ujfalusi 
udma_curr_cppi5_desc_paddr(struct udma_desc * d,int idx)49325dcb5ddSPeter Ujfalusi static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
49425dcb5ddSPeter Ujfalusi 						    int idx)
49525dcb5ddSPeter Ujfalusi {
49625dcb5ddSPeter Ujfalusi 	return d->hwdesc[idx].cppi5_desc_paddr;
49725dcb5ddSPeter Ujfalusi }
49825dcb5ddSPeter Ujfalusi 
udma_curr_cppi5_desc_vaddr(struct udma_desc * d,int idx)49925dcb5ddSPeter Ujfalusi static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
50025dcb5ddSPeter Ujfalusi {
50125dcb5ddSPeter Ujfalusi 	return d->hwdesc[idx].cppi5_desc_vaddr;
50225dcb5ddSPeter Ujfalusi }
50325dcb5ddSPeter Ujfalusi 
/*
 * Look up the driver descriptor whose current hardware descriptor has the
 * bus address @paddr. The terminated descriptor is checked before the
 * active one; NULL is returned if neither matches.
 */
static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
						   dma_addr_t paddr)
{
	struct udma_desc *candidates[] = { uc->terminated_desc, uc->desc };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(candidates); i++) {
		struct udma_desc *d = candidates[i];

		if (d && udma_curr_cppi5_desc_paddr(d, d->desc_idx) == paddr)
			return d;
	}

	return NULL;
}
53025dcb5ddSPeter Ujfalusi 
udma_free_hwdesc(struct udma_chan * uc,struct udma_desc * d)53125dcb5ddSPeter Ujfalusi static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
53225dcb5ddSPeter Ujfalusi {
53325dcb5ddSPeter Ujfalusi 	if (uc->use_dma_pool) {
53425dcb5ddSPeter Ujfalusi 		int i;
53525dcb5ddSPeter Ujfalusi 
53625dcb5ddSPeter Ujfalusi 		for (i = 0; i < d->hwdesc_count; i++) {
53725dcb5ddSPeter Ujfalusi 			if (!d->hwdesc[i].cppi5_desc_vaddr)
53825dcb5ddSPeter Ujfalusi 				continue;
53925dcb5ddSPeter Ujfalusi 
54025dcb5ddSPeter Ujfalusi 			dma_pool_free(uc->hdesc_pool,
54125dcb5ddSPeter Ujfalusi 				      d->hwdesc[i].cppi5_desc_vaddr,
54225dcb5ddSPeter Ujfalusi 				      d->hwdesc[i].cppi5_desc_paddr);
54325dcb5ddSPeter Ujfalusi 
54425dcb5ddSPeter Ujfalusi 			d->hwdesc[i].cppi5_desc_vaddr = NULL;
54525dcb5ddSPeter Ujfalusi 		}
54625dcb5ddSPeter Ujfalusi 	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
54701779473SPeter Ujfalusi 		dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
54825dcb5ddSPeter Ujfalusi 				  d->hwdesc[0].cppi5_desc_vaddr,
54925dcb5ddSPeter Ujfalusi 				  d->hwdesc[0].cppi5_desc_paddr);
55025dcb5ddSPeter Ujfalusi 
55125dcb5ddSPeter Ujfalusi 		d->hwdesc[0].cppi5_desc_vaddr = NULL;
55225dcb5ddSPeter Ujfalusi 	}
55325dcb5ddSPeter Ujfalusi }
55425dcb5ddSPeter Ujfalusi 
/*
 * Worker that frees descriptors queued on ud->desc_to_purge by
 * udma_desc_free(). Freeing is deferred here — NOTE(review): presumably
 * because the free callback may run in atomic context where
 * dma_free_coherent() is not allowed; confirm against callers.
 */
static void udma_purge_desc_work(struct work_struct *work)
{
	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	/* Steal the whole pending list under the lock, then free unlocked */
	spin_lock_irqsave(&ud->lock, flags);
	list_splice_tail_init(&ud->desc_to_purge, &head);
	spin_unlock_irqrestore(&ud->lock, flags);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
		struct udma_desc *d = to_udma_desc(&vd->tx);

		udma_free_hwdesc(uc, d);
		list_del(&vd->node);
		kfree(d);
	}

	/* If more to purge, schedule the work again */
	if (!list_empty(&ud->desc_to_purge))
		schedule_work(&ud->purge_work);
}
57925dcb5ddSPeter Ujfalusi 
/*
 * virt-dma desc_free callback: release one descriptor. dma_pool-backed
 * descriptors are freed immediately; coherent allocations are queued for
 * udma_purge_desc_work() instead.
 */
static void udma_desc_free(struct virt_dma_desc *vd)
{
	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
	struct udma_desc *d = to_udma_desc(&vd->tx);
	unsigned long flags;

	/* Drop the channel's dangling reference if it points at @d */
	if (uc->terminated_desc == d)
		uc->terminated_desc = NULL;

	if (uc->use_dma_pool) {
		udma_free_hwdesc(uc, d);
		kfree(d);
		return;
	}

	/* Defer the dma_free_coherent() to the purge worker */
	spin_lock_irqsave(&ud->lock, flags);
	list_add_tail(&vd->node, &ud->desc_to_purge);
	spin_unlock_irqrestore(&ud->lock, flags);

	schedule_work(&ud->purge_work);
}
60225dcb5ddSPeter Ujfalusi 
udma_is_chan_running(struct udma_chan * uc)60325dcb5ddSPeter Ujfalusi static bool udma_is_chan_running(struct udma_chan *uc)
60425dcb5ddSPeter Ujfalusi {
60525dcb5ddSPeter Ujfalusi 	u32 trt_ctl = 0;
60625dcb5ddSPeter Ujfalusi 	u32 rrt_ctl = 0;
60725dcb5ddSPeter Ujfalusi 
60825dcb5ddSPeter Ujfalusi 	if (uc->tchan)
609db375dcbSPeter Ujfalusi 		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
61025dcb5ddSPeter Ujfalusi 	if (uc->rchan)
611db375dcbSPeter Ujfalusi 		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
61225dcb5ddSPeter Ujfalusi 
61325dcb5ddSPeter Ujfalusi 	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
61425dcb5ddSPeter Ujfalusi 		return true;
61525dcb5ddSPeter Ujfalusi 
61625dcb5ddSPeter Ujfalusi 	return false;
61725dcb5ddSPeter Ujfalusi }
61825dcb5ddSPeter Ujfalusi 
udma_is_chan_paused(struct udma_chan * uc)61925dcb5ddSPeter Ujfalusi static bool udma_is_chan_paused(struct udma_chan *uc)
62025dcb5ddSPeter Ujfalusi {
62125dcb5ddSPeter Ujfalusi 	u32 val, pause_mask;
62225dcb5ddSPeter Ujfalusi 
623c7450bb2SPeter Ujfalusi 	switch (uc->config.dir) {
62425dcb5ddSPeter Ujfalusi 	case DMA_DEV_TO_MEM:
625db375dcbSPeter Ujfalusi 		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
62625dcb5ddSPeter Ujfalusi 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
62725dcb5ddSPeter Ujfalusi 		break;
62825dcb5ddSPeter Ujfalusi 	case DMA_MEM_TO_DEV:
629db375dcbSPeter Ujfalusi 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
63025dcb5ddSPeter Ujfalusi 		pause_mask = UDMA_PEER_RT_EN_PAUSE;
63125dcb5ddSPeter Ujfalusi 		break;
63225dcb5ddSPeter Ujfalusi 	case DMA_MEM_TO_MEM:
633db375dcbSPeter Ujfalusi 		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
63425dcb5ddSPeter Ujfalusi 		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
63525dcb5ddSPeter Ujfalusi 		break;
63625dcb5ddSPeter Ujfalusi 	default:
63725dcb5ddSPeter Ujfalusi 		return false;
63825dcb5ddSPeter Ujfalusi 	}
63925dcb5ddSPeter Ujfalusi 
64025dcb5ddSPeter Ujfalusi 	if (val & pause_mask)
64125dcb5ddSPeter Ujfalusi 		return true;
64225dcb5ddSPeter Ujfalusi 
64325dcb5ddSPeter Ujfalusi 	return false;
64425dcb5ddSPeter Ujfalusi }
64525dcb5ddSPeter Ujfalusi 
/*
 * Return the DMA address of the device-wide dummy "RX flush" hw
 * descriptor; pkt_mode selects which of the two pre-allocated flush
 * descriptors (TR vs packet mode layout) the channel must use.
 */
static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
{
	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
}
65016cd3c67SPeter Ujfalusi 
/*
 * Queue hw descriptor @idx of the current udma_desc to the ring feeding
 * the channel's hardware. @idx == -1 queues the special RX flush
 * descriptor instead (DEV_TO_MEM only).
 *
 * Returns 0 or a negative error from k3_ringacc_ring_push();
 * -EINVAL for an unsupported direction.
 */
static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
	struct udma_desc *d = uc->desc;
	struct k3_ring *ring = NULL;
	dma_addr_t paddr;

	/* free-descriptor ring for RX, transmit ring for TX/memcpy */
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->fd_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->t_ring;
		break;
	default:
		return -EINVAL;
	}

	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
	if (idx == -1) {
		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
	} else {
		paddr = udma_curr_cppi5_desc_paddr(d, idx);

		wmb(); /* Ensure that writes are not moved over this point */
	}

	return k3_ringacc_ring_push(ring, &paddr);
}
68025dcb5ddSPeter Ujfalusi 
udma_desc_is_rx_flush(struct udma_chan * uc,dma_addr_t addr)68116cd3c67SPeter Ujfalusi static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
68216cd3c67SPeter Ujfalusi {
68316cd3c67SPeter Ujfalusi 	if (uc->config.dir != DMA_DEV_TO_MEM)
68416cd3c67SPeter Ujfalusi 		return false;
68516cd3c67SPeter Ujfalusi 
68616cd3c67SPeter Ujfalusi 	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
68716cd3c67SPeter Ujfalusi 		return true;
68816cd3c67SPeter Ujfalusi 
68916cd3c67SPeter Ujfalusi 	return false;
69016cd3c67SPeter Ujfalusi }
69116cd3c67SPeter Ujfalusi 
/*
 * Pop one completed descriptor address from the channel's completion
 * ring into @addr.
 *
 * Returns 0 on success — including teardown completion markers, which
 * the caller must detect via cppi5_desc_is_tdcm(*addr);
 * -ENOENT for an invalid direction or when the popped address is the
 * internal RX flush descriptor; otherwise the error from
 * k3_ringacc_ring_pop() (e.g. ring empty).
 */
static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_ring *ring = NULL;
	int ret;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		return -ENOENT;
	}

	ret = k3_ringacc_ring_pop(ring, addr);
	if (ret)
		return ret;

	rmb(); /* Ensure that reads are not moved before this point */

	/* Teardown completion */
	if (cppi5_desc_is_tdcm(*addr))
		return 0;

	/* Check for flush descriptor */
	if (udma_desc_is_rx_flush(uc, *addr))
		return -ENOENT;

	return 0;
}
72525dcb5ddSPeter Ujfalusi 
udma_reset_rings(struct udma_chan * uc)72625dcb5ddSPeter Ujfalusi static void udma_reset_rings(struct udma_chan *uc)
72725dcb5ddSPeter Ujfalusi {
72825dcb5ddSPeter Ujfalusi 	struct k3_ring *ring1 = NULL;
72925dcb5ddSPeter Ujfalusi 	struct k3_ring *ring2 = NULL;
73025dcb5ddSPeter Ujfalusi 
73125dcb5ddSPeter Ujfalusi 	switch (uc->config.dir) {
73225dcb5ddSPeter Ujfalusi 	case DMA_DEV_TO_MEM:
73325dcb5ddSPeter Ujfalusi 		if (uc->rchan) {
73425dcb5ddSPeter Ujfalusi 			ring1 = uc->rflow->fd_ring;
73525dcb5ddSPeter Ujfalusi 			ring2 = uc->rflow->r_ring;
73625dcb5ddSPeter Ujfalusi 		}
73725dcb5ddSPeter Ujfalusi 		break;
73825dcb5ddSPeter Ujfalusi 	case DMA_MEM_TO_DEV:
73925dcb5ddSPeter Ujfalusi 	case DMA_MEM_TO_MEM:
74025dcb5ddSPeter Ujfalusi 		if (uc->tchan) {
74125dcb5ddSPeter Ujfalusi 			ring1 = uc->tchan->t_ring;
74225dcb5ddSPeter Ujfalusi 			ring2 = uc->tchan->tc_ring;
74325dcb5ddSPeter Ujfalusi 		}
74425dcb5ddSPeter Ujfalusi 		break;
74525dcb5ddSPeter Ujfalusi 	default:
74625dcb5ddSPeter Ujfalusi 		break;
74725dcb5ddSPeter Ujfalusi 	}
74825dcb5ddSPeter Ujfalusi 
74925dcb5ddSPeter Ujfalusi 	if (ring1)
75025dcb5ddSPeter Ujfalusi 		k3_ringacc_ring_reset_dma(ring1,
75125dcb5ddSPeter Ujfalusi 					  k3_ringacc_ring_get_occ(ring1));
75225dcb5ddSPeter Ujfalusi 	if (ring2)
75325dcb5ddSPeter Ujfalusi 		k3_ringacc_ring_reset(ring2);
75425dcb5ddSPeter Ujfalusi 
75525dcb5ddSPeter Ujfalusi 	/* make sure we are not leaking memory by stalled descriptor */
75625dcb5ddSPeter Ujfalusi 	if (uc->terminated_desc) {
75725dcb5ddSPeter Ujfalusi 		udma_desc_free(&uc->terminated_desc->vd);
75825dcb5ddSPeter Ujfalusi 		uc->terminated_desc = NULL;
75925dcb5ddSPeter Ujfalusi 	}
76025dcb5ddSPeter Ujfalusi }
76125dcb5ddSPeter Ujfalusi 
/*
 * Account @val completed bytes by writing @val to the RT byte counter
 * registers of the direction in use (writing subtracts the value — see
 * udma_reset_counters() which clears them by writing back the read
 * value). The peer counter only exists for non-native (PDMA) endpoints;
 * on TX it is additionally skipped for bchan backed (block-copy)
 * channels, which have no peer.
 */
static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
{
	if (uc->desc->dir == DMA_DEV_TO_MEM) {
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
		if (uc->config.ep_type != PSIL_EP_NATIVE)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	} else {
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
		if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}
}
7767c94dcfaSVaishnav Achath 
/*
 * Clear all real-time byte/packet counters of the channel by reading
 * each register and writing the value back (the write subtracts the
 * written amount). The TX peer counter is skipped for bchan backed
 * (block-copy) channels, which have no peer.
 */
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		if (!uc->bchan) {
			val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
		}
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}
}
81125dcb5ddSPeter Ujfalusi 
/*
 * Reset the channel: disable the RT enables for the direction in use and
 * clear all counters. With @hard set, the channel resources are freed
 * and re-allocated (configuration is preserved across the round trip) to
 * fully re-initialize the hardware; a forced teardown is then issued for
 * DEV_TO_MEM to help recover the rchan.
 *
 * Returns 0 on success, -EINVAL for an unsupported direction, or the
 * error from re-allocating the channel resources.
 */
static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	/* Reset all counters */
	udma_reset_counters(uc);

	/* Hard reset: re-initialize the channel to reset */
	if (hard) {
		struct udma_chan_config ucc_backup;
		int ret;

		/* free + re-alloc clobbers uc->config, so save/restore it */
		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

		/* restore the channel configuration */
		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
		if (ret)
			return ret;

		/*
		 * Setting forced teardown after forced reset helps recovering
		 * the rchan.
		 */
		if (uc->config.dir == DMA_DEV_TO_MEM)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
					   UDMA_CHAN_RT_CTL_EN |
					   UDMA_CHAN_RT_CTL_TDOWN |
					   UDMA_CHAN_RT_CTL_FTDOWN);
	}
	uc->state = UDMA_CHAN_IS_IDLE;

	return 0;
}
86225dcb5ddSPeter Ujfalusi 
udma_start_desc(struct udma_chan * uc)86325dcb5ddSPeter Ujfalusi static void udma_start_desc(struct udma_chan *uc)
86425dcb5ddSPeter Ujfalusi {
86525dcb5ddSPeter Ujfalusi 	struct udma_chan_config *ucc = &uc->config;
86625dcb5ddSPeter Ujfalusi 
867d2abc982SPeter Ujfalusi 	if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
868d2abc982SPeter Ujfalusi 	    (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
86925dcb5ddSPeter Ujfalusi 		int i;
87025dcb5ddSPeter Ujfalusi 
871d2abc982SPeter Ujfalusi 		/*
872d2abc982SPeter Ujfalusi 		 * UDMA only: Push all descriptors to ring for packet mode
873d2abc982SPeter Ujfalusi 		 * cyclic or RX
874d2abc982SPeter Ujfalusi 		 * PKTDMA supports pre-linked descriptor and cyclic is not
875d2abc982SPeter Ujfalusi 		 * supported
876d2abc982SPeter Ujfalusi 		 */
87725dcb5ddSPeter Ujfalusi 		for (i = 0; i < uc->desc->sglen; i++)
87825dcb5ddSPeter Ujfalusi 			udma_push_to_ring(uc, i);
87925dcb5ddSPeter Ujfalusi 	} else {
88025dcb5ddSPeter Ujfalusi 		udma_push_to_ring(uc, 0);
88125dcb5ddSPeter Ujfalusi 	}
88225dcb5ddSPeter Ujfalusi }
88325dcb5ddSPeter Ujfalusi 
udma_chan_needs_reconfiguration(struct udma_chan * uc)88425dcb5ddSPeter Ujfalusi static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
88525dcb5ddSPeter Ujfalusi {
88625dcb5ddSPeter Ujfalusi 	/* Only PDMAs have staticTR */
88725dcb5ddSPeter Ujfalusi 	if (uc->config.ep_type == PSIL_EP_NATIVE)
88825dcb5ddSPeter Ujfalusi 		return false;
88925dcb5ddSPeter Ujfalusi 
89025dcb5ddSPeter Ujfalusi 	/* Check if the staticTR configuration has changed for TX */
89125dcb5ddSPeter Ujfalusi 	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
89225dcb5ddSPeter Ujfalusi 		return true;
89325dcb5ddSPeter Ujfalusi 
89425dcb5ddSPeter Ujfalusi 	return false;
89525dcb5ddSPeter Ujfalusi }
89625dcb5ddSPeter Ujfalusi 
/*
 * Take the next pending virt-dma descriptor, make it current and kick
 * the hardware. If the channel is already running with a matching
 * staticTR configuration only the descriptors are pushed; otherwise the
 * channel is soft-reset, the descriptors queued, and the channel (and
 * remote peer, if any) (re)enabled for the descriptor's direction.
 *
 * Must be called with the vchan lock held.
 *
 * Returns 0 on success, -ENOENT when there is no pending descriptor, or
 * -EINVAL for an unsupported direction.
 */
static int udma_start(struct udma_chan *uc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

	if (!vd) {
		uc->desc = NULL;
		return -ENOENT;
	}

	list_del(&vd->node);

	uc->desc = to_udma_desc(&vd->tx);

	/* Channel is already running and does not need reconfiguration */
	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
		udma_start_desc(uc);
		goto out;
	}

	/* Make sure that we clear the teardown bit, if it is set */
	udma_reset_chan(uc, false);

	/* Push descriptors before we start the channel */
	udma_start_desc(uc);

	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
			const struct udma_match_data *match_data =
							uc->ud->match_data;

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_rchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			udma_rchanrt_write(uc,
				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
						 match_data->statictr_z_mask));

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		/* RX: enable the local channel before the remote peer */
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		break;
	case DMA_MEM_TO_DEV:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_tchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		/* Enable remote */
		/* TX: enable the remote peer before the local channel */
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	uc->state = UDMA_CHAN_IS_ACTIVE;
out:

	return 0;
}
100225dcb5ddSPeter Ujfalusi 
/*
 * Initiate channel teardown for the configured direction. The channel
 * state is set to TERMINATING and teardown_completed is re-armed; the
 * actual teardown completion arrives asynchronously (TDCM via the ring).
 * For DEV_TO_MEM without an active or cyclic descriptor, the internal RX
 * flush descriptor is queued first (idx == -1).
 *
 * Returns 0 on success; -EINVAL (with state restored) for an
 * unsupported direction.
 */
static int udma_stop(struct udma_chan *uc)
{
	enum udma_chan_state old_state = uc->state;

	uc->state = UDMA_CHAN_IS_TERMINATING;
	reinit_completion(&uc->teardown_completed);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (!uc->cyclic && !uc->desc)
			udma_push_to_ring(uc, -1);

		/* RX teardown is requested via the remote peer */
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_TEARDOWN);
		break;
	case DMA_MEM_TO_DEV:
		/* flush the peer, then tear down the local channel */
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_FLUSH);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	default:
		/* unknown direction: undo the state change and wake waiters */
		uc->state = old_state;
		complete_all(&uc->teardown_completed);
		return -EINVAL;
	}

	return 0;
}
104025dcb5ddSPeter Ujfalusi 
/*
 * A cyclic period has completed: restore the CPPI5 host descriptor to
 * its original (pre hardware write-back) state, re-queue it to the ring
 * and advance desc_idx to the next period (wrapping at sglen).
 */
static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
{
	struct udma_desc *d = uc->desc;
	struct cppi5_host_desc_t *h_desc;

	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
	cppi5_hdesc_reset_to_original(h_desc);
	udma_push_to_ring(uc, d->desc_idx);
	d->desc_idx = (d->desc_idx + 1) % d->sglen;
}
105125dcb5ddSPeter Ujfalusi 
/* Copy the EPIB words of the first hw descriptor into @d's metadata buffer */
static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
{
	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	memcpy(d->metadata, h_desc->epib, d->metadata_size);
}
105825dcb5ddSPeter Ujfalusi 
/*
 * Check whether descriptor @d has truly completed. For MEM_TO_DEV
 * towards a PDMA endpoint, ring completion only means the data left
 * UDMA; the transfer is done when the peer byte count has caught up with
 * the local one. If not yet drained, the residue and a timestamp are
 * stored for udma_check_tx_completion() to estimate the drain rate.
 */
static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
	u32 peer_bcnt, bcnt;

	/*
	 * Only TX towards PDMA is affected.
	 * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer
	 * completion calculation, consumer must ensure that there is no stale
	 * data in DMA fabric in this case.
	 */
	if (uc->config.ep_type == PSIL_EP_NATIVE ||
	    uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
		return true;

	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

	/* Transfer is incomplete, store current residue and time stamp */
	if (peer_bcnt < bcnt) {
		uc->tx_drain.residue = bcnt - peer_bcnt;
		uc->tx_drain.tstamp = ktime_get();
		return false;
	}

	return true;
}
108525dcb5ddSPeter Ujfalusi 
/*
 * Delayed-work handler that polls a MEM_TO_DEV transfer until the peer
 * (PDMA) has drained all data. Each iteration compares the residue and
 * timestamp stored by udma_is_desc_really_done() against the previous
 * poll to estimate the drain rate and sleep accordingly; if no progress
 * was made the work is re-armed with a one second delay instead. Once
 * the descriptor is done, the byte counters are adjusted, the next
 * descriptor started and the cookie completed.
 */
static void udma_check_tx_completion(struct work_struct *work)
{
	struct udma_chan *uc = container_of(work, typeof(*uc),
					    tx_drain.work.work);
	bool desc_done = true;
	u32 residue_diff;
	ktime_t time_diff;
	unsigned long delay;

	while (1) {
		if (uc->desc) {
			/* Get previous residue and time stamp */
			residue_diff = uc->tx_drain.residue;
			time_diff = uc->tx_drain.tstamp;
			/*
			 * Get current residue and time stamp or see if
			 * transfer is complete
			 */
			desc_done = udma_is_desc_really_done(uc, uc->desc);
		}

		if (!desc_done) {
			/*
			 * Find the time delta and residue delta w.r.t
			 * previous poll
			 */
			time_diff = ktime_sub(uc->tx_drain.tstamp,
					      time_diff) + 1;
			residue_diff -= uc->tx_drain.residue;
			if (residue_diff) {
				/*
				 * Try to guess when we should check
				 * next time by calculating rate at
				 * which data is being drained at the
				 * peer device
				 */
				delay = (time_diff / residue_diff) *
					uc->tx_drain.residue;
			} else {
				/* No progress, check again in 1 second  */
				schedule_delayed_work(&uc->tx_drain.work, HZ);
				break;
			}

			usleep_range(ktime_to_us(delay),
				     ktime_to_us(delay) + 10);
			continue;
		}

		if (uc->desc) {
			struct udma_desc *d = uc->desc;

			/* transfer drained: account it and move on */
			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
			break;
		}

		break;
	}
}
114725dcb5ddSPeter Ujfalusi 
/*
 * Interrupt handler for ring (completion) events. Pops one descriptor
 * address from the completion ring and, under the vchan lock:
 *  - teardown completion marker (TDCM): wakes teardown waiters, frees a
 *    pending terminated descriptor and restarts the channel if idle;
 *  - active descriptor: recycles it (cyclic) or completes the cookie,
 *    deferring to the tx-drain worker when the peer has not drained yet;
 *  - a no-longer-active (terminated) descriptor: only the cookie marker
 *    is advanced, the memory is freed elsewhere.
 */
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;
	dma_addr_t paddr = 0;

	if (udma_pop_from_ring(uc, &paddr) || !paddr)
		return IRQ_HANDLED;

	spin_lock(&uc->vc.lock);

	/* Teardown completion message */
	if (cppi5_desc_is_tdcm(paddr)) {
		complete_all(&uc->teardown_completed);

		if (uc->terminated_desc) {
			udma_desc_free(&uc->terminated_desc->vd);
			uc->terminated_desc = NULL;
		}

		if (!uc->desc)
			udma_start(uc);

		goto out;
	}

	d = udma_udma_desc_from_paddr(uc, paddr);

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);
		if (desc_paddr != paddr) {
			dev_err(uc->ud->dev, "not matching descriptors!\n");
			goto out;
		}

		if (d == uc->desc) {
			/* active descriptor */
			if (uc->cyclic) {
				udma_cyclic_packet_elapsed(uc);
				vchan_cyclic_callback(&d->vd);
			} else {
				if (udma_is_desc_really_done(uc, d)) {
					udma_decrement_byte_counters(uc, d->residue);
					udma_start(uc);
					vchan_cookie_complete(&d->vd);
				} else {
					/* peer not drained yet: poll from worker */
					schedule_delayed_work(&uc->tx_drain.work,
							      0);
				}
			}
		} else {
			/*
			 * terminated descriptor, mark the descriptor as
			 * completed to update the channel's cookie marker
			 */
			dma_cookie_complete(&d->vd.tx);
		}
	}
out:
	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}
121225dcb5ddSPeter Ujfalusi 
udma_udma_irq_handler(int irq,void * data)121325dcb5ddSPeter Ujfalusi static irqreturn_t udma_udma_irq_handler(int irq, void *data)
121425dcb5ddSPeter Ujfalusi {
121525dcb5ddSPeter Ujfalusi 	struct udma_chan *uc = data;
121625dcb5ddSPeter Ujfalusi 	struct udma_desc *d;
121725dcb5ddSPeter Ujfalusi 
1218e991c06eSBarry Song 	spin_lock(&uc->vc.lock);
121925dcb5ddSPeter Ujfalusi 	d = uc->desc;
122025dcb5ddSPeter Ujfalusi 	if (d) {
122125dcb5ddSPeter Ujfalusi 		d->tr_idx = (d->tr_idx + 1) % d->sglen;
122225dcb5ddSPeter Ujfalusi 
122325dcb5ddSPeter Ujfalusi 		if (uc->cyclic) {
122425dcb5ddSPeter Ujfalusi 			vchan_cyclic_callback(&d->vd);
122525dcb5ddSPeter Ujfalusi 		} else {
122625dcb5ddSPeter Ujfalusi 			/* TODO: figure out the real amount of data */
12277c94dcfaSVaishnav Achath 			udma_decrement_byte_counters(uc, d->residue);
122825dcb5ddSPeter Ujfalusi 			udma_start(uc);
122925dcb5ddSPeter Ujfalusi 			vchan_cookie_complete(&d->vd);
123025dcb5ddSPeter Ujfalusi 		}
123125dcb5ddSPeter Ujfalusi 	}
123225dcb5ddSPeter Ujfalusi 
1233e991c06eSBarry Song 	spin_unlock(&uc->vc.lock);
123425dcb5ddSPeter Ujfalusi 
123525dcb5ddSPeter Ujfalusi 	return IRQ_HANDLED;
123625dcb5ddSPeter Ujfalusi }
123725dcb5ddSPeter Ujfalusi 
1238d7024191SGrygorii Strashko /**
1239d7024191SGrygorii Strashko  * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1240d7024191SGrygorii Strashko  * @ud: UDMA device
1241d7024191SGrygorii Strashko  * @from: Start the search from this flow id number
1242d7024191SGrygorii Strashko  * @cnt: Number of consecutive flow ids to allocate
1243d7024191SGrygorii Strashko  *
1244d7024191SGrygorii Strashko  * Allocate range of RX flow ids for future use, those flows can be requested
1245d7024191SGrygorii Strashko  * only using explicit flow id number. if @from is set to -1 it will try to find
1246d7024191SGrygorii Strashko  * first free range. if @from is positive value it will force allocation only
1247d7024191SGrygorii Strashko  * of the specified range of flows.
1248d7024191SGrygorii Strashko  *
1249d7024191SGrygorii Strashko  * Returns -ENOMEM if can't find free range.
1250d7024191SGrygorii Strashko  * -EEXIST if requested range is busy.
1251d7024191SGrygorii Strashko  * -EINVAL if wrong input values passed.
1252d7024191SGrygorii Strashko  * Returns flow id on success.
1253d7024191SGrygorii Strashko  */
__udma_alloc_gp_rflow_range(struct udma_dev * ud,int from,int cnt)1254d7024191SGrygorii Strashko static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1255d7024191SGrygorii Strashko {
1256d7024191SGrygorii Strashko 	int start, tmp_from;
1257d7024191SGrygorii Strashko 	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1258d7024191SGrygorii Strashko 
1259d7024191SGrygorii Strashko 	tmp_from = from;
1260d7024191SGrygorii Strashko 	if (tmp_from < 0)
1261d7024191SGrygorii Strashko 		tmp_from = ud->rchan_cnt;
1262d7024191SGrygorii Strashko 	/* default flows can't be allocated and accessible only by id */
1263d7024191SGrygorii Strashko 	if (tmp_from < ud->rchan_cnt)
1264d7024191SGrygorii Strashko 		return -EINVAL;
1265d7024191SGrygorii Strashko 
1266d7024191SGrygorii Strashko 	if (tmp_from + cnt > ud->rflow_cnt)
1267d7024191SGrygorii Strashko 		return -EINVAL;
1268d7024191SGrygorii Strashko 
1269d7024191SGrygorii Strashko 	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1270d7024191SGrygorii Strashko 		  ud->rflow_cnt);
1271d7024191SGrygorii Strashko 
1272d7024191SGrygorii Strashko 	start = bitmap_find_next_zero_area(tmp,
1273d7024191SGrygorii Strashko 					   ud->rflow_cnt,
1274d7024191SGrygorii Strashko 					   tmp_from, cnt, 0);
1275d7024191SGrygorii Strashko 	if (start >= ud->rflow_cnt)
1276d7024191SGrygorii Strashko 		return -ENOMEM;
1277d7024191SGrygorii Strashko 
1278d7024191SGrygorii Strashko 	if (from >= 0 && start != from)
1279d7024191SGrygorii Strashko 		return -EEXIST;
1280d7024191SGrygorii Strashko 
1281d7024191SGrygorii Strashko 	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1282d7024191SGrygorii Strashko 	return start;
1283d7024191SGrygorii Strashko }
1284d7024191SGrygorii Strashko 
__udma_free_gp_rflow_range(struct udma_dev * ud,int from,int cnt)1285d7024191SGrygorii Strashko static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1286d7024191SGrygorii Strashko {
1287d7024191SGrygorii Strashko 	if (from < ud->rchan_cnt)
1288d7024191SGrygorii Strashko 		return -EINVAL;
1289d7024191SGrygorii Strashko 	if (from + cnt > ud->rflow_cnt)
1290d7024191SGrygorii Strashko 		return -EINVAL;
1291d7024191SGrygorii Strashko 
1292d7024191SGrygorii Strashko 	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1293d7024191SGrygorii Strashko 	return 0;
1294d7024191SGrygorii Strashko }
1295d7024191SGrygorii Strashko 
__udma_get_rflow(struct udma_dev * ud,int id)129625dcb5ddSPeter Ujfalusi static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
129725dcb5ddSPeter Ujfalusi {
129825dcb5ddSPeter Ujfalusi 	/*
129925dcb5ddSPeter Ujfalusi 	 * Attempt to request rflow by ID can be made for any rflow
130025dcb5ddSPeter Ujfalusi 	 * if not in use with assumption that caller knows what's doing.
130125dcb5ddSPeter Ujfalusi 	 * TI-SCI FW will perform additional permission check ant way, it's
130225dcb5ddSPeter Ujfalusi 	 * safe
130325dcb5ddSPeter Ujfalusi 	 */
130425dcb5ddSPeter Ujfalusi 
130525dcb5ddSPeter Ujfalusi 	if (id < 0 || id >= ud->rflow_cnt)
130625dcb5ddSPeter Ujfalusi 		return ERR_PTR(-ENOENT);
130725dcb5ddSPeter Ujfalusi 
130825dcb5ddSPeter Ujfalusi 	if (test_bit(id, ud->rflow_in_use))
130925dcb5ddSPeter Ujfalusi 		return ERR_PTR(-ENOENT);
131025dcb5ddSPeter Ujfalusi 
1311d2abc982SPeter Ujfalusi 	if (ud->rflow_gp_map) {
131225dcb5ddSPeter Ujfalusi 		/* GP rflow has to be allocated first */
131325dcb5ddSPeter Ujfalusi 		if (!test_bit(id, ud->rflow_gp_map) &&
131425dcb5ddSPeter Ujfalusi 		    !test_bit(id, ud->rflow_gp_map_allocated))
131525dcb5ddSPeter Ujfalusi 			return ERR_PTR(-EINVAL);
1316d2abc982SPeter Ujfalusi 	}
131725dcb5ddSPeter Ujfalusi 
131825dcb5ddSPeter Ujfalusi 	dev_dbg(ud->dev, "get rflow%d\n", id);
131925dcb5ddSPeter Ujfalusi 	set_bit(id, ud->rflow_in_use);
132025dcb5ddSPeter Ujfalusi 	return &ud->rflows[id];
132125dcb5ddSPeter Ujfalusi }
132225dcb5ddSPeter Ujfalusi 
__udma_put_rflow(struct udma_dev * ud,struct udma_rflow * rflow)132325dcb5ddSPeter Ujfalusi static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
132425dcb5ddSPeter Ujfalusi {
132525dcb5ddSPeter Ujfalusi 	if (!test_bit(rflow->id, ud->rflow_in_use)) {
132625dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
132725dcb5ddSPeter Ujfalusi 		return;
132825dcb5ddSPeter Ujfalusi 	}
132925dcb5ddSPeter Ujfalusi 
133025dcb5ddSPeter Ujfalusi 	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
133125dcb5ddSPeter Ujfalusi 	clear_bit(rflow->id, ud->rflow_in_use);
133225dcb5ddSPeter Ujfalusi }
133325dcb5ddSPeter Ujfalusi 
133425dcb5ddSPeter Ujfalusi #define UDMA_RESERVE_RESOURCE(res)					\
133525dcb5ddSPeter Ujfalusi static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
133625dcb5ddSPeter Ujfalusi 					       enum udma_tp_level tpl,	\
133725dcb5ddSPeter Ujfalusi 					       int id)			\
133825dcb5ddSPeter Ujfalusi {									\
133925dcb5ddSPeter Ujfalusi 	if (id >= 0) {							\
134025dcb5ddSPeter Ujfalusi 		if (test_bit(id, ud->res##_map)) {			\
134125dcb5ddSPeter Ujfalusi 			dev_err(ud->dev, "res##%d is in use\n", id);	\
134225dcb5ddSPeter Ujfalusi 			return ERR_PTR(-ENOENT);			\
134325dcb5ddSPeter Ujfalusi 		}							\
134425dcb5ddSPeter Ujfalusi 	} else {							\
134525dcb5ddSPeter Ujfalusi 		int start;						\
134625dcb5ddSPeter Ujfalusi 									\
134788448980SPeter Ujfalusi 		if (tpl >= ud->res##_tpl.levels)			\
134888448980SPeter Ujfalusi 			tpl = ud->res##_tpl.levels - 1;			\
134925dcb5ddSPeter Ujfalusi 									\
135088448980SPeter Ujfalusi 		start = ud->res##_tpl.start_idx[tpl];			\
135125dcb5ddSPeter Ujfalusi 									\
135225dcb5ddSPeter Ujfalusi 		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
135325dcb5ddSPeter Ujfalusi 					start);				\
135425dcb5ddSPeter Ujfalusi 		if (id == ud->res##_cnt) {				\
135525dcb5ddSPeter Ujfalusi 			return ERR_PTR(-ENOENT);			\
135625dcb5ddSPeter Ujfalusi 		}							\
135725dcb5ddSPeter Ujfalusi 	}								\
135825dcb5ddSPeter Ujfalusi 									\
135925dcb5ddSPeter Ujfalusi 	set_bit(id, ud->res##_map);					\
136025dcb5ddSPeter Ujfalusi 	return &ud->res##s[id];						\
136125dcb5ddSPeter Ujfalusi }
136225dcb5ddSPeter Ujfalusi 
136388448980SPeter Ujfalusi UDMA_RESERVE_RESOURCE(bchan);
136425dcb5ddSPeter Ujfalusi UDMA_RESERVE_RESOURCE(tchan);
136525dcb5ddSPeter Ujfalusi UDMA_RESERVE_RESOURCE(rchan);
136625dcb5ddSPeter Ujfalusi 
bcdma_get_bchan(struct udma_chan * uc)136701779473SPeter Ujfalusi static int bcdma_get_bchan(struct udma_chan *uc)
136801779473SPeter Ujfalusi {
136901779473SPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
137088448980SPeter Ujfalusi 	enum udma_tp_level tpl;
13715c6c6d60SKishon Vijay Abraham I 	int ret;
137201779473SPeter Ujfalusi 
137301779473SPeter Ujfalusi 	if (uc->bchan) {
137401779473SPeter Ujfalusi 		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
137501779473SPeter Ujfalusi 			uc->id, uc->bchan->id);
137601779473SPeter Ujfalusi 		return 0;
137701779473SPeter Ujfalusi 	}
137801779473SPeter Ujfalusi 
137988448980SPeter Ujfalusi 	/*
138088448980SPeter Ujfalusi 	 * Use normal channels for peripherals, and highest TPL channel for
138188448980SPeter Ujfalusi 	 * mem2mem
138288448980SPeter Ujfalusi 	 */
138388448980SPeter Ujfalusi 	if (uc->config.tr_trigger_type)
138488448980SPeter Ujfalusi 		tpl = 0;
138588448980SPeter Ujfalusi 	else
138688448980SPeter Ujfalusi 		tpl = ud->bchan_tpl.levels - 1;
138788448980SPeter Ujfalusi 
138888448980SPeter Ujfalusi 	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
13895c6c6d60SKishon Vijay Abraham I 	if (IS_ERR(uc->bchan)) {
13905c6c6d60SKishon Vijay Abraham I 		ret = PTR_ERR(uc->bchan);
13915c6c6d60SKishon Vijay Abraham I 		uc->bchan = NULL;
13925c6c6d60SKishon Vijay Abraham I 		return ret;
13935c6c6d60SKishon Vijay Abraham I 	}
139401779473SPeter Ujfalusi 
139501779473SPeter Ujfalusi 	uc->tchan = uc->bchan;
139601779473SPeter Ujfalusi 
139701779473SPeter Ujfalusi 	return 0;
139801779473SPeter Ujfalusi }
139901779473SPeter Ujfalusi 
udma_get_tchan(struct udma_chan * uc)140025dcb5ddSPeter Ujfalusi static int udma_get_tchan(struct udma_chan *uc)
140125dcb5ddSPeter Ujfalusi {
140225dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
1403eb91224eSKishon Vijay Abraham I 	int ret;
140425dcb5ddSPeter Ujfalusi 
140525dcb5ddSPeter Ujfalusi 	if (uc->tchan) {
140625dcb5ddSPeter Ujfalusi 		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
140725dcb5ddSPeter Ujfalusi 			uc->id, uc->tchan->id);
140825dcb5ddSPeter Ujfalusi 		return 0;
140925dcb5ddSPeter Ujfalusi 	}
141025dcb5ddSPeter Ujfalusi 
1411d2abc982SPeter Ujfalusi 	/*
1412d2abc982SPeter Ujfalusi 	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1413d2abc982SPeter Ujfalusi 	 * For PKTDMA mapped channels it is configured to a channel which must
1414d2abc982SPeter Ujfalusi 	 * be used to service the peripheral.
1415d2abc982SPeter Ujfalusi 	 */
1416d2abc982SPeter Ujfalusi 	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1417d2abc982SPeter Ujfalusi 					 uc->config.mapped_channel_id);
1418eb91224eSKishon Vijay Abraham I 	if (IS_ERR(uc->tchan)) {
1419eb91224eSKishon Vijay Abraham I 		ret = PTR_ERR(uc->tchan);
1420eb91224eSKishon Vijay Abraham I 		uc->tchan = NULL;
1421eb91224eSKishon Vijay Abraham I 		return ret;
1422eb91224eSKishon Vijay Abraham I 	}
142325dcb5ddSPeter Ujfalusi 
1424d2abc982SPeter Ujfalusi 	if (ud->tflow_cnt) {
1425d2abc982SPeter Ujfalusi 		int tflow_id;
1426d2abc982SPeter Ujfalusi 
1427d2abc982SPeter Ujfalusi 		/* Only PKTDMA have support for tx flows */
1428d2abc982SPeter Ujfalusi 		if (uc->config.default_flow_id >= 0)
1429d2abc982SPeter Ujfalusi 			tflow_id = uc->config.default_flow_id;
1430d2abc982SPeter Ujfalusi 		else
1431d2abc982SPeter Ujfalusi 			tflow_id = uc->tchan->id;
1432d2abc982SPeter Ujfalusi 
1433d2abc982SPeter Ujfalusi 		if (test_bit(tflow_id, ud->tflow_map)) {
1434d2abc982SPeter Ujfalusi 			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1435d2abc982SPeter Ujfalusi 			clear_bit(uc->tchan->id, ud->tchan_map);
1436d2abc982SPeter Ujfalusi 			uc->tchan = NULL;
1437d2abc982SPeter Ujfalusi 			return -ENOENT;
1438d2abc982SPeter Ujfalusi 		}
1439d2abc982SPeter Ujfalusi 
1440d2abc982SPeter Ujfalusi 		uc->tchan->tflow_id = tflow_id;
1441d2abc982SPeter Ujfalusi 		set_bit(tflow_id, ud->tflow_map);
1442d2abc982SPeter Ujfalusi 	} else {
1443d2abc982SPeter Ujfalusi 		uc->tchan->tflow_id = -1;
1444d2abc982SPeter Ujfalusi 	}
1445d2abc982SPeter Ujfalusi 
1446d2abc982SPeter Ujfalusi 	return 0;
144725dcb5ddSPeter Ujfalusi }
144825dcb5ddSPeter Ujfalusi 
udma_get_rchan(struct udma_chan * uc)144925dcb5ddSPeter Ujfalusi static int udma_get_rchan(struct udma_chan *uc)
145025dcb5ddSPeter Ujfalusi {
145125dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
1452eb91224eSKishon Vijay Abraham I 	int ret;
145325dcb5ddSPeter Ujfalusi 
145425dcb5ddSPeter Ujfalusi 	if (uc->rchan) {
145525dcb5ddSPeter Ujfalusi 		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
145625dcb5ddSPeter Ujfalusi 			uc->id, uc->rchan->id);
145725dcb5ddSPeter Ujfalusi 		return 0;
145825dcb5ddSPeter Ujfalusi 	}
145925dcb5ddSPeter Ujfalusi 
1460d2abc982SPeter Ujfalusi 	/*
1461d2abc982SPeter Ujfalusi 	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1462d2abc982SPeter Ujfalusi 	 * For PKTDMA mapped channels it is configured to a channel which must
1463d2abc982SPeter Ujfalusi 	 * be used to service the peripheral.
1464d2abc982SPeter Ujfalusi 	 */
1465d2abc982SPeter Ujfalusi 	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1466d2abc982SPeter Ujfalusi 					 uc->config.mapped_channel_id);
1467eb91224eSKishon Vijay Abraham I 	if (IS_ERR(uc->rchan)) {
1468eb91224eSKishon Vijay Abraham I 		ret = PTR_ERR(uc->rchan);
1469eb91224eSKishon Vijay Abraham I 		uc->rchan = NULL;
1470eb91224eSKishon Vijay Abraham I 		return ret;
1471eb91224eSKishon Vijay Abraham I 	}
147225dcb5ddSPeter Ujfalusi 
1473eb91224eSKishon Vijay Abraham I 	return 0;
147425dcb5ddSPeter Ujfalusi }
147525dcb5ddSPeter Ujfalusi 
udma_get_chan_pair(struct udma_chan * uc)147625dcb5ddSPeter Ujfalusi static int udma_get_chan_pair(struct udma_chan *uc)
147725dcb5ddSPeter Ujfalusi {
147825dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
147925dcb5ddSPeter Ujfalusi 	int chan_id, end;
148025dcb5ddSPeter Ujfalusi 
148125dcb5ddSPeter Ujfalusi 	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
148225dcb5ddSPeter Ujfalusi 		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
148325dcb5ddSPeter Ujfalusi 			 uc->id, uc->tchan->id);
148425dcb5ddSPeter Ujfalusi 		return 0;
148525dcb5ddSPeter Ujfalusi 	}
148625dcb5ddSPeter Ujfalusi 
148725dcb5ddSPeter Ujfalusi 	if (uc->tchan) {
148825dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
148925dcb5ddSPeter Ujfalusi 			uc->id, uc->tchan->id);
149025dcb5ddSPeter Ujfalusi 		return -EBUSY;
149125dcb5ddSPeter Ujfalusi 	} else if (uc->rchan) {
149225dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
149325dcb5ddSPeter Ujfalusi 			uc->id, uc->rchan->id);
149425dcb5ddSPeter Ujfalusi 		return -EBUSY;
149525dcb5ddSPeter Ujfalusi 	}
149625dcb5ddSPeter Ujfalusi 
149725dcb5ddSPeter Ujfalusi 	/* Can be optimized, but let's have it like this for now */
149825dcb5ddSPeter Ujfalusi 	end = min(ud->tchan_cnt, ud->rchan_cnt);
149988448980SPeter Ujfalusi 	/*
150088448980SPeter Ujfalusi 	 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
150188448980SPeter Ujfalusi 	 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
150288448980SPeter Ujfalusi 	 */
150388448980SPeter Ujfalusi 	chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
150425dcb5ddSPeter Ujfalusi 	for (; chan_id < end; chan_id++) {
150525dcb5ddSPeter Ujfalusi 		if (!test_bit(chan_id, ud->tchan_map) &&
150625dcb5ddSPeter Ujfalusi 		    !test_bit(chan_id, ud->rchan_map))
150725dcb5ddSPeter Ujfalusi 			break;
150825dcb5ddSPeter Ujfalusi 	}
150925dcb5ddSPeter Ujfalusi 
151025dcb5ddSPeter Ujfalusi 	if (chan_id == end)
151125dcb5ddSPeter Ujfalusi 		return -ENOENT;
151225dcb5ddSPeter Ujfalusi 
151325dcb5ddSPeter Ujfalusi 	set_bit(chan_id, ud->tchan_map);
151425dcb5ddSPeter Ujfalusi 	set_bit(chan_id, ud->rchan_map);
151525dcb5ddSPeter Ujfalusi 	uc->tchan = &ud->tchans[chan_id];
151625dcb5ddSPeter Ujfalusi 	uc->rchan = &ud->rchans[chan_id];
151725dcb5ddSPeter Ujfalusi 
1518d2abc982SPeter Ujfalusi 	/* UDMA does not use tx flows */
1519d2abc982SPeter Ujfalusi 	uc->tchan->tflow_id = -1;
1520d2abc982SPeter Ujfalusi 
152125dcb5ddSPeter Ujfalusi 	return 0;
152225dcb5ddSPeter Ujfalusi }
152325dcb5ddSPeter Ujfalusi 
udma_get_rflow(struct udma_chan * uc,int flow_id)152425dcb5ddSPeter Ujfalusi static int udma_get_rflow(struct udma_chan *uc, int flow_id)
152525dcb5ddSPeter Ujfalusi {
152625dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
1527eb91224eSKishon Vijay Abraham I 	int ret;
152825dcb5ddSPeter Ujfalusi 
152925dcb5ddSPeter Ujfalusi 	if (!uc->rchan) {
153025dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
153125dcb5ddSPeter Ujfalusi 		return -EINVAL;
153225dcb5ddSPeter Ujfalusi 	}
153325dcb5ddSPeter Ujfalusi 
153425dcb5ddSPeter Ujfalusi 	if (uc->rflow) {
153525dcb5ddSPeter Ujfalusi 		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
153625dcb5ddSPeter Ujfalusi 			uc->id, uc->rflow->id);
153725dcb5ddSPeter Ujfalusi 		return 0;
153825dcb5ddSPeter Ujfalusi 	}
153925dcb5ddSPeter Ujfalusi 
154025dcb5ddSPeter Ujfalusi 	uc->rflow = __udma_get_rflow(ud, flow_id);
1541eb91224eSKishon Vijay Abraham I 	if (IS_ERR(uc->rflow)) {
1542eb91224eSKishon Vijay Abraham I 		ret = PTR_ERR(uc->rflow);
1543eb91224eSKishon Vijay Abraham I 		uc->rflow = NULL;
1544eb91224eSKishon Vijay Abraham I 		return ret;
1545eb91224eSKishon Vijay Abraham I 	}
154625dcb5ddSPeter Ujfalusi 
1547eb91224eSKishon Vijay Abraham I 	return 0;
154825dcb5ddSPeter Ujfalusi }
154925dcb5ddSPeter Ujfalusi 
bcdma_put_bchan(struct udma_chan * uc)155001779473SPeter Ujfalusi static void bcdma_put_bchan(struct udma_chan *uc)
155101779473SPeter Ujfalusi {
155201779473SPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
155301779473SPeter Ujfalusi 
155401779473SPeter Ujfalusi 	if (uc->bchan) {
155501779473SPeter Ujfalusi 		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
155601779473SPeter Ujfalusi 			uc->bchan->id);
155701779473SPeter Ujfalusi 		clear_bit(uc->bchan->id, ud->bchan_map);
155801779473SPeter Ujfalusi 		uc->bchan = NULL;
155901779473SPeter Ujfalusi 		uc->tchan = NULL;
156001779473SPeter Ujfalusi 	}
156101779473SPeter Ujfalusi }
156201779473SPeter Ujfalusi 
udma_put_rchan(struct udma_chan * uc)156325dcb5ddSPeter Ujfalusi static void udma_put_rchan(struct udma_chan *uc)
156425dcb5ddSPeter Ujfalusi {
156525dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
156625dcb5ddSPeter Ujfalusi 
156725dcb5ddSPeter Ujfalusi 	if (uc->rchan) {
156825dcb5ddSPeter Ujfalusi 		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
156925dcb5ddSPeter Ujfalusi 			uc->rchan->id);
157025dcb5ddSPeter Ujfalusi 		clear_bit(uc->rchan->id, ud->rchan_map);
157125dcb5ddSPeter Ujfalusi 		uc->rchan = NULL;
157225dcb5ddSPeter Ujfalusi 	}
157325dcb5ddSPeter Ujfalusi }
157425dcb5ddSPeter Ujfalusi 
udma_put_tchan(struct udma_chan * uc)157525dcb5ddSPeter Ujfalusi static void udma_put_tchan(struct udma_chan *uc)
157625dcb5ddSPeter Ujfalusi {
157725dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
157825dcb5ddSPeter Ujfalusi 
157925dcb5ddSPeter Ujfalusi 	if (uc->tchan) {
158025dcb5ddSPeter Ujfalusi 		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
158125dcb5ddSPeter Ujfalusi 			uc->tchan->id);
158225dcb5ddSPeter Ujfalusi 		clear_bit(uc->tchan->id, ud->tchan_map);
1583d2abc982SPeter Ujfalusi 
1584d2abc982SPeter Ujfalusi 		if (uc->tchan->tflow_id >= 0)
1585d2abc982SPeter Ujfalusi 			clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1586d2abc982SPeter Ujfalusi 
158725dcb5ddSPeter Ujfalusi 		uc->tchan = NULL;
158825dcb5ddSPeter Ujfalusi 	}
158925dcb5ddSPeter Ujfalusi }
159025dcb5ddSPeter Ujfalusi 
udma_put_rflow(struct udma_chan * uc)159125dcb5ddSPeter Ujfalusi static void udma_put_rflow(struct udma_chan *uc)
159225dcb5ddSPeter Ujfalusi {
159325dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
159425dcb5ddSPeter Ujfalusi 
159525dcb5ddSPeter Ujfalusi 	if (uc->rflow) {
159625dcb5ddSPeter Ujfalusi 		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
159725dcb5ddSPeter Ujfalusi 			uc->rflow->id);
159825dcb5ddSPeter Ujfalusi 		__udma_put_rflow(ud, uc->rflow);
159925dcb5ddSPeter Ujfalusi 		uc->rflow = NULL;
160025dcb5ddSPeter Ujfalusi 	}
160125dcb5ddSPeter Ujfalusi }
160225dcb5ddSPeter Ujfalusi 
bcdma_free_bchan_resources(struct udma_chan * uc)160301779473SPeter Ujfalusi static void bcdma_free_bchan_resources(struct udma_chan *uc)
160401779473SPeter Ujfalusi {
160501779473SPeter Ujfalusi 	if (!uc->bchan)
160601779473SPeter Ujfalusi 		return;
160701779473SPeter Ujfalusi 
160801779473SPeter Ujfalusi 	k3_ringacc_ring_free(uc->bchan->tc_ring);
160901779473SPeter Ujfalusi 	k3_ringacc_ring_free(uc->bchan->t_ring);
161001779473SPeter Ujfalusi 	uc->bchan->tc_ring = NULL;
161101779473SPeter Ujfalusi 	uc->bchan->t_ring = NULL;
161201779473SPeter Ujfalusi 	k3_configure_chan_coherency(&uc->vc.chan, 0);
161301779473SPeter Ujfalusi 
161401779473SPeter Ujfalusi 	bcdma_put_bchan(uc);
161501779473SPeter Ujfalusi }
161601779473SPeter Ujfalusi 
bcdma_alloc_bchan_resources(struct udma_chan * uc)161701779473SPeter Ujfalusi static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
161801779473SPeter Ujfalusi {
161901779473SPeter Ujfalusi 	struct k3_ring_cfg ring_cfg;
162001779473SPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
162101779473SPeter Ujfalusi 	int ret;
162201779473SPeter Ujfalusi 
162301779473SPeter Ujfalusi 	ret = bcdma_get_bchan(uc);
162401779473SPeter Ujfalusi 	if (ret)
162501779473SPeter Ujfalusi 		return ret;
162601779473SPeter Ujfalusi 
162701779473SPeter Ujfalusi 	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
162801779473SPeter Ujfalusi 					    &uc->bchan->t_ring,
162901779473SPeter Ujfalusi 					    &uc->bchan->tc_ring);
163001779473SPeter Ujfalusi 	if (ret) {
163101779473SPeter Ujfalusi 		ret = -EBUSY;
163201779473SPeter Ujfalusi 		goto err_ring;
163301779473SPeter Ujfalusi 	}
163401779473SPeter Ujfalusi 
163501779473SPeter Ujfalusi 	memset(&ring_cfg, 0, sizeof(ring_cfg));
163601779473SPeter Ujfalusi 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
163701779473SPeter Ujfalusi 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
163801779473SPeter Ujfalusi 	ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
163901779473SPeter Ujfalusi 
164001779473SPeter Ujfalusi 	k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
164101779473SPeter Ujfalusi 	ring_cfg.asel = ud->asel;
164201779473SPeter Ujfalusi 	ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
164301779473SPeter Ujfalusi 
164401779473SPeter Ujfalusi 	ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
164501779473SPeter Ujfalusi 	if (ret)
164601779473SPeter Ujfalusi 		goto err_ringcfg;
164701779473SPeter Ujfalusi 
164801779473SPeter Ujfalusi 	return 0;
164901779473SPeter Ujfalusi 
165001779473SPeter Ujfalusi err_ringcfg:
165101779473SPeter Ujfalusi 	k3_ringacc_ring_free(uc->bchan->tc_ring);
165201779473SPeter Ujfalusi 	uc->bchan->tc_ring = NULL;
165301779473SPeter Ujfalusi 	k3_ringacc_ring_free(uc->bchan->t_ring);
165401779473SPeter Ujfalusi 	uc->bchan->t_ring = NULL;
165501779473SPeter Ujfalusi 	k3_configure_chan_coherency(&uc->vc.chan, 0);
165601779473SPeter Ujfalusi err_ring:
165701779473SPeter Ujfalusi 	bcdma_put_bchan(uc);
165801779473SPeter Ujfalusi 
165901779473SPeter Ujfalusi 	return ret;
166001779473SPeter Ujfalusi }
166101779473SPeter Ujfalusi 
udma_free_tx_resources(struct udma_chan * uc)166225dcb5ddSPeter Ujfalusi static void udma_free_tx_resources(struct udma_chan *uc)
166325dcb5ddSPeter Ujfalusi {
166425dcb5ddSPeter Ujfalusi 	if (!uc->tchan)
166525dcb5ddSPeter Ujfalusi 		return;
166625dcb5ddSPeter Ujfalusi 
166725dcb5ddSPeter Ujfalusi 	k3_ringacc_ring_free(uc->tchan->t_ring);
166825dcb5ddSPeter Ujfalusi 	k3_ringacc_ring_free(uc->tchan->tc_ring);
166925dcb5ddSPeter Ujfalusi 	uc->tchan->t_ring = NULL;
167025dcb5ddSPeter Ujfalusi 	uc->tchan->tc_ring = NULL;
167125dcb5ddSPeter Ujfalusi 
167225dcb5ddSPeter Ujfalusi 	udma_put_tchan(uc);
167325dcb5ddSPeter Ujfalusi }
167425dcb5ddSPeter Ujfalusi 
udma_alloc_tx_resources(struct udma_chan * uc)167525dcb5ddSPeter Ujfalusi static int udma_alloc_tx_resources(struct udma_chan *uc)
167625dcb5ddSPeter Ujfalusi {
167725dcb5ddSPeter Ujfalusi 	struct k3_ring_cfg ring_cfg;
167825dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
167901779473SPeter Ujfalusi 	struct udma_tchan *tchan;
168001779473SPeter Ujfalusi 	int ring_idx, ret;
168125dcb5ddSPeter Ujfalusi 
168225dcb5ddSPeter Ujfalusi 	ret = udma_get_tchan(uc);
168325dcb5ddSPeter Ujfalusi 	if (ret)
168425dcb5ddSPeter Ujfalusi 		return ret;
168525dcb5ddSPeter Ujfalusi 
168601779473SPeter Ujfalusi 	tchan = uc->tchan;
1687d2abc982SPeter Ujfalusi 	if (tchan->tflow_id >= 0)
1688d2abc982SPeter Ujfalusi 		ring_idx = tchan->tflow_id;
1689d2abc982SPeter Ujfalusi 	else
169001779473SPeter Ujfalusi 		ring_idx = ud->bchan_cnt + tchan->id;
169101779473SPeter Ujfalusi 
169201779473SPeter Ujfalusi 	ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
169301779473SPeter Ujfalusi 					    &tchan->t_ring,
169401779473SPeter Ujfalusi 					    &tchan->tc_ring);
16954927b1abSPeter Ujfalusi 	if (ret) {
169625dcb5ddSPeter Ujfalusi 		ret = -EBUSY;
16974927b1abSPeter Ujfalusi 		goto err_ring;
169825dcb5ddSPeter Ujfalusi 	}
169925dcb5ddSPeter Ujfalusi 
170025dcb5ddSPeter Ujfalusi 	memset(&ring_cfg, 0, sizeof(ring_cfg));
170125dcb5ddSPeter Ujfalusi 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
170225dcb5ddSPeter Ujfalusi 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
170301779473SPeter Ujfalusi 	if (ud->match_data->type == DMA_TYPE_UDMA) {
170425dcb5ddSPeter Ujfalusi 		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
170501779473SPeter Ujfalusi 	} else {
170601779473SPeter Ujfalusi 		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
170725dcb5ddSPeter Ujfalusi 
170801779473SPeter Ujfalusi 		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
170901779473SPeter Ujfalusi 		ring_cfg.asel = uc->config.asel;
171001779473SPeter Ujfalusi 		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
171101779473SPeter Ujfalusi 	}
171201779473SPeter Ujfalusi 
171301779473SPeter Ujfalusi 	ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
171401779473SPeter Ujfalusi 	ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
171525dcb5ddSPeter Ujfalusi 
171625dcb5ddSPeter Ujfalusi 	if (ret)
171725dcb5ddSPeter Ujfalusi 		goto err_ringcfg;
171825dcb5ddSPeter Ujfalusi 
171925dcb5ddSPeter Ujfalusi 	return 0;
172025dcb5ddSPeter Ujfalusi 
172125dcb5ddSPeter Ujfalusi err_ringcfg:
172225dcb5ddSPeter Ujfalusi 	k3_ringacc_ring_free(uc->tchan->tc_ring);
172325dcb5ddSPeter Ujfalusi 	uc->tchan->tc_ring = NULL;
172425dcb5ddSPeter Ujfalusi 	k3_ringacc_ring_free(uc->tchan->t_ring);
172525dcb5ddSPeter Ujfalusi 	uc->tchan->t_ring = NULL;
17264927b1abSPeter Ujfalusi err_ring:
172725dcb5ddSPeter Ujfalusi 	udma_put_tchan(uc);
172825dcb5ddSPeter Ujfalusi 
172925dcb5ddSPeter Ujfalusi 	return ret;
173025dcb5ddSPeter Ujfalusi }
173125dcb5ddSPeter Ujfalusi 
udma_free_rx_resources(struct udma_chan * uc)173225dcb5ddSPeter Ujfalusi static void udma_free_rx_resources(struct udma_chan *uc)
173325dcb5ddSPeter Ujfalusi {
173425dcb5ddSPeter Ujfalusi 	if (!uc->rchan)
173525dcb5ddSPeter Ujfalusi 		return;
173625dcb5ddSPeter Ujfalusi 
173725dcb5ddSPeter Ujfalusi 	if (uc->rflow) {
173825dcb5ddSPeter Ujfalusi 		struct udma_rflow *rflow = uc->rflow;
173925dcb5ddSPeter Ujfalusi 
174025dcb5ddSPeter Ujfalusi 		k3_ringacc_ring_free(rflow->fd_ring);
174125dcb5ddSPeter Ujfalusi 		k3_ringacc_ring_free(rflow->r_ring);
174225dcb5ddSPeter Ujfalusi 		rflow->fd_ring = NULL;
174325dcb5ddSPeter Ujfalusi 		rflow->r_ring = NULL;
174425dcb5ddSPeter Ujfalusi 
174525dcb5ddSPeter Ujfalusi 		udma_put_rflow(uc);
174625dcb5ddSPeter Ujfalusi 	}
174725dcb5ddSPeter Ujfalusi 
174825dcb5ddSPeter Ujfalusi 	udma_put_rchan(uc);
174925dcb5ddSPeter Ujfalusi }
175025dcb5ddSPeter Ujfalusi 
udma_alloc_rx_resources(struct udma_chan * uc)175125dcb5ddSPeter Ujfalusi static int udma_alloc_rx_resources(struct udma_chan *uc)
175225dcb5ddSPeter Ujfalusi {
175325dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
175425dcb5ddSPeter Ujfalusi 	struct k3_ring_cfg ring_cfg;
175525dcb5ddSPeter Ujfalusi 	struct udma_rflow *rflow;
175625dcb5ddSPeter Ujfalusi 	int fd_ring_id;
175725dcb5ddSPeter Ujfalusi 	int ret;
175825dcb5ddSPeter Ujfalusi 
175925dcb5ddSPeter Ujfalusi 	ret = udma_get_rchan(uc);
176025dcb5ddSPeter Ujfalusi 	if (ret)
176125dcb5ddSPeter Ujfalusi 		return ret;
176225dcb5ddSPeter Ujfalusi 
176325dcb5ddSPeter Ujfalusi 	/* For MEM_TO_MEM we don't need rflow or rings */
176425dcb5ddSPeter Ujfalusi 	if (uc->config.dir == DMA_MEM_TO_MEM)
176525dcb5ddSPeter Ujfalusi 		return 0;
176625dcb5ddSPeter Ujfalusi 
1767d2abc982SPeter Ujfalusi 	if (uc->config.default_flow_id >= 0)
1768d2abc982SPeter Ujfalusi 		ret = udma_get_rflow(uc, uc->config.default_flow_id);
1769d2abc982SPeter Ujfalusi 	else
177025dcb5ddSPeter Ujfalusi 		ret = udma_get_rflow(uc, uc->rchan->id);
1771d2abc982SPeter Ujfalusi 
177225dcb5ddSPeter Ujfalusi 	if (ret) {
177325dcb5ddSPeter Ujfalusi 		ret = -EBUSY;
177425dcb5ddSPeter Ujfalusi 		goto err_rflow;
177525dcb5ddSPeter Ujfalusi 	}
177625dcb5ddSPeter Ujfalusi 
177725dcb5ddSPeter Ujfalusi 	rflow = uc->rflow;
1778d2abc982SPeter Ujfalusi 	if (ud->tflow_cnt)
1779d2abc982SPeter Ujfalusi 		fd_ring_id = ud->tflow_cnt + rflow->id;
1780d2abc982SPeter Ujfalusi 	else
178101779473SPeter Ujfalusi 		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
178201779473SPeter Ujfalusi 			     uc->rchan->id;
1783d2abc982SPeter Ujfalusi 
17844927b1abSPeter Ujfalusi 	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
17854927b1abSPeter Ujfalusi 					    &rflow->fd_ring, &rflow->r_ring);
17864927b1abSPeter Ujfalusi 	if (ret) {
178725dcb5ddSPeter Ujfalusi 		ret = -EBUSY;
17884927b1abSPeter Ujfalusi 		goto err_ring;
178925dcb5ddSPeter Ujfalusi 	}
179025dcb5ddSPeter Ujfalusi 
179125dcb5ddSPeter Ujfalusi 	memset(&ring_cfg, 0, sizeof(ring_cfg));
179225dcb5ddSPeter Ujfalusi 
179301779473SPeter Ujfalusi 	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
179401779473SPeter Ujfalusi 	if (ud->match_data->type == DMA_TYPE_UDMA) {
179525dcb5ddSPeter Ujfalusi 		if (uc->config.pkt_mode)
179625dcb5ddSPeter Ujfalusi 			ring_cfg.size = SG_MAX_SEGMENTS;
179725dcb5ddSPeter Ujfalusi 		else
179825dcb5ddSPeter Ujfalusi 			ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
179925dcb5ddSPeter Ujfalusi 
180025dcb5ddSPeter Ujfalusi 		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
180101779473SPeter Ujfalusi 	} else {
180201779473SPeter Ujfalusi 		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
180301779473SPeter Ujfalusi 		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
180401779473SPeter Ujfalusi 
180501779473SPeter Ujfalusi 		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
180601779473SPeter Ujfalusi 		ring_cfg.asel = uc->config.asel;
180701779473SPeter Ujfalusi 		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
180801779473SPeter Ujfalusi 	}
180925dcb5ddSPeter Ujfalusi 
181025dcb5ddSPeter Ujfalusi 	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
181101779473SPeter Ujfalusi 
181225dcb5ddSPeter Ujfalusi 	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
181325dcb5ddSPeter Ujfalusi 	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
181425dcb5ddSPeter Ujfalusi 
181525dcb5ddSPeter Ujfalusi 	if (ret)
181625dcb5ddSPeter Ujfalusi 		goto err_ringcfg;
181725dcb5ddSPeter Ujfalusi 
181825dcb5ddSPeter Ujfalusi 	return 0;
181925dcb5ddSPeter Ujfalusi 
182025dcb5ddSPeter Ujfalusi err_ringcfg:
182125dcb5ddSPeter Ujfalusi 	k3_ringacc_ring_free(rflow->r_ring);
182225dcb5ddSPeter Ujfalusi 	rflow->r_ring = NULL;
182325dcb5ddSPeter Ujfalusi 	k3_ringacc_ring_free(rflow->fd_ring);
182425dcb5ddSPeter Ujfalusi 	rflow->fd_ring = NULL;
18254927b1abSPeter Ujfalusi err_ring:
182625dcb5ddSPeter Ujfalusi 	udma_put_rflow(uc);
182725dcb5ddSPeter Ujfalusi err_rflow:
182825dcb5ddSPeter Ujfalusi 	udma_put_rchan(uc);
182925dcb5ddSPeter Ujfalusi 
183025dcb5ddSPeter Ujfalusi 	return ret;
183125dcb5ddSPeter Ujfalusi }
183225dcb5ddSPeter Ujfalusi 
/*
 * valid_params bitmasks for the TISCI channel configuration requests sent
 * by the *_tisci_*_channel_config() helpers below.  Each mask selects
 * which fields of the ti_sci_msg_rm_udmap_*_ch_cfg request the firmware
 * should honor.
 */

/* BCDMA block-copy channel (bchan) */
#define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)

/* BCDMA split TX channel (tchan) */
#define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)

/* BCDMA split RX channel (rchan) */
#define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)

/* UDMA TX channel (tchan) */
#define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

/* UDMA RX channel (rchan) */
#define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
186425dcb5ddSPeter Ujfalusi 
udma_tisci_m2m_channel_config(struct udma_chan * uc)186525dcb5ddSPeter Ujfalusi static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
186625dcb5ddSPeter Ujfalusi {
186725dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
186825dcb5ddSPeter Ujfalusi 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
186925dcb5ddSPeter Ujfalusi 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
187025dcb5ddSPeter Ujfalusi 	struct udma_tchan *tchan = uc->tchan;
187125dcb5ddSPeter Ujfalusi 	struct udma_rchan *rchan = uc->rchan;
1872046d679bSPeter Ujfalusi 	u8 burst_size = 0;
1873747ee57bSPeter Ujfalusi 	int ret;
1874046d679bSPeter Ujfalusi 	u8 tpl;
187525dcb5ddSPeter Ujfalusi 
187625dcb5ddSPeter Ujfalusi 	/* Non synchronized - mem to mem type of transfer */
187725dcb5ddSPeter Ujfalusi 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
187825dcb5ddSPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
187925dcb5ddSPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
188025dcb5ddSPeter Ujfalusi 
1881046d679bSPeter Ujfalusi 	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1882046d679bSPeter Ujfalusi 		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1883046d679bSPeter Ujfalusi 
1884046d679bSPeter Ujfalusi 		burst_size = ud->match_data->burst_size[tpl];
1885046d679bSPeter Ujfalusi 	}
1886046d679bSPeter Ujfalusi 
188701779473SPeter Ujfalusi 	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
188825dcb5ddSPeter Ujfalusi 	req_tx.nav_id = tisci_rm->tisci_dev_id;
188925dcb5ddSPeter Ujfalusi 	req_tx.index = tchan->id;
189025dcb5ddSPeter Ujfalusi 	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
189125dcb5ddSPeter Ujfalusi 	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
189225dcb5ddSPeter Ujfalusi 	req_tx.txcq_qnum = tc_ring;
18930ebcf1a2SPeter Ujfalusi 	req_tx.tx_atype = ud->atype;
1894046d679bSPeter Ujfalusi 	if (burst_size) {
1895046d679bSPeter Ujfalusi 		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1896046d679bSPeter Ujfalusi 		req_tx.tx_burst_size = burst_size;
1897046d679bSPeter Ujfalusi 	}
189825dcb5ddSPeter Ujfalusi 
189925dcb5ddSPeter Ujfalusi 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
190025dcb5ddSPeter Ujfalusi 	if (ret) {
190125dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
190225dcb5ddSPeter Ujfalusi 		return ret;
190325dcb5ddSPeter Ujfalusi 	}
190425dcb5ddSPeter Ujfalusi 
190501779473SPeter Ujfalusi 	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
190625dcb5ddSPeter Ujfalusi 	req_rx.nav_id = tisci_rm->tisci_dev_id;
190725dcb5ddSPeter Ujfalusi 	req_rx.index = rchan->id;
190825dcb5ddSPeter Ujfalusi 	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
190925dcb5ddSPeter Ujfalusi 	req_rx.rxcq_qnum = tc_ring;
191025dcb5ddSPeter Ujfalusi 	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
19110ebcf1a2SPeter Ujfalusi 	req_rx.rx_atype = ud->atype;
1912046d679bSPeter Ujfalusi 	if (burst_size) {
1913046d679bSPeter Ujfalusi 		req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1914046d679bSPeter Ujfalusi 		req_rx.rx_burst_size = burst_size;
1915046d679bSPeter Ujfalusi 	}
191625dcb5ddSPeter Ujfalusi 
191725dcb5ddSPeter Ujfalusi 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
191825dcb5ddSPeter Ujfalusi 	if (ret)
191925dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
192025dcb5ddSPeter Ujfalusi 
192125dcb5ddSPeter Ujfalusi 	return ret;
192225dcb5ddSPeter Ujfalusi }
192325dcb5ddSPeter Ujfalusi 
bcdma_tisci_m2m_channel_config(struct udma_chan * uc)192401779473SPeter Ujfalusi static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
192501779473SPeter Ujfalusi {
192601779473SPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
192701779473SPeter Ujfalusi 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
192801779473SPeter Ujfalusi 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
192901779473SPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
193001779473SPeter Ujfalusi 	struct udma_bchan *bchan = uc->bchan;
1931046d679bSPeter Ujfalusi 	u8 burst_size = 0;
1932747ee57bSPeter Ujfalusi 	int ret;
1933046d679bSPeter Ujfalusi 	u8 tpl;
1934046d679bSPeter Ujfalusi 
1935046d679bSPeter Ujfalusi 	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1936046d679bSPeter Ujfalusi 		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1937046d679bSPeter Ujfalusi 
1938046d679bSPeter Ujfalusi 		burst_size = ud->match_data->burst_size[tpl];
1939046d679bSPeter Ujfalusi 	}
194001779473SPeter Ujfalusi 
194101779473SPeter Ujfalusi 	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
194201779473SPeter Ujfalusi 	req_tx.nav_id = tisci_rm->tisci_dev_id;
194301779473SPeter Ujfalusi 	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
194401779473SPeter Ujfalusi 	req_tx.index = bchan->id;
1945046d679bSPeter Ujfalusi 	if (burst_size) {
1946046d679bSPeter Ujfalusi 		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1947046d679bSPeter Ujfalusi 		req_tx.tx_burst_size = burst_size;
1948046d679bSPeter Ujfalusi 	}
194901779473SPeter Ujfalusi 
195001779473SPeter Ujfalusi 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
195101779473SPeter Ujfalusi 	if (ret)
195201779473SPeter Ujfalusi 		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
195301779473SPeter Ujfalusi 
195401779473SPeter Ujfalusi 	return ret;
195501779473SPeter Ujfalusi }
195601779473SPeter Ujfalusi 
udma_tisci_tx_channel_config(struct udma_chan * uc)195725dcb5ddSPeter Ujfalusi static int udma_tisci_tx_channel_config(struct udma_chan *uc)
195825dcb5ddSPeter Ujfalusi {
195925dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
196025dcb5ddSPeter Ujfalusi 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
196125dcb5ddSPeter Ujfalusi 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
196225dcb5ddSPeter Ujfalusi 	struct udma_tchan *tchan = uc->tchan;
196325dcb5ddSPeter Ujfalusi 	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
196425dcb5ddSPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
196525dcb5ddSPeter Ujfalusi 	u32 mode, fetch_size;
1966747ee57bSPeter Ujfalusi 	int ret;
196725dcb5ddSPeter Ujfalusi 
196825dcb5ddSPeter Ujfalusi 	if (uc->config.pkt_mode) {
196925dcb5ddSPeter Ujfalusi 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
197025dcb5ddSPeter Ujfalusi 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
197125dcb5ddSPeter Ujfalusi 						   uc->config.psd_size, 0);
197225dcb5ddSPeter Ujfalusi 	} else {
197325dcb5ddSPeter Ujfalusi 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
197425dcb5ddSPeter Ujfalusi 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
197525dcb5ddSPeter Ujfalusi 	}
197625dcb5ddSPeter Ujfalusi 
197701779473SPeter Ujfalusi 	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
197825dcb5ddSPeter Ujfalusi 	req_tx.nav_id = tisci_rm->tisci_dev_id;
197925dcb5ddSPeter Ujfalusi 	req_tx.index = tchan->id;
198025dcb5ddSPeter Ujfalusi 	req_tx.tx_chan_type = mode;
198125dcb5ddSPeter Ujfalusi 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
198225dcb5ddSPeter Ujfalusi 	req_tx.tx_fetch_size = fetch_size >> 2;
198325dcb5ddSPeter Ujfalusi 	req_tx.txcq_qnum = tc_ring;
19840ebcf1a2SPeter Ujfalusi 	req_tx.tx_atype = uc->config.atype;
19855e1cb1cbSPeter Ujfalusi 	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
19865e1cb1cbSPeter Ujfalusi 	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
19875e1cb1cbSPeter Ujfalusi 		/* wait for peer to complete the teardown for PDMAs */
19885e1cb1cbSPeter Ujfalusi 		req_tx.valid_params |=
19895e1cb1cbSPeter Ujfalusi 				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
19905e1cb1cbSPeter Ujfalusi 		req_tx.tx_tdtype = 1;
19915e1cb1cbSPeter Ujfalusi 	}
199225dcb5ddSPeter Ujfalusi 
199325dcb5ddSPeter Ujfalusi 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
199425dcb5ddSPeter Ujfalusi 	if (ret)
199525dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
199625dcb5ddSPeter Ujfalusi 
199725dcb5ddSPeter Ujfalusi 	return ret;
199825dcb5ddSPeter Ujfalusi }
199925dcb5ddSPeter Ujfalusi 
bcdma_tisci_tx_channel_config(struct udma_chan * uc)200001779473SPeter Ujfalusi static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
200101779473SPeter Ujfalusi {
200201779473SPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
200301779473SPeter Ujfalusi 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
200401779473SPeter Ujfalusi 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
200501779473SPeter Ujfalusi 	struct udma_tchan *tchan = uc->tchan;
200601779473SPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2007747ee57bSPeter Ujfalusi 	int ret;
200801779473SPeter Ujfalusi 
200901779473SPeter Ujfalusi 	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
201001779473SPeter Ujfalusi 	req_tx.nav_id = tisci_rm->tisci_dev_id;
201101779473SPeter Ujfalusi 	req_tx.index = tchan->id;
201201779473SPeter Ujfalusi 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
201301779473SPeter Ujfalusi 	if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
201401779473SPeter Ujfalusi 		/* wait for peer to complete the teardown for PDMAs */
201501779473SPeter Ujfalusi 		req_tx.valid_params |=
201601779473SPeter Ujfalusi 				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
201701779473SPeter Ujfalusi 		req_tx.tx_tdtype = 1;
201801779473SPeter Ujfalusi 	}
201901779473SPeter Ujfalusi 
202001779473SPeter Ujfalusi 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
202101779473SPeter Ujfalusi 	if (ret)
202201779473SPeter Ujfalusi 		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
202301779473SPeter Ujfalusi 
202401779473SPeter Ujfalusi 	return ret;
202501779473SPeter Ujfalusi }
202601779473SPeter Ujfalusi 
/* PKTDMA TX channels take the same TISCI configuration as BCDMA ones */
#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2028d2abc982SPeter Ujfalusi 
udma_tisci_rx_channel_config(struct udma_chan * uc)202925dcb5ddSPeter Ujfalusi static int udma_tisci_rx_channel_config(struct udma_chan *uc)
203025dcb5ddSPeter Ujfalusi {
203125dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
203225dcb5ddSPeter Ujfalusi 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
203325dcb5ddSPeter Ujfalusi 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
203425dcb5ddSPeter Ujfalusi 	struct udma_rchan *rchan = uc->rchan;
203525dcb5ddSPeter Ujfalusi 	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
203625dcb5ddSPeter Ujfalusi 	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
203725dcb5ddSPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
203825dcb5ddSPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
203925dcb5ddSPeter Ujfalusi 	u32 mode, fetch_size;
2040747ee57bSPeter Ujfalusi 	int ret;
204125dcb5ddSPeter Ujfalusi 
204225dcb5ddSPeter Ujfalusi 	if (uc->config.pkt_mode) {
204325dcb5ddSPeter Ujfalusi 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
204425dcb5ddSPeter Ujfalusi 		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
204525dcb5ddSPeter Ujfalusi 						   uc->config.psd_size, 0);
204625dcb5ddSPeter Ujfalusi 	} else {
204725dcb5ddSPeter Ujfalusi 		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
204825dcb5ddSPeter Ujfalusi 		fetch_size = sizeof(struct cppi5_desc_hdr_t);
204925dcb5ddSPeter Ujfalusi 	}
205025dcb5ddSPeter Ujfalusi 
205101779473SPeter Ujfalusi 	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
205225dcb5ddSPeter Ujfalusi 	req_rx.nav_id = tisci_rm->tisci_dev_id;
205325dcb5ddSPeter Ujfalusi 	req_rx.index = rchan->id;
205425dcb5ddSPeter Ujfalusi 	req_rx.rx_fetch_size =  fetch_size >> 2;
205525dcb5ddSPeter Ujfalusi 	req_rx.rxcq_qnum = rx_ring;
205625dcb5ddSPeter Ujfalusi 	req_rx.rx_chan_type = mode;
20570ebcf1a2SPeter Ujfalusi 	req_rx.rx_atype = uc->config.atype;
205825dcb5ddSPeter Ujfalusi 
205925dcb5ddSPeter Ujfalusi 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
206025dcb5ddSPeter Ujfalusi 	if (ret) {
206125dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
206225dcb5ddSPeter Ujfalusi 		return ret;
206325dcb5ddSPeter Ujfalusi 	}
206425dcb5ddSPeter Ujfalusi 
206525dcb5ddSPeter Ujfalusi 	flow_req.valid_params =
206625dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
206725dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
206825dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
206925dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
207025dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
207125dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
207225dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
207325dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
207425dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
207525dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
207625dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
207725dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
207825dcb5ddSPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
207925dcb5ddSPeter Ujfalusi 
208025dcb5ddSPeter Ujfalusi 	flow_req.nav_id = tisci_rm->tisci_dev_id;
208125dcb5ddSPeter Ujfalusi 	flow_req.flow_index = rchan->id;
208225dcb5ddSPeter Ujfalusi 
208325dcb5ddSPeter Ujfalusi 	if (uc->config.needs_epib)
208425dcb5ddSPeter Ujfalusi 		flow_req.rx_einfo_present = 1;
208525dcb5ddSPeter Ujfalusi 	else
208625dcb5ddSPeter Ujfalusi 		flow_req.rx_einfo_present = 0;
208725dcb5ddSPeter Ujfalusi 	if (uc->config.psd_size)
208825dcb5ddSPeter Ujfalusi 		flow_req.rx_psinfo_present = 1;
208925dcb5ddSPeter Ujfalusi 	else
209025dcb5ddSPeter Ujfalusi 		flow_req.rx_psinfo_present = 0;
209125dcb5ddSPeter Ujfalusi 	flow_req.rx_error_handling = 1;
209225dcb5ddSPeter Ujfalusi 	flow_req.rx_dest_qnum = rx_ring;
209325dcb5ddSPeter Ujfalusi 	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
209425dcb5ddSPeter Ujfalusi 	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
209525dcb5ddSPeter Ujfalusi 	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
209625dcb5ddSPeter Ujfalusi 	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
209725dcb5ddSPeter Ujfalusi 	flow_req.rx_fdq0_sz0_qnum = fd_ring;
209825dcb5ddSPeter Ujfalusi 	flow_req.rx_fdq1_qnum = fd_ring;
209925dcb5ddSPeter Ujfalusi 	flow_req.rx_fdq2_qnum = fd_ring;
210025dcb5ddSPeter Ujfalusi 	flow_req.rx_fdq3_qnum = fd_ring;
210125dcb5ddSPeter Ujfalusi 
210225dcb5ddSPeter Ujfalusi 	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
210325dcb5ddSPeter Ujfalusi 
210425dcb5ddSPeter Ujfalusi 	if (ret)
210525dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
210625dcb5ddSPeter Ujfalusi 
210725dcb5ddSPeter Ujfalusi 	return 0;
210825dcb5ddSPeter Ujfalusi }
210925dcb5ddSPeter Ujfalusi 
bcdma_tisci_rx_channel_config(struct udma_chan * uc)211001779473SPeter Ujfalusi static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
211101779473SPeter Ujfalusi {
211201779473SPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
211301779473SPeter Ujfalusi 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
211401779473SPeter Ujfalusi 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
211501779473SPeter Ujfalusi 	struct udma_rchan *rchan = uc->rchan;
211601779473SPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2117747ee57bSPeter Ujfalusi 	int ret;
211801779473SPeter Ujfalusi 
211901779473SPeter Ujfalusi 	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
212001779473SPeter Ujfalusi 	req_rx.nav_id = tisci_rm->tisci_dev_id;
212101779473SPeter Ujfalusi 	req_rx.index = rchan->id;
212201779473SPeter Ujfalusi 
212301779473SPeter Ujfalusi 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
212401779473SPeter Ujfalusi 	if (ret)
212501779473SPeter Ujfalusi 		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
212601779473SPeter Ujfalusi 
212701779473SPeter Ujfalusi 	return ret;
212801779473SPeter Ujfalusi }
212901779473SPeter Ujfalusi 
pktdma_tisci_rx_channel_config(struct udma_chan * uc)2130d2abc982SPeter Ujfalusi static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2131d2abc982SPeter Ujfalusi {
2132d2abc982SPeter Ujfalusi 	struct udma_dev *ud = uc->ud;
2133d2abc982SPeter Ujfalusi 	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2134d2abc982SPeter Ujfalusi 	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2135d2abc982SPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2136d2abc982SPeter Ujfalusi 	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2137747ee57bSPeter Ujfalusi 	int ret;
2138d2abc982SPeter Ujfalusi 
2139d2abc982SPeter Ujfalusi 	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2140d2abc982SPeter Ujfalusi 	req_rx.nav_id = tisci_rm->tisci_dev_id;
2141d2abc982SPeter Ujfalusi 	req_rx.index = uc->rchan->id;
2142d2abc982SPeter Ujfalusi 
2143d2abc982SPeter Ujfalusi 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2144d2abc982SPeter Ujfalusi 	if (ret) {
2145d2abc982SPeter Ujfalusi 		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2146d2abc982SPeter Ujfalusi 		return ret;
2147d2abc982SPeter Ujfalusi 	}
2148d2abc982SPeter Ujfalusi 
2149d2abc982SPeter Ujfalusi 	flow_req.valid_params =
2150d2abc982SPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2151d2abc982SPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2152d2abc982SPeter Ujfalusi 		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2153d2abc982SPeter Ujfalusi 
2154d2abc982SPeter Ujfalusi 	flow_req.nav_id = tisci_rm->tisci_dev_id;
2155d2abc982SPeter Ujfalusi 	flow_req.flow_index = uc->rflow->id;
2156d2abc982SPeter Ujfalusi 
2157d2abc982SPeter Ujfalusi 	if (uc->config.needs_epib)
2158d2abc982SPeter Ujfalusi 		flow_req.rx_einfo_present = 1;
2159d2abc982SPeter Ujfalusi 	else
2160d2abc982SPeter Ujfalusi 		flow_req.rx_einfo_present = 0;
2161d2abc982SPeter Ujfalusi 	if (uc->config.psd_size)
2162d2abc982SPeter Ujfalusi 		flow_req.rx_psinfo_present = 1;
2163d2abc982SPeter Ujfalusi 	else
2164d2abc982SPeter Ujfalusi 		flow_req.rx_psinfo_present = 0;
2165d2abc982SPeter Ujfalusi 	flow_req.rx_error_handling = 1;
2166d2abc982SPeter Ujfalusi 
2167d2abc982SPeter Ujfalusi 	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2168d2abc982SPeter Ujfalusi 
2169d2abc982SPeter Ujfalusi 	if (ret)
2170d2abc982SPeter Ujfalusi 		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2171d2abc982SPeter Ujfalusi 			ret);
2172d2abc982SPeter Ujfalusi 
2173d2abc982SPeter Ujfalusi 	return ret;
2174d2abc982SPeter Ujfalusi }
2175d2abc982SPeter Ujfalusi 
/*
 * dma_device.device_alloc_chan_resources callback for UDMA channels.
 *
 * Based on uc->config.dir it allocates the tchan/rchan (and rflow) plus
 * their rings, configures the channel through TISCI, pairs the PSI-L
 * source and destination threads and requests the ring completion
 * interrupt (and, for slave TR mode channels, the UDMA TR event
 * interrupt).  On failure everything acquired so far is released in
 * reverse order via the goto ladder at the bottom.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int udma_alloc_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_soc_data *soc_data = ud->soc_data;
	struct k3_ring *irq_ring;
	u32 irq_udma_idx;
	int ret;

	uc->dma_dev = ud->dev;

	/* Packet mode and MEM_TO_MEM channels take descriptors from a pool */
	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
		uc->use_dma_pool = true;
		/* in case of MEM_TO_MEM we have maximum of two TRs */
		if (uc->config.dir == DMA_MEM_TO_MEM) {
			uc->config.hdesc_size = cppi5_trdesc_calc_size(
					sizeof(struct cppi5_tr_type15_t), 2);
			uc->config.pkt_mode = false;
		}
	}

	if (uc->use_dma_pool) {
		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
						 uc->config.hdesc_size,
						 ud->desc_align,
						 0);
		if (!uc->hdesc_pool) {
			dev_err(ud->ddev.dev,
				"Descriptor pool allocation failed\n");
			uc->use_dma_pool = false;
			ret = -ENOMEM;
			goto err_cleanup;
		}
	}

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = udma_get_chan_pair(uc);
		if (ret)
			goto err_cleanup;

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			/* the rchan reserved by the pairing is not freed by
			 * udma_free_tx_resources(), drop it explicitly */
			udma_put_rchan(uc);
			goto err_cleanup;
		}

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			udma_free_tx_resources(uc);
			goto err_cleanup;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_m2m_channel_config(uc);
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_cleanup;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_cleanup;

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->rflow->r_ring;
		/* RX TR events sit at a SoC-specific offset in the OES space */
		irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;

		ret = udma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		ret = -EINVAL;
		goto err_cleanup;

	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	/* a channel left running (e.g. by a crashed user) must be reset */
	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	/* PSI-L pairing */
	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	uc->psil_paired = true;

	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			k3_ringacc_get_ring_id(irq_ring));
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	/* Event from UDMA (TR events) only needed for slave TR mode channels */
	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
		if (uc->irq_num_udma <= 0) {
			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
				irq_udma_idx);
			free_irq(uc->irq_num_ring, uc);
			ret = -EINVAL;
			goto err_irq_free;
		}

		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
				  uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
				uc->id);
			free_irq(uc->irq_num_ring, uc);
			goto err_irq_free;
		}
	} else {
		uc->irq_num_udma = 0;
	}

	udma_reset_rings(uc);

	return 0;

	/* unwind in reverse order of acquisition */
err_irq_free:
	uc->irq_num_ring = 0;
	uc->irq_num_udma = 0;
err_psi_free:
	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
err_cleanup:
	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}

	return ret;
}
238025dcb5ddSPeter Ujfalusi 
bcdma_alloc_chan_resources(struct dma_chan * chan)238101779473SPeter Ujfalusi static int bcdma_alloc_chan_resources(struct dma_chan *chan)
238201779473SPeter Ujfalusi {
238301779473SPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
238401779473SPeter Ujfalusi 	struct udma_dev *ud = to_udma_dev(chan->device);
238501779473SPeter Ujfalusi 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
238601779473SPeter Ujfalusi 	u32 irq_udma_idx, irq_ring_idx;
238701779473SPeter Ujfalusi 	int ret;
238801779473SPeter Ujfalusi 
238901779473SPeter Ujfalusi 	/* Only TR mode is supported */
239001779473SPeter Ujfalusi 	uc->config.pkt_mode = false;
239101779473SPeter Ujfalusi 
239201779473SPeter Ujfalusi 	/*
239301779473SPeter Ujfalusi 	 * Make sure that the completion is in a known state:
239401779473SPeter Ujfalusi 	 * No teardown, the channel is idle
239501779473SPeter Ujfalusi 	 */
239601779473SPeter Ujfalusi 	reinit_completion(&uc->teardown_completed);
239701779473SPeter Ujfalusi 	complete_all(&uc->teardown_completed);
239801779473SPeter Ujfalusi 	uc->state = UDMA_CHAN_IS_IDLE;
239901779473SPeter Ujfalusi 
240001779473SPeter Ujfalusi 	switch (uc->config.dir) {
240101779473SPeter Ujfalusi 	case DMA_MEM_TO_MEM:
240201779473SPeter Ujfalusi 		/* Non synchronized - mem to mem type of transfer */
240301779473SPeter Ujfalusi 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
240401779473SPeter Ujfalusi 			uc->id);
240501779473SPeter Ujfalusi 
240601779473SPeter Ujfalusi 		ret = bcdma_alloc_bchan_resources(uc);
240701779473SPeter Ujfalusi 		if (ret)
240801779473SPeter Ujfalusi 			return ret;
240901779473SPeter Ujfalusi 
241001779473SPeter Ujfalusi 		irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
241101779473SPeter Ujfalusi 		irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
241201779473SPeter Ujfalusi 
241301779473SPeter Ujfalusi 		ret = bcdma_tisci_m2m_channel_config(uc);
241401779473SPeter Ujfalusi 		break;
241501779473SPeter Ujfalusi 	case DMA_MEM_TO_DEV:
241601779473SPeter Ujfalusi 		/* Slave transfer synchronized - mem to dev (TX) trasnfer */
241701779473SPeter Ujfalusi 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
241801779473SPeter Ujfalusi 			uc->id);
241901779473SPeter Ujfalusi 
242001779473SPeter Ujfalusi 		ret = udma_alloc_tx_resources(uc);
242101779473SPeter Ujfalusi 		if (ret) {
242201779473SPeter Ujfalusi 			uc->config.remote_thread_id = -1;
242301779473SPeter Ujfalusi 			return ret;
242401779473SPeter Ujfalusi 		}
242501779473SPeter Ujfalusi 
242601779473SPeter Ujfalusi 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
242701779473SPeter Ujfalusi 		uc->config.dst_thread = uc->config.remote_thread_id;
242801779473SPeter Ujfalusi 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
242901779473SPeter Ujfalusi 
243001779473SPeter Ujfalusi 		irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
243101779473SPeter Ujfalusi 		irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
243201779473SPeter Ujfalusi 
243301779473SPeter Ujfalusi 		ret = bcdma_tisci_tx_channel_config(uc);
243401779473SPeter Ujfalusi 		break;
243501779473SPeter Ujfalusi 	case DMA_DEV_TO_MEM:
243601779473SPeter Ujfalusi 		/* Slave transfer synchronized - dev to mem (RX) trasnfer */
243701779473SPeter Ujfalusi 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
243801779473SPeter Ujfalusi 			uc->id);
243901779473SPeter Ujfalusi 
244001779473SPeter Ujfalusi 		ret = udma_alloc_rx_resources(uc);
244101779473SPeter Ujfalusi 		if (ret) {
244201779473SPeter Ujfalusi 			uc->config.remote_thread_id = -1;
244301779473SPeter Ujfalusi 			return ret;
244401779473SPeter Ujfalusi 		}
244501779473SPeter Ujfalusi 
244601779473SPeter Ujfalusi 		uc->config.src_thread = uc->config.remote_thread_id;
244701779473SPeter Ujfalusi 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
244801779473SPeter Ujfalusi 					K3_PSIL_DST_THREAD_ID_OFFSET;
244901779473SPeter Ujfalusi 
245001779473SPeter Ujfalusi 		irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
245101779473SPeter Ujfalusi 		irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
245201779473SPeter Ujfalusi 
245301779473SPeter Ujfalusi 		ret = bcdma_tisci_rx_channel_config(uc);
245401779473SPeter Ujfalusi 		break;
245501779473SPeter Ujfalusi 	default:
245601779473SPeter Ujfalusi 		/* Can not happen */
245701779473SPeter Ujfalusi 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
245801779473SPeter Ujfalusi 			__func__, uc->id, uc->config.dir);
245901779473SPeter Ujfalusi 		return -EINVAL;
246001779473SPeter Ujfalusi 	}
246101779473SPeter Ujfalusi 
246201779473SPeter Ujfalusi 	/* check if the channel configuration was successful */
246301779473SPeter Ujfalusi 	if (ret)
246401779473SPeter Ujfalusi 		goto err_res_free;
246501779473SPeter Ujfalusi 
246601779473SPeter Ujfalusi 	if (udma_is_chan_running(uc)) {
246701779473SPeter Ujfalusi 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
246801779473SPeter Ujfalusi 		udma_reset_chan(uc, false);
246901779473SPeter Ujfalusi 		if (udma_is_chan_running(uc)) {
247001779473SPeter Ujfalusi 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
247101779473SPeter Ujfalusi 			ret = -EBUSY;
247201779473SPeter Ujfalusi 			goto err_res_free;
247301779473SPeter Ujfalusi 		}
247401779473SPeter Ujfalusi 	}
247501779473SPeter Ujfalusi 
247601779473SPeter Ujfalusi 	uc->dma_dev = dmaengine_get_dma_device(chan);
247701779473SPeter Ujfalusi 	if (uc->config.dir == DMA_MEM_TO_MEM  && !uc->config.tr_trigger_type) {
247801779473SPeter Ujfalusi 		uc->config.hdesc_size = cppi5_trdesc_calc_size(
247901779473SPeter Ujfalusi 					sizeof(struct cppi5_tr_type15_t), 2);
248001779473SPeter Ujfalusi 
248101779473SPeter Ujfalusi 		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
248201779473SPeter Ujfalusi 						 uc->config.hdesc_size,
248301779473SPeter Ujfalusi 						 ud->desc_align,
248401779473SPeter Ujfalusi 						 0);
248501779473SPeter Ujfalusi 		if (!uc->hdesc_pool) {
248601779473SPeter Ujfalusi 			dev_err(ud->ddev.dev,
248701779473SPeter Ujfalusi 				"Descriptor pool allocation failed\n");
248801779473SPeter Ujfalusi 			uc->use_dma_pool = false;
2489fed1b6a0SChristophe JAILLET 			ret = -ENOMEM;
2490fed1b6a0SChristophe JAILLET 			goto err_res_free;
249101779473SPeter Ujfalusi 		}
249201779473SPeter Ujfalusi 
249301779473SPeter Ujfalusi 		uc->use_dma_pool = true;
249401779473SPeter Ujfalusi 	} else if (uc->config.dir != DMA_MEM_TO_MEM) {
249501779473SPeter Ujfalusi 		/* PSI-L pairing */
249601779473SPeter Ujfalusi 		ret = navss_psil_pair(ud, uc->config.src_thread,
249701779473SPeter Ujfalusi 				      uc->config.dst_thread);
249801779473SPeter Ujfalusi 		if (ret) {
249901779473SPeter Ujfalusi 			dev_err(ud->dev,
250001779473SPeter Ujfalusi 				"PSI-L pairing failed: 0x%04x -> 0x%04x\n",
250101779473SPeter Ujfalusi 				uc->config.src_thread, uc->config.dst_thread);
250201779473SPeter Ujfalusi 			goto err_res_free;
250301779473SPeter Ujfalusi 		}
250401779473SPeter Ujfalusi 
250501779473SPeter Ujfalusi 		uc->psil_paired = true;
250601779473SPeter Ujfalusi 	}
250701779473SPeter Ujfalusi 
250889e0032eSThomas Gleixner 	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
250901779473SPeter Ujfalusi 	if (uc->irq_num_ring <= 0) {
251001779473SPeter Ujfalusi 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
251101779473SPeter Ujfalusi 			irq_ring_idx);
251201779473SPeter Ujfalusi 		ret = -EINVAL;
251301779473SPeter Ujfalusi 		goto err_psi_free;
251401779473SPeter Ujfalusi 	}
251501779473SPeter Ujfalusi 
251601779473SPeter Ujfalusi 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
251701779473SPeter Ujfalusi 			  IRQF_TRIGGER_HIGH, uc->name, uc);
251801779473SPeter Ujfalusi 	if (ret) {
251901779473SPeter Ujfalusi 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
252001779473SPeter Ujfalusi 		goto err_irq_free;
252101779473SPeter Ujfalusi 	}
252201779473SPeter Ujfalusi 
252301779473SPeter Ujfalusi 	/* Event from BCDMA (TR events) only needed for slave channels */
252401779473SPeter Ujfalusi 	if (is_slave_direction(uc->config.dir)) {
252589e0032eSThomas Gleixner 		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
252601779473SPeter Ujfalusi 		if (uc->irq_num_udma <= 0) {
252701779473SPeter Ujfalusi 			dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
252801779473SPeter Ujfalusi 				irq_udma_idx);
252901779473SPeter Ujfalusi 			free_irq(uc->irq_num_ring, uc);
253001779473SPeter Ujfalusi 			ret = -EINVAL;
253101779473SPeter Ujfalusi 			goto err_irq_free;
253201779473SPeter Ujfalusi 		}
253301779473SPeter Ujfalusi 
253401779473SPeter Ujfalusi 		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
253501779473SPeter Ujfalusi 				  uc->name, uc);
253601779473SPeter Ujfalusi 		if (ret) {
253701779473SPeter Ujfalusi 			dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
253801779473SPeter Ujfalusi 				uc->id);
253901779473SPeter Ujfalusi 			free_irq(uc->irq_num_ring, uc);
254001779473SPeter Ujfalusi 			goto err_irq_free;
254101779473SPeter Ujfalusi 		}
254201779473SPeter Ujfalusi 	} else {
254301779473SPeter Ujfalusi 		uc->irq_num_udma = 0;
254401779473SPeter Ujfalusi 	}
254501779473SPeter Ujfalusi 
254601779473SPeter Ujfalusi 	udma_reset_rings(uc);
254701779473SPeter Ujfalusi 
254801779473SPeter Ujfalusi 	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
254901779473SPeter Ujfalusi 				  udma_check_tx_completion);
255001779473SPeter Ujfalusi 	return 0;
255101779473SPeter Ujfalusi 
255201779473SPeter Ujfalusi err_irq_free:
255301779473SPeter Ujfalusi 	uc->irq_num_ring = 0;
255401779473SPeter Ujfalusi 	uc->irq_num_udma = 0;
255501779473SPeter Ujfalusi err_psi_free:
255601779473SPeter Ujfalusi 	if (uc->psil_paired)
255701779473SPeter Ujfalusi 		navss_psil_unpair(ud, uc->config.src_thread,
255801779473SPeter Ujfalusi 				  uc->config.dst_thread);
255901779473SPeter Ujfalusi 	uc->psil_paired = false;
256001779473SPeter Ujfalusi err_res_free:
256101779473SPeter Ujfalusi 	bcdma_free_bchan_resources(uc);
256201779473SPeter Ujfalusi 	udma_free_tx_resources(uc);
256301779473SPeter Ujfalusi 	udma_free_rx_resources(uc);
256401779473SPeter Ujfalusi 
256501779473SPeter Ujfalusi 	udma_reset_uchan(uc);
256601779473SPeter Ujfalusi 
256701779473SPeter Ujfalusi 	if (uc->use_dma_pool) {
256801779473SPeter Ujfalusi 		dma_pool_destroy(uc->hdesc_pool);
256901779473SPeter Ujfalusi 		uc->use_dma_pool = false;
257001779473SPeter Ujfalusi 	}
257101779473SPeter Ujfalusi 
257201779473SPeter Ujfalusi 	return ret;
257301779473SPeter Ujfalusi }
257401779473SPeter Ujfalusi 
bcdma_router_config(struct dma_chan * chan)257501779473SPeter Ujfalusi static int bcdma_router_config(struct dma_chan *chan)
257601779473SPeter Ujfalusi {
257701779473SPeter Ujfalusi 	struct k3_event_route_data *router_data = chan->route_data;
257801779473SPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
257901779473SPeter Ujfalusi 	u32 trigger_event;
258001779473SPeter Ujfalusi 
258101779473SPeter Ujfalusi 	if (!uc->bchan)
258201779473SPeter Ujfalusi 		return -EINVAL;
258301779473SPeter Ujfalusi 
258401779473SPeter Ujfalusi 	if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
258501779473SPeter Ujfalusi 		return -EINVAL;
258601779473SPeter Ujfalusi 
258701779473SPeter Ujfalusi 	trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
258801779473SPeter Ujfalusi 	trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
258901779473SPeter Ujfalusi 
259001779473SPeter Ujfalusi 	return router_data->set_event(router_data->priv, trigger_event);
259101779473SPeter Ujfalusi }
259201779473SPeter Ujfalusi 
pktdma_alloc_chan_resources(struct dma_chan * chan)2593d2abc982SPeter Ujfalusi static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2594d2abc982SPeter Ujfalusi {
2595d2abc982SPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
2596d2abc982SPeter Ujfalusi 	struct udma_dev *ud = to_udma_dev(chan->device);
2597d2abc982SPeter Ujfalusi 	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2598d2abc982SPeter Ujfalusi 	u32 irq_ring_idx;
2599d2abc982SPeter Ujfalusi 	int ret;
2600d2abc982SPeter Ujfalusi 
2601d2abc982SPeter Ujfalusi 	/*
2602d2abc982SPeter Ujfalusi 	 * Make sure that the completion is in a known state:
2603d2abc982SPeter Ujfalusi 	 * No teardown, the channel is idle
2604d2abc982SPeter Ujfalusi 	 */
2605d2abc982SPeter Ujfalusi 	reinit_completion(&uc->teardown_completed);
2606d2abc982SPeter Ujfalusi 	complete_all(&uc->teardown_completed);
2607d2abc982SPeter Ujfalusi 	uc->state = UDMA_CHAN_IS_IDLE;
2608d2abc982SPeter Ujfalusi 
2609d2abc982SPeter Ujfalusi 	switch (uc->config.dir) {
2610d2abc982SPeter Ujfalusi 	case DMA_MEM_TO_DEV:
2611d2abc982SPeter Ujfalusi 		/* Slave transfer synchronized - mem to dev (TX) trasnfer */
2612d2abc982SPeter Ujfalusi 		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2613d2abc982SPeter Ujfalusi 			uc->id);
2614d2abc982SPeter Ujfalusi 
2615d2abc982SPeter Ujfalusi 		ret = udma_alloc_tx_resources(uc);
2616d2abc982SPeter Ujfalusi 		if (ret) {
2617d2abc982SPeter Ujfalusi 			uc->config.remote_thread_id = -1;
2618d2abc982SPeter Ujfalusi 			return ret;
2619d2abc982SPeter Ujfalusi 		}
2620d2abc982SPeter Ujfalusi 
2621d2abc982SPeter Ujfalusi 		uc->config.src_thread = ud->psil_base + uc->tchan->id;
2622d2abc982SPeter Ujfalusi 		uc->config.dst_thread = uc->config.remote_thread_id;
2623d2abc982SPeter Ujfalusi 		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2624d2abc982SPeter Ujfalusi 
2625d2abc982SPeter Ujfalusi 		irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2626d2abc982SPeter Ujfalusi 
2627d2abc982SPeter Ujfalusi 		ret = pktdma_tisci_tx_channel_config(uc);
2628d2abc982SPeter Ujfalusi 		break;
2629d2abc982SPeter Ujfalusi 	case DMA_DEV_TO_MEM:
2630d2abc982SPeter Ujfalusi 		/* Slave transfer synchronized - dev to mem (RX) trasnfer */
2631d2abc982SPeter Ujfalusi 		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2632d2abc982SPeter Ujfalusi 			uc->id);
2633d2abc982SPeter Ujfalusi 
2634d2abc982SPeter Ujfalusi 		ret = udma_alloc_rx_resources(uc);
2635d2abc982SPeter Ujfalusi 		if (ret) {
2636d2abc982SPeter Ujfalusi 			uc->config.remote_thread_id = -1;
2637d2abc982SPeter Ujfalusi 			return ret;
2638d2abc982SPeter Ujfalusi 		}
2639d2abc982SPeter Ujfalusi 
2640d2abc982SPeter Ujfalusi 		uc->config.src_thread = uc->config.remote_thread_id;
2641d2abc982SPeter Ujfalusi 		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2642d2abc982SPeter Ujfalusi 					K3_PSIL_DST_THREAD_ID_OFFSET;
2643d2abc982SPeter Ujfalusi 
2644d2abc982SPeter Ujfalusi 		irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2645d2abc982SPeter Ujfalusi 
2646d2abc982SPeter Ujfalusi 		ret = pktdma_tisci_rx_channel_config(uc);
2647d2abc982SPeter Ujfalusi 		break;
2648d2abc982SPeter Ujfalusi 	default:
2649d2abc982SPeter Ujfalusi 		/* Can not happen */
2650d2abc982SPeter Ujfalusi 		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2651d2abc982SPeter Ujfalusi 			__func__, uc->id, uc->config.dir);
2652d2abc982SPeter Ujfalusi 		return -EINVAL;
2653d2abc982SPeter Ujfalusi 	}
2654d2abc982SPeter Ujfalusi 
2655d2abc982SPeter Ujfalusi 	/* check if the channel configuration was successful */
2656d2abc982SPeter Ujfalusi 	if (ret)
2657d2abc982SPeter Ujfalusi 		goto err_res_free;
2658d2abc982SPeter Ujfalusi 
2659d2abc982SPeter Ujfalusi 	if (udma_is_chan_running(uc)) {
2660d2abc982SPeter Ujfalusi 		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2661d2abc982SPeter Ujfalusi 		udma_reset_chan(uc, false);
2662d2abc982SPeter Ujfalusi 		if (udma_is_chan_running(uc)) {
2663d2abc982SPeter Ujfalusi 			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2664d2abc982SPeter Ujfalusi 			ret = -EBUSY;
2665d2abc982SPeter Ujfalusi 			goto err_res_free;
2666d2abc982SPeter Ujfalusi 		}
2667d2abc982SPeter Ujfalusi 	}
2668d2abc982SPeter Ujfalusi 
2669d2abc982SPeter Ujfalusi 	uc->dma_dev = dmaengine_get_dma_device(chan);
2670d2abc982SPeter Ujfalusi 	uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2671d2abc982SPeter Ujfalusi 					 uc->config.hdesc_size, ud->desc_align,
2672d2abc982SPeter Ujfalusi 					 0);
2673d2abc982SPeter Ujfalusi 	if (!uc->hdesc_pool) {
2674d2abc982SPeter Ujfalusi 		dev_err(ud->ddev.dev,
2675d2abc982SPeter Ujfalusi 			"Descriptor pool allocation failed\n");
2676d2abc982SPeter Ujfalusi 		uc->use_dma_pool = false;
2677d2abc982SPeter Ujfalusi 		ret = -ENOMEM;
2678d2abc982SPeter Ujfalusi 		goto err_res_free;
2679d2abc982SPeter Ujfalusi 	}
2680d2abc982SPeter Ujfalusi 
2681d2abc982SPeter Ujfalusi 	uc->use_dma_pool = true;
2682d2abc982SPeter Ujfalusi 
2683d2abc982SPeter Ujfalusi 	/* PSI-L pairing */
2684d2abc982SPeter Ujfalusi 	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2685d2abc982SPeter Ujfalusi 	if (ret) {
2686d2abc982SPeter Ujfalusi 		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2687d2abc982SPeter Ujfalusi 			uc->config.src_thread, uc->config.dst_thread);
2688d2abc982SPeter Ujfalusi 		goto err_res_free;
2689d2abc982SPeter Ujfalusi 	}
2690d2abc982SPeter Ujfalusi 
2691d2abc982SPeter Ujfalusi 	uc->psil_paired = true;
2692d2abc982SPeter Ujfalusi 
269389e0032eSThomas Gleixner 	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2694d2abc982SPeter Ujfalusi 	if (uc->irq_num_ring <= 0) {
2695d2abc982SPeter Ujfalusi 		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2696d2abc982SPeter Ujfalusi 			irq_ring_idx);
2697d2abc982SPeter Ujfalusi 		ret = -EINVAL;
2698d2abc982SPeter Ujfalusi 		goto err_psi_free;
2699d2abc982SPeter Ujfalusi 	}
2700d2abc982SPeter Ujfalusi 
2701d2abc982SPeter Ujfalusi 	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2702d2abc982SPeter Ujfalusi 			  IRQF_TRIGGER_HIGH, uc->name, uc);
2703d2abc982SPeter Ujfalusi 	if (ret) {
2704d2abc982SPeter Ujfalusi 		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2705d2abc982SPeter Ujfalusi 		goto err_irq_free;
2706d2abc982SPeter Ujfalusi 	}
2707d2abc982SPeter Ujfalusi 
2708d2abc982SPeter Ujfalusi 	uc->irq_num_udma = 0;
2709d2abc982SPeter Ujfalusi 
2710d2abc982SPeter Ujfalusi 	udma_reset_rings(uc);
2711d2abc982SPeter Ujfalusi 
2712d2abc982SPeter Ujfalusi 	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2713d2abc982SPeter Ujfalusi 				  udma_check_tx_completion);
2714d2abc982SPeter Ujfalusi 
2715d2abc982SPeter Ujfalusi 	if (uc->tchan)
2716d2abc982SPeter Ujfalusi 		dev_dbg(ud->dev,
2717d2abc982SPeter Ujfalusi 			"chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2718d2abc982SPeter Ujfalusi 			uc->id, uc->tchan->id, uc->tchan->tflow_id,
2719d2abc982SPeter Ujfalusi 			uc->config.remote_thread_id);
2720d2abc982SPeter Ujfalusi 	else if (uc->rchan)
2721d2abc982SPeter Ujfalusi 		dev_dbg(ud->dev,
2722d2abc982SPeter Ujfalusi 			"chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2723d2abc982SPeter Ujfalusi 			uc->id, uc->rchan->id, uc->rflow->id,
2724d2abc982SPeter Ujfalusi 			uc->config.remote_thread_id);
2725d2abc982SPeter Ujfalusi 	return 0;
2726d2abc982SPeter Ujfalusi 
2727d2abc982SPeter Ujfalusi err_irq_free:
2728d2abc982SPeter Ujfalusi 	uc->irq_num_ring = 0;
2729d2abc982SPeter Ujfalusi err_psi_free:
2730d2abc982SPeter Ujfalusi 	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2731d2abc982SPeter Ujfalusi 	uc->psil_paired = false;
2732d2abc982SPeter Ujfalusi err_res_free:
2733d2abc982SPeter Ujfalusi 	udma_free_tx_resources(uc);
2734d2abc982SPeter Ujfalusi 	udma_free_rx_resources(uc);
2735d2abc982SPeter Ujfalusi 
2736d2abc982SPeter Ujfalusi 	udma_reset_uchan(uc);
2737d2abc982SPeter Ujfalusi 
2738d2abc982SPeter Ujfalusi 	dma_pool_destroy(uc->hdesc_pool);
2739d2abc982SPeter Ujfalusi 	uc->use_dma_pool = false;
2740d2abc982SPeter Ujfalusi 
2741d2abc982SPeter Ujfalusi 	return ret;
2742d2abc982SPeter Ujfalusi }
2743d2abc982SPeter Ujfalusi 
udma_slave_config(struct dma_chan * chan,struct dma_slave_config * cfg)274425dcb5ddSPeter Ujfalusi static int udma_slave_config(struct dma_chan *chan,
274525dcb5ddSPeter Ujfalusi 			     struct dma_slave_config *cfg)
274625dcb5ddSPeter Ujfalusi {
274725dcb5ddSPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
274825dcb5ddSPeter Ujfalusi 
274925dcb5ddSPeter Ujfalusi 	memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
275025dcb5ddSPeter Ujfalusi 
275125dcb5ddSPeter Ujfalusi 	return 0;
275225dcb5ddSPeter Ujfalusi }
275325dcb5ddSPeter Ujfalusi 
/*
 * udma_alloc_tr_desc - allocate and initialize a TR mode descriptor
 * @uc: the channel the descriptor is built for
 * @tr_size: size of one TR record in bytes (must be 16, 32, 64 or 128)
 * @tr_count: number of TR records the descriptor must hold
 * @dir: transfer direction, selects the return ring (rflow r_ring for
 *	 DEV_TO_MEM, tchan tc_ring otherwise)
 *
 * Allocates the udma_desc bookkeeping structure plus the hardware CPPI5
 * TR descriptor memory (from the channel's dma_pool when one is in use,
 * otherwise from coherent DMA memory), then initializes the CPPI5 header:
 * packet/flow ids, return ring policy and - for cyclic transfers -
 * infinite reload.
 *
 * Returns the descriptor on success, NULL on allocation failure or
 * unsupported @tr_size. The caller owns the descriptor and must release
 * it with udma_free_hwdesc()/kfree().
 */
static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
					    size_t tr_size, int tr_count,
					    enum dma_transfer_direction dir)
{
	struct udma_hwdesc *hwdesc;
	struct cppi5_desc_hdr_t *tr_desc;
	struct udma_desc *d;
	u32 reload_count = 0;
	u32 ring_id;

	/* Hardware only supports these four TR record sizes */
	switch (tr_size) {
	case 16:
	case 32:
	case 64:
	case 128:
		break;
	default:
		dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
		return NULL;
	}

	/* We have only one descriptor containing multiple TRs */
	d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = tr_count;

	d->hwdesc_count = 1;
	hwdesc = &d->hwdesc[0];

	/* Allocate memory for DMA ring descriptor */
	if (uc->use_dma_pool) {
		/* Pool entries are sized for the channel's configured hdesc */
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
	} else {
		hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
								 tr_count);
		hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
						uc->ud->desc_align);
		hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
						hwdesc->cppi5_desc_size,
						&hwdesc->cppi5_desc_paddr,
						GFP_NOWAIT);
	}

	if (!hwdesc->cppi5_desc_vaddr) {
		kfree(d);
		return NULL;
	}

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;

	tr_desc = hwdesc->cppi5_desc_vaddr;

	/* Cyclic transfers reload the TRs forever instead of completing */
	if (uc->cyclic)
		reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;

	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
	cppi5_desc_set_pktids(tr_desc, uc->id,
			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);

	return d;
}
282925dcb5ddSPeter Ujfalusi 
2830a9793407SPeter Ujfalusi /**
2831a9793407SPeter Ujfalusi  * udma_get_tr_counters - calculate TR counters for a given length
 * @len: Length of the transfer
2833a9793407SPeter Ujfalusi  * @align_to: Preferred alignment
2834a9793407SPeter Ujfalusi  * @tr0_cnt0: First TR icnt0
2835a9793407SPeter Ujfalusi  * @tr0_cnt1: First TR icnt1
2836a9793407SPeter Ujfalusi  * @tr1_cnt0: Second (if used) TR icnt0
2837a9793407SPeter Ujfalusi  *
2838a9793407SPeter Ujfalusi  * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2839a9793407SPeter Ujfalusi  * For len >= SZ_64K two TRs are used in a simple way:
2840a9793407SPeter Ujfalusi  * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2841a9793407SPeter Ujfalusi  * Second TR: the remaining length (tr1_cnt0)
2842a9793407SPeter Ujfalusi  *
2843a9793407SPeter Ujfalusi  * Returns the number of TRs the length needs (1 or 2)
2844a9793407SPeter Ujfalusi  * -EINVAL if the length can not be supported
2845a9793407SPeter Ujfalusi  */
static int udma_get_tr_counters(size_t len, unsigned long align_to,
				u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
{
	/* Short transfers fit in a single TR */
	if (len < SZ_64K) {
		*tr0_cnt0 = len;
		*tr0_cnt1 = 1;

		return 1;
	}

	/* icnt0 must stay below SZ_64K, so cap the alignment at 8 bytes */
	if (align_to > 3)
		align_to = 3;

	/*
	 * Find the largest aligned block size for which the block count
	 * still fits in a 16-bit icnt1, relaxing the alignment as needed.
	 */
	for (;;) {
		*tr0_cnt0 = SZ_64K - BIT(align_to);
		if (len / *tr0_cnt0 < SZ_64K)
			break;
		if (!align_to)
			return -EINVAL;
		align_to--;
	}

	*tr0_cnt1 = len / *tr0_cnt0;
	*tr1_cnt0 = len % *tr0_cnt0;

	return 2;
}
2874a9793407SPeter Ujfalusi 
/*
 * udma_prep_slave_sg_tr - build a TR mode descriptor for a slave SG list
 *
 * Each SG entry shorter than SZ_64K is covered by one type1 TR; longer
 * entries are split into two TRs via udma_get_tr_counters(). Completion
 * events are suppressed on every TR; the final TR additionally carries
 * EOP so only the end of the whole transfer is signalled.
 *
 * Returns the prepared descriptor, or NULL on allocation failure or when
 * an SG entry length cannot be expressed with the two-TR split.
 */
static struct udma_desc *
udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
		      unsigned int sglen, enum dma_transfer_direction dir,
		      unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct udma_desc *d;
	struct cppi5_tr_type1_t *tr_req = NULL;
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned int i;
	size_t tr_size;
	int num_tr = 0;
	int tr_idx = 0;
	u64 asel;

	/* estimate the number of TRs we will need */
	for_each_sg(sgl, sgent, sglen, i) {
		if (sg_dma_len(sgent) < SZ_64K)
			num_tr++;
		else
			num_tr += 2;
	}

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type1_t);
	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
	if (!d)
		return NULL;

	d->sglen = sglen;

	/* UDMA does not use address space select bits in DMA addresses */
	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
		asel = 0;
	else
		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;

	tr_req = d->hwdesc[0].tr_req_base;
	for_each_sg(sgl, sgent, sglen, i) {
		dma_addr_t sg_addr = sg_dma_address(sgent);

		/* Split by the entry's alignment so icnt0/icnt1 fit in u16 */
		num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
					      &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
		if (num_tr < 0) {
			dev_err(uc->ud->dev, "size %u is not supported\n",
				sg_dma_len(sgent));
			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		/* Suppress per-TR events; only the final TR signals EOP */
		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);

		sg_addr |= asel;
		tr_req[tr_idx].addr = sg_addr;
		tr_req[tr_idx].icnt0 = tr0_cnt0;
		tr_req[tr_idx].icnt1 = tr0_cnt1;
		tr_req[tr_idx].dim1 = tr0_cnt0;
		tr_idx++;

		if (num_tr == 2) {
			/* Second TR covers the remainder of a long entry */
			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
				      false, false,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);

			tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
			tr_req[tr_idx].icnt0 = tr1_cnt0;
			tr_req[tr_idx].icnt1 = 1;
			tr_req[tr_idx].dim1 = tr1_cnt0;
			tr_idx++;
		}

		d->residue += sg_dma_len(sgent);
	}

	/* Mark the last TR as end-of-packet so completion is reported */
	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);

	return d;
}
295825dcb5ddSPeter Ujfalusi 
/*
 * udma_prep_slave_sg_triggered_tr - build a TR (type15) descriptor for a
 * trigger-paced slave scatter-gather transfer.
 *
 * Used for channels configured with a tr_trigger_type: each TR is armed to
 * advance on an external trigger (CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC), moving
 * one "trigger_size" chunk (tr_cnt0 * tr_cnt1 bytes) per trigger event.
 * Every SG entry must be a whole multiple of trigger_size.  An SG entry
 * needing >= SZ_64K triggers is split into two TRs because icnt2 is a
 * 16-bit field (split computed by udma_get_tr_counters()).
 *
 * Returns the prepared udma_desc, or NULL on any validation/allocation
 * failure (partially built descriptors are freed before returning).
 */
static struct udma_desc *
udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
				unsigned int sglen,
				enum dma_transfer_direction dir,
				unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct cppi5_tr_type15_t *tr_req = NULL;
	enum dma_slave_buswidth dev_width;
	u32 csf = CPPI5_TR_CSF_SUPR_EVT;
	u16 tr_cnt0, tr_cnt1;
	dma_addr_t dev_addr;
	struct udma_desc *d;
	unsigned int i;
	size_t tr_size, sg_len;
	int num_tr = 0;
	int tr_idx = 0;
	u32 burst, trigger_size, port_window;
	u64 asel;

	/* Pick the device-side address/width/burst for the given direction */
	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = uc->cfg.src_addr;
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
		port_window = uc->cfg.src_port_window_size;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = uc->cfg.dst_addr;
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
		port_window = uc->cfg.dst_port_window_size;
	} else {
		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	/*
	 * With a port window the whole window (dev_width * port_window bytes)
	 * is moved as one inner-loop iteration; otherwise one element per
	 * inner iteration, burst elements per trigger.
	 */
	if (port_window) {
		if (port_window != burst) {
			dev_err(uc->ud->dev,
				"The burst must be equal to port_window\n");
			return NULL;
		}

		tr_cnt0 = dev_width * port_window;
		tr_cnt1 = 1;
	} else {
		tr_cnt0 = dev_width;
		tr_cnt1 = burst;
	}
	trigger_size = tr_cnt0 * tr_cnt1;

	/* estimate the number of TRs we will need */
	for_each_sg(sgl, sgent, sglen, i) {
		sg_len = sg_dma_len(sgent);

		if (sg_len % trigger_size) {
			dev_err(uc->ud->dev,
				"Not aligned SG entry (%zu for %u)\n", sg_len,
				trigger_size);
			return NULL;
		}

		/* icnt2 is 16 bits wide: two TRs needed above SZ_64K triggers */
		if (sg_len / trigger_size < SZ_64K)
			num_tr++;
		else
			num_tr += 2;
	}

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type15_t);
	d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
	if (!d)
		return NULL;

	d->sglen = sglen;

	if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
		asel = 0;
		/* NOTE(review): EOL at icnt0 boundary on UDMA only — presumably
		 * a HW requirement for this DMA type; confirm against TRM. */
		csf |= CPPI5_TR_CSF_EOL_ICNT0;
	} else {
		/* Non-UDMA types carry the address-select bits in the pointers */
		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
		dev_addr |= asel;
	}

	tr_req = d->hwdesc[0].tr_req_base;
	for_each_sg(sgl, sgent, sglen, i) {
		u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
		dma_addr_t sg_addr = sg_dma_address(sgent);

		sg_len = sg_dma_len(sgent);
		/* Split the trigger count into icnt2/icnt3 (and a second TR) */
		num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
					      &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
		if (num_tr < 0) {
			dev_err(uc->ud->dev, "size %zu is not supported\n",
				sg_len);
			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
			      true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
		cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
				     uc->config.tr_trigger_type,
				     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);

		sg_addr |= asel;
		/*
		 * Device side: fixed address, dim1 = -tr_cnt0 (re-read the same
		 * window).  Memory side: advances by trigger_size per icnt2.
		 */
		if (dir == DMA_DEV_TO_MEM) {
			tr_req[tr_idx].addr = dev_addr;
			tr_req[tr_idx].icnt0 = tr_cnt0;
			tr_req[tr_idx].icnt1 = tr_cnt1;
			tr_req[tr_idx].icnt2 = tr0_cnt2;
			tr_req[tr_idx].icnt3 = tr0_cnt3;
			tr_req[tr_idx].dim1 = (-1) * tr_cnt0;

			tr_req[tr_idx].daddr = sg_addr;
			tr_req[tr_idx].dicnt0 = tr_cnt0;
			tr_req[tr_idx].dicnt1 = tr_cnt1;
			tr_req[tr_idx].dicnt2 = tr0_cnt2;
			tr_req[tr_idx].dicnt3 = tr0_cnt3;
			tr_req[tr_idx].ddim1 = tr_cnt0;
			tr_req[tr_idx].ddim2 = trigger_size;
			tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
		} else {
			tr_req[tr_idx].addr = sg_addr;
			tr_req[tr_idx].icnt0 = tr_cnt0;
			tr_req[tr_idx].icnt1 = tr_cnt1;
			tr_req[tr_idx].icnt2 = tr0_cnt2;
			tr_req[tr_idx].icnt3 = tr0_cnt3;
			tr_req[tr_idx].dim1 = tr_cnt0;
			tr_req[tr_idx].dim2 = trigger_size;
			tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;

			tr_req[tr_idx].daddr = dev_addr;
			tr_req[tr_idx].dicnt0 = tr_cnt0;
			tr_req[tr_idx].dicnt1 = tr_cnt1;
			tr_req[tr_idx].dicnt2 = tr0_cnt2;
			tr_req[tr_idx].dicnt3 = tr0_cnt3;
			tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
		}

		tr_idx++;

		/* Second TR covers the remainder that did not fit in icnt2/3 */
		if (num_tr == 2) {
			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
				      false, true,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
			cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
			cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
					     uc->config.tr_trigger_type,
					     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
					     0, 0);

			/* Continue from where the first TR finished */
			sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
			if (dir == DMA_DEV_TO_MEM) {
				tr_req[tr_idx].addr = dev_addr;
				tr_req[tr_idx].icnt0 = tr_cnt0;
				tr_req[tr_idx].icnt1 = tr_cnt1;
				tr_req[tr_idx].icnt2 = tr1_cnt2;
				tr_req[tr_idx].icnt3 = 1;
				tr_req[tr_idx].dim1 = (-1) * tr_cnt0;

				tr_req[tr_idx].daddr = sg_addr;
				tr_req[tr_idx].dicnt0 = tr_cnt0;
				tr_req[tr_idx].dicnt1 = tr_cnt1;
				tr_req[tr_idx].dicnt2 = tr1_cnt2;
				tr_req[tr_idx].dicnt3 = 1;
				tr_req[tr_idx].ddim1 = tr_cnt0;
				tr_req[tr_idx].ddim2 = trigger_size;
			} else {
				tr_req[tr_idx].addr = sg_addr;
				tr_req[tr_idx].icnt0 = tr_cnt0;
				tr_req[tr_idx].icnt1 = tr_cnt1;
				tr_req[tr_idx].icnt2 = tr1_cnt2;
				tr_req[tr_idx].icnt3 = 1;
				tr_req[tr_idx].dim1 = tr_cnt0;
				tr_req[tr_idx].dim2 = trigger_size;

				tr_req[tr_idx].daddr = dev_addr;
				tr_req[tr_idx].dicnt0 = tr_cnt0;
				tr_req[tr_idx].dicnt1 = tr_cnt1;
				tr_req[tr_idx].dicnt2 = tr1_cnt2;
				tr_req[tr_idx].dicnt3 = 1;
				tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
			}
			tr_idx++;
		}

		d->residue += sg_len;
	}

	/* Only the last TR raises an end-of-packet event */
	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP);

	return d;
}
315701779473SPeter Ujfalusi 
/*
 * udma_configure_statictr - fill in the static TR parameters used by the
 * remote PDMA (X/Y endpoints only).
 *
 * Translates the slave bus width into the element size (ES) encoding,
 * records the element count, and — when the PDMA has to close the packet
 * (packet mode, or non-cyclic TR mode) — derives the burst count (Z) from
 * the prepared descriptor's total/period size.
 *
 * Returns 0 on success, -EINVAL for an unsupported bus width or when the
 * computed burst count exceeds the hardware's Z field mask for RX.
 */
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
				   enum dma_slave_buswidth dev_width,
				   u16 elcnt)
{
	unsigned int div;
	u8 elsize;

	/* Static TR only applies to PDMA X/Y endpoints */
	if (uc->config.ep_type != PSIL_EP_PDMA_XY)
		return 0;

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		elsize = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		elsize = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_3_BYTES:
		elsize = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		elsize = 3;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		elsize = 4;
		break;
	default: /* not reached */
		return -EINVAL;
	}

	d->static_tr.elsize = elsize;
	d->static_tr.elcnt = elcnt;

	/*
	 * PDMA must close the packet when the channel is in packet mode.
	 * For TR mode when the channel is not cyclic we also need PDMA to
	 * close the packet, otherwise the transfer stalls because PDMA holds
	 * on to the data it has received from the peripheral.  Cyclic TR mode
	 * needs no closing: leave the burst count at zero.
	 */
	if (!uc->config.pkt_mode && uc->cyclic) {
		d->static_tr.bstcnt = 0;
		return 0;
	}

	div = dev_width * elcnt;
	if (uc->cyclic)
		d->static_tr.bstcnt = d->residue / d->sglen / div;
	else
		d->static_tr.bstcnt = d->residue / div;

	/* The Z (burst count) field is limited on the RX side */
	if (uc->config.dir == DMA_DEV_TO_MEM &&
	    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
		return -EINVAL;

	return 0;
}
321125dcb5ddSPeter Ujfalusi 
/*
 * udma_prep_slave_sg_pkt - build a packet-mode (host descriptor) chain for a
 * slave scatter-gather transfer.
 *
 * Allocates one CPPI5 host descriptor per SG entry from the channel's
 * descriptor pool.  The first descriptor carries the packet header (flow and
 * packet IDs, return policy, total packet length); each following descriptor
 * is linked in as a host buffer descriptor.  On any allocation failure the
 * partially built chain is freed and NULL is returned.
 */
static struct udma_desc *
udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long tx_flags, void *context)
{
	struct scatterlist *sgent;
	struct cppi5_host_desc_t *h_desc = NULL;
	struct udma_desc *d;
	u32 ring_id;
	unsigned int i;
	u64 asel;

	d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->sglen = sglen;
	d->hwdesc_count = sglen;

	/* Completion ring: receive flow ring for RX, tx-completion ring for TX */
	if (dir == DMA_DEV_TO_MEM)
		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	else
		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);

	/* Non-UDMA types carry address-select bits in buffer/link pointers */
	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
		asel = 0;
	else
		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;

	for_each_sg(sgl, sgent, sglen, i) {
		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
		dma_addr_t sg_addr = sg_dma_address(sgent);
		struct cppi5_host_desc_t *desc;
		size_t sg_len = sg_dma_len(sgent);

		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
						GFP_NOWAIT,
						&hwdesc->cppi5_desc_paddr);
		if (!hwdesc->cppi5_desc_vaddr) {
			dev_err(uc->ud->dev,
				"descriptor%d allocation failed\n", i);

			udma_free_hwdesc(uc, d);
			kfree(d);
			return NULL;
		}

		d->residue += sg_len;
		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
		desc = hwdesc->cppi5_desc_vaddr;

		if (i == 0) {
			/* Head descriptor: holds the packet-level metadata */
			cppi5_hdesc_init(desc, 0, 0);
			/* Flow and Packed ID */
			cppi5_desc_set_pktids(&desc->hdr, uc->id,
					      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
			cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
		} else {
			/* Linked host-buffer descriptor, no own return ring */
			cppi5_hdesc_reset_hbdesc(desc);
			cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
		}

		/* attach the sg buffer to the descriptor */
		sg_addr |= asel;
		cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);

		/* Attach link as host buffer descriptor */
		if (h_desc)
			cppi5_hdesc_link_hbdesc(h_desc,
						hwdesc->cppi5_desc_paddr | asel);

		/*
		 * NOTE(review): for UDMA RX, h_desc stays at the head so all
		 * buffers link off the first descriptor; PKTDMA and TX link
		 * each descriptor to the previous one — confirm against TRM.
		 */
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
		    dir == DMA_MEM_TO_DEV)
			h_desc = desc;
	}

	/* Packet length field is limited to below 4M total */
	if (d->residue >= SZ_4M) {
		dev_err(uc->ud->dev,
			"%s: Transfer size %u is over the supported 4M range\n",
			__func__, d->residue);
		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	cppi5_hdesc_set_pktlen(h_desc, d->residue);

	return d;
}
330225dcb5ddSPeter Ujfalusi 
/*
 * udma_attach_metadata - dmaengine metadata_ops .attach callback.
 *
 * Copies client metadata into the head host descriptor's EPIB/psdata area
 * (TX only) and records the metadata pointer/length on the udma_desc.
 * Only valid for packet mode channels that advertise a metadata size.
 */
static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
				void *data, size_t len)
{
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct udma_desc *d = to_udma_desc(desc);
	struct cppi5_host_desc_t *h_desc;
	u32 psd_size = len;
	u32 flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (!data || len > uc->config.metadata_size)
		return -EINVAL;

	/* With EPIB in use the metadata must at least cover the EPIB words */
	if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;
	/* Only TX carries the metadata out in the descriptor itself */
	if (d->dir == DMA_MEM_TO_DEV)
		memcpy(h_desc->epib, data, len);

	if (uc->config.needs_epib) {
		/* EPIB occupies the start of the area; rest is psdata */
		psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
		flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
	}

	d->metadata = data;
	d->metadata_size = len;

	cppi5_hdesc_update_flags(h_desc, flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd_size);

	return 0;
}
333825dcb5ddSPeter Ujfalusi 
/*
 * udma_get_metadata_ptr - dmaengine metadata_ops .get_ptr callback.
 *
 * Returns a pointer to the head descriptor's EPIB area and reports the
 * currently valid payload length (EPIB size, if present, plus psdata size)
 * and the channel's maximum metadata size.
 */
static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				   size_t *payload_len, size_t *max_len)
{
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct udma_desc *d = to_udma_desc(desc);
	struct cppi5_host_desc_t *h_desc;
	size_t len;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return ERR_PTR(-ENOTSUPP);

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	len = cppi5_hdesc_get_psdata_size(h_desc);
	if (cppi5_hdesc_epib_present(&h_desc->hdr))
		len += CPPI5_INFO0_HDESC_EPIB_SIZE;

	*max_len = uc->config.metadata_size;
	*payload_len = len;

	return h_desc->epib;
}
335925dcb5ddSPeter Ujfalusi 
/*
 * udma_set_metadata_len - dmaengine metadata_ops .set_len callback.
 *
 * Updates the head descriptor's EPIB-present flag and psdata size to match
 * the payload length the client has written via the .get_ptr pointer.
 */
static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
				 size_t payload_len)
{
	struct udma_chan *uc = to_udma_chan(desc->chan);
	struct udma_desc *d = to_udma_desc(desc);
	struct cppi5_host_desc_t *h_desc;
	u32 psd = payload_len;
	u32 hdr_flags = 0;

	if (!uc->config.pkt_mode || !uc->config.metadata_size)
		return -ENOTSUPP;

	if (payload_len > uc->config.metadata_size)
		return -EINVAL;

	/* With EPIB in use the payload must at least cover the EPIB words */
	if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
		return -EINVAL;

	h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	if (uc->config.needs_epib) {
		/* EPIB occupies the start of the area; rest is psdata */
		psd -= CPPI5_INFO0_HDESC_EPIB_SIZE;
		hdr_flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
	}

	cppi5_hdesc_update_flags(h_desc, hdr_flags);
	cppi5_hdesc_update_psdata_size(h_desc, psd);

	return 0;
}
339025dcb5ddSPeter Ujfalusi 
/* dmaengine client metadata operations, attached to packet-mode descriptors */
static struct dma_descriptor_metadata_ops metadata_ops = {
	.attach = udma_attach_metadata,
	.get_ptr = udma_get_metadata_ptr,
	.set_len = udma_set_metadata_len,
};
339625dcb5ddSPeter Ujfalusi 
/*
 * udma_prep_slave_sg - dmaengine device_prep_slave_sg callback.
 *
 * Dispatches to the packet-mode, plain TR, or triggered-TR descriptor
 * builder based on the channel configuration, then finalizes the common
 * descriptor state (direction, indices, static TR for remote PDMA,
 * metadata ops) and hands the descriptor to virt-dma.
 */
static struct dma_async_tx_descriptor *
udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		   unsigned int sglen, enum dma_transfer_direction dir,
		   unsigned long tx_flags, void *context)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct udma_desc *d;
	u32 burst;

	/*
	 * NOTE(review): because of the &&, a direction mismatch is only
	 * rejected for non-triggered MEM_TO_MEM channels; a mismatch on a
	 * slave-configured channel passes through here — presumably handled
	 * elsewhere, confirm.
	 */
	if (dir != uc->config.dir &&
	    (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
		dev_err(chan->device->dev,
			"%s: chan%d is for %s, not supporting %s\n",
			__func__, uc->id,
			dmaengine_get_direction_text(uc->config.dir),
			dmaengine_get_direction_text(dir));
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM) {
		dev_width = uc->cfg.src_addr_width;
		burst = uc->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_width = uc->cfg.dst_addr_width;
		burst = uc->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!burst)
		burst = 1;

	/* Remember the prep flags on the channel for later processing */
	uc->config.tx_flags = tx_flags;

	if (uc->config.pkt_mode)
		d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
					   context);
	else if (is_slave_direction(uc->config.dir))
		d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
					  context);
	else
		/* MEM_TO_MEM configured channel with a trigger type */
		d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
						    tx_flags, context);

	if (!d)
		return NULL;

	d->dir = dir;
	d->desc_idx = 0;
	d->tr_idx = 0;

	/* static TR for remote PDMA */
	if (udma_configure_statictr(uc, d, dev_width, burst)) {
		dev_err(uc->ud->dev,
			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
			__func__, d->static_tr.bstcnt);

		udma_free_hwdesc(uc, d);
		kfree(d);
		return NULL;
	}

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
}
346625dcb5ddSPeter Ujfalusi 
/*
 * udma_prep_dma_cyclic_tr - build a TR (type1) descriptor for a cyclic
 * transfer.
 *
 * Each period is described by one or two type1 TRs: udma_get_tr_counters()
 * splits period_len into icnt0/icnt1 counts (two TRs when the length does
 * not fit a single TR), using __ffs(buf_addr) to bound icnt0 by the buffer
 * alignment.  When DMA_PREP_INTERRUPT is not requested, the period's last
 * TR has its completion event suppressed.
 */
static struct udma_desc *
udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
			size_t buf_len, size_t period_len,
			enum dma_transfer_direction dir, unsigned long flags)
{
	struct udma_desc *d;
	size_t tr_size, period_addr;
	struct cppi5_tr_type1_t *tr_req;
	unsigned int periods = buf_len / period_len;
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	unsigned int i;
	int num_tr;

	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
				      &tr0_cnt1, &tr1_cnt0);
	if (num_tr < 0) {
		dev_err(uc->ud->dev, "size %zu is not supported\n",
			period_len);
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	tr_size = sizeof(struct cppi5_tr_type1_t);
	d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
	if (!d)
		return NULL;

	tr_req = d->hwdesc[0].tr_req_base;
	/* Non-UDMA types carry the address-select bits in the pointer */
	if (uc->ud->match_data->type == DMA_TYPE_UDMA)
		period_addr = buf_addr;
	else
		period_addr = buf_addr |
			((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);

	for (i = 0; i < periods; i++) {
		int tr_idx = i * num_tr;

		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);

		tr_req[tr_idx].addr = period_addr;
		tr_req[tr_idx].icnt0 = tr0_cnt0;
		tr_req[tr_idx].icnt1 = tr0_cnt1;
		tr_req[tr_idx].dim1 = tr0_cnt0;

		if (num_tr == 2) {
			/* Only the period's last TR should raise the event */
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);
			tr_idx++;

			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
				      false, false,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);

			/* Remainder of the period after the first TR */
			tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
			tr_req[tr_idx].icnt0 = tr1_cnt0;
			tr_req[tr_idx].icnt1 = 1;
			tr_req[tr_idx].dim1 = tr1_cnt0;
		}

		/* No per-period interrupt requested: suppress the event too */
		if (!(flags & DMA_PREP_INTERRUPT))
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);

		period_addr += period_len;
	}

	return d;
}
353625dcb5ddSPeter Ujfalusi 
353725dcb5ddSPeter Ujfalusi static struct udma_desc *
udma_prep_dma_cyclic_pkt(struct udma_chan * uc,dma_addr_t buf_addr,size_t buf_len,size_t period_len,enum dma_transfer_direction dir,unsigned long flags)353825dcb5ddSPeter Ujfalusi udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
353925dcb5ddSPeter Ujfalusi 			 size_t buf_len, size_t period_len,
354025dcb5ddSPeter Ujfalusi 			 enum dma_transfer_direction dir, unsigned long flags)
354125dcb5ddSPeter Ujfalusi {
354225dcb5ddSPeter Ujfalusi 	struct udma_desc *d;
354325dcb5ddSPeter Ujfalusi 	u32 ring_id;
354425dcb5ddSPeter Ujfalusi 	int i;
354525dcb5ddSPeter Ujfalusi 	int periods = buf_len / period_len;
354625dcb5ddSPeter Ujfalusi 
354725dcb5ddSPeter Ujfalusi 	if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
354825dcb5ddSPeter Ujfalusi 		return NULL;
354925dcb5ddSPeter Ujfalusi 
355025dcb5ddSPeter Ujfalusi 	if (period_len >= SZ_4M)
355125dcb5ddSPeter Ujfalusi 		return NULL;
355225dcb5ddSPeter Ujfalusi 
3553ace52a8cSGustavo A. R. Silva 	d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
355425dcb5ddSPeter Ujfalusi 	if (!d)
355525dcb5ddSPeter Ujfalusi 		return NULL;
355625dcb5ddSPeter Ujfalusi 
355725dcb5ddSPeter Ujfalusi 	d->hwdesc_count = periods;
355825dcb5ddSPeter Ujfalusi 
355925dcb5ddSPeter Ujfalusi 	/* TODO: re-check this... */
356025dcb5ddSPeter Ujfalusi 	if (dir == DMA_DEV_TO_MEM)
356125dcb5ddSPeter Ujfalusi 		ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
356225dcb5ddSPeter Ujfalusi 	else
356325dcb5ddSPeter Ujfalusi 		ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
356425dcb5ddSPeter Ujfalusi 
3565d2abc982SPeter Ujfalusi 	if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3566d2abc982SPeter Ujfalusi 		buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3567d2abc982SPeter Ujfalusi 
356825dcb5ddSPeter Ujfalusi 	for (i = 0; i < periods; i++) {
356925dcb5ddSPeter Ujfalusi 		struct udma_hwdesc *hwdesc = &d->hwdesc[i];
357025dcb5ddSPeter Ujfalusi 		dma_addr_t period_addr = buf_addr + (period_len * i);
357125dcb5ddSPeter Ujfalusi 		struct cppi5_host_desc_t *h_desc;
357225dcb5ddSPeter Ujfalusi 
357325dcb5ddSPeter Ujfalusi 		hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
357425dcb5ddSPeter Ujfalusi 						GFP_NOWAIT,
357525dcb5ddSPeter Ujfalusi 						&hwdesc->cppi5_desc_paddr);
357625dcb5ddSPeter Ujfalusi 		if (!hwdesc->cppi5_desc_vaddr) {
357725dcb5ddSPeter Ujfalusi 			dev_err(uc->ud->dev,
357825dcb5ddSPeter Ujfalusi 				"descriptor%d allocation failed\n", i);
357925dcb5ddSPeter Ujfalusi 
358025dcb5ddSPeter Ujfalusi 			udma_free_hwdesc(uc, d);
358125dcb5ddSPeter Ujfalusi 			kfree(d);
358225dcb5ddSPeter Ujfalusi 			return NULL;
358325dcb5ddSPeter Ujfalusi 		}
358425dcb5ddSPeter Ujfalusi 
358525dcb5ddSPeter Ujfalusi 		hwdesc->cppi5_desc_size = uc->config.hdesc_size;
358625dcb5ddSPeter Ujfalusi 		h_desc = hwdesc->cppi5_desc_vaddr;
358725dcb5ddSPeter Ujfalusi 
358825dcb5ddSPeter Ujfalusi 		cppi5_hdesc_init(h_desc, 0, 0);
358925dcb5ddSPeter Ujfalusi 		cppi5_hdesc_set_pktlen(h_desc, period_len);
359025dcb5ddSPeter Ujfalusi 
359125dcb5ddSPeter Ujfalusi 		/* Flow and Packed ID */
359225dcb5ddSPeter Ujfalusi 		cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
359325dcb5ddSPeter Ujfalusi 				      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
359425dcb5ddSPeter Ujfalusi 		cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
359525dcb5ddSPeter Ujfalusi 
359625dcb5ddSPeter Ujfalusi 		/* attach each period to a new descriptor */
359725dcb5ddSPeter Ujfalusi 		cppi5_hdesc_attach_buf(h_desc,
359825dcb5ddSPeter Ujfalusi 				       period_addr, period_len,
359925dcb5ddSPeter Ujfalusi 				       period_addr, period_len);
360025dcb5ddSPeter Ujfalusi 	}
360125dcb5ddSPeter Ujfalusi 
360225dcb5ddSPeter Ujfalusi 	return d;
360325dcb5ddSPeter Ujfalusi }
360425dcb5ddSPeter Ujfalusi 
360525dcb5ddSPeter Ujfalusi static struct dma_async_tx_descriptor *
udma_prep_dma_cyclic(struct dma_chan * chan,dma_addr_t buf_addr,size_t buf_len,size_t period_len,enum dma_transfer_direction dir,unsigned long flags)360625dcb5ddSPeter Ujfalusi udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
360725dcb5ddSPeter Ujfalusi 		     size_t period_len, enum dma_transfer_direction dir,
360825dcb5ddSPeter Ujfalusi 		     unsigned long flags)
360925dcb5ddSPeter Ujfalusi {
361025dcb5ddSPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
361125dcb5ddSPeter Ujfalusi 	enum dma_slave_buswidth dev_width;
361225dcb5ddSPeter Ujfalusi 	struct udma_desc *d;
361325dcb5ddSPeter Ujfalusi 	u32 burst;
361425dcb5ddSPeter Ujfalusi 
361525dcb5ddSPeter Ujfalusi 	if (dir != uc->config.dir) {
361625dcb5ddSPeter Ujfalusi 		dev_err(chan->device->dev,
361725dcb5ddSPeter Ujfalusi 			"%s: chan%d is for %s, not supporting %s\n",
361825dcb5ddSPeter Ujfalusi 			__func__, uc->id,
361925dcb5ddSPeter Ujfalusi 			dmaengine_get_direction_text(uc->config.dir),
362025dcb5ddSPeter Ujfalusi 			dmaengine_get_direction_text(dir));
362125dcb5ddSPeter Ujfalusi 		return NULL;
362225dcb5ddSPeter Ujfalusi 	}
362325dcb5ddSPeter Ujfalusi 
362425dcb5ddSPeter Ujfalusi 	uc->cyclic = true;
362525dcb5ddSPeter Ujfalusi 
362625dcb5ddSPeter Ujfalusi 	if (dir == DMA_DEV_TO_MEM) {
362725dcb5ddSPeter Ujfalusi 		dev_width = uc->cfg.src_addr_width;
362825dcb5ddSPeter Ujfalusi 		burst = uc->cfg.src_maxburst;
362925dcb5ddSPeter Ujfalusi 	} else if (dir == DMA_MEM_TO_DEV) {
363025dcb5ddSPeter Ujfalusi 		dev_width = uc->cfg.dst_addr_width;
363125dcb5ddSPeter Ujfalusi 		burst = uc->cfg.dst_maxburst;
363225dcb5ddSPeter Ujfalusi 	} else {
363325dcb5ddSPeter Ujfalusi 		dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
363425dcb5ddSPeter Ujfalusi 		return NULL;
363525dcb5ddSPeter Ujfalusi 	}
363625dcb5ddSPeter Ujfalusi 
363725dcb5ddSPeter Ujfalusi 	if (!burst)
363825dcb5ddSPeter Ujfalusi 		burst = 1;
363925dcb5ddSPeter Ujfalusi 
364025dcb5ddSPeter Ujfalusi 	if (uc->config.pkt_mode)
364125dcb5ddSPeter Ujfalusi 		d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
364225dcb5ddSPeter Ujfalusi 					     dir, flags);
364325dcb5ddSPeter Ujfalusi 	else
364425dcb5ddSPeter Ujfalusi 		d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
364525dcb5ddSPeter Ujfalusi 					    dir, flags);
364625dcb5ddSPeter Ujfalusi 
364725dcb5ddSPeter Ujfalusi 	if (!d)
364825dcb5ddSPeter Ujfalusi 		return NULL;
364925dcb5ddSPeter Ujfalusi 
365025dcb5ddSPeter Ujfalusi 	d->sglen = buf_len / period_len;
365125dcb5ddSPeter Ujfalusi 
365225dcb5ddSPeter Ujfalusi 	d->dir = dir;
365325dcb5ddSPeter Ujfalusi 	d->residue = buf_len;
365425dcb5ddSPeter Ujfalusi 
365525dcb5ddSPeter Ujfalusi 	/* static TR for remote PDMA */
365625dcb5ddSPeter Ujfalusi 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
365725dcb5ddSPeter Ujfalusi 		dev_err(uc->ud->dev,
36586c0157beSColin Ian King 			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
365925dcb5ddSPeter Ujfalusi 			__func__, d->static_tr.bstcnt);
366025dcb5ddSPeter Ujfalusi 
366125dcb5ddSPeter Ujfalusi 		udma_free_hwdesc(uc, d);
366225dcb5ddSPeter Ujfalusi 		kfree(d);
366325dcb5ddSPeter Ujfalusi 		return NULL;
366425dcb5ddSPeter Ujfalusi 	}
366525dcb5ddSPeter Ujfalusi 
366625dcb5ddSPeter Ujfalusi 	if (uc->config.metadata_size)
366725dcb5ddSPeter Ujfalusi 		d->vd.tx.metadata_ops = &metadata_ops;
366825dcb5ddSPeter Ujfalusi 
366925dcb5ddSPeter Ujfalusi 	return vchan_tx_prep(&uc->vc, &d->vd, flags);
367025dcb5ddSPeter Ujfalusi }
367125dcb5ddSPeter Ujfalusi 
367225dcb5ddSPeter Ujfalusi static struct dma_async_tx_descriptor *
udma_prep_dma_memcpy(struct dma_chan * chan,dma_addr_t dest,dma_addr_t src,size_t len,unsigned long tx_flags)367325dcb5ddSPeter Ujfalusi udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
367425dcb5ddSPeter Ujfalusi 		     size_t len, unsigned long tx_flags)
367525dcb5ddSPeter Ujfalusi {
367625dcb5ddSPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
367725dcb5ddSPeter Ujfalusi 	struct udma_desc *d;
367825dcb5ddSPeter Ujfalusi 	struct cppi5_tr_type15_t *tr_req;
367925dcb5ddSPeter Ujfalusi 	int num_tr;
368025dcb5ddSPeter Ujfalusi 	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
368125dcb5ddSPeter Ujfalusi 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3682f806bea3SVignesh Raghavendra 	u32 csf = CPPI5_TR_CSF_SUPR_EVT;
368325dcb5ddSPeter Ujfalusi 
368425dcb5ddSPeter Ujfalusi 	if (uc->config.dir != DMA_MEM_TO_MEM) {
368525dcb5ddSPeter Ujfalusi 		dev_err(chan->device->dev,
368625dcb5ddSPeter Ujfalusi 			"%s: chan%d is for %s, not supporting %s\n",
368725dcb5ddSPeter Ujfalusi 			__func__, uc->id,
368825dcb5ddSPeter Ujfalusi 			dmaengine_get_direction_text(uc->config.dir),
368925dcb5ddSPeter Ujfalusi 			dmaengine_get_direction_text(DMA_MEM_TO_MEM));
369025dcb5ddSPeter Ujfalusi 		return NULL;
369125dcb5ddSPeter Ujfalusi 	}
369225dcb5ddSPeter Ujfalusi 
3693a9793407SPeter Ujfalusi 	num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3694a9793407SPeter Ujfalusi 				      &tr0_cnt1, &tr1_cnt0);
3695a9793407SPeter Ujfalusi 	if (num_tr < 0) {
369625dcb5ddSPeter Ujfalusi 		dev_err(uc->ud->dev, "size %zu is not supported\n",
369725dcb5ddSPeter Ujfalusi 			len);
369825dcb5ddSPeter Ujfalusi 		return NULL;
369925dcb5ddSPeter Ujfalusi 	}
370025dcb5ddSPeter Ujfalusi 
370125dcb5ddSPeter Ujfalusi 	d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
370225dcb5ddSPeter Ujfalusi 	if (!d)
370325dcb5ddSPeter Ujfalusi 		return NULL;
370425dcb5ddSPeter Ujfalusi 
370525dcb5ddSPeter Ujfalusi 	d->dir = DMA_MEM_TO_MEM;
370625dcb5ddSPeter Ujfalusi 	d->desc_idx = 0;
370725dcb5ddSPeter Ujfalusi 	d->tr_idx = 0;
370825dcb5ddSPeter Ujfalusi 	d->residue = len;
370925dcb5ddSPeter Ujfalusi 
371001779473SPeter Ujfalusi 	if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
371101779473SPeter Ujfalusi 		src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
371201779473SPeter Ujfalusi 		dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3713f806bea3SVignesh Raghavendra 	} else {
3714f806bea3SVignesh Raghavendra 		csf |= CPPI5_TR_CSF_EOL_ICNT0;
371501779473SPeter Ujfalusi 	}
371601779473SPeter Ujfalusi 
371725dcb5ddSPeter Ujfalusi 	tr_req = d->hwdesc[0].tr_req_base;
371825dcb5ddSPeter Ujfalusi 
371925dcb5ddSPeter Ujfalusi 	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
372025dcb5ddSPeter Ujfalusi 		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3721f806bea3SVignesh Raghavendra 	cppi5_tr_csf_set(&tr_req[0].flags, csf);
372225dcb5ddSPeter Ujfalusi 
372325dcb5ddSPeter Ujfalusi 	tr_req[0].addr = src;
372425dcb5ddSPeter Ujfalusi 	tr_req[0].icnt0 = tr0_cnt0;
372525dcb5ddSPeter Ujfalusi 	tr_req[0].icnt1 = tr0_cnt1;
372625dcb5ddSPeter Ujfalusi 	tr_req[0].icnt2 = 1;
372725dcb5ddSPeter Ujfalusi 	tr_req[0].icnt3 = 1;
372825dcb5ddSPeter Ujfalusi 	tr_req[0].dim1 = tr0_cnt0;
372925dcb5ddSPeter Ujfalusi 
373025dcb5ddSPeter Ujfalusi 	tr_req[0].daddr = dest;
373125dcb5ddSPeter Ujfalusi 	tr_req[0].dicnt0 = tr0_cnt0;
373225dcb5ddSPeter Ujfalusi 	tr_req[0].dicnt1 = tr0_cnt1;
373325dcb5ddSPeter Ujfalusi 	tr_req[0].dicnt2 = 1;
373425dcb5ddSPeter Ujfalusi 	tr_req[0].dicnt3 = 1;
373525dcb5ddSPeter Ujfalusi 	tr_req[0].ddim1 = tr0_cnt0;
373625dcb5ddSPeter Ujfalusi 
373725dcb5ddSPeter Ujfalusi 	if (num_tr == 2) {
373825dcb5ddSPeter Ujfalusi 		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
373925dcb5ddSPeter Ujfalusi 			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3740f806bea3SVignesh Raghavendra 		cppi5_tr_csf_set(&tr_req[1].flags, csf);
374125dcb5ddSPeter Ujfalusi 
374225dcb5ddSPeter Ujfalusi 		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
374325dcb5ddSPeter Ujfalusi 		tr_req[1].icnt0 = tr1_cnt0;
374425dcb5ddSPeter Ujfalusi 		tr_req[1].icnt1 = 1;
374525dcb5ddSPeter Ujfalusi 		tr_req[1].icnt2 = 1;
374625dcb5ddSPeter Ujfalusi 		tr_req[1].icnt3 = 1;
374725dcb5ddSPeter Ujfalusi 
374825dcb5ddSPeter Ujfalusi 		tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
374925dcb5ddSPeter Ujfalusi 		tr_req[1].dicnt0 = tr1_cnt0;
375025dcb5ddSPeter Ujfalusi 		tr_req[1].dicnt1 = 1;
375125dcb5ddSPeter Ujfalusi 		tr_req[1].dicnt2 = 1;
375225dcb5ddSPeter Ujfalusi 		tr_req[1].dicnt3 = 1;
375325dcb5ddSPeter Ujfalusi 	}
375425dcb5ddSPeter Ujfalusi 
3755f806bea3SVignesh Raghavendra 	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP);
375625dcb5ddSPeter Ujfalusi 
375725dcb5ddSPeter Ujfalusi 	if (uc->config.metadata_size)
375825dcb5ddSPeter Ujfalusi 		d->vd.tx.metadata_ops = &metadata_ops;
375925dcb5ddSPeter Ujfalusi 
376025dcb5ddSPeter Ujfalusi 	return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
376125dcb5ddSPeter Ujfalusi }
376225dcb5ddSPeter Ujfalusi 
udma_issue_pending(struct dma_chan * chan)376325dcb5ddSPeter Ujfalusi static void udma_issue_pending(struct dma_chan *chan)
376425dcb5ddSPeter Ujfalusi {
376525dcb5ddSPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
376625dcb5ddSPeter Ujfalusi 	unsigned long flags;
376725dcb5ddSPeter Ujfalusi 
376825dcb5ddSPeter Ujfalusi 	spin_lock_irqsave(&uc->vc.lock, flags);
376925dcb5ddSPeter Ujfalusi 
377025dcb5ddSPeter Ujfalusi 	/* If we have something pending and no active descriptor, then */
377125dcb5ddSPeter Ujfalusi 	if (vchan_issue_pending(&uc->vc) && !uc->desc) {
377225dcb5ddSPeter Ujfalusi 		/*
377325dcb5ddSPeter Ujfalusi 		 * start a descriptor if the channel is NOT [marked as
377425dcb5ddSPeter Ujfalusi 		 * terminating _and_ it is still running (teardown has not
377525dcb5ddSPeter Ujfalusi 		 * completed yet)].
377625dcb5ddSPeter Ujfalusi 		 */
377725dcb5ddSPeter Ujfalusi 		if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
377825dcb5ddSPeter Ujfalusi 		      udma_is_chan_running(uc)))
377925dcb5ddSPeter Ujfalusi 			udma_start(uc);
378025dcb5ddSPeter Ujfalusi 	}
378125dcb5ddSPeter Ujfalusi 
378225dcb5ddSPeter Ujfalusi 	spin_unlock_irqrestore(&uc->vc.lock, flags);
378325dcb5ddSPeter Ujfalusi }
378425dcb5ddSPeter Ujfalusi 
/*
 * udma_tx_status - dmaengine device_tx_status callback
 * @chan:	dmaengine channel
 * @cookie:	cookie of the descriptor to query
 * @txstate:	state to fill in (may be NULL)
 *
 * Reports completion state and, for the currently active descriptor,
 * computes the residue and in-flight byte count from the channel's
 * realtime byte counter registers.
 */
static enum dma_status udma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct udma_chan *uc = to_udma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&uc->vc.lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);

	/* A stopped channel cannot make further progress */
	if (!udma_is_chan_running(uc))
		ret = DMA_COMPLETE;

	if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
		ret = DMA_PAUSED;

	if (ret == DMA_COMPLETE || !txstate)
		goto out;

	if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
		u32 peer_bcnt = 0;
		u32 bcnt = 0;
		u32 residue = uc->desc->residue;
		u32 delay = 0;

		if (uc->desc->dir == DMA_MEM_TO_DEV) {
			/* Bytes the local endpoint has read from memory */
			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_tchanrt_read(uc,
						UDMA_CHAN_RT_PEER_BCNT_REG);

				/* Bytes read but not yet accepted by the peer */
				if (bcnt > peer_bcnt)
					delay = bcnt - peer_bcnt;
			}
		} else if (uc->desc->dir == DMA_DEV_TO_MEM) {
			/* Bytes the local endpoint has written to memory */
			bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

			if (uc->config.ep_type != PSIL_EP_NATIVE) {
				peer_bcnt = udma_rchanrt_read(uc,
						UDMA_CHAN_RT_PEER_BCNT_REG);

				/* Bytes sent by the peer but not yet landed */
				if (peer_bcnt > bcnt)
					delay = peer_bcnt - bcnt;
			}
		} else {
			bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		}

		/*
		 * bcnt is a free-running counter; reduce it modulo the
		 * descriptor's total size to get progress within the
		 * current (possibly repeating) descriptor.
		 */
		if (bcnt && !(bcnt % uc->desc->residue))
			residue = 0;
		else
			residue -= bcnt % uc->desc->residue;

		if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
			ret = DMA_COMPLETE;
			delay = 0;
		}

		dma_set_residue(txstate, residue);
		dma_set_in_flight_bytes(txstate, delay);

	} else {
		/* Not the active descriptor: it has already completed */
		ret = DMA_COMPLETE;
	}

out:
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	return ret;
}
385725dcb5ddSPeter Ujfalusi 
udma_pause(struct dma_chan * chan)385825dcb5ddSPeter Ujfalusi static int udma_pause(struct dma_chan *chan)
385925dcb5ddSPeter Ujfalusi {
386025dcb5ddSPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
386125dcb5ddSPeter Ujfalusi 
386225dcb5ddSPeter Ujfalusi 	/* pause the channel */
3863c7450bb2SPeter Ujfalusi 	switch (uc->config.dir) {
386425dcb5ddSPeter Ujfalusi 	case DMA_DEV_TO_MEM:
3865db375dcbSPeter Ujfalusi 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
386625dcb5ddSPeter Ujfalusi 					 UDMA_PEER_RT_EN_PAUSE,
386725dcb5ddSPeter Ujfalusi 					 UDMA_PEER_RT_EN_PAUSE);
386825dcb5ddSPeter Ujfalusi 		break;
386925dcb5ddSPeter Ujfalusi 	case DMA_MEM_TO_DEV:
3870db375dcbSPeter Ujfalusi 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
387125dcb5ddSPeter Ujfalusi 					 UDMA_PEER_RT_EN_PAUSE,
387225dcb5ddSPeter Ujfalusi 					 UDMA_PEER_RT_EN_PAUSE);
387325dcb5ddSPeter Ujfalusi 		break;
387425dcb5ddSPeter Ujfalusi 	case DMA_MEM_TO_MEM:
3875db375dcbSPeter Ujfalusi 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
387625dcb5ddSPeter Ujfalusi 					 UDMA_CHAN_RT_CTL_PAUSE,
387725dcb5ddSPeter Ujfalusi 					 UDMA_CHAN_RT_CTL_PAUSE);
387825dcb5ddSPeter Ujfalusi 		break;
387925dcb5ddSPeter Ujfalusi 	default:
388025dcb5ddSPeter Ujfalusi 		return -EINVAL;
388125dcb5ddSPeter Ujfalusi 	}
388225dcb5ddSPeter Ujfalusi 
388325dcb5ddSPeter Ujfalusi 	return 0;
388425dcb5ddSPeter Ujfalusi }
388525dcb5ddSPeter Ujfalusi 
udma_resume(struct dma_chan * chan)388625dcb5ddSPeter Ujfalusi static int udma_resume(struct dma_chan *chan)
388725dcb5ddSPeter Ujfalusi {
388825dcb5ddSPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
388925dcb5ddSPeter Ujfalusi 
389025dcb5ddSPeter Ujfalusi 	/* resume the channel */
3891c7450bb2SPeter Ujfalusi 	switch (uc->config.dir) {
389225dcb5ddSPeter Ujfalusi 	case DMA_DEV_TO_MEM:
3893db375dcbSPeter Ujfalusi 		udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
389425dcb5ddSPeter Ujfalusi 					 UDMA_PEER_RT_EN_PAUSE, 0);
389525dcb5ddSPeter Ujfalusi 
389625dcb5ddSPeter Ujfalusi 		break;
389725dcb5ddSPeter Ujfalusi 	case DMA_MEM_TO_DEV:
3898db375dcbSPeter Ujfalusi 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
389925dcb5ddSPeter Ujfalusi 					 UDMA_PEER_RT_EN_PAUSE, 0);
390025dcb5ddSPeter Ujfalusi 		break;
390125dcb5ddSPeter Ujfalusi 	case DMA_MEM_TO_MEM:
3902db375dcbSPeter Ujfalusi 		udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
390325dcb5ddSPeter Ujfalusi 					 UDMA_CHAN_RT_CTL_PAUSE, 0);
390425dcb5ddSPeter Ujfalusi 		break;
390525dcb5ddSPeter Ujfalusi 	default:
390625dcb5ddSPeter Ujfalusi 		return -EINVAL;
390725dcb5ddSPeter Ujfalusi 	}
390825dcb5ddSPeter Ujfalusi 
390925dcb5ddSPeter Ujfalusi 	return 0;
391025dcb5ddSPeter Ujfalusi }
391125dcb5ddSPeter Ujfalusi 
/*
 * udma_terminate_all - dmaengine device_terminate_all callback
 * @chan: dmaengine channel
 *
 * Stops the channel if it is running and releases all queued
 * descriptors. The active descriptor is not freed here; it is marked
 * terminated and handed off for teardown handling.
 */
static int udma_terminate_all(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&uc->vc.lock, flags);

	if (udma_is_chan_running(uc))
		udma_stop(uc);

	if (uc->desc) {
		/* Defer freeing of the in-flight descriptor to teardown */
		uc->terminated_desc = uc->desc;
		uc->desc = NULL;
		uc->terminated_desc->terminated = true;
		cancel_delayed_work(&uc->tx_drain.work);
	}

	uc->paused = false;

	/* Free the not-yet-issued descriptors outside of the lock */
	vchan_get_all_descriptors(&uc->vc, &head);
	spin_unlock_irqrestore(&uc->vc.lock, flags);
	vchan_dma_desc_free_list(&uc->vc, &head);

	return 0;
}
393825dcb5ddSPeter Ujfalusi 
/*
 * udma_synchronize - dmaengine device_synchronize callback
 * @chan: dmaengine channel
 *
 * Waits for any in-progress channel teardown to finish (up to 1s,
 * hard-resetting the channel on timeout), then resets the channel and
 * its rings so it is in a clean state for reuse.
 */
static void udma_synchronize(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	unsigned long timeout = msecs_to_jiffies(1000);

	vchan_synchronize(&uc->vc);

	if (uc->state == UDMA_CHAN_IS_TERMINATING) {
		timeout = wait_for_completion_timeout(&uc->teardown_completed,
						      timeout);
		if (!timeout) {
			/* Teardown did not complete; force a hard reset */
			dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
				 uc->id);
			udma_dump_chan_stdata(uc);
			udma_reset_chan(uc, true);
		}
	}

	udma_reset_chan(uc, false);
	if (udma_is_chan_running(uc))
		dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);

	cancel_delayed_work_sync(&uc->tx_drain.work);
	udma_reset_rings(uc);
}
396425dcb5ddSPeter Ujfalusi 
/*
 * udma_desc_pre_callback - prepare a descriptor for its client callback
 * @vc:		virtual channel the descriptor belongs to
 * @vd:		completed virtual descriptor (may be NULL)
 * @result:	dmaengine result to fill in (may be NULL)
 *
 * Fetches EPIB metadata if the descriptor carries any and derives the
 * dmaengine result: for host packet descriptors from the packet length,
 * for TR descriptors from the first TR response status.
 */
static void udma_desc_pre_callback(struct virt_dma_chan *vc,
				   struct virt_dma_desc *vd,
				   struct dmaengine_result *result)
{
	struct udma_chan *uc = to_udma_chan(&vc->chan);
	struct udma_desc *d;
	u8 status;

	if (!vd)
		return;

	d = to_udma_desc(&vd->tx);

	if (d->metadata_size)
		udma_fetch_epib(uc, d);

	if (result) {
		void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);

		if (cppi5_desc_get_type(desc_vaddr) ==
		    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
			/* Provide residue information for the client */
			result->residue = d->residue -
					  cppi5_hdesc_get_pktlen(desc_vaddr);
			/* Non-zero residue means the transfer was cut short */
			if (result->residue)
				result->result = DMA_TRANS_ABORTED;
			else
				result->result = DMA_TRANS_NOERROR;
		} else {
			result->residue = 0;
			/* Propagate TR Response errors to the client */
			status = d->hwdesc[0].tr_resp_base->status;
			if (status)
				result->result = DMA_TRANS_ABORTED;
			else
				result->result = DMA_TRANS_NOERROR;
		}
	}
}
400425dcb5ddSPeter Ujfalusi 
400525dcb5ddSPeter Ujfalusi /*
400625dcb5ddSPeter Ujfalusi  * This tasklet handles the completion of a DMA descriptor by
400725dcb5ddSPeter Ujfalusi  * calling its callback and freeing it.
400825dcb5ddSPeter Ujfalusi  */
udma_vchan_complete(struct tasklet_struct * t)40092fa9bc98SAllen Pais static void udma_vchan_complete(struct tasklet_struct *t)
401025dcb5ddSPeter Ujfalusi {
40112fa9bc98SAllen Pais 	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
401225dcb5ddSPeter Ujfalusi 	struct virt_dma_desc *vd, *_vd;
401325dcb5ddSPeter Ujfalusi 	struct dmaengine_desc_callback cb;
401425dcb5ddSPeter Ujfalusi 	LIST_HEAD(head);
401525dcb5ddSPeter Ujfalusi 
401625dcb5ddSPeter Ujfalusi 	spin_lock_irq(&vc->lock);
401725dcb5ddSPeter Ujfalusi 	list_splice_tail_init(&vc->desc_completed, &head);
401825dcb5ddSPeter Ujfalusi 	vd = vc->cyclic;
401925dcb5ddSPeter Ujfalusi 	if (vd) {
402025dcb5ddSPeter Ujfalusi 		vc->cyclic = NULL;
402125dcb5ddSPeter Ujfalusi 		dmaengine_desc_get_callback(&vd->tx, &cb);
402225dcb5ddSPeter Ujfalusi 	} else {
402325dcb5ddSPeter Ujfalusi 		memset(&cb, 0, sizeof(cb));
402425dcb5ddSPeter Ujfalusi 	}
402525dcb5ddSPeter Ujfalusi 	spin_unlock_irq(&vc->lock);
402625dcb5ddSPeter Ujfalusi 
402725dcb5ddSPeter Ujfalusi 	udma_desc_pre_callback(vc, vd, NULL);
402825dcb5ddSPeter Ujfalusi 	dmaengine_desc_callback_invoke(&cb, NULL);
402925dcb5ddSPeter Ujfalusi 
403025dcb5ddSPeter Ujfalusi 	list_for_each_entry_safe(vd, _vd, &head, node) {
403125dcb5ddSPeter Ujfalusi 		struct dmaengine_result result;
403225dcb5ddSPeter Ujfalusi 
403325dcb5ddSPeter Ujfalusi 		dmaengine_desc_get_callback(&vd->tx, &cb);
403425dcb5ddSPeter Ujfalusi 
403525dcb5ddSPeter Ujfalusi 		list_del(&vd->node);
403625dcb5ddSPeter Ujfalusi 
403725dcb5ddSPeter Ujfalusi 		udma_desc_pre_callback(vc, vd, &result);
403825dcb5ddSPeter Ujfalusi 		dmaengine_desc_callback_invoke(&cb, &result);
403925dcb5ddSPeter Ujfalusi 
404025dcb5ddSPeter Ujfalusi 		vchan_vdesc_fini(vd);
404125dcb5ddSPeter Ujfalusi 	}
404225dcb5ddSPeter Ujfalusi }
404325dcb5ddSPeter Ujfalusi 
/*
 * udma_free_chan_resources - dmaengine device_free_chan_resources callback
 * @chan: dmaengine channel
 *
 * Terminates any ongoing transfer, releases the channel's interrupts,
 * PSI-L pairing, channel/flow resources and the descriptor pool.
 */
static void udma_free_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);

	udma_terminate_all(chan);
	if (uc->terminated_desc) {
		/* Terminate left a descriptor behind; clean up fully */
		udma_reset_chan(uc, false);
		udma_reset_rings(uc);
	}

	cancel_delayed_work_sync(&uc->tx_drain.work);

	if (uc->irq_num_ring > 0) {
		free_irq(uc->irq_num_ring, uc);

		uc->irq_num_ring = 0;
	}
	if (uc->irq_num_udma > 0) {
		free_irq(uc->irq_num_udma, uc);

		uc->irq_num_udma = 0;
	}

	/* Release PSI-L pairing */
	if (uc->psil_paired) {
		navss_psil_unpair(ud, uc->config.src_thread,
				  uc->config.dst_thread);
		uc->psil_paired = false;
	}

	vchan_free_chan_resources(&uc->vc);
	tasklet_kill(&uc->vc.task);

	/* Return channel resources; no-ops for the resources not held */
	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}
}
408825dcb5ddSPeter Ujfalusi 
408925dcb5ddSPeter Ujfalusi static struct platform_driver udma_driver;
409001779473SPeter Ujfalusi static struct platform_driver bcdma_driver;
4091d2abc982SPeter Ujfalusi static struct platform_driver pktdma_driver;
409225dcb5ddSPeter Ujfalusi 
/**
 * struct udma_filter_param - parameters for udma_dma_filter_fn
 * @remote_thread_id: PSI-L remote thread id; its DST bit selects the
 *		      channel direction
 * @atype: channel address type, valid range 0..2
 * @asel: channel address select, valid range 0..15
 * @tr_trigger_type: when non-zero, requests a triggered (MEM_TO_MEM)
 *		     block copy channel
 */
struct udma_filter_param {
	int remote_thread_id;
	u32 atype;
	u32 asel;
	u32 tr_trigger_type;
};
40990ebcf1a2SPeter Ujfalusi 
udma_dma_filter_fn(struct dma_chan * chan,void * param)410025dcb5ddSPeter Ujfalusi static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
410125dcb5ddSPeter Ujfalusi {
410225dcb5ddSPeter Ujfalusi 	struct udma_chan_config *ucc;
410325dcb5ddSPeter Ujfalusi 	struct psil_endpoint_config *ep_config;
41040ebcf1a2SPeter Ujfalusi 	struct udma_filter_param *filter_param;
410525dcb5ddSPeter Ujfalusi 	struct udma_chan *uc;
410625dcb5ddSPeter Ujfalusi 	struct udma_dev *ud;
410725dcb5ddSPeter Ujfalusi 
410801779473SPeter Ujfalusi 	if (chan->device->dev->driver != &udma_driver.driver &&
4109d2abc982SPeter Ujfalusi 	    chan->device->dev->driver != &bcdma_driver.driver &&
4110d2abc982SPeter Ujfalusi 	    chan->device->dev->driver != &pktdma_driver.driver)
411125dcb5ddSPeter Ujfalusi 		return false;
411225dcb5ddSPeter Ujfalusi 
411325dcb5ddSPeter Ujfalusi 	uc = to_udma_chan(chan);
411425dcb5ddSPeter Ujfalusi 	ucc = &uc->config;
411525dcb5ddSPeter Ujfalusi 	ud = uc->ud;
41160ebcf1a2SPeter Ujfalusi 	filter_param = param;
411725dcb5ddSPeter Ujfalusi 
41180ebcf1a2SPeter Ujfalusi 	if (filter_param->atype > 2) {
41190ebcf1a2SPeter Ujfalusi 		dev_err(ud->dev, "Invalid channel atype: %u\n",
41200ebcf1a2SPeter Ujfalusi 			filter_param->atype);
41210ebcf1a2SPeter Ujfalusi 		return false;
41220ebcf1a2SPeter Ujfalusi 	}
41230ebcf1a2SPeter Ujfalusi 
412401779473SPeter Ujfalusi 	if (filter_param->asel > 15) {
412501779473SPeter Ujfalusi 		dev_err(ud->dev, "Invalid channel asel: %u\n",
412601779473SPeter Ujfalusi 			filter_param->asel);
412701779473SPeter Ujfalusi 		return false;
412801779473SPeter Ujfalusi 	}
412901779473SPeter Ujfalusi 
41300ebcf1a2SPeter Ujfalusi 	ucc->remote_thread_id = filter_param->remote_thread_id;
41310ebcf1a2SPeter Ujfalusi 	ucc->atype = filter_param->atype;
413201779473SPeter Ujfalusi 	ucc->asel = filter_param->asel;
413301779473SPeter Ujfalusi 	ucc->tr_trigger_type = filter_param->tr_trigger_type;
413425dcb5ddSPeter Ujfalusi 
413501779473SPeter Ujfalusi 	if (ucc->tr_trigger_type) {
413601779473SPeter Ujfalusi 		ucc->dir = DMA_MEM_TO_MEM;
413701779473SPeter Ujfalusi 		goto triggered_bchan;
413801779473SPeter Ujfalusi 	} else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
413925dcb5ddSPeter Ujfalusi 		ucc->dir = DMA_MEM_TO_DEV;
414001779473SPeter Ujfalusi 	} else {
414125dcb5ddSPeter Ujfalusi 		ucc->dir = DMA_DEV_TO_MEM;
414201779473SPeter Ujfalusi 	}
414325dcb5ddSPeter Ujfalusi 
414425dcb5ddSPeter Ujfalusi 	ep_config = psil_get_ep_config(ucc->remote_thread_id);
414525dcb5ddSPeter Ujfalusi 	if (IS_ERR(ep_config)) {
414625dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
414725dcb5ddSPeter Ujfalusi 			ucc->remote_thread_id);
414825dcb5ddSPeter Ujfalusi 		ucc->dir = DMA_MEM_TO_MEM;
414925dcb5ddSPeter Ujfalusi 		ucc->remote_thread_id = -1;
41500ebcf1a2SPeter Ujfalusi 		ucc->atype = 0;
415101779473SPeter Ujfalusi 		ucc->asel = 0;
415201779473SPeter Ujfalusi 		return false;
415301779473SPeter Ujfalusi 	}
415401779473SPeter Ujfalusi 
415501779473SPeter Ujfalusi 	if (ud->match_data->type == DMA_TYPE_BCDMA &&
415601779473SPeter Ujfalusi 	    ep_config->pkt_mode) {
415701779473SPeter Ujfalusi 		dev_err(ud->dev,
415801779473SPeter Ujfalusi 			"Only TR mode is supported (psi-l thread 0x%04x)\n",
415901779473SPeter Ujfalusi 			ucc->remote_thread_id);
416001779473SPeter Ujfalusi 		ucc->dir = DMA_MEM_TO_MEM;
416101779473SPeter Ujfalusi 		ucc->remote_thread_id = -1;
416201779473SPeter Ujfalusi 		ucc->atype = 0;
416301779473SPeter Ujfalusi 		ucc->asel = 0;
416425dcb5ddSPeter Ujfalusi 		return false;
416525dcb5ddSPeter Ujfalusi 	}
416625dcb5ddSPeter Ujfalusi 
416725dcb5ddSPeter Ujfalusi 	ucc->pkt_mode = ep_config->pkt_mode;
416825dcb5ddSPeter Ujfalusi 	ucc->channel_tpl = ep_config->channel_tpl;
416925dcb5ddSPeter Ujfalusi 	ucc->notdpkt = ep_config->notdpkt;
417025dcb5ddSPeter Ujfalusi 	ucc->ep_type = ep_config->ep_type;
417125dcb5ddSPeter Ujfalusi 
4172d2abc982SPeter Ujfalusi 	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4173d2abc982SPeter Ujfalusi 	    ep_config->mapped_channel_id >= 0) {
4174d2abc982SPeter Ujfalusi 		ucc->mapped_channel_id = ep_config->mapped_channel_id;
4175d2abc982SPeter Ujfalusi 		ucc->default_flow_id = ep_config->default_flow_id;
4176d2abc982SPeter Ujfalusi 	} else {
4177d2abc982SPeter Ujfalusi 		ucc->mapped_channel_id = -1;
4178d2abc982SPeter Ujfalusi 		ucc->default_flow_id = -1;
4179d2abc982SPeter Ujfalusi 	}
4180d2abc982SPeter Ujfalusi 
418125dcb5ddSPeter Ujfalusi 	if (ucc->ep_type != PSIL_EP_NATIVE) {
418225dcb5ddSPeter Ujfalusi 		const struct udma_match_data *match_data = ud->match_data;
418325dcb5ddSPeter Ujfalusi 
418425dcb5ddSPeter Ujfalusi 		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
418525dcb5ddSPeter Ujfalusi 			ucc->enable_acc32 = ep_config->pdma_acc32;
418625dcb5ddSPeter Ujfalusi 		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
418725dcb5ddSPeter Ujfalusi 			ucc->enable_burst = ep_config->pdma_burst;
418825dcb5ddSPeter Ujfalusi 	}
418925dcb5ddSPeter Ujfalusi 
419025dcb5ddSPeter Ujfalusi 	ucc->needs_epib = ep_config->needs_epib;
419125dcb5ddSPeter Ujfalusi 	ucc->psd_size = ep_config->psd_size;
419225dcb5ddSPeter Ujfalusi 	ucc->metadata_size =
419325dcb5ddSPeter Ujfalusi 			(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
419425dcb5ddSPeter Ujfalusi 			ucc->psd_size;
419525dcb5ddSPeter Ujfalusi 
419625dcb5ddSPeter Ujfalusi 	if (ucc->pkt_mode)
419725dcb5ddSPeter Ujfalusi 		ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
419825dcb5ddSPeter Ujfalusi 				 ucc->metadata_size, ud->desc_align);
419925dcb5ddSPeter Ujfalusi 
420025dcb5ddSPeter Ujfalusi 	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
420125dcb5ddSPeter Ujfalusi 		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
420225dcb5ddSPeter Ujfalusi 
420325dcb5ddSPeter Ujfalusi 	return true;
420401779473SPeter Ujfalusi 
420501779473SPeter Ujfalusi triggered_bchan:
420601779473SPeter Ujfalusi 	dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
420701779473SPeter Ujfalusi 		ucc->tr_trigger_type);
420801779473SPeter Ujfalusi 
420901779473SPeter Ujfalusi 	return true;
421001779473SPeter Ujfalusi 
421125dcb5ddSPeter Ujfalusi }
421225dcb5ddSPeter Ujfalusi 
udma_of_xlate(struct of_phandle_args * dma_spec,struct of_dma * ofdma)421325dcb5ddSPeter Ujfalusi static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
421425dcb5ddSPeter Ujfalusi 				      struct of_dma *ofdma)
421525dcb5ddSPeter Ujfalusi {
421625dcb5ddSPeter Ujfalusi 	struct udma_dev *ud = ofdma->of_dma_data;
421725dcb5ddSPeter Ujfalusi 	dma_cap_mask_t mask = ud->ddev.cap_mask;
42180ebcf1a2SPeter Ujfalusi 	struct udma_filter_param filter_param;
421925dcb5ddSPeter Ujfalusi 	struct dma_chan *chan;
422025dcb5ddSPeter Ujfalusi 
422101779473SPeter Ujfalusi 	if (ud->match_data->type == DMA_TYPE_BCDMA) {
422201779473SPeter Ujfalusi 		if (dma_spec->args_count != 3)
422301779473SPeter Ujfalusi 			return NULL;
422401779473SPeter Ujfalusi 
422501779473SPeter Ujfalusi 		filter_param.tr_trigger_type = dma_spec->args[0];
422601779473SPeter Ujfalusi 		filter_param.remote_thread_id = dma_spec->args[1];
422701779473SPeter Ujfalusi 		filter_param.asel = dma_spec->args[2];
422801779473SPeter Ujfalusi 		filter_param.atype = 0;
422901779473SPeter Ujfalusi 	} else {
42300ebcf1a2SPeter Ujfalusi 		if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
423125dcb5ddSPeter Ujfalusi 			return NULL;
423225dcb5ddSPeter Ujfalusi 
42330ebcf1a2SPeter Ujfalusi 		filter_param.remote_thread_id = dma_spec->args[0];
423401779473SPeter Ujfalusi 		filter_param.tr_trigger_type = 0;
423501779473SPeter Ujfalusi 		if (dma_spec->args_count == 2) {
423601779473SPeter Ujfalusi 			if (ud->match_data->type == DMA_TYPE_UDMA) {
42370ebcf1a2SPeter Ujfalusi 				filter_param.atype = dma_spec->args[1];
423801779473SPeter Ujfalusi 				filter_param.asel = 0;
423901779473SPeter Ujfalusi 			} else {
42400ebcf1a2SPeter Ujfalusi 				filter_param.atype = 0;
424101779473SPeter Ujfalusi 				filter_param.asel = dma_spec->args[1];
424201779473SPeter Ujfalusi 			}
424301779473SPeter Ujfalusi 		} else {
424401779473SPeter Ujfalusi 			filter_param.atype = 0;
424501779473SPeter Ujfalusi 			filter_param.asel = 0;
424601779473SPeter Ujfalusi 		}
424701779473SPeter Ujfalusi 	}
42480ebcf1a2SPeter Ujfalusi 
42490ebcf1a2SPeter Ujfalusi 	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
42500ebcf1a2SPeter Ujfalusi 				     ofdma->of_node);
425125dcb5ddSPeter Ujfalusi 	if (!chan) {
425225dcb5ddSPeter Ujfalusi 		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
425325dcb5ddSPeter Ujfalusi 		return ERR_PTR(-EINVAL);
425425dcb5ddSPeter Ujfalusi 	}
425525dcb5ddSPeter Ujfalusi 
425625dcb5ddSPeter Ujfalusi 	return chan;
425725dcb5ddSPeter Ujfalusi }
425825dcb5ddSPeter Ujfalusi 
/* Match data for ti,am654-navss-main-udmap (see udma_of_match below) */
static struct udma_match_data am654_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

/* Match data for ti,am654-navss-mcu-udmap */
static struct udma_match_data am654_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = false,
	.statictr_z_mask = GENMASK(11, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

/* Match data for ti,j721e-navss-main-udmap */
static struct udma_match_data j721e_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
	},
};

/* Match data for ti,j721e-navss-mcu-udmap */
static struct udma_match_data j721e_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

/* BCDMA rchan/ring offsets for the AM62A CSI instance (rx-only: no tchan) */
static struct udma_soc_data am62a_dmss_csi_soc_data = {
	.oes = {
		.bcdma_rchan_data = 0xe00,
		.bcdma_rchan_ring = 0x1000,
	},
};

/* BCDMA tchan/rchan/ring offsets for the J721S2 CSI instance */
static struct udma_soc_data j721s2_bcdma_csi_soc_data = {
	.oes = {
		.bcdma_tchan_data = 0x800,
		.bcdma_tchan_ring = 0xa00,
		.bcdma_rchan_data = 0xe00,
		.bcdma_rchan_ring = 0x1000,
	},
};

/* Match data for ti,am62a-dmss-bcdma-csirx */
static struct udma_match_data am62a_bcdma_csirx_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x3100,
	.enable_memcpy_support = false,
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
	.soc_data = &am62a_dmss_csi_soc_data,
};

/* Match data for ti,am64-dmss-bcdma */
static struct udma_match_data am64_bcdma_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
	.enable_memcpy_support = true, /* Supported via bchan */
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
};

/* Match data for ti,am64-dmss-pktdma */
static struct udma_match_data am64_pktdma_data = {
	.type = DMA_TYPE_PKTDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
};

/* Match data for ti,j721s2-dmss-bcdma-csi */
static struct udma_match_data j721s2_bcdma_csi_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x2000,
	.enable_memcpy_support = false,
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
	.soc_data = &j721s2_bcdma_csi_soc_data,
};
4374ceb434d5SVaishnav Achath 
/* Device-tree compatibles handled by this driver and their match data */
static const struct of_device_id udma_of_match[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = &am654_main_data,
	},
	{
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = &am654_mcu_data,
	}, {
		.compatible = "ti,j721e-navss-main-udmap",
		.data = &j721e_main_data,
	}, {
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = &j721e_mcu_data,
	},
	{
		.compatible = "ti,am64-dmss-bcdma",
		.data = &am64_bcdma_data,
	},
	{
		.compatible = "ti,am64-dmss-pktdma",
		.data = &am64_pktdma_data,
	},
	{
		.compatible = "ti,am62a-dmss-bcdma-csirx",
		.data = &am62a_bcdma_csirx_data,
	},
	{
		.compatible = "ti,j721s2-dmss-bcdma-csi",
		.data = &j721s2_bcdma_csi_data,
	},
	{ /* Sentinel */ },
};
4408d2abc982SPeter Ujfalusi 
/*
 * Per-SoC-family offset data (struct udma_oes_offsets); udma_rchan is
 * added to rchan event numbers when building the IRQ resource ranges in
 * udma_setup_resources().
 */
static struct udma_soc_data am654_soc_data = {
	.oes = {
		.udma_rchan = 0x200,
	},
};

static struct udma_soc_data j721e_soc_data = {
	.oes = {
		.udma_rchan = 0x400,
	},
};

static struct udma_soc_data j7200_soc_data = {
	.oes = {
		.udma_rchan = 0x80,
	},
};

/* BCDMA/PKTDMA offsets for the AM64-class DMSS based SoCs */
static struct udma_soc_data am64_soc_data = {
	.oes = {
		.bcdma_bchan_data = 0x2200,
		.bcdma_bchan_ring = 0x2400,
		.bcdma_tchan_data = 0x2800,
		.bcdma_tchan_ring = 0x2a00,
		.bcdma_rchan_data = 0x2e00,
		.bcdma_rchan_ring = 0x3000,
		.pktdma_tchan_flow = 0x1200,
		.pktdma_rchan_flow = 0x1600,
	},
	.bcdma_trigger_event_offset = 0xc400,
};

/* SoC family -> udma_soc_data lookup (fallback when match data has none) */
static const struct soc_device_attribute k3_soc_devices[] = {
	{ .family = "AM65X", .data = &am654_soc_data },
	{ .family = "J721E", .data = &j721e_soc_data },
	{ .family = "J7200", .data = &j7200_soc_data },
	{ .family = "AM64X", .data = &am64_soc_data },
	{ .family = "J721S2", .data = &j721e_soc_data},
	{ .family = "AM62X", .data = &am64_soc_data },
	{ .family = "AM62AX", .data = &am64_soc_data },
	{ .family = "J784S4", .data = &j721e_soc_data },
	{ /* sentinel */ }
};
4452f9b0366fSPeter Ujfalusi 
udma_get_mmrs(struct platform_device * pdev,struct udma_dev * ud)445325dcb5ddSPeter Ujfalusi static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
445425dcb5ddSPeter Ujfalusi {
4455d2abc982SPeter Ujfalusi 	u32 cap2, cap3, cap4;
445625dcb5ddSPeter Ujfalusi 	int i;
445725dcb5ddSPeter Ujfalusi 
445801779473SPeter Ujfalusi 	ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
445901779473SPeter Ujfalusi 	if (IS_ERR(ud->mmrs[MMR_GCFG]))
446001779473SPeter Ujfalusi 		return PTR_ERR(ud->mmrs[MMR_GCFG]);
446101779473SPeter Ujfalusi 
446201779473SPeter Ujfalusi 	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
446301779473SPeter Ujfalusi 	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
446401779473SPeter Ujfalusi 
446501779473SPeter Ujfalusi 	switch (ud->match_data->type) {
446601779473SPeter Ujfalusi 	case DMA_TYPE_UDMA:
446701779473SPeter Ujfalusi 		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
446801779473SPeter Ujfalusi 		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
446901779473SPeter Ujfalusi 		ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
447001779473SPeter Ujfalusi 		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
447101779473SPeter Ujfalusi 		break;
447201779473SPeter Ujfalusi 	case DMA_TYPE_BCDMA:
4473*0d820e1cSVignesh Raghavendra 		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) +
4474*0d820e1cSVignesh Raghavendra 				BCDMA_CAP3_HBCHAN_CNT(cap3) +
4475*0d820e1cSVignesh Raghavendra 				BCDMA_CAP3_UBCHAN_CNT(cap3);
447601779473SPeter Ujfalusi 		ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
447701779473SPeter Ujfalusi 		ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4478aecf9d38SVignesh Raghavendra 		ud->rflow_cnt = ud->rchan_cnt;
447901779473SPeter Ujfalusi 		break;
4480d2abc982SPeter Ujfalusi 	case DMA_TYPE_PKTDMA:
4481d2abc982SPeter Ujfalusi 		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4482d2abc982SPeter Ujfalusi 		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4483d2abc982SPeter Ujfalusi 		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4484d2abc982SPeter Ujfalusi 		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4485d2abc982SPeter Ujfalusi 		ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4486d2abc982SPeter Ujfalusi 		break;
448701779473SPeter Ujfalusi 	default:
448801779473SPeter Ujfalusi 		return -EINVAL;
448901779473SPeter Ujfalusi 	}
449001779473SPeter Ujfalusi 
449101779473SPeter Ujfalusi 	for (i = 1; i < MMR_LAST; i++) {
449201779473SPeter Ujfalusi 		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
449301779473SPeter Ujfalusi 			continue;
449401779473SPeter Ujfalusi 		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
449501779473SPeter Ujfalusi 			continue;
449601779473SPeter Ujfalusi 		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
449701779473SPeter Ujfalusi 			continue;
449801779473SPeter Ujfalusi 
4499ea275007SZhang Qilong 		ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
450025dcb5ddSPeter Ujfalusi 		if (IS_ERR(ud->mmrs[i]))
450125dcb5ddSPeter Ujfalusi 			return PTR_ERR(ud->mmrs[i]);
450225dcb5ddSPeter Ujfalusi 	}
450325dcb5ddSPeter Ujfalusi 
450425dcb5ddSPeter Ujfalusi 	return 0;
450525dcb5ddSPeter Ujfalusi }
450625dcb5ddSPeter Ujfalusi 
udma_mark_resource_ranges(struct udma_dev * ud,unsigned long * map,struct ti_sci_resource_desc * rm_desc,char * name)45071609c15aSPeter Ujfalusi static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
45081609c15aSPeter Ujfalusi 				      struct ti_sci_resource_desc *rm_desc,
45091609c15aSPeter Ujfalusi 				      char *name)
45101609c15aSPeter Ujfalusi {
45111609c15aSPeter Ujfalusi 	bitmap_clear(map, rm_desc->start, rm_desc->num);
45121609c15aSPeter Ujfalusi 	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
45131609c15aSPeter Ujfalusi 	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
45141609c15aSPeter Ujfalusi 		rm_desc->start, rm_desc->num, rm_desc->start_sec,
45151609c15aSPeter Ujfalusi 		rm_desc->num_sec);
45161609c15aSPeter Ujfalusi }
45171609c15aSPeter Ujfalusi 
/* Device-tree property names of the TISCI RM ranges, indexed by RM_RANGE_* */
static const char * const range_names[] = {
	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};
452501779473SPeter Ujfalusi 
/*
 * udma_setup_resources - resource setup for DMA_TYPE_UDMA controllers.
 *
 * Determines the throughput-level (tpl) start indexes, allocates the
 * channel/flow bookkeeping bitmaps and arrays, queries the TISCI resource
 * ranges assigned to Linux, allocates the MSI interrupts for the tchan and
 * rchan ranges and finally marks the general-purpose rflows available to
 * Linux.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * ti_sci_inta_msi_domain_alloc_irqs().
 */
static int udma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 cap3;

	/*
	 * Set up the throughput level start indexes. AM654 instances use
	 * fixed start indexes; otherwise they are derived from the UCHAN/
	 * HCHAN counts in the CAP3 register.
	 */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (of_device_is_compatible(dev->of_node,
				    "ti,am654-navss-main-udmap")) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = 8;
	} else if (of_device_is_compatible(dev->of_node,
					   "ti,am654-navss-mcu-udmap")) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = 2;
	} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	/* On UDMA the rchan levels mirror the tchan levels */
	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	/* Allocation bitmaps and descriptor arrays (devres managed) */
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
						  BITS_TO_LONGS(ud->rflow_cnt),
						  sizeof(unsigned long),
						  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
	    !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);

	/* by default no GP rflows are assigned to Linux */
	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		/* BCHAN and TFLOW ranges do not exist on UDMA */
		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		/* no range from TISCI: all tchans are usable, one IRQ set */
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
		irq_res.sets = rm_res->sets;
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
		irq_res.sets += rm_res->sets;
	}

	/*
	 * Build the IRQ resource descriptors: first the tchan sets, then the
	 * rchan sets continuing at index i; rchan events are offset by the
	 * SoC-specific oes.udma_rchan value.
	 */
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = 0;
		irq_res.desc[0].num = ud->tchan_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start;
			irq_res.desc[i].num = rm_res->desc[i].num;
			irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
			irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = 0;
		irq_res.desc[i].num = ud->rchan_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			if (rm_res->desc[j].num) {
				irq_res.desc[i].start = rm_res->desc[j].start +
						ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num = rm_res->desc[j].num;
			}
			if (rm_res->desc[j].num_sec) {
				irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
						ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
			}
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all gp flows are assigned exclusively to Linux */
		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_gp_map,
						  &rm_res->desc[i], "gp-rflow");
	}

	return 0;
}
/*
 * bcdma_setup_resources - discover and reserve BCDMA channel resources
 * @ud: the UDMA device being probed
 *
 * Reads the BCDMA capability registers to derive the throughput-level
 * (normal/high/ultra-high capacity) start indexes for bchan, rchan and
 * tchan, allocates the per-channel bookkeeping arrays, fetches the
 * resource ranges assigned to this host from the TISCI resource manager
 * and allocates the MSI interrupts for the usable channels.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * returned by ti_sci_inta_msi_domain_alloc_irqs().
 */
static int bcdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap;

	/* Set up the throughput level start indexes */

	/*
	 * CAP3 (0x2c) carries the ultra-high and high capacity bchan counts.
	 * A non-zero UBCHAN/HBCHAN count implies the corresponding extra
	 * throughput level exists; start_idx[] records where each higher
	 * level begins in the channel index space.
	 */
	cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 3;
		ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 2;
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else {
		ud->bchan_tpl.levels = 1;
	}

	/* CAP4 (0x30) carries both the rchan and tchan capacity counts */
	cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
	if (BCDMA_CAP4_URCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 3;
		ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 2;
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else {
		ud->rchan_tpl.levels = 1;
	}

	if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	/*
	 * Allocate the "in use" bitmaps and the per-channel descriptor
	 * arrays. All allocations are device managed, so there is no
	 * explicit cleanup on the error paths below.
	 */
	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	/* BCDMA does not really have flows, but the driver expects them */
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		/* BCDMA has no flow ranges; skip ranges for absent channels */
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;
		if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
			continue;
		if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
			continue;
		if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/*
	 * First pass: mark the channels this host may use (a missing TISCI
	 * range means "everything is ours") and count how many irq_res
	 * descriptors are needed. A bchan contributes one event range
	 * (ring only); tchan and rchan contribute two each (data + ring).
	 */
	irq_res.sets = 0;

	/* bchan ranges */
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
			irq_res.sets++;
		} else {
			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->bchan_map,
							  &rm_res->desc[i],
							  "bchan");
			irq_res.sets += rm_res->sets;
		}
	}

	/* tchan ranges */
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->tchan_map,
							  &rm_res->desc[i],
							  "tchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	/* rchan ranges */
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->rchan_map,
							  &rm_res->desc[i],
							  "rchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	/*
	 * Second pass: fill the event ranges. 'i' always indexes the next
	 * free slot in irq_res.desc[] and is carried from one section to
	 * the next; the OES offsets translate channel/ring indexes into
	 * global event numbers.
	 */
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[0].start = oes->bcdma_bchan_ring;
			irq_res.desc[0].num = ud->bchan_cnt;
			i = 1;
		} else {
			for (i = 0; i < rm_res->sets; i++) {
				irq_res.desc[i].start = rm_res->desc[i].start +
							oes->bcdma_bchan_ring;
				irq_res.desc[i].num = rm_res->desc[i].num;
			}
		}
	} else {
		/* No bchans: the tchan/rchan sections start at slot 0 */
		i = 0;
	}

	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_tchan_data;
			irq_res.desc[i].num = ud->tchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
			irq_res.desc[i + 1].num = ud->tchan_cnt;
			i += 2;
		} else {
			/* Two consecutive slots per set: data then ring */
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_tchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_tchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_rchan_data;
			irq_res.desc[i].num = ud->rchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
			irq_res.desc[i + 1].num = ud->rchan_cnt;
			i += 2;
		} else {
			/* Two consecutive slots per set: data then ring */
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_rchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_rchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}

	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}
489401779473SPeter Ujfalusi 
/*
 * pktdma_setup_resources - discover and reserve PKTDMA channel resources
 * @ud: the UDMA device being probed
 *
 * Derives the throughput-level start indexes from the CAP3 register,
 * allocates the channel/flow bookkeeping arrays, fetches the resource
 * ranges assigned to this host from the TISCI resource manager and
 * allocates the MSI interrupts for the usable tflows and rflows
 * (PKTDMA events are per flow, not per channel).
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * returned by ti_sci_inta_msi_domain_alloc_irqs().
 */
static int pktdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	/* rchan uses the same throughput-level layout as tchan */
	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	/*
	 * Allocate the "in use" bitmaps and descriptor arrays; all
	 * allocations are device managed (no cleanup on error paths).
	 */
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		/* PKTDMA has no bchans */
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/*
	 * Mark the resources this host may use. A missing TISCI range
	 * means every instance is assigned to Linux (bitmap cleared);
	 * otherwise start fully reserved and clear the granted ranges.
	 */

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
	}

	/* rflow ranges; also starts the irq_res.sets event-range count */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
		irq_res.sets = rm_res->sets;
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
		irq_res.sets += rm_res->sets;
	}

	/*
	 * Fill the event ranges: tflow events first, then rflow events.
	 * 'i' carries the next free irq_res.desc[] slot between the two
	 * sections; the OES offsets map flow indexes to global events.
	 */
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = oes->pktdma_tchan_flow;
		irq_res.desc[0].num = ud->tflow_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->pktdma_tchan_flow;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = oes->pktdma_rchan_flow;
		irq_res.desc[i].num = ud->rflow_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->pktdma_rchan_flow;
			irq_res.desc[i].num = rm_res->desc[j].num;
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}
5037d2abc982SPeter Ujfalusi 
setup_resources(struct udma_dev * ud)503801779473SPeter Ujfalusi static int setup_resources(struct udma_dev *ud)
503901779473SPeter Ujfalusi {
504001779473SPeter Ujfalusi 	struct device *dev = ud->dev;
504101779473SPeter Ujfalusi 	int ch_count, ret;
504201779473SPeter Ujfalusi 
504301779473SPeter Ujfalusi 	switch (ud->match_data->type) {
504401779473SPeter Ujfalusi 	case DMA_TYPE_UDMA:
504501779473SPeter Ujfalusi 		ret = udma_setup_resources(ud);
504601779473SPeter Ujfalusi 		break;
504701779473SPeter Ujfalusi 	case DMA_TYPE_BCDMA:
504801779473SPeter Ujfalusi 		ret = bcdma_setup_resources(ud);
504901779473SPeter Ujfalusi 		break;
5050d2abc982SPeter Ujfalusi 	case DMA_TYPE_PKTDMA:
5051d2abc982SPeter Ujfalusi 		ret = pktdma_setup_resources(ud);
5052d2abc982SPeter Ujfalusi 		break;
505301779473SPeter Ujfalusi 	default:
505401779473SPeter Ujfalusi 		return -EINVAL;
505501779473SPeter Ujfalusi 	}
505601779473SPeter Ujfalusi 
505701779473SPeter Ujfalusi 	if (ret)
505801779473SPeter Ujfalusi 		return ret;
505901779473SPeter Ujfalusi 
506001779473SPeter Ujfalusi 	ch_count  = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
506101779473SPeter Ujfalusi 	if (ud->bchan_cnt)
506201779473SPeter Ujfalusi 		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
506325dcb5ddSPeter Ujfalusi 	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
506425dcb5ddSPeter Ujfalusi 	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
506525dcb5ddSPeter Ujfalusi 	if (!ch_count)
506625dcb5ddSPeter Ujfalusi 		return -ENODEV;
506725dcb5ddSPeter Ujfalusi 
506825dcb5ddSPeter Ujfalusi 	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
506925dcb5ddSPeter Ujfalusi 				    GFP_KERNEL);
507025dcb5ddSPeter Ujfalusi 	if (!ud->channels)
507125dcb5ddSPeter Ujfalusi 		return -ENOMEM;
507225dcb5ddSPeter Ujfalusi 
507301779473SPeter Ujfalusi 	switch (ud->match_data->type) {
507401779473SPeter Ujfalusi 	case DMA_TYPE_UDMA:
507501779473SPeter Ujfalusi 		dev_info(dev,
507601779473SPeter Ujfalusi 			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
507725dcb5ddSPeter Ujfalusi 			 ch_count,
507801779473SPeter Ujfalusi 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
507901779473SPeter Ujfalusi 						       ud->tchan_cnt),
508001779473SPeter Ujfalusi 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
508101779473SPeter Ujfalusi 						       ud->rchan_cnt),
508225dcb5ddSPeter Ujfalusi 			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
508325dcb5ddSPeter Ujfalusi 						       ud->rflow_cnt));
508401779473SPeter Ujfalusi 		break;
508501779473SPeter Ujfalusi 	case DMA_TYPE_BCDMA:
508601779473SPeter Ujfalusi 		dev_info(dev,
508701779473SPeter Ujfalusi 			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
508801779473SPeter Ujfalusi 			 ch_count,
508901779473SPeter Ujfalusi 			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
509001779473SPeter Ujfalusi 						       ud->bchan_cnt),
509101779473SPeter Ujfalusi 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
509201779473SPeter Ujfalusi 						       ud->tchan_cnt),
509301779473SPeter Ujfalusi 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
509401779473SPeter Ujfalusi 						       ud->rchan_cnt));
509501779473SPeter Ujfalusi 		break;
5096d2abc982SPeter Ujfalusi 	case DMA_TYPE_PKTDMA:
5097d2abc982SPeter Ujfalusi 		dev_info(dev,
5098d2abc982SPeter Ujfalusi 			 "Channels: %d (tchan: %u, rchan: %u)\n",
5099d2abc982SPeter Ujfalusi 			 ch_count,
5100d2abc982SPeter Ujfalusi 			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5101d2abc982SPeter Ujfalusi 						       ud->tchan_cnt),
5102d2abc982SPeter Ujfalusi 			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5103d2abc982SPeter Ujfalusi 						       ud->rchan_cnt));
5104d6a48a47SGustavo A. R. Silva 		break;
510501779473SPeter Ujfalusi 	default:
510601779473SPeter Ujfalusi 		break;
510701779473SPeter Ujfalusi 	}
510825dcb5ddSPeter Ujfalusi 
510925dcb5ddSPeter Ujfalusi 	return ch_count;
511025dcb5ddSPeter Ujfalusi }
511125dcb5ddSPeter Ujfalusi 
/*
 * udma_setup_rx_flush - prepare descriptors used to flush RX channels
 * @ud: the UDMA device being probed
 *
 * Allocates a 1K scratch buffer and builds two reusable CPPI5
 * descriptors pointing at it - one TR-mode (hwdescs[0]) and one
 * packet-mode (hwdescs[1]) - so that data still in flight during RX
 * channel teardown can be drained into the buffer and discarded.
 *
 * All buffers are mapped DMA_TO_DEVICE: the flushed data is never read
 * back by the CPU (no sync-for-cpu is done anywhere here).
 *
 * NOTE(review): the dma_map_single() mappings are not unmapped on the
 * later -ENOMEM paths; on success they intentionally live for the
 * lifetime of the device.
 *
 * Return: 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	/* Room for one type1 TR plus its response, rounded to desc_align */
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	/* Single-TR descriptor, default flow id, no return policy */
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	/* Suppress the completion event for the flush transfer */
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	/* One pass (icnt1 == 1) over the whole scratch buffer */
	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	/* Hand the fully built TR descriptor over to the device */
	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	/* Set up descriptor to be used for packet mode */
	hwdesc = &rx_flush->hwdescs[1];
	/* Host descriptor sized for worst-case EPIB + PS data */
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	/* Same scratch buffer serves as both buffer and original buffer */
	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	/* Hand the packet-mode descriptor over to the device */
	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
	return 0;
}
520616cd3c67SPeter Ujfalusi 
5207db8d9b4cSPeter Ujfalusi #ifdef CONFIG_DEBUG_FS
udma_dbg_summary_show_chan(struct seq_file * s,struct dma_chan * chan)5208db8d9b4cSPeter Ujfalusi static void udma_dbg_summary_show_chan(struct seq_file *s,
5209db8d9b4cSPeter Ujfalusi 				       struct dma_chan *chan)
5210db8d9b4cSPeter Ujfalusi {
5211db8d9b4cSPeter Ujfalusi 	struct udma_chan *uc = to_udma_chan(chan);
5212db8d9b4cSPeter Ujfalusi 	struct udma_chan_config *ucc = &uc->config;
5213db8d9b4cSPeter Ujfalusi 
5214db8d9b4cSPeter Ujfalusi 	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
5215db8d9b4cSPeter Ujfalusi 		   chan->dbg_client_name ?: "in-use");
521601779473SPeter Ujfalusi 	if (ucc->tr_trigger_type)
521701779473SPeter Ujfalusi 		seq_puts(s, " (triggered, ");
521801779473SPeter Ujfalusi 	else
521901779473SPeter Ujfalusi 		seq_printf(s, " (%s, ",
522001779473SPeter Ujfalusi 			   dmaengine_get_direction_text(uc->config.dir));
5221db8d9b4cSPeter Ujfalusi 
5222db8d9b4cSPeter Ujfalusi 	switch (uc->config.dir) {
5223db8d9b4cSPeter Ujfalusi 	case DMA_MEM_TO_MEM:
522401779473SPeter Ujfalusi 		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
522501779473SPeter Ujfalusi 			seq_printf(s, "bchan%d)\n", uc->bchan->id);
522601779473SPeter Ujfalusi 			return;
522701779473SPeter Ujfalusi 		}
522801779473SPeter Ujfalusi 
5229db8d9b4cSPeter Ujfalusi 		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
5230db8d9b4cSPeter Ujfalusi 			   ucc->src_thread, ucc->dst_thread);
5231db8d9b4cSPeter Ujfalusi 		break;
5232db8d9b4cSPeter Ujfalusi 	case DMA_DEV_TO_MEM:
5233db8d9b4cSPeter Ujfalusi 		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
5234db8d9b4cSPeter Ujfalusi 			   ucc->src_thread, ucc->dst_thread);
5235d2abc982SPeter Ujfalusi 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5236d2abc982SPeter Ujfalusi 			seq_printf(s, "rflow%d, ", uc->rflow->id);
5237db8d9b4cSPeter Ujfalusi 		break;
5238db8d9b4cSPeter Ujfalusi 	case DMA_MEM_TO_DEV:
5239db8d9b4cSPeter Ujfalusi 		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
5240db8d9b4cSPeter Ujfalusi 			   ucc->src_thread, ucc->dst_thread);
5241d2abc982SPeter Ujfalusi 		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5242d2abc982SPeter Ujfalusi 			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
5243db8d9b4cSPeter Ujfalusi 		break;
5244db8d9b4cSPeter Ujfalusi 	default:
5245db8d9b4cSPeter Ujfalusi 		seq_printf(s, ")\n");
5246db8d9b4cSPeter Ujfalusi 		return;
5247db8d9b4cSPeter Ujfalusi 	}
5248db8d9b4cSPeter Ujfalusi 
5249db8d9b4cSPeter Ujfalusi 	if (ucc->ep_type == PSIL_EP_NATIVE) {
5250db8d9b4cSPeter Ujfalusi 		seq_printf(s, "PSI-L Native");
5251db8d9b4cSPeter Ujfalusi 		if (ucc->metadata_size) {
5252db8d9b4cSPeter Ujfalusi 			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
5253db8d9b4cSPeter Ujfalusi 			if (ucc->psd_size)
5254db8d9b4cSPeter Ujfalusi 				seq_printf(s, " PSDsize:%u", ucc->psd_size);
5255db8d9b4cSPeter Ujfalusi 			seq_printf(s, " ]");
5256db8d9b4cSPeter Ujfalusi 		}
5257db8d9b4cSPeter Ujfalusi 	} else {
5258db8d9b4cSPeter Ujfalusi 		seq_printf(s, "PDMA");
5259db8d9b4cSPeter Ujfalusi 		if (ucc->enable_acc32 || ucc->enable_burst)
5260db8d9b4cSPeter Ujfalusi 			seq_printf(s, "[%s%s ]",
5261db8d9b4cSPeter Ujfalusi 				   ucc->enable_acc32 ? " ACC32" : "",
5262db8d9b4cSPeter Ujfalusi 				   ucc->enable_burst ? " BURST" : "");
5263db8d9b4cSPeter Ujfalusi 	}
5264db8d9b4cSPeter Ujfalusi 
5265db8d9b4cSPeter Ujfalusi 	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
5266db8d9b4cSPeter Ujfalusi }
5267db8d9b4cSPeter Ujfalusi 
udma_dbg_summary_show(struct seq_file * s,struct dma_device * dma_dev)5268db8d9b4cSPeter Ujfalusi static void udma_dbg_summary_show(struct seq_file *s,
5269db8d9b4cSPeter Ujfalusi 				  struct dma_device *dma_dev)
5270db8d9b4cSPeter Ujfalusi {
5271db8d9b4cSPeter Ujfalusi 	struct dma_chan *chan;
5272db8d9b4cSPeter Ujfalusi 
5273db8d9b4cSPeter Ujfalusi 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
5274db8d9b4cSPeter Ujfalusi 		if (chan->client_count)
5275db8d9b4cSPeter Ujfalusi 			udma_dbg_summary_show_chan(s, chan);
5276db8d9b4cSPeter Ujfalusi 	}
5277db8d9b4cSPeter Ujfalusi }
5278db8d9b4cSPeter Ujfalusi #endif /* CONFIG_DEBUG_FS */
5279db8d9b4cSPeter Ujfalusi 
udma_get_copy_align(struct udma_dev * ud)5280046d679bSPeter Ujfalusi static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
5281046d679bSPeter Ujfalusi {
5282046d679bSPeter Ujfalusi 	const struct udma_match_data *match_data = ud->match_data;
5283046d679bSPeter Ujfalusi 	u8 tpl;
5284046d679bSPeter Ujfalusi 
5285046d679bSPeter Ujfalusi 	if (!match_data->enable_memcpy_support)
5286046d679bSPeter Ujfalusi 		return DMAENGINE_ALIGN_8_BYTES;
5287046d679bSPeter Ujfalusi 
5288046d679bSPeter Ujfalusi 	/* Get the highest TPL level the device supports for memcpy */
5289046d679bSPeter Ujfalusi 	if (ud->bchan_cnt)
5290046d679bSPeter Ujfalusi 		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
5291046d679bSPeter Ujfalusi 	else if (ud->tchan_cnt)
5292046d679bSPeter Ujfalusi 		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
5293046d679bSPeter Ujfalusi 	else
5294046d679bSPeter Ujfalusi 		return DMAENGINE_ALIGN_8_BYTES;
5295046d679bSPeter Ujfalusi 
5296046d679bSPeter Ujfalusi 	switch (match_data->burst_size[tpl]) {
5297046d679bSPeter Ujfalusi 	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
5298046d679bSPeter Ujfalusi 		return DMAENGINE_ALIGN_256_BYTES;
5299046d679bSPeter Ujfalusi 	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
5300046d679bSPeter Ujfalusi 		return DMAENGINE_ALIGN_128_BYTES;
5301046d679bSPeter Ujfalusi 	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
5302046d679bSPeter Ujfalusi 	fallthrough;
5303046d679bSPeter Ujfalusi 	default:
5304046d679bSPeter Ujfalusi 		return DMAENGINE_ALIGN_64_BYTES;
5305046d679bSPeter Ujfalusi 	}
5306046d679bSPeter Ujfalusi }
5307046d679bSPeter Ujfalusi 
/* Slave bus widths (1/2/3/4/8 bytes) advertised via src/dst_addr_widths. */
#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
531325dcb5ddSPeter Ujfalusi 
/*
 * Probe a UDMA/BCDMA/PKTDMA instance: match the compatible, read the
 * TISCI handles and DT properties, set up the ring accelerator, fill in
 * the dmaengine ops for the detected DMA type, partition the channel
 * resources and finally register with dmaengine and of_dma.
 *
 * Returns 0 on success or a negative errno (-EPROBE_DEFER when the
 * INTA MSI domain is not yet available).
 */
static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	/* DMA mask failure is reported but deliberately not fatal. */
	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set dma mask stuff\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	/* SoC data may come from match data; otherwise look up the SoC. */
	ud->soc_data = ud->match_data->soc_data;
	if (!ud->soc_data) {
		soc = soc_device_match(k3_soc_devices);
		if (!soc) {
			dev_err(dev, "No compatible SoC found\n");
			return -ENODEV;
		}
		ud->soc_data = soc->data;
	}

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	/* TISCI resource management handles for this device and NAVSS. */
	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	/*
	 * Optional addressing properties: UDMA uses "ti,udma-atype" (0..2),
	 * BCDMA/PKTDMA use "ti,asel" (0..15). A missing property is fine;
	 * only an out-of-range value is rejected.
	 */
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
					   &ud->atype);
		if (!ret && ud->atype > 2) {
			dev_err(dev, "Invalid atype: %u\n", ud->atype);
			return -EINVAL;
		}
	} else {
		ret = of_property_read_u32(dev->of_node, "ti,asel",
					   &ud->asel);
		if (!ret && ud->asel > 15) {
			dev_err(dev, "Invalid asel: %u\n", ud->asel);
			return -EINVAL;
		}
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	/*
	 * UDMA references an external ring accelerator via phandle;
	 * BCDMA/PKTDMA own their rings and size the ringacc from the
	 * channel/flow counts read by udma_get_mmrs() above.
	 */
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
	}

	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	/* Defer until the TI SCI INTA MSI irqchip domain is up. */
	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi.domain) {
		return -EPROBE_DEFER;
	}

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	/* cyclic operation is not supported via PKTDMA */
	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	}

	/* Common dmaengine callbacks shared by all three DMA types. */
	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	/* Channel allocation differs per DMA type. */
	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->ddev.device_alloc_chan_resources =
					udma_alloc_chan_resources;
		break;
	case DMA_TYPE_BCDMA:
		ud->ddev.device_alloc_chan_resources =
					bcdma_alloc_chan_resources;
		ud->ddev.device_router_config = bcdma_router_config;
		break;
	case DMA_TYPE_PKTDMA:
		ud->ddev.device_alloc_chan_resources =
					pktdma_alloc_chan_resources;
		break;
	default:
		return -EINVAL;
	}
	ud->ddev.device_free_chan_resources = udma_free_chan_resources;

	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	/* Advertise memcpy only if supported (BCDMA also needs bchans). */
	if (ud->match_data->enable_memcpy_support &&
	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	/* Partition channels/flows per TISCI resource ranges. */
	ch_count = setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	/* Descriptors need 64-byte or cache-line alignment, whichever is larger. */
	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

	/* Assign ids and runtime register bases; RT regs are 0x1000 apart. */
	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	/* Initialize the virtual channel bookkeeping for each channel. */
	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}

	/* Configure the copy_align to the maximum burst size the device supports */
	ud->ddev.copy_align = udma_get_copy_align(ud);

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}
556125dcb5ddSPeter Ujfalusi 
udma_pm_suspend(struct device * dev)556238de368aSVinod Koul static int __maybe_unused udma_pm_suspend(struct device *dev)
5563fbe05149SVignesh Raghavendra {
5564fbe05149SVignesh Raghavendra 	struct udma_dev *ud = dev_get_drvdata(dev);
5565fbe05149SVignesh Raghavendra 	struct dma_device *dma_dev = &ud->ddev;
5566fbe05149SVignesh Raghavendra 	struct dma_chan *chan;
5567fbe05149SVignesh Raghavendra 	struct udma_chan *uc;
5568fbe05149SVignesh Raghavendra 
5569fbe05149SVignesh Raghavendra 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
5570fbe05149SVignesh Raghavendra 		if (chan->client_count) {
5571fbe05149SVignesh Raghavendra 			uc = to_udma_chan(chan);
5572fbe05149SVignesh Raghavendra 			/* backup the channel configuration */
5573fbe05149SVignesh Raghavendra 			memcpy(&uc->backup_config, &uc->config,
5574fbe05149SVignesh Raghavendra 			       sizeof(struct udma_chan_config));
5575fbe05149SVignesh Raghavendra 			dev_dbg(dev, "Suspending channel %s\n",
5576fbe05149SVignesh Raghavendra 				dma_chan_name(chan));
5577fbe05149SVignesh Raghavendra 			ud->ddev.device_free_chan_resources(chan);
5578fbe05149SVignesh Raghavendra 		}
5579fbe05149SVignesh Raghavendra 	}
5580fbe05149SVignesh Raghavendra 
5581fbe05149SVignesh Raghavendra 	return 0;
5582fbe05149SVignesh Raghavendra }
5583fbe05149SVignesh Raghavendra 
udma_pm_resume(struct device * dev)558438de368aSVinod Koul static int __maybe_unused udma_pm_resume(struct device *dev)
5585fbe05149SVignesh Raghavendra {
5586fbe05149SVignesh Raghavendra 	struct udma_dev *ud = dev_get_drvdata(dev);
5587fbe05149SVignesh Raghavendra 	struct dma_device *dma_dev = &ud->ddev;
5588fbe05149SVignesh Raghavendra 	struct dma_chan *chan;
5589fbe05149SVignesh Raghavendra 	struct udma_chan *uc;
5590fbe05149SVignesh Raghavendra 	int ret;
5591fbe05149SVignesh Raghavendra 
5592fbe05149SVignesh Raghavendra 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
5593fbe05149SVignesh Raghavendra 		if (chan->client_count) {
5594fbe05149SVignesh Raghavendra 			uc = to_udma_chan(chan);
5595fbe05149SVignesh Raghavendra 			/* restore the channel configuration */
5596fbe05149SVignesh Raghavendra 			memcpy(&uc->config, &uc->backup_config,
5597fbe05149SVignesh Raghavendra 			       sizeof(struct udma_chan_config));
5598fbe05149SVignesh Raghavendra 			dev_dbg(dev, "Resuming channel %s\n",
5599fbe05149SVignesh Raghavendra 				dma_chan_name(chan));
5600fbe05149SVignesh Raghavendra 			ret = ud->ddev.device_alloc_chan_resources(chan);
5601fbe05149SVignesh Raghavendra 			if (ret)
5602fbe05149SVignesh Raghavendra 				return ret;
5603fbe05149SVignesh Raghavendra 		}
5604fbe05149SVignesh Raghavendra 	}
5605fbe05149SVignesh Raghavendra 
5606fbe05149SVignesh Raghavendra 	return 0;
5607fbe05149SVignesh Raghavendra }
5608fbe05149SVignesh Raghavendra 
/*
 * Use the *late* sleep phase — NOTE(review): presumably so DMA client
 * drivers suspend first and resume after the engine; confirm ordering.
 */
static const struct dev_pm_ops udma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
};
5612fbe05149SVignesh Raghavendra 
/*
 * Platform driver glue; bind/unbind via sysfs is suppressed since the
 * DMA engine cannot be safely torn down while clients may hold channels.
 */
static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
		.pm = &udma_pm_ops,
	},
	.probe		= udma_probe,
};
5622d7024191SGrygorii Strashko 
module_platform_driver(udma_driver);
MODULE_LICENSE("GPL v2");

/* Private interfaces to UDMA */
/* Included as a .c so the private helpers can reach this file's statics. */
#include "k3-udma-private.c"
5628