// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
	struct device *dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32  hdesc_size;
	bool epib;
	u32  psdata_size;
	u32  swdata_size;
	u32  atype;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32  swdata_size;
	int  flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
						       "ti,ringacc");
	if (IS_ERR(common->ringacc))
		return PTR_ERR(common->ringacc);

	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
		const char *name, struct k3_udma_glue_common *common,
		bool tx_chn)
{
	struct psil_endpoint_config *ep_config;
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		common->atype = dma_spec.args[1];
	}

	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	/* get psil endpoint config */
	ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		ret = PTR_ERR(ep_config);
		goto out_put_spec;
	}

	common->epib = ep_config->needs_epib;
	common->psdata_size = ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
};

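/*
 * For reference, a hedged sketch of the consumer DT properties the parser
 * above walks (the phandle and thread IDs below are illustrative only, not
 * taken from a real board file): the "tx" entry must have the
 * K3_PSIL_DST_THREAD_ID_OFFSET bit set in its PSI-L thread ID, the "rx"
 * entry must not, and an optional second cell selects the ATYPE (0..2):
 *
 *	dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
 *	dma-names = "tx", "rx";
 */
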
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		goto err;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	/* request and cfg rings */
	tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc,
						 tx_chn->udma_tchan_id, 0);
	if (!tx_chn->ringtx) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get TX ring %u\n",
			tx_chn->udma_tchan_id);
		goto err;
	}

	tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc,
						   -1, 0);
	if (!tx_chn->ringtxcq) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get TXCQ ring\n");
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		goto err;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		goto err;
	}

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(dev, "PSI-L request err %d\n", ret);
		goto err;
	}

	tx_chn->psil_paired = true;

	/* reset TX RT registers */
	k3_udma_glue_disable_tx_chn(tx_chn);

	k3_udma_glue_dump_tx_chn(tx_chn);

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);

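/*
 * Hedged usage sketch (hypothetical client driver; ring sizes and swdata
 * size are illustrative, other ring parameters are elided): fill a
 * k3_udma_glue_tx_channel_cfg, request the channel by its "dma-names"
 * entry, then size descriptors with k3_udma_glue_tx_get_hdesc_size():
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.tx_cfg.size = 256;
 *	cfg.txcq_cfg.size = 256;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 *	hdesc_size = k3_udma_glue_tx_get_hdesc_size(tx_chn);
 */
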
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

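/*
 * Hedged datapath sketch (illustrative only, error handling elided,
 * my_tx_complete() is a made-up helper): the sender pushes a prepared
 * CPPI5 host descriptor and its DMA address (e.g. from a dma_pool owned
 * by the client) to the TX ring; on completion the same DMA address is
 * popped back from the TXCQ ring, typically from IRQ or NAPI context:
 *
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
 *
 *	// later, on TX completion:
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
 *		my_tx_complete(priv, desc_dma);
 */
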
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	u32 txrt_ctl;

	txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    txrt_ctl);

	txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
				      UDMA_CHAN_RT_CTL_REG);
	txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    txrt_ctl);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/* reset TXCQ as it is not input for udma - expected to be empty */
	if (tx_chn->ringtxcq)
		k3_ringacc_ring_reset(tx_chn->ringtxcq);

	/*
	 * TXQ reset needs to be done in a special way as it is input for
	 * udma and its state is cached by udma, so:
	 * 1) save TXQ occ
	 * 2) clean up TXQ and call callback .cleanup() for each desc
	 * 3) reset TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

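/*
 * A minimal teardown sketch (hypothetical client driver; my_drain_cb() and
 * priv are illustrative, not part of this API): quiesce the channel with
 * k3_udma_glue_tdown_tx_chn(), let k3_udma_glue_reset_tx_chn() hand every
 * descriptor still sitting in the TX ring back so its buffers can be
 * unmapped and freed, then disable the channel:
 *
 *	static void my_drain_cb(void *data, dma_addr_t desc_dma)
 *	{
 *		struct my_priv *priv = data;
 *
 *		// look up the descriptor by desc_dma, dma_unmap the
 *		// buffers it points to and return it to the pool
 *	}
 *
 *	k3_udma_glue_tdown_tx_chn(tx_chn, true);
 *	k3_udma_glue_reset_tx_chn(tx_chn, priv, my_drain_cb);
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 */
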
u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

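/*
 * Hedged usage sketch (my_tx_irq() and "my-tx" are made up): the virq
 * returned above is the TXCQ ring interrupt, so a client typically
 * requests it and pops completed descriptors from the handler or from
 * NAPI context:
 *
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	if (irq <= 0)
 *		return irq ? irq : -ENXIO;
 *	ret = devm_request_irq(dev, irq, my_tx_irq, IRQF_TRIGGER_HIGH,
 *			       "my-tx", priv);
 */
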
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
	 * and udmax impl, so just configure it to an invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}

static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	/* request and cfg rings */
	flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc,
					       flow_cfg->ring_rxq_id, 0);
	if (!flow->ringrx) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get RX ring\n");
		goto err_rflow_put;
	}

	flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
						  flow_cfg->ring_rxfdq0_id, 0);
	if (!flow->ringrxfdq) {
		ret = -ENODEV;
		dev_err(dev, "Failed to get RXFDQ ring\n");
		goto err_ringrx_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);

err_ringrx_free:
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}
	rx_chn->flow_id_base = ret;

	return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;

	/* Use RX channel id as flow id: target dev can't generate flow_id */
	if (cfg->flow_id_use_rxchan_id)
		rx_chn->flow_id_base = rx_chn->udma_rchan_id;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(dev, "PSI-L request err %d\n", ret);
		goto err;
	}

	rx_chn->psil_paired = true;

	/* reset RX RT registers */
	k3_udma_glue_disable_rx_chn(rx_chn);

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * The remote RX channel is under control of a remote CPU core, so
	 * Linux can only request it and manipulate it through its dedicated
	 * RX flows.
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
	else
		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);

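/*
 * Hedged usage sketch (hypothetical client driver; sizes and selections
 * are illustrative): a local, single-flow RX channel is requested with a
 * default flow configuration; hooking up the flow rings/IRQ and enabling
 * the channel follow afterwards (not shown here):
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
 *	struct k3_udma_glue_rx_channel *rx_chn;
 *
 *	flow_cfg.rx_cfg.size = 256;
 *	flow_cfg.rxfdq_cfg.size = 256;
 *	flow_cfg.ring_rxq_id = K3_RINGACC_RING_ID_ANY;
 *	flow_cfg.ring_rxfdq0_id = K3_RINGACC_RING_ID_ANY;
 *	flow_cfg.rx_error_handling = false;
 *	flow_cfg.src_tag_lo_sel = 0;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.flow_id_base = -1;
 *	cfg.flow_id_num = 1;
 *	cfg.def_flow_cfg = &flow_cfg;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 */
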
936d7024191SGrygorii Strashko void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
937d7024191SGrygorii Strashko {
938d7024191SGrygorii Strashko 	int i;
939d7024191SGrygorii Strashko 
940d7024191SGrygorii Strashko 	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
941d7024191SGrygorii Strashko 		return;
942d7024191SGrygorii Strashko 
943d7024191SGrygorii Strashko 	if (rx_chn->psil_paired) {
944d7024191SGrygorii Strashko 		xudma_navss_psil_unpair(rx_chn->common.udmax,
945d7024191SGrygorii Strashko 					rx_chn->common.src_thread,
946d7024191SGrygorii Strashko 					rx_chn->common.dst_thread);
947d7024191SGrygorii Strashko 		rx_chn->psil_paired = false;
948d7024191SGrygorii Strashko 	}
949d7024191SGrygorii Strashko 
950d7024191SGrygorii Strashko 	for (i = 0; i < rx_chn->flow_num; i++)
951d7024191SGrygorii Strashko 		k3_udma_glue_release_rx_flow(rx_chn, i);
952d7024191SGrygorii Strashko 
953d7024191SGrygorii Strashko 	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
954d7024191SGrygorii Strashko 		xudma_free_gp_rflow_range(rx_chn->common.udmax,
955d7024191SGrygorii Strashko 					  rx_chn->flow_id_base,
956d7024191SGrygorii Strashko 					  rx_chn->flow_num);
957d7024191SGrygorii Strashko 
958d7024191SGrygorii Strashko 	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
959d7024191SGrygorii Strashko 		xudma_rchan_put(rx_chn->common.udmax,
960d7024191SGrygorii Strashko 				rx_chn->udma_rchanx);
961d7024191SGrygorii Strashko }
962d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
963d7024191SGrygorii Strashko 
964d7024191SGrygorii Strashko int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
965d7024191SGrygorii Strashko 			      u32 flow_idx,
966d7024191SGrygorii Strashko 			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
967d7024191SGrygorii Strashko {
968d7024191SGrygorii Strashko 	if (flow_idx >= rx_chn->flow_num)
969d7024191SGrygorii Strashko 		return -EINVAL;
970d7024191SGrygorii Strashko 
971d7024191SGrygorii Strashko 	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
972d7024191SGrygorii Strashko }
973d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
974d7024191SGrygorii Strashko 
975d7024191SGrygorii Strashko u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
976d7024191SGrygorii Strashko 				    u32 flow_idx)
977d7024191SGrygorii Strashko {
978d7024191SGrygorii Strashko 	struct k3_udma_glue_rx_flow *flow;
979d7024191SGrygorii Strashko 
980d7024191SGrygorii Strashko 	if (flow_idx >= rx_chn->flow_num)
981d7024191SGrygorii Strashko 		return -EINVAL;
982d7024191SGrygorii Strashko 
983d7024191SGrygorii Strashko 	flow = &rx_chn->flows[flow_idx];
984d7024191SGrygorii Strashko 
985d7024191SGrygorii Strashko 	return k3_ringacc_get_ring_id(flow->ringrxfdq);
986d7024191SGrygorii Strashko }
987d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
988d7024191SGrygorii Strashko 
989d7024191SGrygorii Strashko u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
990d7024191SGrygorii Strashko {
991d7024191SGrygorii Strashko 	return rx_chn->flow_id_base;
992d7024191SGrygorii Strashko }
993d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
994d7024191SGrygorii Strashko 
995d7024191SGrygorii Strashko int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
996d7024191SGrygorii Strashko 				u32 flow_idx)
997d7024191SGrygorii Strashko {
998d7024191SGrygorii Strashko 	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
999d7024191SGrygorii Strashko 	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
1000d7024191SGrygorii Strashko 	struct device *dev = rx_chn->common.dev;
1001d7024191SGrygorii Strashko 	struct ti_sci_msg_rm_udmap_flow_cfg req;
1002d7024191SGrygorii Strashko 	int rx_ring_id;
1003d7024191SGrygorii Strashko 	int rx_ringfdq_id;
1004d7024191SGrygorii Strashko 	int ret = 0;
1005d7024191SGrygorii Strashko 
1006d7024191SGrygorii Strashko 	if (!rx_chn->remote)
1007d7024191SGrygorii Strashko 		return -EINVAL;
1008d7024191SGrygorii Strashko 
1009d7024191SGrygorii Strashko 	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
1010d7024191SGrygorii Strashko 	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
1011d7024191SGrygorii Strashko 
1012d7024191SGrygorii Strashko 	memset(&req, 0, sizeof(req));
1013d7024191SGrygorii Strashko 
1014d7024191SGrygorii Strashko 	req.valid_params =
1015d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1016d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1017d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1018d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1019d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1020d7024191SGrygorii Strashko 	req.nav_id = tisci_rm->tisci_dev_id;
1021d7024191SGrygorii Strashko 	req.flow_index = flow->udma_rflow_id;
1022d7024191SGrygorii Strashko 	req.rx_dest_qnum = rx_ring_id;
1023d7024191SGrygorii Strashko 	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
1024d7024191SGrygorii Strashko 	req.rx_fdq1_qnum = rx_ringfdq_id;
1025d7024191SGrygorii Strashko 	req.rx_fdq2_qnum = rx_ringfdq_id;
1026d7024191SGrygorii Strashko 	req.rx_fdq3_qnum = rx_ringfdq_id;
1027d7024191SGrygorii Strashko 
1028d7024191SGrygorii Strashko 	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
1029d7024191SGrygorii Strashko 	if (ret) {
1030d7024191SGrygorii Strashko 		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
1031d7024191SGrygorii Strashko 			ret);
1032d7024191SGrygorii Strashko 	}
1033d7024191SGrygorii Strashko 
1034d7024191SGrygorii Strashko 	return ret;
1035d7024191SGrygorii Strashko }
1036d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
1037d7024191SGrygorii Strashko 
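/**
 * k3_udma_glue_rx_flow_disable - Disable an RX flow of a remote channel
 * @rx_chn: RX channel handle
 * @flow_idx: index of the flow within the channel's flow range
 *
 * Points the flow's destination and free descriptor queues at
 * TI_SCI_RESOURCE_NULL via TISCI so the flow stops delivering packets.
 * Only valid for remote channels (returns -EINVAL otherwise).
 */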
1038d7024191SGrygorii Strashko int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
1039d7024191SGrygorii Strashko 				 u32 flow_idx)
1040d7024191SGrygorii Strashko {
1041d7024191SGrygorii Strashko 	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
1042d7024191SGrygorii Strashko 	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
1043d7024191SGrygorii Strashko 	struct device *dev = rx_chn->common.dev;
1044d7024191SGrygorii Strashko 	struct ti_sci_msg_rm_udmap_flow_cfg req;
1045d7024191SGrygorii Strashko 	int ret = 0;
1046d7024191SGrygorii Strashko 
1047d7024191SGrygorii Strashko 	if (!rx_chn->remote)
1048d7024191SGrygorii Strashko 		return -EINVAL;
1049d7024191SGrygorii Strashko 
1050d7024191SGrygorii Strashko 	memset(&req, 0, sizeof(req));
1051d7024191SGrygorii Strashko 	req.valid_params =
1052d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1053d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1054d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1055d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1056d7024191SGrygorii Strashko 			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1057d7024191SGrygorii Strashko 	req.nav_id = tisci_rm->tisci_dev_id;
1058d7024191SGrygorii Strashko 	req.flow_index = flow->udma_rflow_id;
1059d7024191SGrygorii Strashko 	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
1060d7024191SGrygorii Strashko 	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
1061d7024191SGrygorii Strashko 	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
1062d7024191SGrygorii Strashko 	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
1063d7024191SGrygorii Strashko 	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
1064d7024191SGrygorii Strashko 
1065d7024191SGrygorii Strashko 	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
1066d7024191SGrygorii Strashko 	if (ret) {
1067d7024191SGrygorii Strashko 		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
1068d7024191SGrygorii Strashko 			ret);
1069d7024191SGrygorii Strashko 	}
1070d7024191SGrygorii Strashko 
1071d7024191SGrygorii Strashko 	return ret;
1072d7024191SGrygorii Strashko }
1073d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
1074d7024191SGrygorii Strashko 
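/**
 * k3_udma_glue_enable_rx_chn - Enable an RX channel
 * @rx_chn: RX channel handle
 *
 * Sets the channel and peer real-time enable bits. All flows of the channel
 * must have been configured first (flows_ready == flow_num) and the channel
 * must not be a remote one, otherwise -EINVAL is returned.
 */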
1075d7024191SGrygorii Strashko int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
1076d7024191SGrygorii Strashko {
1077d7024191SGrygorii Strashko 	u32 rxrt_ctl;
1078d7024191SGrygorii Strashko 
1079d7024191SGrygorii Strashko 	if (rx_chn->remote)
1080d7024191SGrygorii Strashko 		return -EINVAL;
1081d7024191SGrygorii Strashko 
1082d7024191SGrygorii Strashko 	if (rx_chn->flows_ready < rx_chn->flow_num)
1083d7024191SGrygorii Strashko 		return -EINVAL;
1084d7024191SGrygorii Strashko 
1085d7024191SGrygorii Strashko 	rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
1086*bc7e5523SPeter Ujfalusi 				      UDMA_CHAN_RT_CTL_REG);
1087d7024191SGrygorii Strashko 	rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
1088*bc7e5523SPeter Ujfalusi 	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
1089d7024191SGrygorii Strashko 			    rxrt_ctl);
1090d7024191SGrygorii Strashko 
1091*bc7e5523SPeter Ujfalusi 	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
1092d7024191SGrygorii Strashko 			    UDMA_PEER_RT_EN_ENABLE);
1093d7024191SGrygorii Strashko 
1094d7024191SGrygorii Strashko 	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
1095d7024191SGrygorii Strashko 	return 0;
1096d7024191SGrygorii Strashko }
1097d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
1098d7024191SGrygorii Strashko 
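/**
 * k3_udma_glue_disable_rx_chn - Disable an RX channel
 * @rx_chn: RX channel handle
 *
 * Clears the peer real-time enable and channel real-time control registers;
 * counterpart of k3_udma_glue_enable_rx_chn().
 */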
1099d7024191SGrygorii Strashko void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
1100d7024191SGrygorii Strashko {
1101d7024191SGrygorii Strashko 	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
1102d7024191SGrygorii Strashko 
1103d7024191SGrygorii Strashko 	xudma_rchanrt_write(rx_chn->udma_rchanx,
1104*bc7e5523SPeter Ujfalusi 			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
1105*bc7e5523SPeter Ujfalusi 	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);
1106d7024191SGrygorii Strashko 
1107d7024191SGrygorii Strashko 	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
1108d7024191SGrygorii Strashko }
1109d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
1110d7024191SGrygorii Strashko 
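/**
 * k3_udma_glue_tdown_rx_chn - Tear down an RX channel
 * @rx_chn: RX channel handle
 * @sync: when true, poll until the channel enable bit clears or
 *	  K3_UDMAX_TDOWN_TIMEOUT_US expires
 *
 * Requests a teardown through the peer real-time enable register. The call
 * is a no-op for remote channels.
 */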
1111d7024191SGrygorii Strashko void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1112d7024191SGrygorii Strashko 			       bool sync)
1113d7024191SGrygorii Strashko {
1114d7024191SGrygorii Strashko 	int i = 0;
1115d7024191SGrygorii Strashko 	u32 val;
1116d7024191SGrygorii Strashko 
1117d7024191SGrygorii Strashko 	if (rx_chn->remote)
1118d7024191SGrygorii Strashko 		return;
1119d7024191SGrygorii Strashko 
1120d7024191SGrygorii Strashko 	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
1121d7024191SGrygorii Strashko 
1122*bc7e5523SPeter Ujfalusi 	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
1123d7024191SGrygorii Strashko 			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
1124d7024191SGrygorii Strashko 
1125*bc7e5523SPeter Ujfalusi 	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);
1126d7024191SGrygorii Strashko 
1127d7024191SGrygorii Strashko 	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
1128d7024191SGrygorii Strashko 		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
1129*bc7e5523SPeter Ujfalusi 					 UDMA_CHAN_RT_CTL_REG);
1130d7024191SGrygorii Strashko 		udelay(1);
1131d7024191SGrygorii Strashko 		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
1132d7024191SGrygorii Strashko 			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
1133d7024191SGrygorii Strashko 			break;
1134d7024191SGrygorii Strashko 		}
1135d7024191SGrygorii Strashko 		i++;
1136d7024191SGrygorii Strashko 	}
1137d7024191SGrygorii Strashko 
1138d7024191SGrygorii Strashko 	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
1139*bc7e5523SPeter Ujfalusi 				 UDMA_CHAN_RT_PEER_RT_EN_REG);
1140d7024191SGrygorii Strashko 	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
1141d7024191SGrygorii Strashko 		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
1142d7024191SGrygorii Strashko 	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
1143d7024191SGrygorii Strashko }
1144d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
1145d7024191SGrygorii Strashko 
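/**
 * k3_udma_glue_reset_rx_chn - Clean up the rings of one RX flow
 * @rx_chn: RX channel handle
 * @flow_num: index of the flow within the channel's flow range
 * @data: opaque pointer passed to @cleanup
 * @cleanup: callback invoked for each descriptor popped from the RX FDQ
 * @skip_fdq: skip the RX FDQ clean-up, e.g. when one FDQ is shared by
 *	      several flows and has been handled already
 */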
1146d7024191SGrygorii Strashko void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1147d7024191SGrygorii Strashko 		u32 flow_num, void *data,
1148d7024191SGrygorii Strashko 		void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
1149d7024191SGrygorii Strashko {
1150d7024191SGrygorii Strashko 	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1151d7024191SGrygorii Strashko 	struct device *dev = rx_chn->common.dev;
1152d7024191SGrygorii Strashko 	dma_addr_t desc_dma;
1153d7024191SGrygorii Strashko 	int occ_rx, i, ret;
1154d7024191SGrygorii Strashko 
1155d7024191SGrygorii Strashko 	/* reset RXCQ as it is not an input for udma - expected to be empty */
1156d7024191SGrygorii Strashko 	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
1157d7024191SGrygorii Strashko 	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
1158d7024191SGrygorii Strashko 	if (flow->ringrx)
1159d7024191SGrygorii Strashko 		k3_ringacc_ring_reset(flow->ringrx);
1160d7024191SGrygorii Strashko 
1161d7024191SGrygorii Strashko 	/* Skip the RX FDQ clean-up in case one FDQ is shared by the set of flows */
1162d7024191SGrygorii Strashko 	if (skip_fdq)
1163d7024191SGrygorii Strashko 		return;
1164d7024191SGrygorii Strashko 
1165d7024191SGrygorii Strashko 	/*
1166d7024191SGrygorii Strashko 	 * The RX FDQ reset needs to be done in a special way as it is the
1167d7024191SGrygorii Strashko 	 * input for udma and its state is cached by udma, so:
1168d7024191SGrygorii Strashko 	 * 1) save the RX FDQ occupancy
1169d7024191SGrygorii Strashko 	 * 2) clean up the RX FDQ and call the .cleanup() callback for each desc
1170d7024191SGrygorii Strashko 	 * 3) reset the RX FDQ in a special way
1171d7024191SGrygorii Strashko 	 */
1172d7024191SGrygorii Strashko 	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
1173d7024191SGrygorii Strashko 	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
1174d7024191SGrygorii Strashko 
1175d7024191SGrygorii Strashko 	for (i = 0; i < occ_rx; i++) {
1176d7024191SGrygorii Strashko 		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
1177d7024191SGrygorii Strashko 		if (ret) {
1178d7024191SGrygorii Strashko 			dev_err(dev, "RX reset pop %d\n", ret);
1179d7024191SGrygorii Strashko 			break;
1180d7024191SGrygorii Strashko 		}
1181d7024191SGrygorii Strashko 		cleanup(data, desc_dma);
1182d7024191SGrygorii Strashko 	}
1183d7024191SGrygorii Strashko 
1184d7024191SGrygorii Strashko 	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
1185d7024191SGrygorii Strashko }
1186d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
1187d7024191SGrygorii Strashko 
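/**
 * k3_udma_glue_push_rx_chn - Push a free descriptor to a flow's RX FDQ ring
 * @rx_chn: RX channel handle
 * @flow_num: index of the flow within the channel's flow range
 * @desc_rx: host descriptor (not used by this helper, kept for API symmetry)
 * @desc_dma: DMA address of the descriptor to push
 */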
1188d7024191SGrygorii Strashko int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1189d7024191SGrygorii Strashko 			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
1190d7024191SGrygorii Strashko 			     dma_addr_t desc_dma)
1191d7024191SGrygorii Strashko {
1192d7024191SGrygorii Strashko 	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1193d7024191SGrygorii Strashko 
1194d7024191SGrygorii Strashko 	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
1195d7024191SGrygorii Strashko }
1196d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
1197d7024191SGrygorii Strashko 
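/**
 * k3_udma_glue_pop_rx_chn - Pop a completed descriptor from a flow's RX ring
 * @rx_chn: RX channel handle
 * @flow_num: index of the flow within the channel's flow range
 * @desc_dma: output, DMA address of the popped descriptor
 *
 * Returns 0 on success, otherwise the error code from k3_ringacc_ring_pop().
 */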
1198d7024191SGrygorii Strashko int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1199d7024191SGrygorii Strashko 			    u32 flow_num, dma_addr_t *desc_dma)
1200d7024191SGrygorii Strashko {
1201d7024191SGrygorii Strashko 	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1202d7024191SGrygorii Strashko 
1203d7024191SGrygorii Strashko 	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
1204d7024191SGrygorii Strashko }
1205d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
1206d7024191SGrygorii Strashko 
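/**
 * k3_udma_glue_rx_get_irq - Get the interrupt of a flow's RX ring
 * @rx_chn: RX channel handle
 * @flow_num: index of the flow within the channel's flow range
 *
 * Returns the Linux virq of the flow's RX ring (also cached in the flow),
 * as reported by k3_ringacc_get_ring_irq_num().
 */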
1207d7024191SGrygorii Strashko int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
1208d7024191SGrygorii Strashko 			    u32 flow_num)
1209d7024191SGrygorii Strashko {
1210d7024191SGrygorii Strashko 	struct k3_udma_glue_rx_flow *flow;
1211d7024191SGrygorii Strashko 
1212d7024191SGrygorii Strashko 	flow = &rx_chn->flows[flow_num];
1213d7024191SGrygorii Strashko 
1214d7024191SGrygorii Strashko 	flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
1215d7024191SGrygorii Strashko 
1216d7024191SGrygorii Strashko 	return flow->virq;
1217d7024191SGrygorii Strashko }
1218d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
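/*
 * Usage note (illustrative sketch only, not part of this driver): a client
 * driver typically services a flow from the ring interrupt returned by
 * k3_udma_glue_rx_get_irq(), popping completed descriptors and pushing
 * fresh free descriptors back to the FDQ, e.g.:
 *
 *	dma_addr_t desc_dma;
 *
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, flow_idx, &desc_dma)) {
 *		my_process_rx_desc(priv, desc_dma);
 *		my_refill_rx_fdq(priv, flow_idx);
 *	}
 *
 * my_process_rx_desc() and my_refill_rx_fdq() above are hypothetical client
 * helpers, not part of this API. The loop ends once k3_udma_glue_pop_rx_chn()
 * returns an error, i.e. when the flow's RX ring has no more completed
 * descriptors.
 */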
1219