1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * K3 NAVSS DMA glue interface
4   *
5   * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
6   *
7   */
8  
9  #include <linux/module.h>
10  #include <linux/atomic.h>
11  #include <linux/delay.h>
12  #include <linux/dma-mapping.h>
13  #include <linux/io.h>
14  #include <linux/init.h>
15  #include <linux/of.h>
16  #include <linux/platform_device.h>
17  #include <linux/soc/ti/k3-ringacc.h>
18  #include <linux/dma/ti-cppi5.h>
19  #include <linux/dma/k3-udma-glue.h>
20  
21  #include "k3-udma.h"
22  #include "k3-psil-priv.h"
23  
24  struct k3_udma_glue_common {
25  	struct device *dev;
26  	struct device chan_dev;
27  	struct udma_dev *udmax;
28  	const struct udma_tisci_rm *tisci_rm;
29  	struct k3_ringacc *ringacc;
30  	u32 src_thread;
31  	u32 dst_thread;
32  
33  	u32  hdesc_size;
34  	bool epib;
35  	u32  psdata_size;
36  	u32  swdata_size;
37  	u32  atype_asel;
38  	struct psil_endpoint_config *ep_config;
39  };
40  
41  struct k3_udma_glue_tx_channel {
42  	struct k3_udma_glue_common common;
43  
44  	struct udma_tchan *udma_tchanx;
45  	int udma_tchan_id;
46  
47  	struct k3_ring *ringtx;
48  	struct k3_ring *ringtxcq;
49  
50  	bool psil_paired;
51  
52  	int virq;
53  
54  	atomic_t free_pkts;
55  	bool tx_pause_on_err;
56  	bool tx_filt_einfo;
57  	bool tx_filt_pswords;
58  	bool tx_supr_tdpkt;
59  
60  	int udma_tflow_id;
61  };
62  
63  struct k3_udma_glue_rx_flow {
64  	struct udma_rflow *udma_rflow;
65  	int udma_rflow_id;
66  	struct k3_ring *ringrx;
67  	struct k3_ring *ringrxfdq;
68  
69  	int virq;
70  };
71  
72  struct k3_udma_glue_rx_channel {
73  	struct k3_udma_glue_common common;
74  
75  	struct udma_rchan *udma_rchanx;
76  	int udma_rchan_id;
77  	bool remote;
78  
79  	bool psil_paired;
80  
81  	u32  swdata_size;
82  	int  flow_id_base;
83  
84  	struct k3_udma_glue_rx_flow *flows;
85  	u32 flow_num;
86  	u32 flows_ready;
87  };
88  
89  static void k3_udma_chan_dev_release(struct device *dev)
90  {
91  	/* The struct containing the device is devm managed */
92  }
93  
94  static struct class k3_udma_glue_devclass = {
95  	.name		= "k3_udma_glue_chan",
96  	.dev_release	= k3_udma_chan_dev_release,
97  };
98  
99  #define K3_UDMAX_TDOWN_TIMEOUT_US 1000
100  
101  static int of_k3_udma_glue_parse(struct device_node *udmax_np,
102  				 struct k3_udma_glue_common *common)
103  {
104  	common->udmax = of_xudma_dev_get(udmax_np, NULL);
105  	if (IS_ERR(common->udmax))
106  		return PTR_ERR(common->udmax);
107  
108  	common->ringacc = xudma_get_ringacc(common->udmax);
109  	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
110  
111  	return 0;
112  }
113  
114  static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
115  		const char *name, struct k3_udma_glue_common *common,
116  		bool tx_chn)
117  {
118  	struct of_phandle_args dma_spec;
119  	u32 thread_id;
120  	int ret = 0;
121  	int index;
122  
123  	if (unlikely(!name))
124  		return -EINVAL;
125  
126  	index = of_property_match_string(chn_np, "dma-names", name);
127  	if (index < 0)
128  		return index;
129  
130  	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
131  				       &dma_spec))
132  		return -ENOENT;
133  
134  	ret = of_k3_udma_glue_parse(dma_spec.np, common);
135  	if (ret)
136  		goto out_put_spec;
137  
138  	thread_id = dma_spec.args[0];
139  	if (dma_spec.args_count == 2) {
140  		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
141  			dev_err(common->dev, "Invalid channel atype: %u\n",
142  				dma_spec.args[1]);
143  			ret = -EINVAL;
144  			goto out_put_spec;
145  		}
146  		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
147  			dev_err(common->dev, "Invalid channel asel: %u\n",
148  				dma_spec.args[1]);
149  			ret = -EINVAL;
150  			goto out_put_spec;
151  		}
152  
153  		common->atype_asel = dma_spec.args[1];
154  	}
155  
156  	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
157  		ret = -EINVAL;
158  		goto out_put_spec;
159  	}
160  
161  	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
162  		ret = -EINVAL;
163  		goto out_put_spec;
164  	}
165  
166  	/* get psil endpoint config */
167  	common->ep_config = psil_get_ep_config(thread_id);
168  	if (IS_ERR(common->ep_config)) {
169  		dev_err(common->dev,
170  			"No configuration for psi-l thread 0x%04x\n",
171  			thread_id);
172  		ret = PTR_ERR(common->ep_config);
173  		goto out_put_spec;
174  	}
175  
176  	common->epib = common->ep_config->needs_epib;
177  	common->psdata_size = common->ep_config->psd_size;
178  
179  	if (tx_chn)
180  		common->dst_thread = thread_id;
181  	else
182  		common->src_thread = thread_id;
183  
184  out_put_spec:
185  	of_node_put(dma_spec.np);
186  	return ret;
187  }
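
/*
 * For reference: the parser above expects the client node to use the standard
 * "dmas"/"dma-names" properties, where the first dma-cell is the PSI-L thread
 * ID (TX threads have the K3_PSIL_DST_THREAD_ID_OFFSET bit set) and the
 * optional second cell carries atype (UDMA) or asel (PKTDMA). A hypothetical
 * client node could look roughly like (phandle and values illustrative only):
 *
 *	dmas = <&main_udmap 0xc500 15>, <&main_udmap 0x4500 15>;
 *	dma-names = "tx", "rx";
 */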
188  
189  static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
190  {
191  	struct device *dev = tx_chn->common.dev;
192  
193  	dev_dbg(dev, "dump_tx_chn:\n"
194  		"udma_tchan_id: %d\n"
195  		"src_thread: %08x\n"
196  		"dst_thread: %08x\n",
197  		tx_chn->udma_tchan_id,
198  		tx_chn->common.src_thread,
199  		tx_chn->common.dst_thread);
200  }
201  
202  static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
203  					char *mark)
204  {
205  	struct device *dev = chn->common.dev;
206  
207  	dev_dbg(dev, "=== dump ===> %s\n", mark);
208  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
209  		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
210  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
211  		xudma_tchanrt_read(chn->udma_tchanx,
212  				   UDMA_CHAN_RT_PEER_RT_EN_REG));
213  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
214  		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
215  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
216  		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
217  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
218  		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
219  }
220  
221  static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
222  {
223  	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
224  	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
225  
226  	memset(&req, 0, sizeof(req));
227  
228  	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
229  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
230  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
231  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
232  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
233  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
234  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
235  			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
236  	req.nav_id = tisci_rm->tisci_dev_id;
237  	req.index = tx_chn->udma_tchan_id;
238  	if (tx_chn->tx_pause_on_err)
239  		req.tx_pause_on_err = 1;
240  	if (tx_chn->tx_filt_einfo)
241  		req.tx_filt_einfo = 1;
242  	if (tx_chn->tx_filt_pswords)
243  		req.tx_filt_pswords = 1;
244  	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
245  	if (tx_chn->tx_supr_tdpkt)
246  		req.tx_supr_tdpkt = 1;
247  	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
248  	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
249  	req.tx_atype = tx_chn->common.atype_asel;
250  
251  	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
252  }
253  
254  struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
255  		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
256  {
257  	struct k3_udma_glue_tx_channel *tx_chn;
258  	int ret;
259  
260  	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
261  	if (!tx_chn)
262  		return ERR_PTR(-ENOMEM);
263  
264  	tx_chn->common.dev = dev;
265  	tx_chn->common.swdata_size = cfg->swdata_size;
266  	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
267  	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
268  	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
269  	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
270  
271  	/* parse of udmap channel */
272  	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
273  					&tx_chn->common, true);
274  	if (ret)
275  		goto err;
276  
277  	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
278  						tx_chn->common.psdata_size,
279  						tx_chn->common.swdata_size);
280  
281  	if (xudma_is_pktdma(tx_chn->common.udmax))
282  		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
283  	else
284  		tx_chn->udma_tchan_id = -1;
285  
286  	/* request and cfg UDMAP TX channel */
287  	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
288  					      tx_chn->udma_tchan_id);
289  	if (IS_ERR(tx_chn->udma_tchanx)) {
290  		ret = PTR_ERR(tx_chn->udma_tchanx);
291  		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
292  		goto err;
293  	}
294  	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
295  
296  	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
297  	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
298  	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
299  		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
300  	ret = device_register(&tx_chn->common.chan_dev);
301  	if (ret) {
302  		dev_err(dev, "Channel Device registration failed %d\n", ret);
303  		put_device(&tx_chn->common.chan_dev);
304  		tx_chn->common.chan_dev.parent = NULL;
305  		goto err;
306  	}
307  
308  	if (xudma_is_pktdma(tx_chn->common.udmax)) {
309  		/* prepare the channel device as coherent */
310  		tx_chn->common.chan_dev.dma_coherent = true;
311  		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
312  					     DMA_BIT_MASK(48));
313  	}
314  
315  	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
316  
317  	if (xudma_is_pktdma(tx_chn->common.udmax))
318  		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
319  	else
320  		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;
321  
322  	/* request and cfg rings */
323  	ret =  k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
324  					     tx_chn->udma_tflow_id, -1,
325  					     &tx_chn->ringtx,
326  					     &tx_chn->ringtxcq);
327  	if (ret) {
328  		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
329  		goto err;
330  	}
331  
332  	/* Set the dma_dev for the rings to be configured */
333  	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
334  	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;
335  
336  	/* Set the ASEL value for DMA rings of PKTDMA */
337  	if (xudma_is_pktdma(tx_chn->common.udmax)) {
338  		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
339  		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
340  	}
341  
342  	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
343  	if (ret) {
344  		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
345  		goto err;
346  	}
347  
348  	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
349  	if (ret) {
350  		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
351  		goto err;
352  	}
353  
354  	/* request and cfg psi-l */
355  	tx_chn->common.src_thread =
356  			xudma_dev_get_psil_base(tx_chn->common.udmax) +
357  			tx_chn->udma_tchan_id;
358  
359  	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
360  	if (ret) {
361  		dev_err(dev, "Failed to cfg tchan %d\n", ret);
362  		goto err;
363  	}
364  
365  	k3_udma_glue_dump_tx_chn(tx_chn);
366  
367  	return tx_chn;
368  
369  err:
370  	k3_udma_glue_release_tx_chn(tx_chn);
371  	return ERR_PTR(ret);
372  }
373  EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
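
/*
 * A minimal request sketch (illustrative only, not taken from a real client
 * driver): the "tx" name and the ring sizes are assumptions, and the
 * element-size/mode constants are the usual ones from
 * <linux/soc/ti/k3-ringacc.h>. Error handling is trimmed.
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.tx_cfg.size = 128;
 *	cfg.tx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
 *	cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *	cfg.txcq_cfg = cfg.tx_cfg;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 */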
374  
375  void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
376  {
377  	if (tx_chn->psil_paired) {
378  		xudma_navss_psil_unpair(tx_chn->common.udmax,
379  					tx_chn->common.src_thread,
380  					tx_chn->common.dst_thread);
381  		tx_chn->psil_paired = false;
382  	}
383  
384  	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
385  		xudma_tchan_put(tx_chn->common.udmax,
386  				tx_chn->udma_tchanx);
387  
388  	if (tx_chn->ringtxcq)
389  		k3_ringacc_ring_free(tx_chn->ringtxcq);
390  
391  	if (tx_chn->ringtx)
392  		k3_ringacc_ring_free(tx_chn->ringtx);
393  
394  	if (tx_chn->common.chan_dev.parent) {
395  		device_unregister(&tx_chn->common.chan_dev);
396  		tx_chn->common.chan_dev.parent = NULL;
397  	}
398  }
399  EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
400  
401  int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
402  			     struct cppi5_host_desc_t *desc_tx,
403  			     dma_addr_t desc_dma)
404  {
405  	u32 ringtxcq_id;
406  
407  	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
408  		return -ENOMEM;
409  
410  	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
411  	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);
412  
413  	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
414  }
415  EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);
416  
417  int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
418  			    dma_addr_t *desc_dma)
419  {
420  	int ret;
421  
422  	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
423  	if (!ret)
424  		atomic_inc(&tx_chn->free_pkts);
425  
426  	return ret;
427  }
428  EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
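
/*
 * The usual TX submit/complete pattern built on the two calls above (a rough
 * sketch; descriptor lookup, unmapping and freeing are the caller's job):
 *
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
 *	...
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma)) {
 *		desc_tx = my_desc_from_dma(pool, desc_dma);
 *		...unmap buffers, free the skb, return desc_tx to the pool...
 *	}
 *
 * my_desc_from_dma() is a hypothetical helper that maps the popped DMA
 * address back to the driver's descriptor bookkeeping; the pop loop normally
 * runs from the TXCQ interrupt handler or a NAPI poll routine.
 */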
429  
430  int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
431  {
432  	int ret;
433  
434  	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
435  				    tx_chn->common.src_thread,
436  				    tx_chn->common.dst_thread);
437  	if (ret) {
438  		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
439  		return ret;
440  	}
441  
442  	tx_chn->psil_paired = true;
443  
444  	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
445  			    UDMA_PEER_RT_EN_ENABLE);
446  
447  	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
448  			    UDMA_CHAN_RT_CTL_EN);
449  
450  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
451  	return 0;
452  }
453  EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);
454  
455  void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
456  {
457  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");
458  
459  	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);
460  
461  	xudma_tchanrt_write(tx_chn->udma_tchanx,
462  			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
463  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
464  
465  	if (tx_chn->psil_paired) {
466  		xudma_navss_psil_unpair(tx_chn->common.udmax,
467  					tx_chn->common.src_thread,
468  					tx_chn->common.dst_thread);
469  		tx_chn->psil_paired = false;
470  	}
471  }
472  EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
473  
474  void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
475  			       bool sync)
476  {
477  	int i = 0;
478  	u32 val;
479  
480  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");
481  
482  	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
483  			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);
484  
485  	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);
486  
487  	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
488  		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
489  					 UDMA_CHAN_RT_CTL_REG);
490  		udelay(1);
491  		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
492  			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
493  			break;
494  		}
495  		i++;
496  	}
497  
498  	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
499  				 UDMA_CHAN_RT_PEER_RT_EN_REG);
500  	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
501  		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
502  	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
503  }
504  EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
505  
506  void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
507  			       void *data,
508  			       void (*cleanup)(void *data, dma_addr_t desc_dma))
509  {
510  	struct device *dev = tx_chn->common.dev;
511  	dma_addr_t desc_dma;
512  	int occ_tx, i, ret;
513  
514  	/*
515  	 * The TXQ reset needs special handling as the TXQ is input for UDMA
516  	 * and its state is cached by UDMA, so:
517  	 * 1) save TXQ occ
518  	 * 2) clean up TXQ and call callback .cleanup() for each desc
519  	 * 3) reset TXQ in a special way
520  	 */
521  	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
522  	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);
523  
524  	for (i = 0; i < occ_tx; i++) {
525  		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
526  		if (ret) {
527  			if (ret != -ENODATA)
528  				dev_err(dev, "TX reset pop %d\n", ret);
529  			break;
530  		}
531  		cleanup(data, desc_dma);
532  	}
533  
534  	/* reset TXCQ as it is not input for udma - expected to be empty */
535  	k3_ringacc_ring_reset(tx_chn->ringtxcq);
536  	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
537  }
538  EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
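
/*
 * A typical TX stop sequence built from the calls above (a sketch; the exact
 * ordering and how teardown completion is detected differ between client
 * drivers). my_tx_cleanup() is a hypothetical driver hook matching the
 * cleanup callback prototype, used to unmap and free whatever sits behind
 * each popped descriptor:
 *
 *	k3_udma_glue_tdown_tx_chn(tx_chn, false);
 *	...wait for the teardown to complete...
 *	k3_udma_glue_reset_tx_chn(tx_chn, priv, my_tx_cleanup);
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 */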
539  
540  u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
541  {
542  	return tx_chn->common.hdesc_size;
543  }
544  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);
545  
546  u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
547  {
548  	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
549  }
550  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
551  
552  int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
553  {
554  	if (xudma_is_pktdma(tx_chn->common.udmax)) {
555  		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
556  							  tx_chn->udma_tflow_id);
557  	} else {
558  		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
559  	}
560  
561  	if (!tx_chn->virq)
562  		return -ENXIO;
563  
564  	return tx_chn->virq;
565  }
566  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
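
/*
 * The value returned above is a Linux virq, so a client would typically wire
 * it up with request_irq()/devm_request_irq() (sketch; handler name, flags
 * and cookie are illustrative):
 *
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	if (irq < 0)
 *		return irq;
 *	ret = devm_request_irq(dev, irq, my_tx_irq_handler, IRQF_TRIGGER_HIGH,
 *			       dev_name(dev), priv);
 */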
567  
568  struct device *
569  	k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
570  {
571  	if (xudma_is_pktdma(tx_chn->common.udmax) &&
572  	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
573  		return &tx_chn->common.chan_dev;
574  
575  	return xudma_get_device(tx_chn->common.udmax);
576  }
577  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);
578  
579  void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
580  				       dma_addr_t *addr)
581  {
582  	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
583  	    !tx_chn->common.atype_asel)
584  		return;
585  
586  	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
587  }
588  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);
589  
590  void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
591  				       dma_addr_t *addr)
592  {
593  	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
594  	    !tx_chn->common.atype_asel)
595  		return;
596  
597  	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
598  }
599  EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
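
/*
 * On PKTDMA the ASEL value lives in the upper bits (bit K3_ADDRESS_ASEL_SHIFT
 * and up) of every DMA address written into a CPPI5 descriptor; the two
 * helpers above add and strip it. A caller would typically convert buffer and
 * descriptor addresses around descriptor setup and completion, roughly:
 *
 *	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &buf_dma);
 *	cppi5_hdesc_attach_buf(desc_tx, buf_dma, len, buf_dma, len);
 *	...
 *	k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma);
 *	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn, &desc_dma);
 */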
600  
601  static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
602  {
603  	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
604  	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
605  	int ret;
606  
607  	memset(&req, 0, sizeof(req));
608  
609  	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
610  			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
611  			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
612  			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
613  
614  	req.nav_id = tisci_rm->tisci_dev_id;
615  	req.index = rx_chn->udma_rchan_id;
616  	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
617  	/*
618  	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
619  	 * and udmax impl, so just configure it to an invalid value.
620  	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
621  	 */
622  	req.rxcq_qnum = 0xFFFF;
623  	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
624  	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
625  		/* Default flow + extra ones */
626  		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
627  				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
628  		req.flowid_start = rx_chn->flow_id_base;
629  		req.flowid_cnt = rx_chn->flow_num;
630  	}
631  	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
632  	req.rx_atype = rx_chn->common.atype_asel;
633  
634  	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
635  	if (ret)
636  		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
637  			rx_chn->udma_rchan_id, ret);
638  
639  	return ret;
640  }
641  
642  static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
643  					 u32 flow_num)
644  {
645  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
646  
647  	if (IS_ERR_OR_NULL(flow->udma_rflow))
648  		return;
649  
650  	if (flow->ringrxfdq)
651  		k3_ringacc_ring_free(flow->ringrxfdq);
652  
653  	if (flow->ringrx)
654  		k3_ringacc_ring_free(flow->ringrx);
655  
656  	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
657  	flow->udma_rflow = NULL;
658  	rx_chn->flows_ready--;
659  }
660  
661  static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
662  				    u32 flow_idx,
663  				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
664  {
665  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
666  	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
667  	struct device *dev = rx_chn->common.dev;
668  	struct ti_sci_msg_rm_udmap_flow_cfg req;
669  	int rx_ring_id;
670  	int rx_ringfdq_id;
671  	int ret = 0;
672  
673  	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
674  					   flow->udma_rflow_id);
675  	if (IS_ERR(flow->udma_rflow)) {
676  		ret = PTR_ERR(flow->udma_rflow);
677  		dev_err(dev, "UDMAX rflow get err %d\n", ret);
678  		return ret;
679  	}
680  
681  	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
682  		ret = -ENODEV;
683  		goto err_rflow_put;
684  	}
685  
686  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
687  		rx_ringfdq_id = flow->udma_rflow_id +
688  				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
689  		rx_ring_id = 0;
690  	} else {
691  		rx_ring_id = flow_cfg->ring_rxq_id;
692  		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
693  	}
694  
695  	/* request and cfg rings */
696  	ret =  k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
697  					     rx_ringfdq_id, rx_ring_id,
698  					     &flow->ringrxfdq,
699  					     &flow->ringrx);
700  	if (ret) {
701  		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
702  		goto err_rflow_put;
703  	}
704  
705  	/* Set the dma_dev for the rings to be configured */
706  	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
707  	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;
708  
709  	/* Set the ASEL value for DMA rings of PKTDMA */
710  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
711  		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
712  		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
713  	}
714  
715  	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
716  	if (ret) {
717  		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
718  		goto err_ringrxfdq_free;
719  	}
720  
721  	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
722  	if (ret) {
723  		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
724  		goto err_ringrxfdq_free;
725  	}
726  
727  	if (rx_chn->remote) {
728  		rx_ring_id = TI_SCI_RESOURCE_NULL;
729  		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
730  	} else {
731  		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
732  		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
733  	}
734  
735  	memset(&req, 0, sizeof(req));
736  
737  	req.valid_params =
738  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
739  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
740  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
741  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
742  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
743  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
744  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
745  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
746  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
747  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
748  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
749  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
750  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
751  	req.nav_id = tisci_rm->tisci_dev_id;
752  	req.flow_index = flow->udma_rflow_id;
753  	if (rx_chn->common.epib)
754  		req.rx_einfo_present = 1;
755  	if (rx_chn->common.psdata_size)
756  		req.rx_psinfo_present = 1;
757  	if (flow_cfg->rx_error_handling)
758  		req.rx_error_handling = 1;
759  	req.rx_desc_type = 0;
760  	req.rx_dest_qnum = rx_ring_id;
761  	req.rx_src_tag_hi_sel = 0;
762  	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
763  	req.rx_dest_tag_hi_sel = 0;
764  	req.rx_dest_tag_lo_sel = 0;
765  	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
766  	req.rx_fdq1_qnum = rx_ringfdq_id;
767  	req.rx_fdq2_qnum = rx_ringfdq_id;
768  	req.rx_fdq3_qnum = rx_ringfdq_id;
769  
770  	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
771  	if (ret) {
772  		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
773  			ret);
774  		goto err_ringrxfdq_free;
775  	}
776  
777  	rx_chn->flows_ready++;
778  	dev_dbg(dev, "flow%d config done. ready:%d\n",
779  		flow->udma_rflow_id, rx_chn->flows_ready);
780  
781  	return 0;
782  
783  err_ringrxfdq_free:
784  	k3_ringacc_ring_free(flow->ringrxfdq);
785  	k3_ringacc_ring_free(flow->ringrx);
786  
787  err_rflow_put:
788  	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
789  	flow->udma_rflow = NULL;
790  
791  	return ret;
792  }
793  
794  static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
795  {
796  	struct device *dev = chn->common.dev;
797  
798  	dev_dbg(dev, "dump_rx_chn:\n"
799  		"udma_rchan_id: %d\n"
800  		"src_thread: %08x\n"
801  		"dst_thread: %08x\n"
802  		"epib: %d\n"
803  		"hdesc_size: %u\n"
804  		"psdata_size: %u\n"
805  		"swdata_size: %u\n"
806  		"flow_id_base: %d\n"
807  		"flow_num: %d\n",
808  		chn->udma_rchan_id,
809  		chn->common.src_thread,
810  		chn->common.dst_thread,
811  		chn->common.epib,
812  		chn->common.hdesc_size,
813  		chn->common.psdata_size,
814  		chn->common.swdata_size,
815  		chn->flow_id_base,
816  		chn->flow_num);
817  }
818  
819  static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
820  					char *mark)
821  {
822  	struct device *dev = chn->common.dev;
823  
824  	dev_dbg(dev, "=== dump ===> %s\n", mark);
825  
826  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
827  		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
828  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
829  		xudma_rchanrt_read(chn->udma_rchanx,
830  				   UDMA_CHAN_RT_PEER_RT_EN_REG));
831  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
832  		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
833  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
834  		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
835  	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
836  		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
837  }
838  
839  static int
840  k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
841  			       struct k3_udma_glue_rx_channel_cfg *cfg)
842  {
843  	int ret;
844  
845  	/* default rflow */
846  	if (cfg->flow_id_use_rxchan_id)
847  		return 0;
848  
849  	/* not GP rflows - nothing to allocate */
850  	if (rx_chn->flow_id_base != -1 &&
851  	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
852  		return 0;
853  
854  	/* Allocate range of GP rflows */
855  	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
856  					 rx_chn->flow_id_base,
857  					 rx_chn->flow_num);
858  	if (ret < 0) {
859  		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
860  			rx_chn->flow_id_base, rx_chn->flow_num, ret);
861  		return ret;
862  	}
863  	rx_chn->flow_id_base = ret;
864  
865  	return 0;
866  }
867  
868  static struct k3_udma_glue_rx_channel *
869  k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
870  				 struct k3_udma_glue_rx_channel_cfg *cfg)
871  {
872  	struct k3_udma_glue_rx_channel *rx_chn;
873  	struct psil_endpoint_config *ep_cfg;
874  	int ret, i;
875  
876  	if (cfg->flow_id_num <= 0)
877  		return ERR_PTR(-EINVAL);
878  
879  	if (cfg->flow_id_num != 1 &&
880  	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
881  		return ERR_PTR(-EINVAL);
882  
883  	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
884  	if (!rx_chn)
885  		return ERR_PTR(-ENOMEM);
886  
887  	rx_chn->common.dev = dev;
888  	rx_chn->common.swdata_size = cfg->swdata_size;
889  	rx_chn->remote = false;
890  
891  	/* parse of udmap channel */
892  	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
893  					&rx_chn->common, false);
894  	if (ret)
895  		goto err;
896  
897  	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
898  						rx_chn->common.psdata_size,
899  						rx_chn->common.swdata_size);
900  
901  	ep_cfg = rx_chn->common.ep_config;
902  
903  	if (xudma_is_pktdma(rx_chn->common.udmax))
904  		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
905  	else
906  		rx_chn->udma_rchan_id = -1;
907  
908  	/* request and cfg UDMAP RX channel */
909  	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
910  					      rx_chn->udma_rchan_id);
911  	if (IS_ERR(rx_chn->udma_rchanx)) {
912  		ret = PTR_ERR(rx_chn->udma_rchanx);
913  		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
914  		goto err;
915  	}
916  	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
917  
918  	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
919  	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
920  	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
921  		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
922  	ret = device_register(&rx_chn->common.chan_dev);
923  	if (ret) {
924  		dev_err(dev, "Channel Device registration failed %d\n", ret);
925  		put_device(&rx_chn->common.chan_dev);
926  		rx_chn->common.chan_dev.parent = NULL;
927  		goto err;
928  	}
929  
930  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
931  		/* prepare the channel device as coherent */
932  		rx_chn->common.chan_dev.dma_coherent = true;
933  		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
934  					     DMA_BIT_MASK(48));
935  	}
936  
937  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
938  		int flow_start = cfg->flow_id_base;
939  		int flow_end;
940  
941  		if (flow_start == -1)
942  			flow_start = ep_cfg->flow_start;
943  
944  		flow_end = flow_start + cfg->flow_id_num - 1;
945  		if (flow_start < ep_cfg->flow_start ||
946  		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
947  			dev_err(dev, "Invalid flow range requested\n");
948  			ret = -EINVAL;
949  			goto err;
950  		}
951  		rx_chn->flow_id_base = flow_start;
952  	} else {
953  		rx_chn->flow_id_base = cfg->flow_id_base;
954  
955  		/* Use RX channel id as flow id: target dev can't generate flow_id */
956  		if (cfg->flow_id_use_rxchan_id)
957  			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
958  	}
959  
960  	rx_chn->flow_num = cfg->flow_id_num;
961  
962  	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
963  				     sizeof(*rx_chn->flows), GFP_KERNEL);
964  	if (!rx_chn->flows) {
965  		ret = -ENOMEM;
966  		goto err;
967  	}
968  
969  	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
970  	if (ret)
971  		goto err;
972  
973  	for (i = 0; i < rx_chn->flow_num; i++)
974  		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
975  
976  	/* request and cfg psi-l */
977  	rx_chn->common.dst_thread =
978  			xudma_dev_get_psil_base(rx_chn->common.udmax) +
979  			rx_chn->udma_rchan_id;
980  
981  	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
982  	if (ret) {
983  		dev_err(dev, "Failed to cfg rchan %d\n", ret);
984  		goto err;
985  	}
986  
987  	/* init default RX flow only if flow_num = 1 */
988  	if (cfg->def_flow_cfg) {
989  		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
990  		if (ret)
991  			goto err;
992  	}
993  
994  	k3_udma_glue_dump_rx_chn(rx_chn);
995  
996  	return rx_chn;
997  
998  err:
999  	k3_udma_glue_release_rx_chn(rx_chn);
1000  	return ERR_PTR(ret);
1001  }
1002  
1003  static struct k3_udma_glue_rx_channel *
1004  k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
1005  				   struct k3_udma_glue_rx_channel_cfg *cfg)
1006  {
1007  	struct k3_udma_glue_rx_channel *rx_chn;
1008  	int ret, i;
1009  
1010  	if (cfg->flow_id_num <= 0 ||
1011  	    cfg->flow_id_use_rxchan_id ||
1012  	    cfg->def_flow_cfg ||
1013  	    cfg->flow_id_base < 0)
1014  		return ERR_PTR(-EINVAL);
1015  
1016  	/*
1017  	 * A remote RX channel is under the control of a remote CPU core, so
1018  	 * Linux can only request it and manipulate it via its dedicated RX flows.
1019  	 */
1020  
1021  	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
1022  	if (!rx_chn)
1023  		return ERR_PTR(-ENOMEM);
1024  
1025  	rx_chn->common.dev = dev;
1026  	rx_chn->common.swdata_size = cfg->swdata_size;
1027  	rx_chn->remote = true;
1028  	rx_chn->udma_rchan_id = -1;
1029  	rx_chn->flow_num = cfg->flow_id_num;
1030  	rx_chn->flow_id_base = cfg->flow_id_base;
1031  	rx_chn->psil_paired = false;
1032  
1033  	/* parse of udmap channel */
1034  	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
1035  					&rx_chn->common, false);
1036  	if (ret)
1037  		goto err;
1038  
1039  	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
1040  						rx_chn->common.psdata_size,
1041  						rx_chn->common.swdata_size);
1042  
1043  	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
1044  				     sizeof(*rx_chn->flows), GFP_KERNEL);
1045  	if (!rx_chn->flows) {
1046  		ret = -ENOMEM;
1047  		goto err;
1048  	}
1049  
1050  	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
1051  	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
1052  	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
1053  		     rx_chn->common.src_thread);
1054  	ret = device_register(&rx_chn->common.chan_dev);
1055  	if (ret) {
1056  		dev_err(dev, "Channel Device registration failed %d\n", ret);
1057  		put_device(&rx_chn->common.chan_dev);
1058  		rx_chn->common.chan_dev.parent = NULL;
1059  		goto err;
1060  	}
1061  
1062  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
1063  		/* prepare the channel device as coherent */
1064  		rx_chn->common.chan_dev.dma_coherent = true;
1065  		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
1066  					     DMA_BIT_MASK(48));
1067  	}
1068  
1069  	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
1070  	if (ret)
1071  		goto err;
1072  
1073  	for (i = 0; i < rx_chn->flow_num; i++)
1074  		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
1075  
1076  	k3_udma_glue_dump_rx_chn(rx_chn);
1077  
1078  	return rx_chn;
1079  
1080  err:
1081  	k3_udma_glue_release_rx_chn(rx_chn);
1082  	return ERR_PTR(ret);
1083  }
1084  
1085  struct k3_udma_glue_rx_channel *
1086  k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
1087  			    struct k3_udma_glue_rx_channel_cfg *cfg)
1088  {
1089  	if (cfg->remote)
1090  		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
1091  	else
1092  		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
1093  }
1094  EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
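
/*
 * A minimal RX request sketch with a single default flow (illustrative only;
 * the "rx" name, ring sizes and the src_tag_lo_sel constant are assumptions
 * based on <linux/dma/k3-udma-glue.h> and <linux/soc/ti/k3-ringacc.h>):
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
 *
 *	flow_cfg.rx_cfg.size = 128;
 *	flow_cfg.rx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
 *	flow_cfg.rx_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *	flow_cfg.rxfdq_cfg = flow_cfg.rx_cfg;
 *	flow_cfg.ring_rxq_id = -1;
 *	flow_cfg.ring_rxfdq0_id = -1;
 *	flow_cfg.src_tag_lo_sel = K3_UDMA_GLUE_SRC_TAG_LO_KEEP;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.flow_id_base = -1;
 *	cfg.flow_id_num = 1;
 *	cfg.def_flow_cfg = &flow_cfg;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 */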
1095  
1096  void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
1097  {
1098  	int i;
1099  
1100  	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
1101  		return;
1102  
1103  	if (rx_chn->psil_paired) {
1104  		xudma_navss_psil_unpair(rx_chn->common.udmax,
1105  					rx_chn->common.src_thread,
1106  					rx_chn->common.dst_thread);
1107  		rx_chn->psil_paired = false;
1108  	}
1109  
1110  	for (i = 0; i < rx_chn->flow_num; i++)
1111  		k3_udma_glue_release_rx_flow(rx_chn, i);
1112  
1113  	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
1114  		xudma_free_gp_rflow_range(rx_chn->common.udmax,
1115  					  rx_chn->flow_id_base,
1116  					  rx_chn->flow_num);
1117  
1118  	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
1119  		xudma_rchan_put(rx_chn->common.udmax,
1120  				rx_chn->udma_rchanx);
1121  
1122  	if (rx_chn->common.chan_dev.parent) {
1123  		device_unregister(&rx_chn->common.chan_dev);
1124  		rx_chn->common.chan_dev.parent = NULL;
1125  	}
1126  }
1127  EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
1128  
1129  int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
1130  			      u32 flow_idx,
1131  			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
1132  {
1133  	if (flow_idx >= rx_chn->flow_num)
1134  		return -EINVAL;
1135  
1136  	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
1137  }
1138  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
1139  
1140  u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
1141  				    u32 flow_idx)
1142  {
1143  	struct k3_udma_glue_rx_flow *flow;
1144  
1145  	if (flow_idx >= rx_chn->flow_num)
1146  		return -EINVAL;
1147  
1148  	flow = &rx_chn->flows[flow_idx];
1149  
1150  	return k3_ringacc_get_ring_id(flow->ringrxfdq);
1151  }
1152  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
1153  
1154  u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
1155  {
1156  	return rx_chn->flow_id_base;
1157  }
1158  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
1159  
1160  int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
1161  				u32 flow_idx)
1162  {
1163  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
1164  	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
1165  	struct device *dev = rx_chn->common.dev;
1166  	struct ti_sci_msg_rm_udmap_flow_cfg req;
1167  	int rx_ring_id;
1168  	int rx_ringfdq_id;
1169  	int ret = 0;
1170  
1171  	if (!rx_chn->remote)
1172  		return -EINVAL;
1173  
1174  	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
1175  	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
1176  
1177  	memset(&req, 0, sizeof(req));
1178  
1179  	req.valid_params =
1180  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1181  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1182  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1183  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1184  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1185  	req.nav_id = tisci_rm->tisci_dev_id;
1186  	req.flow_index = flow->udma_rflow_id;
1187  	req.rx_dest_qnum = rx_ring_id;
1188  	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
1189  	req.rx_fdq1_qnum = rx_ringfdq_id;
1190  	req.rx_fdq2_qnum = rx_ringfdq_id;
1191  	req.rx_fdq3_qnum = rx_ringfdq_id;
1192  
1193  	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
1194  	if (ret) {
1195  		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
1196  			ret);
1197  	}
1198  
1199  	return ret;
1200  }
1201  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
1202  
1203  int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
1204  				 u32 flow_idx)
1205  {
1206  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
1207  	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
1208  	struct device *dev = rx_chn->common.dev;
1209  	struct ti_sci_msg_rm_udmap_flow_cfg req;
1210  	int ret = 0;
1211  
1212  	if (!rx_chn->remote)
1213  		return -EINVAL;
1214  
1215  	memset(&req, 0, sizeof(req));
1216  	req.valid_params =
1217  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1218  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1219  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1220  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1221  			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1222  	req.nav_id = tisci_rm->tisci_dev_id;
1223  	req.flow_index = flow->udma_rflow_id;
1224  	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
1225  	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
1226  	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
1227  	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
1228  	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
1229  
1230  	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
1231  	if (ret) {
1232  		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
1233  			ret);
1234  	}
1235  
1236  	return ret;
1237  }
1238  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
1239  
1240  int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
1241  {
1242  	int ret;
1243  
1244  	if (rx_chn->remote)
1245  		return -EINVAL;
1246  
1247  	if (rx_chn->flows_ready < rx_chn->flow_num)
1248  		return -EINVAL;
1249  
1250  	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
1251  				    rx_chn->common.src_thread,
1252  				    rx_chn->common.dst_thread);
1253  	if (ret) {
1254  		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
1255  		return ret;
1256  	}
1257  
1258  	rx_chn->psil_paired = true;
1259  
1260  	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
1261  			    UDMA_CHAN_RT_CTL_EN);
1262  
1263  	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
1264  			    UDMA_PEER_RT_EN_ENABLE);
1265  
1266  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
1267  	return 0;
1268  }
1269  EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
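
/*
 * Note: for a local (non-remote) channel every flow must have been configured
 * (via def_flow_cfg at request time or k3_udma_glue_rx_flow_init()) before
 * the enable above succeeds - see the flows_ready check. Clients also
 * typically pre-fill the free descriptor queue with k3_udma_glue_push_rx_chn()
 * before enabling, so incoming packets have buffers to land in.
 */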
1270  
1271  void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
1272  {
1273  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
1274  
1275  	xudma_rchanrt_write(rx_chn->udma_rchanx,
1276  			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
1277  	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);
1278  
1279  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
1280  
1281  	if (rx_chn->psil_paired) {
1282  		xudma_navss_psil_unpair(rx_chn->common.udmax,
1283  					rx_chn->common.src_thread,
1284  					rx_chn->common.dst_thread);
1285  		rx_chn->psil_paired = false;
1286  	}
1287  }
1288  EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
1289  
1290  void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1291  			       bool sync)
1292  {
1293  	int i = 0;
1294  	u32 val;
1295  
1296  	if (rx_chn->remote)
1297  		return;
1298  
1299  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
1300  
1301  	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
1302  			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
1303  
1304  	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);
1305  
1306  	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
1307  		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
1308  					 UDMA_CHAN_RT_CTL_REG);
1309  		udelay(1);
1310  		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
1311  			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
1312  			break;
1313  		}
1314  		i++;
1315  	}
1316  
1317  	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
1318  				 UDMA_CHAN_RT_PEER_RT_EN_REG);
1319  	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
1320  		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
1321  	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
1322  }
1323  EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
1324  
1325  void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1326  		u32 flow_num, void *data,
1327  		void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
1328  {
1329  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1330  	struct device *dev = rx_chn->common.dev;
1331  	dma_addr_t desc_dma;
1332  	int occ_rx, i, ret;
1333  
1334  	/* reset RXCQ as it is not input for udma - expected to be empty */
1335  	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
1336  	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
1337  
1338  	/* Skip RX FDQ in case one FDQ is used for the set of flows */
1339  	if (skip_fdq)
1340  		goto do_reset;
1341  
1342  	/*
1343  	 * The RX FDQ reset needs special handling as the RX FDQ is input for
1344  	 * UDMA and its state is cached by UDMA, so:
1345  	 * 1) save RX FDQ occ
1346  	 * 2) clean up RX FDQ and call callback .cleanup() for each desc
1347  	 * 3) reset RX FDQ in a special way
1348  	 */
1349  	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
1350  	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
1351  
1352  	for (i = 0; i < occ_rx; i++) {
1353  		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
1354  		if (ret) {
1355  			if (ret != -ENODATA)
1356  				dev_err(dev, "RX reset pop %d\n", ret);
1357  			break;
1358  		}
1359  		cleanup(data, desc_dma);
1360  	}
1361  
1362  	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
1363  
1364  do_reset:
1365  	k3_ringacc_ring_reset(flow->ringrx);
1366  }
1367  EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
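
/*
 * A typical RX stop sequence built from the calls above (a sketch;
 * num_rx_flows, priv and my_rx_cleanup() are hypothetical driver-side names,
 * the last one being a cleanup hook that returns the buffer behind each
 * descriptor to its pool):
 *
 *	k3_udma_glue_tdown_rx_chn(rx_chn, true);
 *	for (i = 0; i < num_rx_flows; i++)
 *		k3_udma_glue_reset_rx_chn(rx_chn, i, priv, my_rx_cleanup,
 *					  false);
 *	k3_udma_glue_disable_rx_chn(rx_chn);
 */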
1368  
1369  int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1370  			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
1371  			     dma_addr_t desc_dma)
1372  {
1373  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1374  
1375  	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
1376  }
1377  EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
1378  
1379  int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
1380  			    u32 flow_num, dma_addr_t *desc_dma)
1381  {
1382  	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1383  
1384  	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
1385  }
1386  EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
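
/*
 * The usual receive loop pairs the two calls above (sketch): completed
 * packets are popped from the flow's RX ring and, for every consumed buffer,
 * a freshly prepared descriptor is pushed back to the free descriptor queue:
 *
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, flow, &desc_dma)) {
 *		...look up and hand off the packet behind desc_dma...
 *		k3_udma_glue_push_rx_chn(rx_chn, flow, new_desc, new_desc_dma);
 *	}
 */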
1387  
1388  int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
1389  			    u32 flow_num)
1390  {
1391  	struct k3_udma_glue_rx_flow *flow;
1392  
1393  	flow = &rx_chn->flows[flow_num];
1394  
1395  	if (xudma_is_pktdma(rx_chn->common.udmax)) {
1396  		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
1397  							flow->udma_rflow_id);
1398  	} else {
1399  		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
1400  	}
1401  
1402  	return flow->virq;
1403  }
1404  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
1405  
1406  struct device *
1407  	k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
1408  {
1409  	if (xudma_is_pktdma(rx_chn->common.udmax) &&
1410  	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
1411  		return &rx_chn->common.chan_dev;
1412  
1413  	return xudma_get_device(rx_chn->common.udmax);
1414  }
1415  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);
1416  
1417  void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
1418  				       dma_addr_t *addr)
1419  {
1420  	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
1421  	    !rx_chn->common.atype_asel)
1422  		return;
1423  
1424  	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
1425  }
1426  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);
1427  
1428  void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
1429  				       dma_addr_t *addr)
1430  {
1431  	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
1432  	    !rx_chn->common.atype_asel)
1433  		return;
1434  
1435  	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
1436  }
1437  EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);
1438  
1439  static int __init k3_udma_glue_class_init(void)
1440  {
1441  	return class_register(&k3_udma_glue_devclass);
1442  }
1443  
1444  module_init(k3_udma_glue_class_init);
1445  MODULE_LICENSE("GPL v2");
1446