Lines matching "psi-l" (drivers/dma/ti/k3-udma-glue.c)
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
12 #include <linux/dma-mapping.h>
17 #include <linux/soc/ti/k3-ringacc.h>
18 #include <linux/dma/ti-cppi5.h>
19 #include <linux/dma/k3-udma-glue.h>
21 #include "k3-udma.h"
22 #include "k3-psil-priv.h"
104 common->udmax = of_xudma_dev_get(udmax_np, NULL); in of_k3_udma_glue_parse()
105 if (IS_ERR(common->udmax)) in of_k3_udma_glue_parse()
106 return PTR_ERR(common->udmax); in of_k3_udma_glue_parse()
108 common->ringacc = xudma_get_ringacc(common->udmax); in of_k3_udma_glue_parse()
109 common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax); in of_k3_udma_glue_parse()
124 return -EINVAL; in of_k3_udma_glue_parse_chn()
126 index = of_property_match_string(chn_np, "dma-names", name); in of_k3_udma_glue_parse_chn()
130 if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index, in of_k3_udma_glue_parse_chn()
132 return -ENOENT; in of_k3_udma_glue_parse_chn()
140 if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) { in of_k3_udma_glue_parse_chn()
141 dev_err(common->dev, "Invalid channel atype: %u\n", in of_k3_udma_glue_parse_chn()
143 ret = -EINVAL; in of_k3_udma_glue_parse_chn()
146 if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) { in of_k3_udma_glue_parse_chn()
147 dev_err(common->dev, "Invalid channel asel: %u\n", in of_k3_udma_glue_parse_chn()
149 ret = -EINVAL; in of_k3_udma_glue_parse_chn()
153 common->atype_asel = dma_spec.args[1]; in of_k3_udma_glue_parse_chn()
157 ret = -EINVAL; in of_k3_udma_glue_parse_chn()
162 ret = -EINVAL; in of_k3_udma_glue_parse_chn()
167 common->ep_config = psil_get_ep_config(thread_id); in of_k3_udma_glue_parse_chn()
168 if (IS_ERR(common->ep_config)) { in of_k3_udma_glue_parse_chn()
169 dev_err(common->dev, in of_k3_udma_glue_parse_chn()
170 "No configuration for psi-l thread 0x%04x\n", in of_k3_udma_glue_parse_chn()
172 ret = PTR_ERR(common->ep_config); in of_k3_udma_glue_parse_chn()
176 common->epib = common->ep_config->needs_epib; in of_k3_udma_glue_parse_chn()
177 common->psdata_size = common->ep_config->psd_size; in of_k3_udma_glue_parse_chn()
180 common->dst_thread = thread_id; in of_k3_udma_glue_parse_chn()
182 common->src_thread = thread_id; in of_k3_udma_glue_parse_chn()
191 struct device *dev = tx_chn->common.dev; in k3_udma_glue_dump_tx_chn()
197 tx_chn->udma_tchan_id, in k3_udma_glue_dump_tx_chn()
198 tx_chn->common.src_thread, in k3_udma_glue_dump_tx_chn()
199 tx_chn->common.dst_thread); in k3_udma_glue_dump_tx_chn()
205 struct device *dev = chn->common.dev; in k3_udma_glue_dump_tx_rt_chn()
209 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG)); in k3_udma_glue_dump_tx_rt_chn()
211 xudma_tchanrt_read(chn->udma_tchanx, in k3_udma_glue_dump_tx_rt_chn()
214 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG)); in k3_udma_glue_dump_tx_rt_chn()
216 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG)); in k3_udma_glue_dump_tx_rt_chn()
218 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG)); in k3_udma_glue_dump_tx_rt_chn()
223 const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm; in k3_udma_glue_cfg_tx_chn()
236 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_cfg_tx_chn()
237 req.index = tx_chn->udma_tchan_id; in k3_udma_glue_cfg_tx_chn()
238 if (tx_chn->tx_pause_on_err) in k3_udma_glue_cfg_tx_chn()
240 if (tx_chn->tx_filt_einfo) in k3_udma_glue_cfg_tx_chn()
242 if (tx_chn->tx_filt_pswords) in k3_udma_glue_cfg_tx_chn()
245 if (tx_chn->tx_supr_tdpkt) in k3_udma_glue_cfg_tx_chn()
247 req.tx_fetch_size = tx_chn->common.hdesc_size >> 2; in k3_udma_glue_cfg_tx_chn()
248 req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq); in k3_udma_glue_cfg_tx_chn()
249 req.tx_atype = tx_chn->common.atype_asel; in k3_udma_glue_cfg_tx_chn()
251 return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); in k3_udma_glue_cfg_tx_chn()
262 return ERR_PTR(-ENOMEM); in k3_udma_glue_request_tx_chn()
264 tx_chn->common.dev = dev; in k3_udma_glue_request_tx_chn()
265 tx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_tx_chn()
266 tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; in k3_udma_glue_request_tx_chn()
267 tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; in k3_udma_glue_request_tx_chn()
268 tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; in k3_udma_glue_request_tx_chn()
269 tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; in k3_udma_glue_request_tx_chn()
272 ret = of_k3_udma_glue_parse_chn(dev->of_node, name, in k3_udma_glue_request_tx_chn()
273 &tx_chn->common, true); in k3_udma_glue_request_tx_chn()
277 tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib, in k3_udma_glue_request_tx_chn()
278 tx_chn->common.psdata_size, in k3_udma_glue_request_tx_chn()
279 tx_chn->common.swdata_size); in k3_udma_glue_request_tx_chn()
281 if (xudma_is_pktdma(tx_chn->common.udmax)) in k3_udma_glue_request_tx_chn()
282 tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id; in k3_udma_glue_request_tx_chn()
284 tx_chn->udma_tchan_id = -1; in k3_udma_glue_request_tx_chn()
287 tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, in k3_udma_glue_request_tx_chn()
288 tx_chn->udma_tchan_id); in k3_udma_glue_request_tx_chn()
289 if (IS_ERR(tx_chn->udma_tchanx)) { in k3_udma_glue_request_tx_chn()
290 ret = PTR_ERR(tx_chn->udma_tchanx); in k3_udma_glue_request_tx_chn()
294 tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx); in k3_udma_glue_request_tx_chn()
296 tx_chn->common.chan_dev.class = &k3_udma_glue_devclass; in k3_udma_glue_request_tx_chn()
297 tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax); in k3_udma_glue_request_tx_chn()
298 dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x", in k3_udma_glue_request_tx_chn()
299 tx_chn->udma_tchan_id, tx_chn->common.dst_thread); in k3_udma_glue_request_tx_chn()
300 ret = device_register(&tx_chn->common.chan_dev); in k3_udma_glue_request_tx_chn()
303 put_device(&tx_chn->common.chan_dev); in k3_udma_glue_request_tx_chn()
304 tx_chn->common.chan_dev.parent = NULL; in k3_udma_glue_request_tx_chn()
308 if (xudma_is_pktdma(tx_chn->common.udmax)) { in k3_udma_glue_request_tx_chn()
310 tx_chn->common.chan_dev.dma_coherent = true; in k3_udma_glue_request_tx_chn()
311 dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev, in k3_udma_glue_request_tx_chn()
315 atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size); in k3_udma_glue_request_tx_chn()
317 if (xudma_is_pktdma(tx_chn->common.udmax)) in k3_udma_glue_request_tx_chn()
318 tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id; in k3_udma_glue_request_tx_chn()
320 tx_chn->udma_tflow_id = tx_chn->udma_tchan_id; in k3_udma_glue_request_tx_chn()
323 ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc, in k3_udma_glue_request_tx_chn()
324 tx_chn->udma_tflow_id, -1, in k3_udma_glue_request_tx_chn()
325 &tx_chn->ringtx, in k3_udma_glue_request_tx_chn()
326 &tx_chn->ringtxcq); in k3_udma_glue_request_tx_chn()
333 cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn); in k3_udma_glue_request_tx_chn()
334 cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev; in k3_udma_glue_request_tx_chn()
337 if (xudma_is_pktdma(tx_chn->common.udmax)) { in k3_udma_glue_request_tx_chn()
338 cfg->tx_cfg.asel = tx_chn->common.atype_asel; in k3_udma_glue_request_tx_chn()
339 cfg->txcq_cfg.asel = tx_chn->common.atype_asel; in k3_udma_glue_request_tx_chn()
342 ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg); in k3_udma_glue_request_tx_chn()
348 ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg); in k3_udma_glue_request_tx_chn()
354 /* request and cfg psi-l */ in k3_udma_glue_request_tx_chn()
355 tx_chn->common.src_thread = in k3_udma_glue_request_tx_chn()
356 xudma_dev_get_psil_base(tx_chn->common.udmax) + in k3_udma_glue_request_tx_chn()
357 tx_chn->udma_tchan_id; in k3_udma_glue_request_tx_chn()
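The k3_udma_glue_request_tx_chn() hits above show the glue layer parsing the "dmas"/"dma-names" properties, sizing the host descriptor, taking a tchan plus a TX/TXCQ ring pair, and deriving the source PSI-L thread from the tchan index. A minimal consumer-side sketch of calling it, assuming the cfg structure and helpers exported by include/linux/dma/k3-udma-glue.h; the ring sizes, swdata size and the "tx0" dma-name are illustrative only:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/k3-udma-glue.h>

/* Hypothetical probe-time setup for one TX channel named "tx0" in DT. */
static struct k3_udma_glue_tx_channel *example_setup_tx(struct device *dev)
{
	struct k3_udma_glue_tx_channel_cfg cfg = { };
	struct k3_udma_glue_tx_channel *tx_chn;

	cfg.swdata_size = 16;			/* per-descriptor driver data */
	cfg.tx_cfg.size = 128;			/* TX ring entries (example) */
	cfg.txcq_cfg.size = 128;		/* completion ring entries */
	cfg.tx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	cfg.txcq_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	cfg.tx_cfg.mode = K3_RINGACC_RING_MODE_RING;
	cfg.txcq_cfg.mode = K3_RINGACC_RING_MODE_RING;

	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
	if (IS_ERR(tx_chn))
		return tx_chn;

	/* Size each pool descriptor must have (EPIB + psdata + swdata). */
	dev_dbg(dev, "hdesc size %u\n", k3_udma_glue_tx_get_hdesc_size(tx_chn));

	return tx_chn;
}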
377 if (tx_chn->psil_paired) { in k3_udma_glue_release_tx_chn()
378 xudma_navss_psil_unpair(tx_chn->common.udmax, in k3_udma_glue_release_tx_chn()
379 tx_chn->common.src_thread, in k3_udma_glue_release_tx_chn()
380 tx_chn->common.dst_thread); in k3_udma_glue_release_tx_chn()
381 tx_chn->psil_paired = false; in k3_udma_glue_release_tx_chn()
384 if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx)) in k3_udma_glue_release_tx_chn()
385 xudma_tchan_put(tx_chn->common.udmax, in k3_udma_glue_release_tx_chn()
386 tx_chn->udma_tchanx); in k3_udma_glue_release_tx_chn()
388 if (tx_chn->ringtxcq) in k3_udma_glue_release_tx_chn()
389 k3_ringacc_ring_free(tx_chn->ringtxcq); in k3_udma_glue_release_tx_chn()
391 if (tx_chn->ringtx) in k3_udma_glue_release_tx_chn()
392 k3_ringacc_ring_free(tx_chn->ringtx); in k3_udma_glue_release_tx_chn()
394 if (tx_chn->common.chan_dev.parent) { in k3_udma_glue_release_tx_chn()
395 device_unregister(&tx_chn->common.chan_dev); in k3_udma_glue_release_tx_chn()
396 tx_chn->common.chan_dev.parent = NULL; in k3_udma_glue_release_tx_chn()
407 if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0)) in k3_udma_glue_push_tx_chn()
408 return -ENOMEM; in k3_udma_glue_push_tx_chn()
410 ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq); in k3_udma_glue_push_tx_chn()
411 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id); in k3_udma_glue_push_tx_chn()
413 return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma); in k3_udma_glue_push_tx_chn()
422 ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma); in k3_udma_glue_pop_tx_chn()
424 atomic_inc(&tx_chn->free_pkts); in k3_udma_glue_pop_tx_chn()
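k3_udma_glue_push_tx_chn() above takes one credit from free_pkts (sized to the completion ring) and stamps the TXCQ ring id into the descriptor's return-queue field before pushing the descriptor's DMA address to the TX ring; k3_udma_glue_pop_tx_chn() returns the credit on completion. A hedged caller-side sketch, with descriptor-pool handling left out:

#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

/* Queue one prepared host descriptor; desc_dma is its mapped DMA address. */
static int example_xmit(struct k3_udma_glue_tx_channel *tx_chn,
			struct cppi5_host_desc_t *desc, dma_addr_t desc_dma)
{
	/* Returns -ENOMEM when no completion-ring slot is free. */
	return k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
}

/* Completion path: drain finished descriptors from the TXCQ ring. */
static void example_tx_complete(struct k3_udma_glue_tx_channel *tx_chn)
{
	dma_addr_t desc_dma;

	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma)) {
		/* map desc_dma back to the virtual descriptor, unmap and
		 * free the buffer (descriptor pool is driver specific) */
	}
}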
434 ret = xudma_navss_psil_pair(tx_chn->common.udmax, in k3_udma_glue_enable_tx_chn()
435 tx_chn->common.src_thread, in k3_udma_glue_enable_tx_chn()
436 tx_chn->common.dst_thread); in k3_udma_glue_enable_tx_chn()
438 dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret); in k3_udma_glue_enable_tx_chn()
442 tx_chn->psil_paired = true; in k3_udma_glue_enable_tx_chn()
444 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_enable_tx_chn()
447 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, in k3_udma_glue_enable_tx_chn()
459 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0); in k3_udma_glue_disable_tx_chn()
461 xudma_tchanrt_write(tx_chn->udma_tchanx, in k3_udma_glue_disable_tx_chn()
465 if (tx_chn->psil_paired) { in k3_udma_glue_disable_tx_chn()
466 xudma_navss_psil_unpair(tx_chn->common.udmax, in k3_udma_glue_disable_tx_chn()
467 tx_chn->common.src_thread, in k3_udma_glue_disable_tx_chn()
468 tx_chn->common.dst_thread); in k3_udma_glue_disable_tx_chn()
469 tx_chn->psil_paired = false; in k3_udma_glue_disable_tx_chn()
482 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, in k3_udma_glue_tdown_tx_chn()
485 val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG); in k3_udma_glue_tdown_tx_chn()
488 val = xudma_tchanrt_read(tx_chn->udma_tchanx, in k3_udma_glue_tdown_tx_chn()
492 dev_err(tx_chn->common.dev, "TX tdown timeout\n"); in k3_udma_glue_tdown_tx_chn()
498 val = xudma_tchanrt_read(tx_chn->udma_tchanx, in k3_udma_glue_tdown_tx_chn()
501 dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n"); in k3_udma_glue_tdown_tx_chn()
510 struct device *dev = tx_chn->common.dev; in k3_udma_glue_reset_tx_chn()
521 occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx); in k3_udma_glue_reset_tx_chn()
525 ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma); in k3_udma_glue_reset_tx_chn()
527 if (ret != -ENODATA) in k3_udma_glue_reset_tx_chn()
534 /* reset TXCQ as it is not input for udma - expected to be empty */ in k3_udma_glue_reset_tx_chn()
535 k3_ringacc_ring_reset(tx_chn->ringtxcq); in k3_udma_glue_reset_tx_chn()
536 k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx); in k3_udma_glue_reset_tx_chn()
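k3_udma_glue_tdown_tx_chn() above requests a teardown via UDMA_CHAN_RT_CTL_REG and optionally polls for the channel and its PSI-L peer to stop; k3_udma_glue_reset_tx_chn() then drains whatever is still on the TX ring through a caller-supplied cleanup callback and resets both rings. A sketch of the usual stop path in a client driver; the cleanup callback and context are hypothetical:

#include <linux/dma/k3-udma-glue.h>

/* Called for every descriptor still sitting on the TX ring at reset time. */
static void example_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	/* unmap and free the buffer behind desc_dma; 'data' is the driver
	 * context passed to k3_udma_glue_reset_tx_chn() below */
}

static void example_stop_tx(void *drv_ctx,
			    struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_tdown_tx_chn(tx_chn, true);	/* synchronous teardown */
	k3_udma_glue_reset_tx_chn(tx_chn, drv_ctx, example_tx_cleanup);
	k3_udma_glue_disable_tx_chn(tx_chn);
}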
542 return tx_chn->common.hdesc_size; in k3_udma_glue_tx_get_hdesc_size()
548 return k3_ringacc_get_ring_id(tx_chn->ringtxcq); in k3_udma_glue_tx_get_txcq_id()
554 if (xudma_is_pktdma(tx_chn->common.udmax)) { in k3_udma_glue_tx_get_irq()
555 tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax, in k3_udma_glue_tx_get_irq()
556 tx_chn->udma_tflow_id); in k3_udma_glue_tx_get_irq()
558 tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); in k3_udma_glue_tx_get_irq()
561 if (!tx_chn->virq) in k3_udma_glue_tx_get_irq()
562 return -ENXIO; in k3_udma_glue_tx_get_irq()
564 return tx_chn->virq; in k3_udma_glue_tx_get_irq()
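k3_udma_glue_tx_get_irq() above hands back either the tflow interrupt (PKTDMA) or the TXCQ ring interrupt, and k3_udma_glue_enable_tx_chn() (shown earlier) pairs the PSI-L threads and sets the RT enable bits. A sketch of the start-up sequence; the handler, trigger flag and context are assumptions, not taken from this file:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/dma/k3-udma-glue.h>

static irqreturn_t example_tx_irq(int irq, void *dev_id)
{
	/* typically just schedules deferred completion handling */
	return IRQ_HANDLED;
}

static int example_start_tx(struct device *dev,
			    struct k3_udma_glue_tx_channel *tx_chn, void *ctx)
{
	int irq, ret;

	irq = k3_udma_glue_tx_get_irq(tx_chn);
	if (irq <= 0)
		return irq ? irq : -ENXIO;

	ret = devm_request_irq(dev, irq, example_tx_irq, IRQF_TRIGGER_HIGH,
			       dev_name(dev), ctx);
	if (ret)
		return ret;

	return k3_udma_glue_enable_tx_chn(tx_chn);
}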
571 if (xudma_is_pktdma(tx_chn->common.udmax) && in k3_udma_glue_tx_get_dma_device()
572 (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15)) in k3_udma_glue_tx_get_dma_device()
573 return &tx_chn->common.chan_dev; in k3_udma_glue_tx_get_dma_device()
575 return xudma_get_device(tx_chn->common.udmax); in k3_udma_glue_tx_get_dma_device()
582 if (!xudma_is_pktdma(tx_chn->common.udmax) || in k3_udma_glue_tx_dma_to_cppi5_addr()
583 !tx_chn->common.atype_asel) in k3_udma_glue_tx_dma_to_cppi5_addr()
586 *addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT; in k3_udma_glue_tx_dma_to_cppi5_addr()
593 if (!xudma_is_pktdma(tx_chn->common.udmax) || in k3_udma_glue_tx_cppi5_to_dma_addr()
594 !tx_chn->common.atype_asel) in k3_udma_glue_tx_cppi5_to_dma_addr()
597 *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0); in k3_udma_glue_tx_cppi5_to_dma_addr()
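The two helpers above are no-ops unless the channel sits on PKTDMA with a non-zero ASEL; otherwise the ASEL value is OR-ed into the top address bits (K3_ADDRESS_ASEL_SHIFT) on the way into a descriptor and masked back out on the way back. A short sketch of where a client calls them; the surrounding descriptor handling is elided:

#include <linux/dma/k3-udma-glue.h>

static void example_tx_addr_xlate(struct k3_udma_glue_tx_channel *tx_chn,
				  dma_addr_t buf_dma)
{
	dma_addr_t hw_addr = buf_dma;

	/* Encode ASEL before the address is written into a CPPI5 descriptor. */
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &hw_addr);

	/* ... place hw_addr in the descriptor and push it ... */

	/* Strip ASEL again before handing the address back to the DMA API. */
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn, &hw_addr);
}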
603 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_cfg_rx_chn()
614 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_cfg_rx_chn()
615 req.index = rx_chn->udma_rchan_id; in k3_udma_glue_cfg_rx_chn()
616 req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; in k3_udma_glue_cfg_rx_chn()
620 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); in k3_udma_glue_cfg_rx_chn()
623 if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num && in k3_udma_glue_cfg_rx_chn()
624 rx_chn->flow_id_base != rx_chn->udma_rchan_id) { in k3_udma_glue_cfg_rx_chn()
628 req.flowid_start = rx_chn->flow_id_base; in k3_udma_glue_cfg_rx_chn()
629 req.flowid_cnt = rx_chn->flow_num; in k3_udma_glue_cfg_rx_chn()
632 req.rx_atype = rx_chn->common.atype_asel; in k3_udma_glue_cfg_rx_chn()
634 ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req); in k3_udma_glue_cfg_rx_chn()
636 dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", in k3_udma_glue_cfg_rx_chn()
637 rx_chn->udma_rchan_id, ret); in k3_udma_glue_cfg_rx_chn()
645 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_release_rx_flow()
647 if (IS_ERR_OR_NULL(flow->udma_rflow)) in k3_udma_glue_release_rx_flow()
650 if (flow->ringrxfdq) in k3_udma_glue_release_rx_flow()
651 k3_ringacc_ring_free(flow->ringrxfdq); in k3_udma_glue_release_rx_flow()
653 if (flow->ringrx) in k3_udma_glue_release_rx_flow()
654 k3_ringacc_ring_free(flow->ringrx); in k3_udma_glue_release_rx_flow()
656 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); in k3_udma_glue_release_rx_flow()
657 flow->udma_rflow = NULL; in k3_udma_glue_release_rx_flow()
658 rx_chn->flows_ready--; in k3_udma_glue_release_rx_flow()
665 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_cfg_rx_flow()
666 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_cfg_rx_flow()
667 struct device *dev = rx_chn->common.dev; in k3_udma_glue_cfg_rx_flow()
673 flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, in k3_udma_glue_cfg_rx_flow()
674 flow->udma_rflow_id); in k3_udma_glue_cfg_rx_flow()
675 if (IS_ERR(flow->udma_rflow)) { in k3_udma_glue_cfg_rx_flow()
676 ret = PTR_ERR(flow->udma_rflow); in k3_udma_glue_cfg_rx_flow()
681 if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { in k3_udma_glue_cfg_rx_flow()
682 ret = -ENODEV; in k3_udma_glue_cfg_rx_flow()
686 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_cfg_rx_flow()
687 rx_ringfdq_id = flow->udma_rflow_id + in k3_udma_glue_cfg_rx_flow()
688 xudma_get_rflow_ring_offset(rx_chn->common.udmax); in k3_udma_glue_cfg_rx_flow()
691 rx_ring_id = flow_cfg->ring_rxq_id; in k3_udma_glue_cfg_rx_flow()
692 rx_ringfdq_id = flow_cfg->ring_rxfdq0_id; in k3_udma_glue_cfg_rx_flow()
696 ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc, in k3_udma_glue_cfg_rx_flow()
698 &flow->ringrxfdq, in k3_udma_glue_cfg_rx_flow()
699 &flow->ringrx); in k3_udma_glue_cfg_rx_flow()
706 flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn); in k3_udma_glue_cfg_rx_flow()
707 flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev; in k3_udma_glue_cfg_rx_flow()
710 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_cfg_rx_flow()
711 flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel; in k3_udma_glue_cfg_rx_flow()
712 flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel; in k3_udma_glue_cfg_rx_flow()
715 ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); in k3_udma_glue_cfg_rx_flow()
721 ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); in k3_udma_glue_cfg_rx_flow()
727 if (rx_chn->remote) { in k3_udma_glue_cfg_rx_flow()
731 rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); in k3_udma_glue_cfg_rx_flow()
732 rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); in k3_udma_glue_cfg_rx_flow()
751 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_cfg_rx_flow()
752 req.flow_index = flow->udma_rflow_id; in k3_udma_glue_cfg_rx_flow()
753 if (rx_chn->common.epib) in k3_udma_glue_cfg_rx_flow()
755 if (rx_chn->common.psdata_size) in k3_udma_glue_cfg_rx_flow()
757 if (flow_cfg->rx_error_handling) in k3_udma_glue_cfg_rx_flow()
762 req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel; in k3_udma_glue_cfg_rx_flow()
770 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); in k3_udma_glue_cfg_rx_flow()
772 dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, in k3_udma_glue_cfg_rx_flow()
777 rx_chn->flows_ready++; in k3_udma_glue_cfg_rx_flow()
779 flow->udma_rflow_id, rx_chn->flows_ready); in k3_udma_glue_cfg_rx_flow()
784 k3_ringacc_ring_free(flow->ringrxfdq); in k3_udma_glue_cfg_rx_flow()
785 k3_ringacc_ring_free(flow->ringrx); in k3_udma_glue_cfg_rx_flow()
788 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); in k3_udma_glue_cfg_rx_flow()
789 flow->udma_rflow = NULL; in k3_udma_glue_cfg_rx_flow()
796 struct device *dev = chn->common.dev; in k3_udma_glue_dump_rx_chn()
808 chn->udma_rchan_id, in k3_udma_glue_dump_rx_chn()
809 chn->common.src_thread, in k3_udma_glue_dump_rx_chn()
810 chn->common.dst_thread, in k3_udma_glue_dump_rx_chn()
811 chn->common.epib, in k3_udma_glue_dump_rx_chn()
812 chn->common.hdesc_size, in k3_udma_glue_dump_rx_chn()
813 chn->common.psdata_size, in k3_udma_glue_dump_rx_chn()
814 chn->common.swdata_size, in k3_udma_glue_dump_rx_chn()
815 chn->flow_id_base, in k3_udma_glue_dump_rx_chn()
816 chn->flow_num); in k3_udma_glue_dump_rx_chn()
822 struct device *dev = chn->common.dev; in k3_udma_glue_dump_rx_rt_chn()
827 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG)); in k3_udma_glue_dump_rx_rt_chn()
829 xudma_rchanrt_read(chn->udma_rchanx, in k3_udma_glue_dump_rx_rt_chn()
832 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG)); in k3_udma_glue_dump_rx_rt_chn()
834 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG)); in k3_udma_glue_dump_rx_rt_chn()
836 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG)); in k3_udma_glue_dump_rx_rt_chn()
846 if (cfg->flow_id_use_rxchan_id) in k3_udma_glue_allocate_rx_flows()
850 if (rx_chn->flow_id_base != -1 && in k3_udma_glue_allocate_rx_flows()
851 !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) in k3_udma_glue_allocate_rx_flows()
855 ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, in k3_udma_glue_allocate_rx_flows()
856 rx_chn->flow_id_base, in k3_udma_glue_allocate_rx_flows()
857 rx_chn->flow_num); in k3_udma_glue_allocate_rx_flows()
859 dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", in k3_udma_glue_allocate_rx_flows()
860 rx_chn->flow_id_base, rx_chn->flow_num, ret); in k3_udma_glue_allocate_rx_flows()
863 rx_chn->flow_id_base = ret; in k3_udma_glue_allocate_rx_flows()
876 if (cfg->flow_id_num <= 0) in k3_udma_glue_request_rx_chn_priv()
877 return ERR_PTR(-EINVAL); in k3_udma_glue_request_rx_chn_priv()
879 if (cfg->flow_id_num != 1 && in k3_udma_glue_request_rx_chn_priv()
880 (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id)) in k3_udma_glue_request_rx_chn_priv()
881 return ERR_PTR(-EINVAL); in k3_udma_glue_request_rx_chn_priv()
885 return ERR_PTR(-ENOMEM); in k3_udma_glue_request_rx_chn_priv()
887 rx_chn->common.dev = dev; in k3_udma_glue_request_rx_chn_priv()
888 rx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_rx_chn_priv()
889 rx_chn->remote = false; in k3_udma_glue_request_rx_chn_priv()
892 ret = of_k3_udma_glue_parse_chn(dev->of_node, name, in k3_udma_glue_request_rx_chn_priv()
893 &rx_chn->common, false); in k3_udma_glue_request_rx_chn_priv()
897 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, in k3_udma_glue_request_rx_chn_priv()
898 rx_chn->common.psdata_size, in k3_udma_glue_request_rx_chn_priv()
899 rx_chn->common.swdata_size); in k3_udma_glue_request_rx_chn_priv()
901 ep_cfg = rx_chn->common.ep_config; in k3_udma_glue_request_rx_chn_priv()
903 if (xudma_is_pktdma(rx_chn->common.udmax)) in k3_udma_glue_request_rx_chn_priv()
904 rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id; in k3_udma_glue_request_rx_chn_priv()
906 rx_chn->udma_rchan_id = -1; in k3_udma_glue_request_rx_chn_priv()
909 rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, in k3_udma_glue_request_rx_chn_priv()
910 rx_chn->udma_rchan_id); in k3_udma_glue_request_rx_chn_priv()
911 if (IS_ERR(rx_chn->udma_rchanx)) { in k3_udma_glue_request_rx_chn_priv()
912 ret = PTR_ERR(rx_chn->udma_rchanx); in k3_udma_glue_request_rx_chn_priv()
916 rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); in k3_udma_glue_request_rx_chn_priv()
918 rx_chn->common.chan_dev.class = &k3_udma_glue_devclass; in k3_udma_glue_request_rx_chn_priv()
919 rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax); in k3_udma_glue_request_rx_chn_priv()
920 dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x", in k3_udma_glue_request_rx_chn_priv()
921 rx_chn->udma_rchan_id, rx_chn->common.src_thread); in k3_udma_glue_request_rx_chn_priv()
922 ret = device_register(&rx_chn->common.chan_dev); in k3_udma_glue_request_rx_chn_priv()
925 put_device(&rx_chn->common.chan_dev); in k3_udma_glue_request_rx_chn_priv()
926 rx_chn->common.chan_dev.parent = NULL; in k3_udma_glue_request_rx_chn_priv()
930 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_request_rx_chn_priv()
932 rx_chn->common.chan_dev.dma_coherent = true; in k3_udma_glue_request_rx_chn_priv()
933 dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev, in k3_udma_glue_request_rx_chn_priv()
937 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_request_rx_chn_priv()
938 int flow_start = cfg->flow_id_base; in k3_udma_glue_request_rx_chn_priv()
941 if (flow_start == -1) in k3_udma_glue_request_rx_chn_priv()
942 flow_start = ep_cfg->flow_start; in k3_udma_glue_request_rx_chn_priv()
944 flow_end = flow_start + cfg->flow_id_num - 1; in k3_udma_glue_request_rx_chn_priv()
945 if (flow_start < ep_cfg->flow_start || in k3_udma_glue_request_rx_chn_priv()
946 flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) { in k3_udma_glue_request_rx_chn_priv()
948 ret = -EINVAL; in k3_udma_glue_request_rx_chn_priv()
951 rx_chn->flow_id_base = flow_start; in k3_udma_glue_request_rx_chn_priv()
953 rx_chn->flow_id_base = cfg->flow_id_base; in k3_udma_glue_request_rx_chn_priv()
956 if (cfg->flow_id_use_rxchan_id) in k3_udma_glue_request_rx_chn_priv()
957 rx_chn->flow_id_base = rx_chn->udma_rchan_id; in k3_udma_glue_request_rx_chn_priv()
960 rx_chn->flow_num = cfg->flow_id_num; in k3_udma_glue_request_rx_chn_priv()
962 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_rx_chn_priv()
963 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv()
964 if (!rx_chn->flows) { in k3_udma_glue_request_rx_chn_priv()
965 ret = -ENOMEM; in k3_udma_glue_request_rx_chn_priv()
973 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_request_rx_chn_priv()
974 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_rx_chn_priv()
976 /* request and cfg psi-l */ in k3_udma_glue_request_rx_chn_priv()
977 rx_chn->common.dst_thread = in k3_udma_glue_request_rx_chn_priv()
978 xudma_dev_get_psil_base(rx_chn->common.udmax) + in k3_udma_glue_request_rx_chn_priv()
979 rx_chn->udma_rchan_id; in k3_udma_glue_request_rx_chn_priv()
988 if (cfg->def_flow_cfg) { in k3_udma_glue_request_rx_chn_priv()
989 ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); in k3_udma_glue_request_rx_chn_priv()
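k3_udma_glue_request_rx_chn_priv() above validates the requested flow range against the PSI-L endpoint configuration, takes an rchan, reserves the GP flow range where needed and, if def_flow_cfg is given, configures flow 0 right away. A minimal single-flow consumer sketch, assuming struct k3_udma_glue_rx_channel_cfg and struct k3_udma_glue_rx_flow_cfg from include/linux/dma/k3-udma-glue.h; ring sizes and the "rx0" dma-name are illustrative:

#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/k3-udma-glue.h>

static struct k3_udma_glue_rx_channel *example_setup_rx(struct device *dev)
{
	struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
	struct k3_udma_glue_rx_channel_cfg cfg = { };

	flow_cfg.rx_cfg.size = 128;		/* RX ring entries (example) */
	flow_cfg.rxfdq_cfg.size = 128;		/* free-descriptor queue */
	flow_cfg.rx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	flow_cfg.rxfdq_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	flow_cfg.rx_cfg.mode = K3_RINGACC_RING_MODE_RING;
	flow_cfg.rxfdq_cfg.mode = K3_RINGACC_RING_MODE_RING;
	flow_cfg.ring_rxq_id = K3_RINGACC_RING_ID_ANY;
	flow_cfg.ring_rxfdq0_id = K3_RINGACC_RING_ID_ANY;
	flow_cfg.rx_error_handling = false;

	cfg.swdata_size = 16;
	cfg.flow_id_base = -1;		/* let the glue/ep config pick the base */
	cfg.flow_id_num = 1;
	cfg.def_flow_cfg = &flow_cfg;

	return k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
}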
1010 if (cfg->flow_id_num <= 0 || in k3_udma_glue_request_remote_rx_chn()
1011 cfg->flow_id_use_rxchan_id || in k3_udma_glue_request_remote_rx_chn()
1012 cfg->def_flow_cfg || in k3_udma_glue_request_remote_rx_chn()
1013 cfg->flow_id_base < 0) in k3_udma_glue_request_remote_rx_chn()
1014 return ERR_PTR(-EINVAL); in k3_udma_glue_request_remote_rx_chn()
1023 return ERR_PTR(-ENOMEM); in k3_udma_glue_request_remote_rx_chn()
1025 rx_chn->common.dev = dev; in k3_udma_glue_request_remote_rx_chn()
1026 rx_chn->common.swdata_size = cfg->swdata_size; in k3_udma_glue_request_remote_rx_chn()
1027 rx_chn->remote = true; in k3_udma_glue_request_remote_rx_chn()
1028 rx_chn->udma_rchan_id = -1; in k3_udma_glue_request_remote_rx_chn()
1029 rx_chn->flow_num = cfg->flow_id_num; in k3_udma_glue_request_remote_rx_chn()
1030 rx_chn->flow_id_base = cfg->flow_id_base; in k3_udma_glue_request_remote_rx_chn()
1031 rx_chn->psil_paired = false; in k3_udma_glue_request_remote_rx_chn()
1034 ret = of_k3_udma_glue_parse_chn(dev->of_node, name, in k3_udma_glue_request_remote_rx_chn()
1035 &rx_chn->common, false); in k3_udma_glue_request_remote_rx_chn()
1039 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, in k3_udma_glue_request_remote_rx_chn()
1040 rx_chn->common.psdata_size, in k3_udma_glue_request_remote_rx_chn()
1041 rx_chn->common.swdata_size); in k3_udma_glue_request_remote_rx_chn()
1043 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_remote_rx_chn()
1044 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn()
1045 if (!rx_chn->flows) { in k3_udma_glue_request_remote_rx_chn()
1046 ret = -ENOMEM; in k3_udma_glue_request_remote_rx_chn()
1050 rx_chn->common.chan_dev.class = &k3_udma_glue_devclass; in k3_udma_glue_request_remote_rx_chn()
1051 rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax); in k3_udma_glue_request_remote_rx_chn()
1052 dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x", in k3_udma_glue_request_remote_rx_chn()
1053 rx_chn->common.src_thread); in k3_udma_glue_request_remote_rx_chn()
1054 ret = device_register(&rx_chn->common.chan_dev); in k3_udma_glue_request_remote_rx_chn()
1057 put_device(&rx_chn->common.chan_dev); in k3_udma_glue_request_remote_rx_chn()
1058 rx_chn->common.chan_dev.parent = NULL; in k3_udma_glue_request_remote_rx_chn()
1062 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_request_remote_rx_chn()
1064 rx_chn->common.chan_dev.dma_coherent = true; in k3_udma_glue_request_remote_rx_chn()
1065 dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev, in k3_udma_glue_request_remote_rx_chn()
1073 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_request_remote_rx_chn()
1074 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_remote_rx_chn()
1089 if (cfg->remote) in k3_udma_glue_request_rx_chn()
1100 if (IS_ERR_OR_NULL(rx_chn->common.udmax)) in k3_udma_glue_release_rx_chn()
1103 if (rx_chn->psil_paired) { in k3_udma_glue_release_rx_chn()
1104 xudma_navss_psil_unpair(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
1105 rx_chn->common.src_thread, in k3_udma_glue_release_rx_chn()
1106 rx_chn->common.dst_thread); in k3_udma_glue_release_rx_chn()
1107 rx_chn->psil_paired = false; in k3_udma_glue_release_rx_chn()
1110 for (i = 0; i < rx_chn->flow_num; i++) in k3_udma_glue_release_rx_chn()
1113 if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) in k3_udma_glue_release_rx_chn()
1114 xudma_free_gp_rflow_range(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
1115 rx_chn->flow_id_base, in k3_udma_glue_release_rx_chn()
1116 rx_chn->flow_num); in k3_udma_glue_release_rx_chn()
1118 if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) in k3_udma_glue_release_rx_chn()
1119 xudma_rchan_put(rx_chn->common.udmax, in k3_udma_glue_release_rx_chn()
1120 rx_chn->udma_rchanx); in k3_udma_glue_release_rx_chn()
1122 if (rx_chn->common.chan_dev.parent) { in k3_udma_glue_release_rx_chn()
1123 device_unregister(&rx_chn->common.chan_dev); in k3_udma_glue_release_rx_chn()
1124 rx_chn->common.chan_dev.parent = NULL; in k3_udma_glue_release_rx_chn()
1133 if (flow_idx >= rx_chn->flow_num) in k3_udma_glue_rx_flow_init()
1134 return -EINVAL; in k3_udma_glue_rx_flow_init()
1145 if (flow_idx >= rx_chn->flow_num) in k3_udma_glue_rx_flow_get_fdq_id()
1146 return -EINVAL; in k3_udma_glue_rx_flow_get_fdq_id()
1148 flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_get_fdq_id()
1150 return k3_ringacc_get_ring_id(flow->ringrxfdq); in k3_udma_glue_rx_flow_get_fdq_id()
1156 return rx_chn->flow_id_base; in k3_udma_glue_rx_get_flow_id_base()
1163 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_enable()
1164 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_rx_flow_enable()
1165 struct device *dev = rx_chn->common.dev; in k3_udma_glue_rx_flow_enable()
1171 if (!rx_chn->remote) in k3_udma_glue_rx_flow_enable()
1172 return -EINVAL; in k3_udma_glue_rx_flow_enable()
1174 rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); in k3_udma_glue_rx_flow_enable()
1175 rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); in k3_udma_glue_rx_flow_enable()
1185 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_rx_flow_enable()
1186 req.flow_index = flow->udma_rflow_id; in k3_udma_glue_rx_flow_enable()
1193 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); in k3_udma_glue_rx_flow_enable()
1195 dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id, in k3_udma_glue_rx_flow_enable()
1206 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_rx_flow_disable()
1207 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; in k3_udma_glue_rx_flow_disable()
1208 struct device *dev = rx_chn->common.dev; in k3_udma_glue_rx_flow_disable()
1212 if (!rx_chn->remote) in k3_udma_glue_rx_flow_disable()
1213 return -EINVAL; in k3_udma_glue_rx_flow_disable()
1222 req.nav_id = tisci_rm->tisci_dev_id; in k3_udma_glue_rx_flow_disable()
1223 req.flow_index = flow->udma_rflow_id; in k3_udma_glue_rx_flow_disable()
1230 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); in k3_udma_glue_rx_flow_disable()
1232 dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id, in k3_udma_glue_rx_flow_disable()
1244 if (rx_chn->remote) in k3_udma_glue_enable_rx_chn()
1245 return -EINVAL; in k3_udma_glue_enable_rx_chn()
1247 if (rx_chn->flows_ready < rx_chn->flow_num) in k3_udma_glue_enable_rx_chn()
1248 return -EINVAL; in k3_udma_glue_enable_rx_chn()
1250 ret = xudma_navss_psil_pair(rx_chn->common.udmax, in k3_udma_glue_enable_rx_chn()
1251 rx_chn->common.src_thread, in k3_udma_glue_enable_rx_chn()
1252 rx_chn->common.dst_thread); in k3_udma_glue_enable_rx_chn()
1254 dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret); in k3_udma_glue_enable_rx_chn()
1258 rx_chn->psil_paired = true; in k3_udma_glue_enable_rx_chn()
1260 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, in k3_udma_glue_enable_rx_chn()
1263 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_enable_rx_chn()
1275 xudma_rchanrt_write(rx_chn->udma_rchanx, in k3_udma_glue_disable_rx_chn()
1277 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0); in k3_udma_glue_disable_rx_chn()
1281 if (rx_chn->psil_paired) { in k3_udma_glue_disable_rx_chn()
1282 xudma_navss_psil_unpair(rx_chn->common.udmax, in k3_udma_glue_disable_rx_chn()
1283 rx_chn->common.src_thread, in k3_udma_glue_disable_rx_chn()
1284 rx_chn->common.dst_thread); in k3_udma_glue_disable_rx_chn()
1285 rx_chn->psil_paired = false; in k3_udma_glue_disable_rx_chn()
1296 if (rx_chn->remote) in k3_udma_glue_tdown_rx_chn()
1301 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG, in k3_udma_glue_tdown_rx_chn()
1304 val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG); in k3_udma_glue_tdown_rx_chn()
1307 val = xudma_rchanrt_read(rx_chn->udma_rchanx, in k3_udma_glue_tdown_rx_chn()
1311 dev_err(rx_chn->common.dev, "RX tdown timeout\n"); in k3_udma_glue_tdown_rx_chn()
1317 val = xudma_rchanrt_read(rx_chn->udma_rchanx, in k3_udma_glue_tdown_rx_chn()
1320 dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n"); in k3_udma_glue_tdown_rx_chn()
1329 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_reset_rx_chn()
1330 struct device *dev = rx_chn->common.dev; in k3_udma_glue_reset_rx_chn()
1334 /* reset RXCQ as it is not input for udma - expected to be empty */ in k3_udma_glue_reset_rx_chn()
1335 occ_rx = k3_ringacc_ring_get_occ(flow->ringrx); in k3_udma_glue_reset_rx_chn()
1349 occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq); in k3_udma_glue_reset_rx_chn()
1353 ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma); in k3_udma_glue_reset_rx_chn()
1355 if (ret != -ENODATA) in k3_udma_glue_reset_rx_chn()
1362 k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx); in k3_udma_glue_reset_rx_chn()
1365 k3_ringacc_ring_reset(flow->ringrx); in k3_udma_glue_reset_rx_chn()
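The tdown/reset hits above mirror the TX side: the peer is torn down through UDMA_CHAN_RT_PEER_RT_EN_REG, then every flow's RX ring (and optionally its FDQ) is drained through a cleanup callback before the rings are reset. A sketch of the stop path across all flows; the callback and context are hypothetical, and the FDQ is drained only for flow 0, following the pattern existing client drivers use:

#include <linux/dma/k3-udma-glue.h>

/* Called for every descriptor the hardware still owned at reset time. */
static void example_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	/* unmap and free the buffer behind desc_dma */
}

static void example_stop_rx(void *drv_ctx,
			    struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	u32 i;

	k3_udma_glue_tdown_rx_chn(rx_chn, true);	/* synchronous teardown */

	for (i = 0; i < flow_num; i++)
		k3_udma_glue_reset_rx_chn(rx_chn, i, drv_ctx,
					  example_rx_cleanup, !!i);

	k3_udma_glue_disable_rx_chn(rx_chn);
}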
1373 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_push_rx_chn()
1375 return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma); in k3_udma_glue_push_rx_chn()
1382 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_pop_rx_chn()
1384 return k3_ringacc_ring_pop(flow->ringrx, desc_dma); in k3_udma_glue_pop_rx_chn()
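k3_udma_glue_push_rx_chn() and k3_udma_glue_pop_rx_chn() above are thin per-flow wrappers around the FDQ and RX rings. A hedged sketch of the receive loop for flow 0, with buffer and descriptor-pool handling left out:

#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

/* Hand one free, already-mapped host descriptor back to the hardware. */
static int example_rx_refill(struct k3_udma_glue_rx_channel *rx_chn,
			     struct cppi5_host_desc_t *desc,
			     dma_addr_t desc_dma)
{
	return k3_udma_glue_push_rx_chn(rx_chn, 0, desc, desc_dma);
}

/* Poll loop: pop completed descriptors from flow 0 until the ring is empty. */
static void example_rx_poll(struct k3_udma_glue_rx_channel *rx_chn)
{
	dma_addr_t desc_dma;

	while (!k3_udma_glue_pop_rx_chn(rx_chn, 0, &desc_dma)) {
		/* translate desc_dma back to the virtual descriptor, unmap
		 * it, pass the buffer up, then refill the FDQ */
	}
}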
1393 flow = &rx_chn->flows[flow_num]; in k3_udma_glue_rx_get_irq()
1395 if (xudma_is_pktdma(rx_chn->common.udmax)) { in k3_udma_glue_rx_get_irq()
1396 flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax, in k3_udma_glue_rx_get_irq()
1397 flow->udma_rflow_id); in k3_udma_glue_rx_get_irq()
1399 flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx); in k3_udma_glue_rx_get_irq()
1402 return flow->virq; in k3_udma_glue_rx_get_irq()
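k3_udma_glue_rx_get_irq() above returns either the mapped rflow interrupt (PKTDMA) or the RX ring interrupt for the given flow. A sketch of wiring it up for flow 0; the handler, trigger flag and context are assumptions:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/dma/k3-udma-glue.h>

static irqreturn_t example_rx_irq(int irq, void *dev_id)
{
	/* typically schedules NAPI / deferred popping of the RX ring */
	return IRQ_HANDLED;
}

static int example_rx_irq_setup(struct device *dev,
				struct k3_udma_glue_rx_channel *rx_chn,
				void *ctx)
{
	int irq = k3_udma_glue_rx_get_irq(rx_chn, 0);	/* flow 0 */

	if (irq <= 0)
		return irq ? irq : -ENXIO;

	return devm_request_irq(dev, irq, example_rx_irq, IRQF_TRIGGER_HIGH,
				dev_name(dev), ctx);
}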
1409 if (xudma_is_pktdma(rx_chn->common.udmax) && in k3_udma_glue_rx_get_dma_device()
1410 (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15)) in k3_udma_glue_rx_get_dma_device()
1411 return &rx_chn->common.chan_dev; in k3_udma_glue_rx_get_dma_device()
1413 return xudma_get_device(rx_chn->common.udmax); in k3_udma_glue_rx_get_dma_device()
1420 if (!xudma_is_pktdma(rx_chn->common.udmax) || in k3_udma_glue_rx_dma_to_cppi5_addr()
1421 !rx_chn->common.atype_asel) in k3_udma_glue_rx_dma_to_cppi5_addr()
1424 *addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT; in k3_udma_glue_rx_dma_to_cppi5_addr()
1431 if (!xudma_is_pktdma(rx_chn->common.udmax) || in k3_udma_glue_rx_cppi5_to_dma_addr()
1432 !rx_chn->common.atype_asel) in k3_udma_glue_rx_cppi5_to_dma_addr()
1435 *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0); in k3_udma_glue_rx_cppi5_to_dma_addr()