1*d7024191SGrygorii Strashko // SPDX-License-Identifier: GPL-2.0 2*d7024191SGrygorii Strashko /* 3*d7024191SGrygorii Strashko * K3 NAVSS DMA glue interface 4*d7024191SGrygorii Strashko * 5*d7024191SGrygorii Strashko * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com 6*d7024191SGrygorii Strashko * 7*d7024191SGrygorii Strashko */ 8*d7024191SGrygorii Strashko 9*d7024191SGrygorii Strashko #include <linux/atomic.h> 10*d7024191SGrygorii Strashko #include <linux/delay.h> 11*d7024191SGrygorii Strashko #include <linux/dma-mapping.h> 12*d7024191SGrygorii Strashko #include <linux/io.h> 13*d7024191SGrygorii Strashko #include <linux/init.h> 14*d7024191SGrygorii Strashko #include <linux/of.h> 15*d7024191SGrygorii Strashko #include <linux/platform_device.h> 16*d7024191SGrygorii Strashko #include <linux/soc/ti/k3-ringacc.h> 17*d7024191SGrygorii Strashko #include <linux/dma/ti-cppi5.h> 18*d7024191SGrygorii Strashko #include <linux/dma/k3-udma-glue.h> 19*d7024191SGrygorii Strashko 20*d7024191SGrygorii Strashko #include "k3-udma.h" 21*d7024191SGrygorii Strashko #include "k3-psil-priv.h" 22*d7024191SGrygorii Strashko 23*d7024191SGrygorii Strashko struct k3_udma_glue_common { 24*d7024191SGrygorii Strashko struct device *dev; 25*d7024191SGrygorii Strashko struct udma_dev *udmax; 26*d7024191SGrygorii Strashko const struct udma_tisci_rm *tisci_rm; 27*d7024191SGrygorii Strashko struct k3_ringacc *ringacc; 28*d7024191SGrygorii Strashko u32 src_thread; 29*d7024191SGrygorii Strashko u32 dst_thread; 30*d7024191SGrygorii Strashko 31*d7024191SGrygorii Strashko u32 hdesc_size; 32*d7024191SGrygorii Strashko bool epib; 33*d7024191SGrygorii Strashko u32 psdata_size; 34*d7024191SGrygorii Strashko u32 swdata_size; 35*d7024191SGrygorii Strashko }; 36*d7024191SGrygorii Strashko 37*d7024191SGrygorii Strashko struct k3_udma_glue_tx_channel { 38*d7024191SGrygorii Strashko struct k3_udma_glue_common common; 39*d7024191SGrygorii Strashko 40*d7024191SGrygorii Strashko struct udma_tchan *udma_tchanx; 41*d7024191SGrygorii Strashko int udma_tchan_id; 42*d7024191SGrygorii Strashko 43*d7024191SGrygorii Strashko struct k3_ring *ringtx; 44*d7024191SGrygorii Strashko struct k3_ring *ringtxcq; 45*d7024191SGrygorii Strashko 46*d7024191SGrygorii Strashko bool psil_paired; 47*d7024191SGrygorii Strashko 48*d7024191SGrygorii Strashko int virq; 49*d7024191SGrygorii Strashko 50*d7024191SGrygorii Strashko atomic_t free_pkts; 51*d7024191SGrygorii Strashko bool tx_pause_on_err; 52*d7024191SGrygorii Strashko bool tx_filt_einfo; 53*d7024191SGrygorii Strashko bool tx_filt_pswords; 54*d7024191SGrygorii Strashko bool tx_supr_tdpkt; 55*d7024191SGrygorii Strashko }; 56*d7024191SGrygorii Strashko 57*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow { 58*d7024191SGrygorii Strashko struct udma_rflow *udma_rflow; 59*d7024191SGrygorii Strashko int udma_rflow_id; 60*d7024191SGrygorii Strashko struct k3_ring *ringrx; 61*d7024191SGrygorii Strashko struct k3_ring *ringrxfdq; 62*d7024191SGrygorii Strashko 63*d7024191SGrygorii Strashko int virq; 64*d7024191SGrygorii Strashko }; 65*d7024191SGrygorii Strashko 66*d7024191SGrygorii Strashko struct k3_udma_glue_rx_channel { 67*d7024191SGrygorii Strashko struct k3_udma_glue_common common; 68*d7024191SGrygorii Strashko 69*d7024191SGrygorii Strashko struct udma_rchan *udma_rchanx; 70*d7024191SGrygorii Strashko int udma_rchan_id; 71*d7024191SGrygorii Strashko bool remote; 72*d7024191SGrygorii Strashko 73*d7024191SGrygorii Strashko bool psil_paired; 74*d7024191SGrygorii Strashko 75*d7024191SGrygorii Strashko u32 swdata_size; 76*d7024191SGrygorii Strashko int flow_id_base; 77*d7024191SGrygorii Strashko 78*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flows; 79*d7024191SGrygorii Strashko u32 flow_num; 80*d7024191SGrygorii Strashko u32 flows_ready; 81*d7024191SGrygorii Strashko }; 82*d7024191SGrygorii Strashko 83*d7024191SGrygorii Strashko #define K3_UDMAX_TDOWN_TIMEOUT_US 1000 84*d7024191SGrygorii Strashko 85*d7024191SGrygorii Strashko static int of_k3_udma_glue_parse(struct device_node *udmax_np, 86*d7024191SGrygorii Strashko struct k3_udma_glue_common *common) 87*d7024191SGrygorii Strashko { 88*d7024191SGrygorii Strashko common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np, 89*d7024191SGrygorii Strashko "ti,ringacc"); 90*d7024191SGrygorii Strashko if (IS_ERR(common->ringacc)) 91*d7024191SGrygorii Strashko return PTR_ERR(common->ringacc); 92*d7024191SGrygorii Strashko 93*d7024191SGrygorii Strashko common->udmax = of_xudma_dev_get(udmax_np, NULL); 94*d7024191SGrygorii Strashko if (IS_ERR(common->udmax)) 95*d7024191SGrygorii Strashko return PTR_ERR(common->udmax); 96*d7024191SGrygorii Strashko 97*d7024191SGrygorii Strashko common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax); 98*d7024191SGrygorii Strashko 99*d7024191SGrygorii Strashko return 0; 100*d7024191SGrygorii Strashko } 101*d7024191SGrygorii Strashko 102*d7024191SGrygorii Strashko static int of_k3_udma_glue_parse_chn(struct device_node *chn_np, 103*d7024191SGrygorii Strashko const char *name, struct k3_udma_glue_common *common, 104*d7024191SGrygorii Strashko bool tx_chn) 105*d7024191SGrygorii Strashko { 106*d7024191SGrygorii Strashko struct psil_endpoint_config *ep_config; 107*d7024191SGrygorii Strashko struct of_phandle_args dma_spec; 108*d7024191SGrygorii Strashko u32 thread_id; 109*d7024191SGrygorii Strashko int ret = 0; 110*d7024191SGrygorii Strashko int index; 111*d7024191SGrygorii Strashko 112*d7024191SGrygorii Strashko if (unlikely(!name)) 113*d7024191SGrygorii Strashko return -EINVAL; 114*d7024191SGrygorii Strashko 115*d7024191SGrygorii Strashko index = of_property_match_string(chn_np, "dma-names", name); 116*d7024191SGrygorii Strashko if (index < 0) 117*d7024191SGrygorii Strashko return index; 118*d7024191SGrygorii Strashko 119*d7024191SGrygorii Strashko if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index, 120*d7024191SGrygorii Strashko &dma_spec)) 121*d7024191SGrygorii Strashko return -ENOENT; 122*d7024191SGrygorii Strashko 123*d7024191SGrygorii Strashko thread_id = dma_spec.args[0]; 124*d7024191SGrygorii Strashko 125*d7024191SGrygorii Strashko if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { 126*d7024191SGrygorii Strashko ret = -EINVAL; 127*d7024191SGrygorii Strashko goto out_put_spec; 128*d7024191SGrygorii Strashko } 129*d7024191SGrygorii Strashko 130*d7024191SGrygorii Strashko if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { 131*d7024191SGrygorii Strashko ret = -EINVAL; 132*d7024191SGrygorii Strashko goto out_put_spec; 133*d7024191SGrygorii Strashko } 134*d7024191SGrygorii Strashko 135*d7024191SGrygorii Strashko /* get psil endpoint config */ 136*d7024191SGrygorii Strashko ep_config = psil_get_ep_config(thread_id); 137*d7024191SGrygorii Strashko if (IS_ERR(ep_config)) { 138*d7024191SGrygorii Strashko dev_err(common->dev, 139*d7024191SGrygorii Strashko "No configuration for psi-l thread 0x%04x\n", 140*d7024191SGrygorii Strashko thread_id); 141*d7024191SGrygorii Strashko ret = PTR_ERR(ep_config); 142*d7024191SGrygorii Strashko goto out_put_spec; 143*d7024191SGrygorii Strashko } 144*d7024191SGrygorii Strashko 145*d7024191SGrygorii Strashko common->epib = ep_config->needs_epib; 146*d7024191SGrygorii Strashko common->psdata_size = ep_config->psd_size; 147*d7024191SGrygorii Strashko 148*d7024191SGrygorii Strashko if (tx_chn) 149*d7024191SGrygorii Strashko common->dst_thread = thread_id; 150*d7024191SGrygorii Strashko else 151*d7024191SGrygorii Strashko common->src_thread = thread_id; 152*d7024191SGrygorii Strashko 153*d7024191SGrygorii Strashko ret = of_k3_udma_glue_parse(dma_spec.np, common); 154*d7024191SGrygorii Strashko 155*d7024191SGrygorii Strashko out_put_spec: 156*d7024191SGrygorii Strashko of_node_put(dma_spec.np); 157*d7024191SGrygorii Strashko return ret; 158*d7024191SGrygorii Strashko }; 159*d7024191SGrygorii Strashko 160*d7024191SGrygorii Strashko static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) 161*d7024191SGrygorii Strashko { 162*d7024191SGrygorii Strashko struct device *dev = tx_chn->common.dev; 163*d7024191SGrygorii Strashko 164*d7024191SGrygorii Strashko dev_dbg(dev, "dump_tx_chn:\n" 165*d7024191SGrygorii Strashko "udma_tchan_id: %d\n" 166*d7024191SGrygorii Strashko "src_thread: %08x\n" 167*d7024191SGrygorii Strashko "dst_thread: %08x\n", 168*d7024191SGrygorii Strashko tx_chn->udma_tchan_id, 169*d7024191SGrygorii Strashko tx_chn->common.src_thread, 170*d7024191SGrygorii Strashko tx_chn->common.dst_thread); 171*d7024191SGrygorii Strashko } 172*d7024191SGrygorii Strashko 173*d7024191SGrygorii Strashko static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn, 174*d7024191SGrygorii Strashko char *mark) 175*d7024191SGrygorii Strashko { 176*d7024191SGrygorii Strashko struct device *dev = chn->common.dev; 177*d7024191SGrygorii Strashko 178*d7024191SGrygorii Strashko dev_dbg(dev, "=== dump ===> %s\n", mark); 179*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG, 180*d7024191SGrygorii Strashko xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG)); 181*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG, 182*d7024191SGrygorii Strashko xudma_tchanrt_read(chn->udma_tchanx, 183*d7024191SGrygorii Strashko UDMA_TCHAN_RT_PEER_RT_EN_REG)); 184*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG, 185*d7024191SGrygorii Strashko xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG)); 186*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG, 187*d7024191SGrygorii Strashko xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG)); 188*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG, 189*d7024191SGrygorii Strashko xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG)); 190*d7024191SGrygorii Strashko } 191*d7024191SGrygorii Strashko 192*d7024191SGrygorii Strashko static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) 193*d7024191SGrygorii Strashko { 194*d7024191SGrygorii Strashko const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm; 195*d7024191SGrygorii Strashko struct ti_sci_msg_rm_udmap_tx_ch_cfg req; 196*d7024191SGrygorii Strashko 197*d7024191SGrygorii Strashko memset(&req, 0, sizeof(req)); 198*d7024191SGrygorii Strashko 199*d7024191SGrygorii Strashko req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | 200*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | 201*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | 202*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | 203*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | 204*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | 205*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID; 206*d7024191SGrygorii Strashko req.nav_id = tisci_rm->tisci_dev_id; 207*d7024191SGrygorii Strashko req.index = tx_chn->udma_tchan_id; 208*d7024191SGrygorii Strashko if (tx_chn->tx_pause_on_err) 209*d7024191SGrygorii Strashko req.tx_pause_on_err = 1; 210*d7024191SGrygorii Strashko if (tx_chn->tx_filt_einfo) 211*d7024191SGrygorii Strashko req.tx_filt_einfo = 1; 212*d7024191SGrygorii Strashko if (tx_chn->tx_filt_pswords) 213*d7024191SGrygorii Strashko req.tx_filt_pswords = 1; 214*d7024191SGrygorii Strashko req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 215*d7024191SGrygorii Strashko if (tx_chn->tx_supr_tdpkt) 216*d7024191SGrygorii Strashko req.tx_supr_tdpkt = 1; 217*d7024191SGrygorii Strashko req.tx_fetch_size = tx_chn->common.hdesc_size >> 2; 218*d7024191SGrygorii Strashko req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq); 219*d7024191SGrygorii Strashko 220*d7024191SGrygorii Strashko return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); 221*d7024191SGrygorii Strashko } 222*d7024191SGrygorii Strashko 223*d7024191SGrygorii Strashko struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, 224*d7024191SGrygorii Strashko const char *name, struct k3_udma_glue_tx_channel_cfg *cfg) 225*d7024191SGrygorii Strashko { 226*d7024191SGrygorii Strashko struct k3_udma_glue_tx_channel *tx_chn; 227*d7024191SGrygorii Strashko int ret; 228*d7024191SGrygorii Strashko 229*d7024191SGrygorii Strashko tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL); 230*d7024191SGrygorii Strashko if (!tx_chn) 231*d7024191SGrygorii Strashko return ERR_PTR(-ENOMEM); 232*d7024191SGrygorii Strashko 233*d7024191SGrygorii Strashko tx_chn->common.dev = dev; 234*d7024191SGrygorii Strashko tx_chn->common.swdata_size = cfg->swdata_size; 235*d7024191SGrygorii Strashko tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; 236*d7024191SGrygorii Strashko tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; 237*d7024191SGrygorii Strashko tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; 238*d7024191SGrygorii Strashko tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; 239*d7024191SGrygorii Strashko 240*d7024191SGrygorii Strashko /* parse of udmap channel */ 241*d7024191SGrygorii Strashko ret = of_k3_udma_glue_parse_chn(dev->of_node, name, 242*d7024191SGrygorii Strashko &tx_chn->common, true); 243*d7024191SGrygorii Strashko if (ret) 244*d7024191SGrygorii Strashko goto err; 245*d7024191SGrygorii Strashko 246*d7024191SGrygorii Strashko tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib, 247*d7024191SGrygorii Strashko tx_chn->common.psdata_size, 248*d7024191SGrygorii Strashko tx_chn->common.swdata_size); 249*d7024191SGrygorii Strashko 250*d7024191SGrygorii Strashko /* request and cfg UDMAP TX channel */ 251*d7024191SGrygorii Strashko tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1); 252*d7024191SGrygorii Strashko if (IS_ERR(tx_chn->udma_tchanx)) { 253*d7024191SGrygorii Strashko ret = PTR_ERR(tx_chn->udma_tchanx); 254*d7024191SGrygorii Strashko dev_err(dev, "UDMAX tchanx get err %d\n", ret); 255*d7024191SGrygorii Strashko goto err; 256*d7024191SGrygorii Strashko } 257*d7024191SGrygorii Strashko tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx); 258*d7024191SGrygorii Strashko 259*d7024191SGrygorii Strashko atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size); 260*d7024191SGrygorii Strashko 261*d7024191SGrygorii Strashko /* request and cfg rings */ 262*d7024191SGrygorii Strashko tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc, 263*d7024191SGrygorii Strashko tx_chn->udma_tchan_id, 0); 264*d7024191SGrygorii Strashko if (!tx_chn->ringtx) { 265*d7024191SGrygorii Strashko ret = -ENODEV; 266*d7024191SGrygorii Strashko dev_err(dev, "Failed to get TX ring %u\n", 267*d7024191SGrygorii Strashko tx_chn->udma_tchan_id); 268*d7024191SGrygorii Strashko goto err; 269*d7024191SGrygorii Strashko } 270*d7024191SGrygorii Strashko 271*d7024191SGrygorii Strashko tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc, 272*d7024191SGrygorii Strashko -1, 0); 273*d7024191SGrygorii Strashko if (!tx_chn->ringtxcq) { 274*d7024191SGrygorii Strashko ret = -ENODEV; 275*d7024191SGrygorii Strashko dev_err(dev, "Failed to get TXCQ ring\n"); 276*d7024191SGrygorii Strashko goto err; 277*d7024191SGrygorii Strashko } 278*d7024191SGrygorii Strashko 279*d7024191SGrygorii Strashko ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg); 280*d7024191SGrygorii Strashko if (ret) { 281*d7024191SGrygorii Strashko dev_err(dev, "Failed to cfg ringtx %d\n", ret); 282*d7024191SGrygorii Strashko goto err; 283*d7024191SGrygorii Strashko } 284*d7024191SGrygorii Strashko 285*d7024191SGrygorii Strashko ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg); 286*d7024191SGrygorii Strashko if (ret) { 287*d7024191SGrygorii Strashko dev_err(dev, "Failed to cfg ringtx %d\n", ret); 288*d7024191SGrygorii Strashko goto err; 289*d7024191SGrygorii Strashko } 290*d7024191SGrygorii Strashko 291*d7024191SGrygorii Strashko /* request and cfg psi-l */ 292*d7024191SGrygorii Strashko tx_chn->common.src_thread = 293*d7024191SGrygorii Strashko xudma_dev_get_psil_base(tx_chn->common.udmax) + 294*d7024191SGrygorii Strashko tx_chn->udma_tchan_id; 295*d7024191SGrygorii Strashko 296*d7024191SGrygorii Strashko ret = k3_udma_glue_cfg_tx_chn(tx_chn); 297*d7024191SGrygorii Strashko if (ret) { 298*d7024191SGrygorii Strashko dev_err(dev, "Failed to cfg tchan %d\n", ret); 299*d7024191SGrygorii Strashko goto err; 300*d7024191SGrygorii Strashko } 301*d7024191SGrygorii Strashko 302*d7024191SGrygorii Strashko ret = xudma_navss_psil_pair(tx_chn->common.udmax, 303*d7024191SGrygorii Strashko tx_chn->common.src_thread, 304*d7024191SGrygorii Strashko tx_chn->common.dst_thread); 305*d7024191SGrygorii Strashko if (ret) { 306*d7024191SGrygorii Strashko dev_err(dev, "PSI-L request err %d\n", ret); 307*d7024191SGrygorii Strashko goto err; 308*d7024191SGrygorii Strashko } 309*d7024191SGrygorii Strashko 310*d7024191SGrygorii Strashko tx_chn->psil_paired = true; 311*d7024191SGrygorii Strashko 312*d7024191SGrygorii Strashko /* reset TX RT registers */ 313*d7024191SGrygorii Strashko k3_udma_glue_disable_tx_chn(tx_chn); 314*d7024191SGrygorii Strashko 315*d7024191SGrygorii Strashko k3_udma_glue_dump_tx_chn(tx_chn); 316*d7024191SGrygorii Strashko 317*d7024191SGrygorii Strashko return tx_chn; 318*d7024191SGrygorii Strashko 319*d7024191SGrygorii Strashko err: 320*d7024191SGrygorii Strashko k3_udma_glue_release_tx_chn(tx_chn); 321*d7024191SGrygorii Strashko return ERR_PTR(ret); 322*d7024191SGrygorii Strashko } 323*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn); 324*d7024191SGrygorii Strashko 325*d7024191SGrygorii Strashko void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) 326*d7024191SGrygorii Strashko { 327*d7024191SGrygorii Strashko if (tx_chn->psil_paired) { 328*d7024191SGrygorii Strashko xudma_navss_psil_unpair(tx_chn->common.udmax, 329*d7024191SGrygorii Strashko tx_chn->common.src_thread, 330*d7024191SGrygorii Strashko tx_chn->common.dst_thread); 331*d7024191SGrygorii Strashko tx_chn->psil_paired = false; 332*d7024191SGrygorii Strashko } 333*d7024191SGrygorii Strashko 334*d7024191SGrygorii Strashko if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx)) 335*d7024191SGrygorii Strashko xudma_tchan_put(tx_chn->common.udmax, 336*d7024191SGrygorii Strashko tx_chn->udma_tchanx); 337*d7024191SGrygorii Strashko 338*d7024191SGrygorii Strashko if (tx_chn->ringtxcq) 339*d7024191SGrygorii Strashko k3_ringacc_ring_free(tx_chn->ringtxcq); 340*d7024191SGrygorii Strashko 341*d7024191SGrygorii Strashko if (tx_chn->ringtx) 342*d7024191SGrygorii Strashko k3_ringacc_ring_free(tx_chn->ringtx); 343*d7024191SGrygorii Strashko } 344*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn); 345*d7024191SGrygorii Strashko 346*d7024191SGrygorii Strashko int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, 347*d7024191SGrygorii Strashko struct cppi5_host_desc_t *desc_tx, 348*d7024191SGrygorii Strashko dma_addr_t desc_dma) 349*d7024191SGrygorii Strashko { 350*d7024191SGrygorii Strashko u32 ringtxcq_id; 351*d7024191SGrygorii Strashko 352*d7024191SGrygorii Strashko if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0)) 353*d7024191SGrygorii Strashko return -ENOMEM; 354*d7024191SGrygorii Strashko 355*d7024191SGrygorii Strashko ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq); 356*d7024191SGrygorii Strashko cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id); 357*d7024191SGrygorii Strashko 358*d7024191SGrygorii Strashko return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma); 359*d7024191SGrygorii Strashko } 360*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn); 361*d7024191SGrygorii Strashko 362*d7024191SGrygorii Strashko int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, 363*d7024191SGrygorii Strashko dma_addr_t *desc_dma) 364*d7024191SGrygorii Strashko { 365*d7024191SGrygorii Strashko int ret; 366*d7024191SGrygorii Strashko 367*d7024191SGrygorii Strashko ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma); 368*d7024191SGrygorii Strashko if (!ret) 369*d7024191SGrygorii Strashko atomic_inc(&tx_chn->free_pkts); 370*d7024191SGrygorii Strashko 371*d7024191SGrygorii Strashko return ret; 372*d7024191SGrygorii Strashko } 373*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn); 374*d7024191SGrygorii Strashko 375*d7024191SGrygorii Strashko int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) 376*d7024191SGrygorii Strashko { 377*d7024191SGrygorii Strashko u32 txrt_ctl; 378*d7024191SGrygorii Strashko 379*d7024191SGrygorii Strashko txrt_ctl = UDMA_PEER_RT_EN_ENABLE; 380*d7024191SGrygorii Strashko xudma_tchanrt_write(tx_chn->udma_tchanx, 381*d7024191SGrygorii Strashko UDMA_TCHAN_RT_PEER_RT_EN_REG, 382*d7024191SGrygorii Strashko txrt_ctl); 383*d7024191SGrygorii Strashko 384*d7024191SGrygorii Strashko txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx, 385*d7024191SGrygorii Strashko UDMA_TCHAN_RT_CTL_REG); 386*d7024191SGrygorii Strashko txrt_ctl |= UDMA_CHAN_RT_CTL_EN; 387*d7024191SGrygorii Strashko xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 388*d7024191SGrygorii Strashko txrt_ctl); 389*d7024191SGrygorii Strashko 390*d7024191SGrygorii Strashko k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en"); 391*d7024191SGrygorii Strashko return 0; 392*d7024191SGrygorii Strashko } 393*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn); 394*d7024191SGrygorii Strashko 395*d7024191SGrygorii Strashko void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) 396*d7024191SGrygorii Strashko { 397*d7024191SGrygorii Strashko k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1"); 398*d7024191SGrygorii Strashko 399*d7024191SGrygorii Strashko xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0); 400*d7024191SGrygorii Strashko 401*d7024191SGrygorii Strashko xudma_tchanrt_write(tx_chn->udma_tchanx, 402*d7024191SGrygorii Strashko UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); 403*d7024191SGrygorii Strashko k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2"); 404*d7024191SGrygorii Strashko } 405*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn); 406*d7024191SGrygorii Strashko 407*d7024191SGrygorii Strashko void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, 408*d7024191SGrygorii Strashko bool sync) 409*d7024191SGrygorii Strashko { 410*d7024191SGrygorii Strashko int i = 0; 411*d7024191SGrygorii Strashko u32 val; 412*d7024191SGrygorii Strashko 413*d7024191SGrygorii Strashko k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1"); 414*d7024191SGrygorii Strashko 415*d7024191SGrygorii Strashko xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 416*d7024191SGrygorii Strashko UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN); 417*d7024191SGrygorii Strashko 418*d7024191SGrygorii Strashko val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG); 419*d7024191SGrygorii Strashko 420*d7024191SGrygorii Strashko while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { 421*d7024191SGrygorii Strashko val = xudma_tchanrt_read(tx_chn->udma_tchanx, 422*d7024191SGrygorii Strashko UDMA_TCHAN_RT_CTL_REG); 423*d7024191SGrygorii Strashko udelay(1); 424*d7024191SGrygorii Strashko if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { 425*d7024191SGrygorii Strashko dev_err(tx_chn->common.dev, "TX tdown timeout\n"); 426*d7024191SGrygorii Strashko break; 427*d7024191SGrygorii Strashko } 428*d7024191SGrygorii Strashko i++; 429*d7024191SGrygorii Strashko } 430*d7024191SGrygorii Strashko 431*d7024191SGrygorii Strashko val = xudma_tchanrt_read(tx_chn->udma_tchanx, 432*d7024191SGrygorii Strashko UDMA_TCHAN_RT_PEER_RT_EN_REG); 433*d7024191SGrygorii Strashko if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) 434*d7024191SGrygorii Strashko dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n"); 435*d7024191SGrygorii Strashko k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2"); 436*d7024191SGrygorii Strashko } 437*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn); 438*d7024191SGrygorii Strashko 439*d7024191SGrygorii Strashko void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, 440*d7024191SGrygorii Strashko void *data, 441*d7024191SGrygorii Strashko void (*cleanup)(void *data, dma_addr_t desc_dma)) 442*d7024191SGrygorii Strashko { 443*d7024191SGrygorii Strashko dma_addr_t desc_dma; 444*d7024191SGrygorii Strashko int occ_tx, i, ret; 445*d7024191SGrygorii Strashko 446*d7024191SGrygorii Strashko /* reset TXCQ as it is not input for udma - expected to be empty */ 447*d7024191SGrygorii Strashko if (tx_chn->ringtxcq) 448*d7024191SGrygorii Strashko k3_ringacc_ring_reset(tx_chn->ringtxcq); 449*d7024191SGrygorii Strashko 450*d7024191SGrygorii Strashko /* 451*d7024191SGrygorii Strashko * TXQ reset need to be special way as it is input for udma and its 452*d7024191SGrygorii Strashko * state cached by udma, so: 453*d7024191SGrygorii Strashko * 1) save TXQ occ 454*d7024191SGrygorii Strashko * 2) clean up TXQ and call callback .cleanup() for each desc 455*d7024191SGrygorii Strashko * 3) reset TXQ in a special way 456*d7024191SGrygorii Strashko */ 457*d7024191SGrygorii Strashko occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx); 458*d7024191SGrygorii Strashko dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx); 459*d7024191SGrygorii Strashko 460*d7024191SGrygorii Strashko for (i = 0; i < occ_tx; i++) { 461*d7024191SGrygorii Strashko ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma); 462*d7024191SGrygorii Strashko if (ret) { 463*d7024191SGrygorii Strashko dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret); 464*d7024191SGrygorii Strashko break; 465*d7024191SGrygorii Strashko } 466*d7024191SGrygorii Strashko cleanup(data, desc_dma); 467*d7024191SGrygorii Strashko } 468*d7024191SGrygorii Strashko 469*d7024191SGrygorii Strashko k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx); 470*d7024191SGrygorii Strashko } 471*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn); 472*d7024191SGrygorii Strashko 473*d7024191SGrygorii Strashko u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn) 474*d7024191SGrygorii Strashko { 475*d7024191SGrygorii Strashko return tx_chn->common.hdesc_size; 476*d7024191SGrygorii Strashko } 477*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size); 478*d7024191SGrygorii Strashko 479*d7024191SGrygorii Strashko u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn) 480*d7024191SGrygorii Strashko { 481*d7024191SGrygorii Strashko return k3_ringacc_get_ring_id(tx_chn->ringtxcq); 482*d7024191SGrygorii Strashko } 483*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id); 484*d7024191SGrygorii Strashko 485*d7024191SGrygorii Strashko int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn) 486*d7024191SGrygorii Strashko { 487*d7024191SGrygorii Strashko tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); 488*d7024191SGrygorii Strashko 489*d7024191SGrygorii Strashko return tx_chn->virq; 490*d7024191SGrygorii Strashko } 491*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq); 492*d7024191SGrygorii Strashko 493*d7024191SGrygorii Strashko static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) 494*d7024191SGrygorii Strashko { 495*d7024191SGrygorii Strashko const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; 496*d7024191SGrygorii Strashko struct ti_sci_msg_rm_udmap_rx_ch_cfg req; 497*d7024191SGrygorii Strashko int ret; 498*d7024191SGrygorii Strashko 499*d7024191SGrygorii Strashko memset(&req, 0, sizeof(req)); 500*d7024191SGrygorii Strashko 501*d7024191SGrygorii Strashko req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | 502*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | 503*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | 504*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | 505*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID; 506*d7024191SGrygorii Strashko 507*d7024191SGrygorii Strashko req.nav_id = tisci_rm->tisci_dev_id; 508*d7024191SGrygorii Strashko req.index = rx_chn->udma_rchan_id; 509*d7024191SGrygorii Strashko req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; 510*d7024191SGrygorii Strashko /* 511*d7024191SGrygorii Strashko * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw 512*d7024191SGrygorii Strashko * and udmax impl, so just configure it to invalid value. 513*d7024191SGrygorii Strashko * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); 514*d7024191SGrygorii Strashko */ 515*d7024191SGrygorii Strashko req.rxcq_qnum = 0xFFFF; 516*d7024191SGrygorii Strashko if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) { 517*d7024191SGrygorii Strashko /* Default flow + extra ones */ 518*d7024191SGrygorii Strashko req.flowid_start = rx_chn->flow_id_base; 519*d7024191SGrygorii Strashko req.flowid_cnt = rx_chn->flow_num; 520*d7024191SGrygorii Strashko } 521*d7024191SGrygorii Strashko req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 522*d7024191SGrygorii Strashko 523*d7024191SGrygorii Strashko ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req); 524*d7024191SGrygorii Strashko if (ret) 525*d7024191SGrygorii Strashko dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", 526*d7024191SGrygorii Strashko rx_chn->udma_rchan_id, ret); 527*d7024191SGrygorii Strashko 528*d7024191SGrygorii Strashko return ret; 529*d7024191SGrygorii Strashko } 530*d7024191SGrygorii Strashko 531*d7024191SGrygorii Strashko static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, 532*d7024191SGrygorii Strashko u32 flow_num) 533*d7024191SGrygorii Strashko { 534*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; 535*d7024191SGrygorii Strashko 536*d7024191SGrygorii Strashko if (IS_ERR_OR_NULL(flow->udma_rflow)) 537*d7024191SGrygorii Strashko return; 538*d7024191SGrygorii Strashko 539*d7024191SGrygorii Strashko if (flow->ringrxfdq) 540*d7024191SGrygorii Strashko k3_ringacc_ring_free(flow->ringrxfdq); 541*d7024191SGrygorii Strashko 542*d7024191SGrygorii Strashko if (flow->ringrx) 543*d7024191SGrygorii Strashko k3_ringacc_ring_free(flow->ringrx); 544*d7024191SGrygorii Strashko 545*d7024191SGrygorii Strashko xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); 546*d7024191SGrygorii Strashko flow->udma_rflow = NULL; 547*d7024191SGrygorii Strashko rx_chn->flows_ready--; 548*d7024191SGrygorii Strashko } 549*d7024191SGrygorii Strashko 550*d7024191SGrygorii Strashko static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, 551*d7024191SGrygorii Strashko u32 flow_idx, 552*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow_cfg *flow_cfg) 553*d7024191SGrygorii Strashko { 554*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; 555*d7024191SGrygorii Strashko const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; 556*d7024191SGrygorii Strashko struct device *dev = rx_chn->common.dev; 557*d7024191SGrygorii Strashko struct ti_sci_msg_rm_udmap_flow_cfg req; 558*d7024191SGrygorii Strashko int rx_ring_id; 559*d7024191SGrygorii Strashko int rx_ringfdq_id; 560*d7024191SGrygorii Strashko int ret = 0; 561*d7024191SGrygorii Strashko 562*d7024191SGrygorii Strashko flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, 563*d7024191SGrygorii Strashko flow->udma_rflow_id); 564*d7024191SGrygorii Strashko if (IS_ERR(flow->udma_rflow)) { 565*d7024191SGrygorii Strashko ret = PTR_ERR(flow->udma_rflow); 566*d7024191SGrygorii Strashko dev_err(dev, "UDMAX rflow get err %d\n", ret); 567*d7024191SGrygorii Strashko goto err; 568*d7024191SGrygorii Strashko } 569*d7024191SGrygorii Strashko 570*d7024191SGrygorii Strashko if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { 571*d7024191SGrygorii Strashko xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); 572*d7024191SGrygorii Strashko return -ENODEV; 573*d7024191SGrygorii Strashko } 574*d7024191SGrygorii Strashko 575*d7024191SGrygorii Strashko /* request and cfg rings */ 576*d7024191SGrygorii Strashko flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc, 577*d7024191SGrygorii Strashko flow_cfg->ring_rxq_id, 0); 578*d7024191SGrygorii Strashko if (!flow->ringrx) { 579*d7024191SGrygorii Strashko ret = -ENODEV; 580*d7024191SGrygorii Strashko dev_err(dev, "Failed to get RX ring\n"); 581*d7024191SGrygorii Strashko goto err; 582*d7024191SGrygorii Strashko } 583*d7024191SGrygorii Strashko 584*d7024191SGrygorii Strashko flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc, 585*d7024191SGrygorii Strashko flow_cfg->ring_rxfdq0_id, 0); 586*d7024191SGrygorii Strashko if (!flow->ringrxfdq) { 587*d7024191SGrygorii Strashko ret = -ENODEV; 588*d7024191SGrygorii Strashko dev_err(dev, "Failed to get RXFDQ ring\n"); 589*d7024191SGrygorii Strashko goto err; 590*d7024191SGrygorii Strashko } 591*d7024191SGrygorii Strashko 592*d7024191SGrygorii Strashko ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); 593*d7024191SGrygorii Strashko if (ret) { 594*d7024191SGrygorii Strashko dev_err(dev, "Failed to cfg ringrx %d\n", ret); 595*d7024191SGrygorii Strashko goto err; 596*d7024191SGrygorii Strashko } 597*d7024191SGrygorii Strashko 598*d7024191SGrygorii Strashko ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); 599*d7024191SGrygorii Strashko if (ret) { 600*d7024191SGrygorii Strashko dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret); 601*d7024191SGrygorii Strashko goto err; 602*d7024191SGrygorii Strashko } 603*d7024191SGrygorii Strashko 604*d7024191SGrygorii Strashko if (rx_chn->remote) { 605*d7024191SGrygorii Strashko rx_ring_id = TI_SCI_RESOURCE_NULL; 606*d7024191SGrygorii Strashko rx_ringfdq_id = TI_SCI_RESOURCE_NULL; 607*d7024191SGrygorii Strashko } else { 608*d7024191SGrygorii Strashko rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); 609*d7024191SGrygorii Strashko rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); 610*d7024191SGrygorii Strashko } 611*d7024191SGrygorii Strashko 612*d7024191SGrygorii Strashko memset(&req, 0, sizeof(req)); 613*d7024191SGrygorii Strashko 614*d7024191SGrygorii Strashko req.valid_params = 615*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 616*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 617*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | 618*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | 619*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | 620*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | 621*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | 622*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | 623*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | 624*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | 625*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | 626*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | 627*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; 628*d7024191SGrygorii Strashko req.nav_id = tisci_rm->tisci_dev_id; 629*d7024191SGrygorii Strashko req.flow_index = flow->udma_rflow_id; 630*d7024191SGrygorii Strashko if (rx_chn->common.epib) 631*d7024191SGrygorii Strashko req.rx_einfo_present = 1; 632*d7024191SGrygorii Strashko if (rx_chn->common.psdata_size) 633*d7024191SGrygorii Strashko req.rx_psinfo_present = 1; 634*d7024191SGrygorii Strashko if (flow_cfg->rx_error_handling) 635*d7024191SGrygorii Strashko req.rx_error_handling = 1; 636*d7024191SGrygorii Strashko req.rx_desc_type = 0; 637*d7024191SGrygorii Strashko req.rx_dest_qnum = rx_ring_id; 638*d7024191SGrygorii Strashko req.rx_src_tag_hi_sel = 0; 639*d7024191SGrygorii Strashko req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel; 640*d7024191SGrygorii Strashko req.rx_dest_tag_hi_sel = 0; 641*d7024191SGrygorii Strashko req.rx_dest_tag_lo_sel = 0; 642*d7024191SGrygorii Strashko req.rx_fdq0_sz0_qnum = rx_ringfdq_id; 643*d7024191SGrygorii Strashko req.rx_fdq1_qnum = rx_ringfdq_id; 644*d7024191SGrygorii Strashko req.rx_fdq2_qnum = rx_ringfdq_id; 645*d7024191SGrygorii Strashko req.rx_fdq3_qnum = rx_ringfdq_id; 646*d7024191SGrygorii Strashko 647*d7024191SGrygorii Strashko ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); 648*d7024191SGrygorii Strashko if (ret) { 649*d7024191SGrygorii Strashko dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, 650*d7024191SGrygorii Strashko ret); 651*d7024191SGrygorii Strashko goto err; 652*d7024191SGrygorii Strashko } 653*d7024191SGrygorii Strashko 654*d7024191SGrygorii Strashko rx_chn->flows_ready++; 655*d7024191SGrygorii Strashko dev_dbg(dev, "flow%d config done. ready:%d\n", 656*d7024191SGrygorii Strashko flow->udma_rflow_id, rx_chn->flows_ready); 657*d7024191SGrygorii Strashko 658*d7024191SGrygorii Strashko return 0; 659*d7024191SGrygorii Strashko err: 660*d7024191SGrygorii Strashko k3_udma_glue_release_rx_flow(rx_chn, flow_idx); 661*d7024191SGrygorii Strashko return ret; 662*d7024191SGrygorii Strashko } 663*d7024191SGrygorii Strashko 664*d7024191SGrygorii Strashko static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn) 665*d7024191SGrygorii Strashko { 666*d7024191SGrygorii Strashko struct device *dev = chn->common.dev; 667*d7024191SGrygorii Strashko 668*d7024191SGrygorii Strashko dev_dbg(dev, "dump_rx_chn:\n" 669*d7024191SGrygorii Strashko "udma_rchan_id: %d\n" 670*d7024191SGrygorii Strashko "src_thread: %08x\n" 671*d7024191SGrygorii Strashko "dst_thread: %08x\n" 672*d7024191SGrygorii Strashko "epib: %d\n" 673*d7024191SGrygorii Strashko "hdesc_size: %u\n" 674*d7024191SGrygorii Strashko "psdata_size: %u\n" 675*d7024191SGrygorii Strashko "swdata_size: %u\n" 676*d7024191SGrygorii Strashko "flow_id_base: %d\n" 677*d7024191SGrygorii Strashko "flow_num: %d\n", 678*d7024191SGrygorii Strashko chn->udma_rchan_id, 679*d7024191SGrygorii Strashko chn->common.src_thread, 680*d7024191SGrygorii Strashko chn->common.dst_thread, 681*d7024191SGrygorii Strashko chn->common.epib, 682*d7024191SGrygorii Strashko chn->common.hdesc_size, 683*d7024191SGrygorii Strashko chn->common.psdata_size, 684*d7024191SGrygorii Strashko chn->common.swdata_size, 685*d7024191SGrygorii Strashko chn->flow_id_base, 686*d7024191SGrygorii Strashko chn->flow_num); 687*d7024191SGrygorii Strashko } 688*d7024191SGrygorii Strashko 689*d7024191SGrygorii Strashko static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn, 690*d7024191SGrygorii Strashko char *mark) 691*d7024191SGrygorii Strashko { 692*d7024191SGrygorii Strashko struct device *dev = chn->common.dev; 693*d7024191SGrygorii Strashko 694*d7024191SGrygorii Strashko dev_dbg(dev, "=== dump ===> %s\n", mark); 695*d7024191SGrygorii Strashko 696*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG, 697*d7024191SGrygorii Strashko xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG)); 698*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG, 699*d7024191SGrygorii Strashko xudma_rchanrt_read(chn->udma_rchanx, 700*d7024191SGrygorii Strashko UDMA_RCHAN_RT_PEER_RT_EN_REG)); 701*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG, 702*d7024191SGrygorii Strashko xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG)); 703*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG, 704*d7024191SGrygorii Strashko xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG)); 705*d7024191SGrygorii Strashko dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG, 706*d7024191SGrygorii Strashko xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG)); 707*d7024191SGrygorii Strashko } 708*d7024191SGrygorii Strashko 709*d7024191SGrygorii Strashko static int 710*d7024191SGrygorii Strashko k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn, 711*d7024191SGrygorii Strashko struct k3_udma_glue_rx_channel_cfg *cfg) 712*d7024191SGrygorii Strashko { 713*d7024191SGrygorii Strashko int ret; 714*d7024191SGrygorii Strashko 715*d7024191SGrygorii Strashko /* default rflow */ 716*d7024191SGrygorii Strashko if (cfg->flow_id_use_rxchan_id) 717*d7024191SGrygorii Strashko return 0; 718*d7024191SGrygorii Strashko 719*d7024191SGrygorii Strashko /* not a GP rflows */ 720*d7024191SGrygorii Strashko if (rx_chn->flow_id_base != -1 && 721*d7024191SGrygorii Strashko !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) 722*d7024191SGrygorii Strashko return 0; 723*d7024191SGrygorii Strashko 724*d7024191SGrygorii Strashko /* Allocate range of GP rflows */ 725*d7024191SGrygorii Strashko ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, 726*d7024191SGrygorii Strashko rx_chn->flow_id_base, 727*d7024191SGrygorii Strashko rx_chn->flow_num); 728*d7024191SGrygorii Strashko if (ret < 0) { 729*d7024191SGrygorii Strashko dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", 730*d7024191SGrygorii Strashko rx_chn->flow_id_base, rx_chn->flow_num, ret); 731*d7024191SGrygorii Strashko return ret; 732*d7024191SGrygorii Strashko } 733*d7024191SGrygorii Strashko rx_chn->flow_id_base = ret; 734*d7024191SGrygorii Strashko 735*d7024191SGrygorii Strashko return 0; 736*d7024191SGrygorii Strashko } 737*d7024191SGrygorii Strashko 738*d7024191SGrygorii Strashko static struct k3_udma_glue_rx_channel * 739*d7024191SGrygorii Strashko k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name, 740*d7024191SGrygorii Strashko struct k3_udma_glue_rx_channel_cfg *cfg) 741*d7024191SGrygorii Strashko { 742*d7024191SGrygorii Strashko struct k3_udma_glue_rx_channel *rx_chn; 743*d7024191SGrygorii Strashko int ret, i; 744*d7024191SGrygorii Strashko 745*d7024191SGrygorii Strashko if (cfg->flow_id_num <= 0) 746*d7024191SGrygorii Strashko return ERR_PTR(-EINVAL); 747*d7024191SGrygorii Strashko 748*d7024191SGrygorii Strashko if (cfg->flow_id_num != 1 && 749*d7024191SGrygorii Strashko (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id)) 750*d7024191SGrygorii Strashko return ERR_PTR(-EINVAL); 751*d7024191SGrygorii Strashko 752*d7024191SGrygorii Strashko rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); 753*d7024191SGrygorii Strashko if (!rx_chn) 754*d7024191SGrygorii Strashko return ERR_PTR(-ENOMEM); 755*d7024191SGrygorii Strashko 756*d7024191SGrygorii Strashko rx_chn->common.dev = dev; 757*d7024191SGrygorii Strashko rx_chn->common.swdata_size = cfg->swdata_size; 758*d7024191SGrygorii Strashko rx_chn->remote = false; 759*d7024191SGrygorii Strashko 760*d7024191SGrygorii Strashko /* parse of udmap channel */ 761*d7024191SGrygorii Strashko ret = of_k3_udma_glue_parse_chn(dev->of_node, name, 762*d7024191SGrygorii Strashko &rx_chn->common, false); 763*d7024191SGrygorii Strashko if (ret) 764*d7024191SGrygorii Strashko goto err; 765*d7024191SGrygorii Strashko 766*d7024191SGrygorii Strashko rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, 767*d7024191SGrygorii Strashko rx_chn->common.psdata_size, 768*d7024191SGrygorii Strashko rx_chn->common.swdata_size); 769*d7024191SGrygorii Strashko 770*d7024191SGrygorii Strashko /* request and cfg UDMAP RX channel */ 771*d7024191SGrygorii Strashko rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1); 772*d7024191SGrygorii Strashko if (IS_ERR(rx_chn->udma_rchanx)) { 773*d7024191SGrygorii Strashko ret = PTR_ERR(rx_chn->udma_rchanx); 774*d7024191SGrygorii Strashko dev_err(dev, "UDMAX rchanx get err %d\n", ret); 775*d7024191SGrygorii Strashko goto err; 776*d7024191SGrygorii Strashko } 777*d7024191SGrygorii Strashko rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); 778*d7024191SGrygorii Strashko 779*d7024191SGrygorii Strashko rx_chn->flow_num = cfg->flow_id_num; 780*d7024191SGrygorii Strashko rx_chn->flow_id_base = cfg->flow_id_base; 781*d7024191SGrygorii Strashko 782*d7024191SGrygorii Strashko /* Use RX channel id as flow id: target dev can't generate flow_id */ 783*d7024191SGrygorii Strashko if (cfg->flow_id_use_rxchan_id) 784*d7024191SGrygorii Strashko rx_chn->flow_id_base = rx_chn->udma_rchan_id; 785*d7024191SGrygorii Strashko 786*d7024191SGrygorii Strashko rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, 787*d7024191SGrygorii Strashko sizeof(*rx_chn->flows), GFP_KERNEL); 788*d7024191SGrygorii Strashko if (!rx_chn->flows) { 789*d7024191SGrygorii Strashko ret = -ENOMEM; 790*d7024191SGrygorii Strashko goto err; 791*d7024191SGrygorii Strashko } 792*d7024191SGrygorii Strashko 793*d7024191SGrygorii Strashko ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); 794*d7024191SGrygorii Strashko if (ret) 795*d7024191SGrygorii Strashko goto err; 796*d7024191SGrygorii Strashko 797*d7024191SGrygorii Strashko for (i = 0; i < rx_chn->flow_num; i++) 798*d7024191SGrygorii Strashko rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; 799*d7024191SGrygorii Strashko 800*d7024191SGrygorii Strashko /* request and cfg psi-l */ 801*d7024191SGrygorii Strashko rx_chn->common.dst_thread = 802*d7024191SGrygorii Strashko xudma_dev_get_psil_base(rx_chn->common.udmax) + 803*d7024191SGrygorii Strashko rx_chn->udma_rchan_id; 804*d7024191SGrygorii Strashko 805*d7024191SGrygorii Strashko ret = k3_udma_glue_cfg_rx_chn(rx_chn); 806*d7024191SGrygorii Strashko if (ret) { 807*d7024191SGrygorii Strashko dev_err(dev, "Failed to cfg rchan %d\n", ret); 808*d7024191SGrygorii Strashko goto err; 809*d7024191SGrygorii Strashko } 810*d7024191SGrygorii Strashko 811*d7024191SGrygorii Strashko /* init default RX flow only if flow_num = 1 */ 812*d7024191SGrygorii Strashko if (cfg->def_flow_cfg) { 813*d7024191SGrygorii Strashko ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); 814*d7024191SGrygorii Strashko if (ret) 815*d7024191SGrygorii Strashko goto err; 816*d7024191SGrygorii Strashko } 817*d7024191SGrygorii Strashko 818*d7024191SGrygorii Strashko ret = xudma_navss_psil_pair(rx_chn->common.udmax, 819*d7024191SGrygorii Strashko rx_chn->common.src_thread, 820*d7024191SGrygorii Strashko rx_chn->common.dst_thread); 821*d7024191SGrygorii Strashko if (ret) { 822*d7024191SGrygorii Strashko dev_err(dev, "PSI-L request err %d\n", ret); 823*d7024191SGrygorii Strashko goto err; 824*d7024191SGrygorii Strashko } 825*d7024191SGrygorii Strashko 826*d7024191SGrygorii Strashko rx_chn->psil_paired = true; 827*d7024191SGrygorii Strashko 828*d7024191SGrygorii Strashko /* reset RX RT registers */ 829*d7024191SGrygorii Strashko k3_udma_glue_disable_rx_chn(rx_chn); 830*d7024191SGrygorii Strashko 831*d7024191SGrygorii Strashko k3_udma_glue_dump_rx_chn(rx_chn); 832*d7024191SGrygorii Strashko 833*d7024191SGrygorii Strashko return rx_chn; 834*d7024191SGrygorii Strashko 835*d7024191SGrygorii Strashko err: 836*d7024191SGrygorii Strashko k3_udma_glue_release_rx_chn(rx_chn); 837*d7024191SGrygorii Strashko return ERR_PTR(ret); 838*d7024191SGrygorii Strashko } 839*d7024191SGrygorii Strashko 840*d7024191SGrygorii Strashko static struct k3_udma_glue_rx_channel * 841*d7024191SGrygorii Strashko k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name, 842*d7024191SGrygorii Strashko struct k3_udma_glue_rx_channel_cfg *cfg) 843*d7024191SGrygorii Strashko { 844*d7024191SGrygorii Strashko struct k3_udma_glue_rx_channel *rx_chn; 845*d7024191SGrygorii Strashko int ret, i; 846*d7024191SGrygorii Strashko 847*d7024191SGrygorii Strashko if (cfg->flow_id_num <= 0 || 848*d7024191SGrygorii Strashko cfg->flow_id_use_rxchan_id || 849*d7024191SGrygorii Strashko cfg->def_flow_cfg || 850*d7024191SGrygorii Strashko cfg->flow_id_base < 0) 851*d7024191SGrygorii Strashko return ERR_PTR(-EINVAL); 852*d7024191SGrygorii Strashko 853*d7024191SGrygorii Strashko /* 854*d7024191SGrygorii Strashko * Remote RX channel is under control of Remote CPU core, so 855*d7024191SGrygorii Strashko * Linux can only request and manipulate by dedicated RX flows 856*d7024191SGrygorii Strashko */ 857*d7024191SGrygorii Strashko 858*d7024191SGrygorii Strashko rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); 859*d7024191SGrygorii Strashko if (!rx_chn) 860*d7024191SGrygorii Strashko return ERR_PTR(-ENOMEM); 861*d7024191SGrygorii Strashko 862*d7024191SGrygorii Strashko rx_chn->common.dev = dev; 863*d7024191SGrygorii Strashko rx_chn->common.swdata_size = cfg->swdata_size; 864*d7024191SGrygorii Strashko rx_chn->remote = true; 865*d7024191SGrygorii Strashko rx_chn->udma_rchan_id = -1; 866*d7024191SGrygorii Strashko rx_chn->flow_num = cfg->flow_id_num; 867*d7024191SGrygorii Strashko rx_chn->flow_id_base = cfg->flow_id_base; 868*d7024191SGrygorii Strashko rx_chn->psil_paired = false; 869*d7024191SGrygorii Strashko 870*d7024191SGrygorii Strashko /* parse of udmap channel */ 871*d7024191SGrygorii Strashko ret = of_k3_udma_glue_parse_chn(dev->of_node, name, 872*d7024191SGrygorii Strashko &rx_chn->common, false); 873*d7024191SGrygorii Strashko if (ret) 874*d7024191SGrygorii Strashko goto err; 875*d7024191SGrygorii Strashko 876*d7024191SGrygorii Strashko rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, 877*d7024191SGrygorii Strashko rx_chn->common.psdata_size, 878*d7024191SGrygorii Strashko rx_chn->common.swdata_size); 879*d7024191SGrygorii Strashko 880*d7024191SGrygorii Strashko rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, 881*d7024191SGrygorii Strashko sizeof(*rx_chn->flows), GFP_KERNEL); 882*d7024191SGrygorii Strashko if (!rx_chn->flows) { 883*d7024191SGrygorii Strashko ret = -ENOMEM; 884*d7024191SGrygorii Strashko goto err; 885*d7024191SGrygorii Strashko } 886*d7024191SGrygorii Strashko 887*d7024191SGrygorii Strashko ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); 888*d7024191SGrygorii Strashko if (ret) 889*d7024191SGrygorii Strashko goto err; 890*d7024191SGrygorii Strashko 891*d7024191SGrygorii Strashko for (i = 0; i < rx_chn->flow_num; i++) 892*d7024191SGrygorii Strashko rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; 893*d7024191SGrygorii Strashko 894*d7024191SGrygorii Strashko k3_udma_glue_dump_rx_chn(rx_chn); 895*d7024191SGrygorii Strashko 896*d7024191SGrygorii Strashko return rx_chn; 897*d7024191SGrygorii Strashko 898*d7024191SGrygorii Strashko err: 899*d7024191SGrygorii Strashko k3_udma_glue_release_rx_chn(rx_chn); 900*d7024191SGrygorii Strashko return ERR_PTR(ret); 901*d7024191SGrygorii Strashko } 902*d7024191SGrygorii Strashko 903*d7024191SGrygorii Strashko struct k3_udma_glue_rx_channel * 904*d7024191SGrygorii Strashko k3_udma_glue_request_rx_chn(struct device *dev, const char *name, 905*d7024191SGrygorii Strashko struct k3_udma_glue_rx_channel_cfg *cfg) 906*d7024191SGrygorii Strashko { 907*d7024191SGrygorii Strashko if (cfg->remote) 908*d7024191SGrygorii Strashko return k3_udma_glue_request_remote_rx_chn(dev, name, cfg); 909*d7024191SGrygorii Strashko else 910*d7024191SGrygorii Strashko return k3_udma_glue_request_rx_chn_priv(dev, name, cfg); 911*d7024191SGrygorii Strashko } 912*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn); 913*d7024191SGrygorii Strashko 914*d7024191SGrygorii Strashko void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) 915*d7024191SGrygorii Strashko { 916*d7024191SGrygorii Strashko int i; 917*d7024191SGrygorii Strashko 918*d7024191SGrygorii Strashko if (IS_ERR_OR_NULL(rx_chn->common.udmax)) 919*d7024191SGrygorii Strashko return; 920*d7024191SGrygorii Strashko 921*d7024191SGrygorii Strashko if (rx_chn->psil_paired) { 922*d7024191SGrygorii Strashko xudma_navss_psil_unpair(rx_chn->common.udmax, 923*d7024191SGrygorii Strashko rx_chn->common.src_thread, 924*d7024191SGrygorii Strashko rx_chn->common.dst_thread); 925*d7024191SGrygorii Strashko rx_chn->psil_paired = false; 926*d7024191SGrygorii Strashko } 927*d7024191SGrygorii Strashko 928*d7024191SGrygorii Strashko for (i = 0; i < rx_chn->flow_num; i++) 929*d7024191SGrygorii Strashko k3_udma_glue_release_rx_flow(rx_chn, i); 930*d7024191SGrygorii Strashko 931*d7024191SGrygorii Strashko if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) 932*d7024191SGrygorii Strashko xudma_free_gp_rflow_range(rx_chn->common.udmax, 933*d7024191SGrygorii Strashko rx_chn->flow_id_base, 934*d7024191SGrygorii Strashko rx_chn->flow_num); 935*d7024191SGrygorii Strashko 936*d7024191SGrygorii Strashko if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) 937*d7024191SGrygorii Strashko xudma_rchan_put(rx_chn->common.udmax, 938*d7024191SGrygorii Strashko rx_chn->udma_rchanx); 939*d7024191SGrygorii Strashko } 940*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn); 941*d7024191SGrygorii Strashko 942*d7024191SGrygorii Strashko int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn, 943*d7024191SGrygorii Strashko u32 flow_idx, 944*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow_cfg *flow_cfg) 945*d7024191SGrygorii Strashko { 946*d7024191SGrygorii Strashko if (flow_idx >= rx_chn->flow_num) 947*d7024191SGrygorii Strashko return -EINVAL; 948*d7024191SGrygorii Strashko 949*d7024191SGrygorii Strashko return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg); 950*d7024191SGrygorii Strashko } 951*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init); 952*d7024191SGrygorii Strashko 953*d7024191SGrygorii Strashko u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, 954*d7024191SGrygorii Strashko u32 flow_idx) 955*d7024191SGrygorii Strashko { 956*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flow; 957*d7024191SGrygorii Strashko 958*d7024191SGrygorii Strashko if (flow_idx >= rx_chn->flow_num) 959*d7024191SGrygorii Strashko return -EINVAL; 960*d7024191SGrygorii Strashko 961*d7024191SGrygorii Strashko flow = &rx_chn->flows[flow_idx]; 962*d7024191SGrygorii Strashko 963*d7024191SGrygorii Strashko return k3_ringacc_get_ring_id(flow->ringrxfdq); 964*d7024191SGrygorii Strashko } 965*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id); 966*d7024191SGrygorii Strashko 967*d7024191SGrygorii Strashko u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn) 968*d7024191SGrygorii Strashko { 969*d7024191SGrygorii Strashko return rx_chn->flow_id_base; 970*d7024191SGrygorii Strashko } 971*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base); 972*d7024191SGrygorii Strashko 973*d7024191SGrygorii Strashko int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, 974*d7024191SGrygorii Strashko u32 flow_idx) 975*d7024191SGrygorii Strashko { 976*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; 977*d7024191SGrygorii Strashko const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; 978*d7024191SGrygorii Strashko struct device *dev = rx_chn->common.dev; 979*d7024191SGrygorii Strashko struct ti_sci_msg_rm_udmap_flow_cfg req; 980*d7024191SGrygorii Strashko int rx_ring_id; 981*d7024191SGrygorii Strashko int rx_ringfdq_id; 982*d7024191SGrygorii Strashko int ret = 0; 983*d7024191SGrygorii Strashko 984*d7024191SGrygorii Strashko if (!rx_chn->remote) 985*d7024191SGrygorii Strashko return -EINVAL; 986*d7024191SGrygorii Strashko 987*d7024191SGrygorii Strashko rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); 988*d7024191SGrygorii Strashko rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); 989*d7024191SGrygorii Strashko 990*d7024191SGrygorii Strashko memset(&req, 0, sizeof(req)); 991*d7024191SGrygorii Strashko 992*d7024191SGrygorii Strashko req.valid_params = 993*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | 994*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | 995*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | 996*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | 997*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; 998*d7024191SGrygorii Strashko req.nav_id = tisci_rm->tisci_dev_id; 999*d7024191SGrygorii Strashko req.flow_index = flow->udma_rflow_id; 1000*d7024191SGrygorii Strashko req.rx_dest_qnum = rx_ring_id; 1001*d7024191SGrygorii Strashko req.rx_fdq0_sz0_qnum = rx_ringfdq_id; 1002*d7024191SGrygorii Strashko req.rx_fdq1_qnum = rx_ringfdq_id; 1003*d7024191SGrygorii Strashko req.rx_fdq2_qnum = rx_ringfdq_id; 1004*d7024191SGrygorii Strashko req.rx_fdq3_qnum = rx_ringfdq_id; 1005*d7024191SGrygorii Strashko 1006*d7024191SGrygorii Strashko ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); 1007*d7024191SGrygorii Strashko if (ret) { 1008*d7024191SGrygorii Strashko dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id, 1009*d7024191SGrygorii Strashko ret); 1010*d7024191SGrygorii Strashko } 1011*d7024191SGrygorii Strashko 1012*d7024191SGrygorii Strashko return ret; 1013*d7024191SGrygorii Strashko } 1014*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable); 1015*d7024191SGrygorii Strashko 1016*d7024191SGrygorii Strashko int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, 1017*d7024191SGrygorii Strashko u32 flow_idx) 1018*d7024191SGrygorii Strashko { 1019*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; 1020*d7024191SGrygorii Strashko const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; 1021*d7024191SGrygorii Strashko struct device *dev = rx_chn->common.dev; 1022*d7024191SGrygorii Strashko struct ti_sci_msg_rm_udmap_flow_cfg req; 1023*d7024191SGrygorii Strashko int ret = 0; 1024*d7024191SGrygorii Strashko 1025*d7024191SGrygorii Strashko if (!rx_chn->remote) 1026*d7024191SGrygorii Strashko return -EINVAL; 1027*d7024191SGrygorii Strashko 1028*d7024191SGrygorii Strashko memset(&req, 0, sizeof(req)); 1029*d7024191SGrygorii Strashko req.valid_params = 1030*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | 1031*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | 1032*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | 1033*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | 1034*d7024191SGrygorii Strashko TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; 1035*d7024191SGrygorii Strashko req.nav_id = tisci_rm->tisci_dev_id; 1036*d7024191SGrygorii Strashko req.flow_index = flow->udma_rflow_id; 1037*d7024191SGrygorii Strashko req.rx_dest_qnum = TI_SCI_RESOURCE_NULL; 1038*d7024191SGrygorii Strashko req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL; 1039*d7024191SGrygorii Strashko req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL; 1040*d7024191SGrygorii Strashko req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL; 1041*d7024191SGrygorii Strashko req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL; 1042*d7024191SGrygorii Strashko 1043*d7024191SGrygorii Strashko ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); 1044*d7024191SGrygorii Strashko if (ret) { 1045*d7024191SGrygorii Strashko dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id, 1046*d7024191SGrygorii Strashko ret); 1047*d7024191SGrygorii Strashko } 1048*d7024191SGrygorii Strashko 1049*d7024191SGrygorii Strashko return ret; 1050*d7024191SGrygorii Strashko } 1051*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable); 1052*d7024191SGrygorii Strashko 1053*d7024191SGrygorii Strashko int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) 1054*d7024191SGrygorii Strashko { 1055*d7024191SGrygorii Strashko u32 rxrt_ctl; 1056*d7024191SGrygorii Strashko 1057*d7024191SGrygorii Strashko if (rx_chn->remote) 1058*d7024191SGrygorii Strashko return -EINVAL; 1059*d7024191SGrygorii Strashko 1060*d7024191SGrygorii Strashko if (rx_chn->flows_ready < rx_chn->flow_num) 1061*d7024191SGrygorii Strashko return -EINVAL; 1062*d7024191SGrygorii Strashko 1063*d7024191SGrygorii Strashko rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx, 1064*d7024191SGrygorii Strashko UDMA_RCHAN_RT_CTL_REG); 1065*d7024191SGrygorii Strashko rxrt_ctl |= UDMA_CHAN_RT_CTL_EN; 1066*d7024191SGrygorii Strashko xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 1067*d7024191SGrygorii Strashko rxrt_ctl); 1068*d7024191SGrygorii Strashko 1069*d7024191SGrygorii Strashko xudma_rchanrt_write(rx_chn->udma_rchanx, 1070*d7024191SGrygorii Strashko UDMA_RCHAN_RT_PEER_RT_EN_REG, 1071*d7024191SGrygorii Strashko UDMA_PEER_RT_EN_ENABLE); 1072*d7024191SGrygorii Strashko 1073*d7024191SGrygorii Strashko k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en"); 1074*d7024191SGrygorii Strashko return 0; 1075*d7024191SGrygorii Strashko } 1076*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn); 1077*d7024191SGrygorii Strashko 1078*d7024191SGrygorii Strashko void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) 1079*d7024191SGrygorii Strashko { 1080*d7024191SGrygorii Strashko k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1"); 1081*d7024191SGrygorii Strashko 1082*d7024191SGrygorii Strashko xudma_rchanrt_write(rx_chn->udma_rchanx, 1083*d7024191SGrygorii Strashko UDMA_RCHAN_RT_PEER_RT_EN_REG, 1084*d7024191SGrygorii Strashko 0); 1085*d7024191SGrygorii Strashko xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0); 1086*d7024191SGrygorii Strashko 1087*d7024191SGrygorii Strashko k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2"); 1088*d7024191SGrygorii Strashko } 1089*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn); 1090*d7024191SGrygorii Strashko 1091*d7024191SGrygorii Strashko void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, 1092*d7024191SGrygorii Strashko bool sync) 1093*d7024191SGrygorii Strashko { 1094*d7024191SGrygorii Strashko int i = 0; 1095*d7024191SGrygorii Strashko u32 val; 1096*d7024191SGrygorii Strashko 1097*d7024191SGrygorii Strashko if (rx_chn->remote) 1098*d7024191SGrygorii Strashko return; 1099*d7024191SGrygorii Strashko 1100*d7024191SGrygorii Strashko k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1"); 1101*d7024191SGrygorii Strashko 1102*d7024191SGrygorii Strashko xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG, 1103*d7024191SGrygorii Strashko UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN); 1104*d7024191SGrygorii Strashko 1105*d7024191SGrygorii Strashko val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG); 1106*d7024191SGrygorii Strashko 1107*d7024191SGrygorii Strashko while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { 1108*d7024191SGrygorii Strashko val = xudma_rchanrt_read(rx_chn->udma_rchanx, 1109*d7024191SGrygorii Strashko UDMA_RCHAN_RT_CTL_REG); 1110*d7024191SGrygorii Strashko udelay(1); 1111*d7024191SGrygorii Strashko if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { 1112*d7024191SGrygorii Strashko dev_err(rx_chn->common.dev, "RX tdown timeout\n"); 1113*d7024191SGrygorii Strashko break; 1114*d7024191SGrygorii Strashko } 1115*d7024191SGrygorii Strashko i++; 1116*d7024191SGrygorii Strashko } 1117*d7024191SGrygorii Strashko 1118*d7024191SGrygorii Strashko val = xudma_rchanrt_read(rx_chn->udma_rchanx, 1119*d7024191SGrygorii Strashko UDMA_RCHAN_RT_PEER_RT_EN_REG); 1120*d7024191SGrygorii Strashko if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) 1121*d7024191SGrygorii Strashko dev_err(rx_chn->common.dev, "TX tdown peer not stopped\n"); 1122*d7024191SGrygorii Strashko k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2"); 1123*d7024191SGrygorii Strashko } 1124*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn); 1125*d7024191SGrygorii Strashko 1126*d7024191SGrygorii Strashko void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, 1127*d7024191SGrygorii Strashko u32 flow_num, void *data, 1128*d7024191SGrygorii Strashko void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq) 1129*d7024191SGrygorii Strashko { 1130*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; 1131*d7024191SGrygorii Strashko struct device *dev = rx_chn->common.dev; 1132*d7024191SGrygorii Strashko dma_addr_t desc_dma; 1133*d7024191SGrygorii Strashko int occ_rx, i, ret; 1134*d7024191SGrygorii Strashko 1135*d7024191SGrygorii Strashko /* reset RXCQ as it is not input for udma - expected to be empty */ 1136*d7024191SGrygorii Strashko occ_rx = k3_ringacc_ring_get_occ(flow->ringrx); 1137*d7024191SGrygorii Strashko dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx); 1138*d7024191SGrygorii Strashko if (flow->ringrx) 1139*d7024191SGrygorii Strashko k3_ringacc_ring_reset(flow->ringrx); 1140*d7024191SGrygorii Strashko 1141*d7024191SGrygorii Strashko /* Skip RX FDQ in case one FDQ is used for the set of flows */ 1142*d7024191SGrygorii Strashko if (skip_fdq) 1143*d7024191SGrygorii Strashko return; 1144*d7024191SGrygorii Strashko 1145*d7024191SGrygorii Strashko /* 1146*d7024191SGrygorii Strashko * RX FDQ reset need to be special way as it is input for udma and its 1147*d7024191SGrygorii Strashko * state cached by udma, so: 1148*d7024191SGrygorii Strashko * 1) save RX FDQ occ 1149*d7024191SGrygorii Strashko * 2) clean up RX FDQ and call callback .cleanup() for each desc 1150*d7024191SGrygorii Strashko * 3) reset RX FDQ in a special way 1151*d7024191SGrygorii Strashko */ 1152*d7024191SGrygorii Strashko occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq); 1153*d7024191SGrygorii Strashko dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx); 1154*d7024191SGrygorii Strashko 1155*d7024191SGrygorii Strashko for (i = 0; i < occ_rx; i++) { 1156*d7024191SGrygorii Strashko ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma); 1157*d7024191SGrygorii Strashko if (ret) { 1158*d7024191SGrygorii Strashko dev_err(dev, "RX reset pop %d\n", ret); 1159*d7024191SGrygorii Strashko break; 1160*d7024191SGrygorii Strashko } 1161*d7024191SGrygorii Strashko cleanup(data, desc_dma); 1162*d7024191SGrygorii Strashko } 1163*d7024191SGrygorii Strashko 1164*d7024191SGrygorii Strashko k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx); 1165*d7024191SGrygorii Strashko } 1166*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn); 1167*d7024191SGrygorii Strashko 1168*d7024191SGrygorii Strashko int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, 1169*d7024191SGrygorii Strashko u32 flow_num, struct cppi5_host_desc_t *desc_rx, 1170*d7024191SGrygorii Strashko dma_addr_t desc_dma) 1171*d7024191SGrygorii Strashko { 1172*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; 1173*d7024191SGrygorii Strashko 1174*d7024191SGrygorii Strashko return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma); 1175*d7024191SGrygorii Strashko } 1176*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn); 1177*d7024191SGrygorii Strashko 1178*d7024191SGrygorii Strashko int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, 1179*d7024191SGrygorii Strashko u32 flow_num, dma_addr_t *desc_dma) 1180*d7024191SGrygorii Strashko { 1181*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; 1182*d7024191SGrygorii Strashko 1183*d7024191SGrygorii Strashko return k3_ringacc_ring_pop(flow->ringrx, desc_dma); 1184*d7024191SGrygorii Strashko } 1185*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn); 1186*d7024191SGrygorii Strashko 1187*d7024191SGrygorii Strashko int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, 1188*d7024191SGrygorii Strashko u32 flow_num) 1189*d7024191SGrygorii Strashko { 1190*d7024191SGrygorii Strashko struct k3_udma_glue_rx_flow *flow; 1191*d7024191SGrygorii Strashko 1192*d7024191SGrygorii Strashko flow = &rx_chn->flows[flow_num]; 1193*d7024191SGrygorii Strashko 1194*d7024191SGrygorii Strashko flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx); 1195*d7024191SGrygorii Strashko 1196*d7024191SGrygorii Strashko return flow->virq; 1197*d7024191SGrygorii Strashko } 1198*d7024191SGrygorii Strashko EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq); 1199