// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
	struct device *dev;
	struct device chan_dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32 hdesc_size;
	bool epib;
	u32 psdata_size;
	u32 swdata_size;
	u32 atype_asel;
	struct psil_endpoint_config *ep_config;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;

	int udma_tflow_id;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32 swdata_size;
	int flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

static void k3_udma_chan_dev_release(struct device *dev)
{
	/* The struct containing the device is devm managed */
}

static struct class k3_udma_glue_devclass = {
	.name = "k3_udma_glue_chan",
	.dev_release = k3_udma_chan_dev_release,
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->ringacc = xudma_get_ringacc(common->udmax);
	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

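/*
 * Resolve the "dmas"/"dma-names" entry matching @name, validate the optional
 * second cell (atype for UDMA, asel for PKTDMA), check the PSI-L thread
 * direction (K3_PSIL_DST_THREAD_ID_OFFSET set for TX, clear for RX) and cache
 * the PSI-L endpoint configuration in @common.
 */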
static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
				     const char *name,
				     struct k3_udma_glue_common *common,
				     bool tx_chn)
{
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);
	if (ret)
		goto out_put_spec;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel asel: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}

		common->atype_asel = dma_spec.args[1];
	}

	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	/* get psil endpoint config */
	common->ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(common->ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		ret = PTR_ERR(common->ep_config);
		goto out_put_spec;
	}

	common->epib = common->ep_config->needs_epib;
	common->psdata_size = common->ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype_asel;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

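/*
 * Request a TX channel: parse the DT channel description, get a UDMAP/PKTDMA
 * tchan, register the per-channel device, request and configure the TX and
 * TX completion ring pair, and configure the channel through TISCI.
 */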
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
	else
		tx_chn->udma_tchan_id = -1;

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
					      tx_chn->udma_tchan_id);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		goto err;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
	ret = device_register(&tx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		tx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
	else
		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tflow_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		goto err;
	}

	/* Set the dma_dev for the rings to be configured */
	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		goto err;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		goto err;
	}

	k3_udma_glue_dump_tx_chn(tx_chn);

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);

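/*
 * Minimal TX usage sketch (illustrative only: the channel name, ring sizes
 * and swdata_size below are example values, not requirements of this API):
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = {
 *		.swdata_size = 16,
 *		.tx_cfg = { .size = 128, .mode = K3_RINGACC_RING_MODE_RING },
 *		.txcq_cfg = { .size = 128, .mode = K3_RINGACC_RING_MODE_RING },
 *	};
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	k3_udma_glue_enable_tx_chn(tx_chn);
 *	...
 *	k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
 */
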
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);

	if (tx_chn->common.chan_dev.parent) {
		device_unregister(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	int ret;

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	tx_chn->psil_paired = true;

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

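/*
 * Tear the TX channel down. When @sync is set, busy-poll (1 us per
 * iteration, up to K3_UDMAX_TDOWN_TIMEOUT_US iterations) until the channel
 * enable bit clears and warn if the peer is still enabled afterwards.
 */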
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	struct device *dev = tx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/*
	 * TXQ reset needs to be done in a special way as it is the input
	 * ring for UDMA and its state is cached by UDMA, so:
	 * 1) save TXQ occ
	 * 2) clean up TXQ and call the .cleanup() callback for each desc
	 * 3) reset TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	/* reset TXCQ as it is not an input ring for UDMA - expected to be empty */
	k3_ringacc_ring_reset(tx_chn->ringtxcq);
	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
							  tx_chn->udma_tflow_id);
	} else {
		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
	}

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

struct device *
k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax) &&
	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
		return &tx_chn->common.chan_dev;

	return xudma_get_device(tx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);

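/*
 * For PKTDMA the channel ASEL value is carried in the address bits above
 * K3_ADDRESS_ASEL_SHIFT: the helper below adds it to a DMA address before
 * the address is written into a CPPI5 descriptor, and its counterpart strips
 * it again from addresses read back from descriptors.
 */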
void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);

void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);

static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
	 * and udmax impl, so just configure it to an invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype_asel;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}

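/*
 * Set up one RX flow: get the rflow, request the paired RXFDQ/RX rings
 * (for PKTDMA the FDQ ring id is the rflow id plus the rflow ring offset),
 * configure both rings and program the flow through TISCI. For remote
 * channels the destination and FDQ ring ids are left as TI_SCI_RESOURCE_NULL.
 */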
static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		rx_ringfdq_id = flow->udma_rflow_id +
				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
		rx_ring_id = 0;
	} else {
		rx_ring_id = flow_cfg->ring_rxq_id;
		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    rx_ringfdq_id, rx_ring_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	/* Set the dma_dev for the rings to be configured */
	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}
	rx_chn->flow_id_base = ret;

	return 0;
}

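/*
 * Request an RX channel owned by Linux: get a UDMAP/PKTDMA rchan, validate
 * or pick the rflow range, configure the channel through TISCI and, when a
 * default flow configuration is supplied, set up flow 0 as well.
 */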
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	struct psil_endpoint_config *ep_cfg;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	ep_cfg = rx_chn->common.ep_config;

	if (xudma_is_pktdma(rx_chn->common.udmax))
		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
	else
		rx_chn->udma_rchan_id = -1;

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
					      rx_chn->udma_rchan_id);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		int flow_start = cfg->flow_id_base;
		int flow_end;

		if (flow_start == -1)
			flow_start = ep_cfg->flow_start;

		flow_end = flow_start + cfg->flow_id_num - 1;
		if (flow_start < ep_cfg->flow_start ||
		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
			dev_err(dev, "Invalid flow range requested\n");
			ret = -EINVAL;
			goto err;
		}
		rx_chn->flow_id_base = flow_start;
	} else {
		rx_chn->flow_id_base = cfg->flow_id_base;

		/* Use RX channel id as flow id: target dev can't generate flow_id */
		if (cfg->flow_id_use_rxchan_id)
			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
	}

	rx_chn->flow_num = cfg->flow_id_num;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * A remote RX channel is under the control of a remote CPU core, so
	 * Linux can only request it and manipulate it through its dedicated
	 * RX flows.
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse of udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
		     rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
	else
		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);

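/*
 * Minimal RX usage sketch (illustrative only: the channel name and the cfg
 * values below are example assumptions, not requirements of this API):
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = {
 *		.swdata_size = 16,
 *		.flow_id_base = -1,
 *		.flow_id_num = 1,
 *		.def_flow_cfg = &flow_cfg,
 *	};
 *	struct k3_udma_glue_rx_channel *rx_chn;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 *	irq = k3_udma_glue_rx_get_irq(rx_chn, 0);
 *	k3_udma_glue_enable_rx_chn(rx_chn);
 */
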
void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);

	if (rx_chn->common.chan_dev.parent) {
		device_unregister(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

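/*
 * Enabling is only allowed for a Linux-owned (non-remote) channel and only
 * after every requested flow has been configured (flows_ready == flow_num).
 */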
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int ret;

	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	rx_chn->psil_paired = true;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       u32 flow_num, void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma),
			       bool skip_fdq)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not an input ring for UDMA - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (skip_fdq)
		goto do_reset;

	/*
	 * RX FDQ reset needs to be done in a special way as it is the input
	 * ring for UDMA and its state is cached by UDMA, so:
	 * 1) save RX FDQ occ
	 * 2) clean up RX FDQ and call the .cleanup() callback for each desc
	 * 3) reset RX FDQ in a special way
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

do_reset:
	k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
							flow->udma_rflow_id);
	} else {
		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
	}

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);

struct device *
k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
{
	if (xudma_is_pktdma(rx_chn->common.udmax) &&
	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
		return &rx_chn->common.chan_dev;

	return xudma_get_device(rx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);

void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);

static int __init k3_udma_glue_class_init(void)
{
	return class_register(&k3_udma_glue_devclass);
}
arch_initcall(k3_udma_glue_class_init);