1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com 4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> 5 */ 6 7 #include <linux/kernel.h> 8 #include <linux/delay.h> 9 #include <linux/dmaengine.h> 10 #include <linux/dma-mapping.h> 11 #include <linux/dmapool.h> 12 #include <linux/err.h> 13 #include <linux/init.h> 14 #include <linux/interrupt.h> 15 #include <linux/list.h> 16 #include <linux/platform_device.h> 17 #include <linux/slab.h> 18 #include <linux/spinlock.h> 19 #include <linux/sys_soc.h> 20 #include <linux/of.h> 21 #include <linux/of_dma.h> 22 #include <linux/of_device.h> 23 #include <linux/of_irq.h> 24 #include <linux/workqueue.h> 25 #include <linux/completion.h> 26 #include <linux/soc/ti/k3-ringacc.h> 27 #include <linux/soc/ti/ti_sci_protocol.h> 28 #include <linux/soc/ti/ti_sci_inta_msi.h> 29 #include <linux/dma/k3-event-router.h> 30 #include <linux/dma/ti-cppi5.h> 31 32 #include "../virt-dma.h" 33 #include "k3-udma.h" 34 #include "k3-psil-priv.h" 35 36 struct udma_static_tr { 37 u8 elsize; /* RPSTR0 */ 38 u16 elcnt; /* RPSTR0 */ 39 u16 bstcnt; /* RPSTR1 */ 40 }; 41 42 #define K3_UDMA_MAX_RFLOWS 1024 43 #define K3_UDMA_DEFAULT_RING_SIZE 16 44 45 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ 46 #define UDMA_RFLOW_SRCTAG_NONE 0 47 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1 48 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2 49 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4 50 51 #define UDMA_RFLOW_DSTTAG_NONE 0 52 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1 53 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2 54 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 55 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 56 57 struct udma_chan; 58 59 enum k3_dma_type { 60 DMA_TYPE_UDMA = 0, 61 DMA_TYPE_BCDMA, 62 DMA_TYPE_PKTDMA, 63 }; 64 65 enum udma_mmr { 66 MMR_GCFG = 0, 67 MMR_BCHANRT, 68 MMR_RCHANRT, 69 MMR_TCHANRT, 70 MMR_LAST, 71 }; 72 73 static const char * const mmr_names[] = { 74 [MMR_GCFG] = "gcfg", 75 [MMR_BCHANRT] = "bchanrt", 76 [MMR_RCHANRT] = "rchanrt", 77 [MMR_TCHANRT] = "tchanrt", 78 }; 79 80 struct udma_tchan { 81 void __iomem *reg_rt; 82 83 int id; 84 struct k3_ring *t_ring; /* Transmit ring */ 85 struct k3_ring *tc_ring; /* Transmit Completion ring */ 86 int tflow_id; /* applicable only for PKTDMA */ 87 88 }; 89 90 #define udma_bchan udma_tchan 91 92 struct udma_rflow { 93 int id; 94 struct k3_ring *fd_ring; /* Free Descriptor ring */ 95 struct k3_ring *r_ring; /* Receive ring */ 96 }; 97 98 struct udma_rchan { 99 void __iomem *reg_rt; 100 101 int id; 102 }; 103 104 struct udma_oes_offsets { 105 /* K3 UDMA Output Event Offset */ 106 u32 udma_rchan; 107 108 /* BCDMA Output Event Offsets */ 109 u32 bcdma_bchan_data; 110 u32 bcdma_bchan_ring; 111 u32 bcdma_tchan_data; 112 u32 bcdma_tchan_ring; 113 u32 bcdma_rchan_data; 114 u32 bcdma_rchan_ring; 115 116 /* PKTDMA Output Event Offsets */ 117 u32 pktdma_tchan_flow; 118 u32 pktdma_rchan_flow; 119 }; 120 121 #define UDMA_FLAG_PDMA_ACC32 BIT(0) 122 #define UDMA_FLAG_PDMA_BURST BIT(1) 123 #define UDMA_FLAG_TDTYPE BIT(2) 124 #define UDMA_FLAG_BURST_SIZE BIT(3) 125 #define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \ 126 UDMA_FLAG_PDMA_BURST | \ 127 UDMA_FLAG_TDTYPE | \ 128 UDMA_FLAG_BURST_SIZE) 129 130 struct udma_match_data { 131 enum k3_dma_type type; 132 u32 psil_base; 133 bool enable_memcpy_support; 134 u32 flags; 135 u32 statictr_z_mask; 136 u8 burst_size[3]; 137 }; 138 139 struct udma_soc_data { 140 struct udma_oes_offsets oes; 141 u32 bcdma_trigger_event_offset; 142 }; 143 144 struct udma_hwdesc { 145 size_t cppi5_desc_size; 146 void *cppi5_desc_vaddr; 147 dma_addr_t cppi5_desc_paddr; 148 149 /* TR descriptor internal pointers */ 150 void *tr_req_base; 151 struct cppi5_tr_resp_t *tr_resp_base; 152 }; 153 154 struct udma_rx_flush { 155 struct udma_hwdesc hwdescs[2]; 156 157 size_t buffer_size; 158 void *buffer_vaddr; 159 dma_addr_t buffer_paddr; 160 }; 161 162 struct udma_tpl { 163 u8 levels; 164 u32 start_idx[3]; 165 }; 166 167 struct udma_dev { 168 struct dma_device ddev; 169 struct device *dev; 170 void __iomem *mmrs[MMR_LAST]; 171 const struct udma_match_data *match_data; 172 const struct udma_soc_data *soc_data; 173 174 struct udma_tpl bchan_tpl; 175 struct udma_tpl tchan_tpl; 176 struct udma_tpl rchan_tpl; 177 178 size_t desc_align; /* alignment to use for descriptors */ 179 180 struct udma_tisci_rm tisci_rm; 181 182 struct k3_ringacc *ringacc; 183 184 struct work_struct purge_work; 185 struct list_head desc_to_purge; 186 spinlock_t lock; 187 188 struct udma_rx_flush rx_flush; 189 190 int bchan_cnt; 191 int tchan_cnt; 192 int echan_cnt; 193 int rchan_cnt; 194 int rflow_cnt; 195 int tflow_cnt; 196 unsigned long *bchan_map; 197 unsigned long *tchan_map; 198 unsigned long *rchan_map; 199 unsigned long *rflow_gp_map; 200 unsigned long *rflow_gp_map_allocated; 201 unsigned long *rflow_in_use; 202 unsigned long *tflow_map; 203 204 struct udma_bchan *bchans; 205 struct udma_tchan *tchans; 206 struct udma_rchan *rchans; 207 struct udma_rflow *rflows; 208 209 struct udma_chan *channels; 210 u32 psil_base; 211 u32 atype; 212 u32 asel; 213 }; 214 215 struct udma_desc { 216 struct virt_dma_desc vd; 217 218 bool terminated; 219 220 enum dma_transfer_direction dir; 221 222 struct udma_static_tr static_tr; 223 u32 residue; 224 225 unsigned int sglen; 226 unsigned int desc_idx; /* Only used for cyclic in packet mode */ 227 unsigned int tr_idx; 228 229 u32 metadata_size; 230 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ 231 232 unsigned int hwdesc_count; 233 struct udma_hwdesc hwdesc[]; 234 }; 235 236 enum udma_chan_state { 237 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ 238 UDMA_CHAN_IS_ACTIVE, /* Normal operation */ 239 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ 240 }; 241 242 struct udma_tx_drain { 243 struct delayed_work work; 244 ktime_t tstamp; 245 u32 residue; 246 }; 247 248 struct udma_chan_config { 249 bool pkt_mode; /* TR or packet */ 250 bool needs_epib; /* EPIB is needed for the communication or not */ 251 u32 psd_size; /* size of Protocol Specific Data */ 252 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */ 253 u32 hdesc_size; /* Size of a packet descriptor in packet mode */ 254 bool notdpkt; /* Suppress sending TDC packet */ 255 int remote_thread_id; 256 u32 atype; 257 u32 asel; 258 u32 src_thread; 259 u32 dst_thread; 260 enum psil_endpoint_type ep_type; 261 bool enable_acc32; 262 bool enable_burst; 263 enum udma_tp_level channel_tpl; /* Channel Throughput Level */ 264 265 u32 tr_trigger_type; 266 267 /* PKDMA mapped channel */ 268 int mapped_channel_id; 269 /* PKTDMA default tflow or rflow for mapped channel */ 270 int default_flow_id; 271 272 enum dma_transfer_direction dir; 273 }; 274 275 struct udma_chan { 276 struct virt_dma_chan vc; 277 struct dma_slave_config cfg; 278 struct udma_dev *ud; 279 struct device *dma_dev; 280 struct udma_desc *desc; 281 struct udma_desc *terminated_desc; 282 struct udma_static_tr static_tr; 283 char *name; 284 285 struct udma_bchan *bchan; 286 struct udma_tchan *tchan; 287 struct udma_rchan *rchan; 288 struct udma_rflow *rflow; 289 290 bool psil_paired; 291 292 int irq_num_ring; 293 int irq_num_udma; 294 295 bool cyclic; 296 bool paused; 297 298 enum udma_chan_state state; 299 struct completion teardown_completed; 300 301 struct udma_tx_drain tx_drain; 302 303 u32 bcnt; /* number of bytes completed since the start of the channel */ 304 305 /* Channel configuration parameters */ 306 struct udma_chan_config config; 307 308 /* dmapool for packet mode descriptors */ 309 bool use_dma_pool; 310 struct dma_pool *hdesc_pool; 311 312 u32 id; 313 }; 314 315 static inline struct udma_dev *to_udma_dev(struct dma_device *d) 316 { 317 return container_of(d, struct udma_dev, ddev); 318 } 319 320 static inline struct udma_chan *to_udma_chan(struct dma_chan *c) 321 { 322 return container_of(c, struct udma_chan, vc.chan); 323 } 324 325 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) 326 { 327 return container_of(t, struct udma_desc, vd.tx); 328 } 329 330 /* Generic register access functions */ 331 static inline u32 udma_read(void __iomem *base, int reg) 332 { 333 return readl(base + reg); 334 } 335 336 static inline void udma_write(void __iomem *base, int reg, u32 val) 337 { 338 writel(val, base + reg); 339 } 340 341 static inline void udma_update_bits(void __iomem *base, int reg, 342 u32 mask, u32 val) 343 { 344 u32 tmp, orig; 345 346 orig = readl(base + reg); 347 tmp = orig & ~mask; 348 tmp |= (val & mask); 349 350 if (tmp != orig) 351 writel(tmp, base + reg); 352 } 353 354 /* TCHANRT */ 355 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg) 356 { 357 if (!uc->tchan) 358 return 0; 359 return udma_read(uc->tchan->reg_rt, reg); 360 } 361 362 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val) 363 { 364 if (!uc->tchan) 365 return; 366 udma_write(uc->tchan->reg_rt, reg, val); 367 } 368 369 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg, 370 u32 mask, u32 val) 371 { 372 if (!uc->tchan) 373 return; 374 udma_update_bits(uc->tchan->reg_rt, reg, mask, val); 375 } 376 377 /* RCHANRT */ 378 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg) 379 { 380 if (!uc->rchan) 381 return 0; 382 return udma_read(uc->rchan->reg_rt, reg); 383 } 384 385 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val) 386 { 387 if (!uc->rchan) 388 return; 389 udma_write(uc->rchan->reg_rt, reg, val); 390 } 391 392 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg, 393 u32 mask, u32 val) 394 { 395 if (!uc->rchan) 396 return; 397 udma_update_bits(uc->rchan->reg_rt, reg, mask, val); 398 } 399 400 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) 401 { 402 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 403 404 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 405 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, 406 tisci_rm->tisci_navss_dev_id, 407 src_thread, dst_thread); 408 } 409 410 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, 411 u32 dst_thread) 412 { 413 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 414 415 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 416 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, 417 tisci_rm->tisci_navss_dev_id, 418 src_thread, dst_thread); 419 } 420 421 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel) 422 { 423 struct device *chan_dev = &chan->dev->device; 424 425 if (asel == 0) { 426 /* No special handling for the channel */ 427 chan->dev->chan_dma_dev = false; 428 429 chan_dev->dma_coherent = false; 430 chan_dev->dma_parms = NULL; 431 } else if (asel == 14 || asel == 15) { 432 chan->dev->chan_dma_dev = true; 433 434 chan_dev->dma_coherent = true; 435 dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48)); 436 chan_dev->dma_parms = chan_dev->parent->dma_parms; 437 } else { 438 dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel); 439 440 chan_dev->dma_coherent = false; 441 chan_dev->dma_parms = NULL; 442 } 443 } 444 445 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id) 446 { 447 int i; 448 449 for (i = 0; i < tpl_map->levels; i++) { 450 if (chan_id >= tpl_map->start_idx[i]) 451 return i; 452 } 453 454 return 0; 455 } 456 457 static void udma_reset_uchan(struct udma_chan *uc) 458 { 459 memset(&uc->config, 0, sizeof(uc->config)); 460 uc->config.remote_thread_id = -1; 461 uc->config.mapped_channel_id = -1; 462 uc->config.default_flow_id = -1; 463 uc->state = UDMA_CHAN_IS_IDLE; 464 } 465 466 static void udma_dump_chan_stdata(struct udma_chan *uc) 467 { 468 struct device *dev = uc->ud->dev; 469 u32 offset; 470 int i; 471 472 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { 473 dev_dbg(dev, "TCHAN State data:\n"); 474 for (i = 0; i < 32; i++) { 475 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 476 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, 477 udma_tchanrt_read(uc, offset)); 478 } 479 } 480 481 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { 482 dev_dbg(dev, "RCHAN State data:\n"); 483 for (i = 0; i < 32; i++) { 484 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 485 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, 486 udma_rchanrt_read(uc, offset)); 487 } 488 } 489 } 490 491 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, 492 int idx) 493 { 494 return d->hwdesc[idx].cppi5_desc_paddr; 495 } 496 497 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) 498 { 499 return d->hwdesc[idx].cppi5_desc_vaddr; 500 } 501 502 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, 503 dma_addr_t paddr) 504 { 505 struct udma_desc *d = uc->terminated_desc; 506 507 if (d) { 508 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 509 d->desc_idx); 510 511 if (desc_paddr != paddr) 512 d = NULL; 513 } 514 515 if (!d) { 516 d = uc->desc; 517 if (d) { 518 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 519 d->desc_idx); 520 521 if (desc_paddr != paddr) 522 d = NULL; 523 } 524 } 525 526 return d; 527 } 528 529 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) 530 { 531 if (uc->use_dma_pool) { 532 int i; 533 534 for (i = 0; i < d->hwdesc_count; i++) { 535 if (!d->hwdesc[i].cppi5_desc_vaddr) 536 continue; 537 538 dma_pool_free(uc->hdesc_pool, 539 d->hwdesc[i].cppi5_desc_vaddr, 540 d->hwdesc[i].cppi5_desc_paddr); 541 542 d->hwdesc[i].cppi5_desc_vaddr = NULL; 543 } 544 } else if (d->hwdesc[0].cppi5_desc_vaddr) { 545 dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size, 546 d->hwdesc[0].cppi5_desc_vaddr, 547 d->hwdesc[0].cppi5_desc_paddr); 548 549 d->hwdesc[0].cppi5_desc_vaddr = NULL; 550 } 551 } 552 553 static void udma_purge_desc_work(struct work_struct *work) 554 { 555 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); 556 struct virt_dma_desc *vd, *_vd; 557 unsigned long flags; 558 LIST_HEAD(head); 559 560 spin_lock_irqsave(&ud->lock, flags); 561 list_splice_tail_init(&ud->desc_to_purge, &head); 562 spin_unlock_irqrestore(&ud->lock, flags); 563 564 list_for_each_entry_safe(vd, _vd, &head, node) { 565 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 566 struct udma_desc *d = to_udma_desc(&vd->tx); 567 568 udma_free_hwdesc(uc, d); 569 list_del(&vd->node); 570 kfree(d); 571 } 572 573 /* If more to purge, schedule the work again */ 574 if (!list_empty(&ud->desc_to_purge)) 575 schedule_work(&ud->purge_work); 576 } 577 578 static void udma_desc_free(struct virt_dma_desc *vd) 579 { 580 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); 581 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 582 struct udma_desc *d = to_udma_desc(&vd->tx); 583 unsigned long flags; 584 585 if (uc->terminated_desc == d) 586 uc->terminated_desc = NULL; 587 588 if (uc->use_dma_pool) { 589 udma_free_hwdesc(uc, d); 590 kfree(d); 591 return; 592 } 593 594 spin_lock_irqsave(&ud->lock, flags); 595 list_add_tail(&vd->node, &ud->desc_to_purge); 596 spin_unlock_irqrestore(&ud->lock, flags); 597 598 schedule_work(&ud->purge_work); 599 } 600 601 static bool udma_is_chan_running(struct udma_chan *uc) 602 { 603 u32 trt_ctl = 0; 604 u32 rrt_ctl = 0; 605 606 if (uc->tchan) 607 trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 608 if (uc->rchan) 609 rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 610 611 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) 612 return true; 613 614 return false; 615 } 616 617 static bool udma_is_chan_paused(struct udma_chan *uc) 618 { 619 u32 val, pause_mask; 620 621 switch (uc->config.dir) { 622 case DMA_DEV_TO_MEM: 623 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 624 pause_mask = UDMA_PEER_RT_EN_PAUSE; 625 break; 626 case DMA_MEM_TO_DEV: 627 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 628 pause_mask = UDMA_PEER_RT_EN_PAUSE; 629 break; 630 case DMA_MEM_TO_MEM: 631 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 632 pause_mask = UDMA_CHAN_RT_CTL_PAUSE; 633 break; 634 default: 635 return false; 636 } 637 638 if (val & pause_mask) 639 return true; 640 641 return false; 642 } 643 644 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc) 645 { 646 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; 647 } 648 649 static int udma_push_to_ring(struct udma_chan *uc, int idx) 650 { 651 struct udma_desc *d = uc->desc; 652 struct k3_ring *ring = NULL; 653 dma_addr_t paddr; 654 655 switch (uc->config.dir) { 656 case DMA_DEV_TO_MEM: 657 ring = uc->rflow->fd_ring; 658 break; 659 case DMA_MEM_TO_DEV: 660 case DMA_MEM_TO_MEM: 661 ring = uc->tchan->t_ring; 662 break; 663 default: 664 return -EINVAL; 665 } 666 667 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ 668 if (idx == -1) { 669 paddr = udma_get_rx_flush_hwdesc_paddr(uc); 670 } else { 671 paddr = udma_curr_cppi5_desc_paddr(d, idx); 672 673 wmb(); /* Ensure that writes are not moved over this point */ 674 } 675 676 return k3_ringacc_ring_push(ring, &paddr); 677 } 678 679 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) 680 { 681 if (uc->config.dir != DMA_DEV_TO_MEM) 682 return false; 683 684 if (addr == udma_get_rx_flush_hwdesc_paddr(uc)) 685 return true; 686 687 return false; 688 } 689 690 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) 691 { 692 struct k3_ring *ring = NULL; 693 int ret; 694 695 switch (uc->config.dir) { 696 case DMA_DEV_TO_MEM: 697 ring = uc->rflow->r_ring; 698 break; 699 case DMA_MEM_TO_DEV: 700 case DMA_MEM_TO_MEM: 701 ring = uc->tchan->tc_ring; 702 break; 703 default: 704 return -ENOENT; 705 } 706 707 ret = k3_ringacc_ring_pop(ring, addr); 708 if (ret) 709 return ret; 710 711 rmb(); /* Ensure that reads are not moved before this point */ 712 713 /* Teardown completion */ 714 if (cppi5_desc_is_tdcm(*addr)) 715 return 0; 716 717 /* Check for flush descriptor */ 718 if (udma_desc_is_rx_flush(uc, *addr)) 719 return -ENOENT; 720 721 return 0; 722 } 723 724 static void udma_reset_rings(struct udma_chan *uc) 725 { 726 struct k3_ring *ring1 = NULL; 727 struct k3_ring *ring2 = NULL; 728 729 switch (uc->config.dir) { 730 case DMA_DEV_TO_MEM: 731 if (uc->rchan) { 732 ring1 = uc->rflow->fd_ring; 733 ring2 = uc->rflow->r_ring; 734 } 735 break; 736 case DMA_MEM_TO_DEV: 737 case DMA_MEM_TO_MEM: 738 if (uc->tchan) { 739 ring1 = uc->tchan->t_ring; 740 ring2 = uc->tchan->tc_ring; 741 } 742 break; 743 default: 744 break; 745 } 746 747 if (ring1) 748 k3_ringacc_ring_reset_dma(ring1, 749 k3_ringacc_ring_get_occ(ring1)); 750 if (ring2) 751 k3_ringacc_ring_reset(ring2); 752 753 /* make sure we are not leaking memory by stalled descriptor */ 754 if (uc->terminated_desc) { 755 udma_desc_free(&uc->terminated_desc->vd); 756 uc->terminated_desc = NULL; 757 } 758 } 759 760 static void udma_reset_counters(struct udma_chan *uc) 761 { 762 u32 val; 763 764 if (uc->tchan) { 765 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 766 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 767 768 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 769 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 770 771 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 772 udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 773 774 if (!uc->bchan) { 775 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 776 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 777 } 778 } 779 780 if (uc->rchan) { 781 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 782 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 783 784 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 785 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 786 787 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 788 udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 789 790 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 791 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 792 } 793 794 uc->bcnt = 0; 795 } 796 797 static int udma_reset_chan(struct udma_chan *uc, bool hard) 798 { 799 switch (uc->config.dir) { 800 case DMA_DEV_TO_MEM: 801 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 802 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 803 break; 804 case DMA_MEM_TO_DEV: 805 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 806 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 807 break; 808 case DMA_MEM_TO_MEM: 809 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 810 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 811 break; 812 default: 813 return -EINVAL; 814 } 815 816 /* Reset all counters */ 817 udma_reset_counters(uc); 818 819 /* Hard reset: re-initialize the channel to reset */ 820 if (hard) { 821 struct udma_chan_config ucc_backup; 822 int ret; 823 824 memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); 825 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); 826 827 /* restore the channel configuration */ 828 memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); 829 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); 830 if (ret) 831 return ret; 832 833 /* 834 * Setting forced teardown after forced reset helps recovering 835 * the rchan. 836 */ 837 if (uc->config.dir == DMA_DEV_TO_MEM) 838 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 839 UDMA_CHAN_RT_CTL_EN | 840 UDMA_CHAN_RT_CTL_TDOWN | 841 UDMA_CHAN_RT_CTL_FTDOWN); 842 } 843 uc->state = UDMA_CHAN_IS_IDLE; 844 845 return 0; 846 } 847 848 static void udma_start_desc(struct udma_chan *uc) 849 { 850 struct udma_chan_config *ucc = &uc->config; 851 852 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode && 853 (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { 854 int i; 855 856 /* 857 * UDMA only: Push all descriptors to ring for packet mode 858 * cyclic or RX 859 * PKTDMA supports pre-linked descriptor and cyclic is not 860 * supported 861 */ 862 for (i = 0; i < uc->desc->sglen; i++) 863 udma_push_to_ring(uc, i); 864 } else { 865 udma_push_to_ring(uc, 0); 866 } 867 } 868 869 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) 870 { 871 /* Only PDMAs have staticTR */ 872 if (uc->config.ep_type == PSIL_EP_NATIVE) 873 return false; 874 875 /* Check if the staticTR configuration has changed for TX */ 876 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) 877 return true; 878 879 return false; 880 } 881 882 static int udma_start(struct udma_chan *uc) 883 { 884 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); 885 886 if (!vd) { 887 uc->desc = NULL; 888 return -ENOENT; 889 } 890 891 list_del(&vd->node); 892 893 uc->desc = to_udma_desc(&vd->tx); 894 895 /* Channel is already running and does not need reconfiguration */ 896 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { 897 udma_start_desc(uc); 898 goto out; 899 } 900 901 /* Make sure that we clear the teardown bit, if it is set */ 902 udma_reset_chan(uc, false); 903 904 /* Push descriptors before we start the channel */ 905 udma_start_desc(uc); 906 907 switch (uc->desc->dir) { 908 case DMA_DEV_TO_MEM: 909 /* Config remote TR */ 910 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { 911 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 912 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 913 const struct udma_match_data *match_data = 914 uc->ud->match_data; 915 916 if (uc->config.enable_acc32) 917 val |= PDMA_STATIC_TR_XY_ACC32; 918 if (uc->config.enable_burst) 919 val |= PDMA_STATIC_TR_XY_BURST; 920 921 udma_rchanrt_write(uc, 922 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 923 val); 924 925 udma_rchanrt_write(uc, 926 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG, 927 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, 928 match_data->statictr_z_mask)); 929 930 /* save the current staticTR configuration */ 931 memcpy(&uc->static_tr, &uc->desc->static_tr, 932 sizeof(uc->static_tr)); 933 } 934 935 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 936 UDMA_CHAN_RT_CTL_EN); 937 938 /* Enable remote */ 939 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 940 UDMA_PEER_RT_EN_ENABLE); 941 942 break; 943 case DMA_MEM_TO_DEV: 944 /* Config remote TR */ 945 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { 946 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 947 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 948 949 if (uc->config.enable_acc32) 950 val |= PDMA_STATIC_TR_XY_ACC32; 951 if (uc->config.enable_burst) 952 val |= PDMA_STATIC_TR_XY_BURST; 953 954 udma_tchanrt_write(uc, 955 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 956 val); 957 958 /* save the current staticTR configuration */ 959 memcpy(&uc->static_tr, &uc->desc->static_tr, 960 sizeof(uc->static_tr)); 961 } 962 963 /* Enable remote */ 964 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 965 UDMA_PEER_RT_EN_ENABLE); 966 967 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 968 UDMA_CHAN_RT_CTL_EN); 969 970 break; 971 case DMA_MEM_TO_MEM: 972 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 973 UDMA_CHAN_RT_CTL_EN); 974 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 975 UDMA_CHAN_RT_CTL_EN); 976 977 break; 978 default: 979 return -EINVAL; 980 } 981 982 uc->state = UDMA_CHAN_IS_ACTIVE; 983 out: 984 985 return 0; 986 } 987 988 static int udma_stop(struct udma_chan *uc) 989 { 990 enum udma_chan_state old_state = uc->state; 991 992 uc->state = UDMA_CHAN_IS_TERMINATING; 993 reinit_completion(&uc->teardown_completed); 994 995 switch (uc->config.dir) { 996 case DMA_DEV_TO_MEM: 997 if (!uc->cyclic && !uc->desc) 998 udma_push_to_ring(uc, -1); 999 1000 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 1001 UDMA_PEER_RT_EN_ENABLE | 1002 UDMA_PEER_RT_EN_TEARDOWN); 1003 break; 1004 case DMA_MEM_TO_DEV: 1005 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 1006 UDMA_PEER_RT_EN_ENABLE | 1007 UDMA_PEER_RT_EN_FLUSH); 1008 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 1009 UDMA_CHAN_RT_CTL_EN | 1010 UDMA_CHAN_RT_CTL_TDOWN); 1011 break; 1012 case DMA_MEM_TO_MEM: 1013 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 1014 UDMA_CHAN_RT_CTL_EN | 1015 UDMA_CHAN_RT_CTL_TDOWN); 1016 break; 1017 default: 1018 uc->state = old_state; 1019 complete_all(&uc->teardown_completed); 1020 return -EINVAL; 1021 } 1022 1023 return 0; 1024 } 1025 1026 static void udma_cyclic_packet_elapsed(struct udma_chan *uc) 1027 { 1028 struct udma_desc *d = uc->desc; 1029 struct cppi5_host_desc_t *h_desc; 1030 1031 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; 1032 cppi5_hdesc_reset_to_original(h_desc); 1033 udma_push_to_ring(uc, d->desc_idx); 1034 d->desc_idx = (d->desc_idx + 1) % d->sglen; 1035 } 1036 1037 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) 1038 { 1039 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; 1040 1041 memcpy(d->metadata, h_desc->epib, d->metadata_size); 1042 } 1043 1044 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) 1045 { 1046 u32 peer_bcnt, bcnt; 1047 1048 /* Only TX towards PDMA is affected */ 1049 if (uc->config.ep_type == PSIL_EP_NATIVE || 1050 uc->config.dir != DMA_MEM_TO_DEV) 1051 return true; 1052 1053 peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 1054 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 1055 1056 /* Transfer is incomplete, store current residue and time stamp */ 1057 if (peer_bcnt < bcnt) { 1058 uc->tx_drain.residue = bcnt - peer_bcnt; 1059 uc->tx_drain.tstamp = ktime_get(); 1060 return false; 1061 } 1062 1063 return true; 1064 } 1065 1066 static void udma_check_tx_completion(struct work_struct *work) 1067 { 1068 struct udma_chan *uc = container_of(work, typeof(*uc), 1069 tx_drain.work.work); 1070 bool desc_done = true; 1071 u32 residue_diff; 1072 ktime_t time_diff; 1073 unsigned long delay; 1074 1075 while (1) { 1076 if (uc->desc) { 1077 /* Get previous residue and time stamp */ 1078 residue_diff = uc->tx_drain.residue; 1079 time_diff = uc->tx_drain.tstamp; 1080 /* 1081 * Get current residue and time stamp or see if 1082 * transfer is complete 1083 */ 1084 desc_done = udma_is_desc_really_done(uc, uc->desc); 1085 } 1086 1087 if (!desc_done) { 1088 /* 1089 * Find the time delta and residue delta w.r.t 1090 * previous poll 1091 */ 1092 time_diff = ktime_sub(uc->tx_drain.tstamp, 1093 time_diff) + 1; 1094 residue_diff -= uc->tx_drain.residue; 1095 if (residue_diff) { 1096 /* 1097 * Try to guess when we should check 1098 * next time by calculating rate at 1099 * which data is being drained at the 1100 * peer device 1101 */ 1102 delay = (time_diff / residue_diff) * 1103 uc->tx_drain.residue; 1104 } else { 1105 /* No progress, check again in 1 second */ 1106 schedule_delayed_work(&uc->tx_drain.work, HZ); 1107 break; 1108 } 1109 1110 usleep_range(ktime_to_us(delay), 1111 ktime_to_us(delay) + 10); 1112 continue; 1113 } 1114 1115 if (uc->desc) { 1116 struct udma_desc *d = uc->desc; 1117 1118 uc->bcnt += d->residue; 1119 udma_start(uc); 1120 vchan_cookie_complete(&d->vd); 1121 break; 1122 } 1123 1124 break; 1125 } 1126 } 1127 1128 static irqreturn_t udma_ring_irq_handler(int irq, void *data) 1129 { 1130 struct udma_chan *uc = data; 1131 struct udma_desc *d; 1132 dma_addr_t paddr = 0; 1133 1134 if (udma_pop_from_ring(uc, &paddr) || !paddr) 1135 return IRQ_HANDLED; 1136 1137 spin_lock(&uc->vc.lock); 1138 1139 /* Teardown completion message */ 1140 if (cppi5_desc_is_tdcm(paddr)) { 1141 complete_all(&uc->teardown_completed); 1142 1143 if (uc->terminated_desc) { 1144 udma_desc_free(&uc->terminated_desc->vd); 1145 uc->terminated_desc = NULL; 1146 } 1147 1148 if (!uc->desc) 1149 udma_start(uc); 1150 1151 goto out; 1152 } 1153 1154 d = udma_udma_desc_from_paddr(uc, paddr); 1155 1156 if (d) { 1157 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 1158 d->desc_idx); 1159 if (desc_paddr != paddr) { 1160 dev_err(uc->ud->dev, "not matching descriptors!\n"); 1161 goto out; 1162 } 1163 1164 if (d == uc->desc) { 1165 /* active descriptor */ 1166 if (uc->cyclic) { 1167 udma_cyclic_packet_elapsed(uc); 1168 vchan_cyclic_callback(&d->vd); 1169 } else { 1170 if (udma_is_desc_really_done(uc, d)) { 1171 uc->bcnt += d->residue; 1172 udma_start(uc); 1173 vchan_cookie_complete(&d->vd); 1174 } else { 1175 schedule_delayed_work(&uc->tx_drain.work, 1176 0); 1177 } 1178 } 1179 } else { 1180 /* 1181 * terminated descriptor, mark the descriptor as 1182 * completed to update the channel's cookie marker 1183 */ 1184 dma_cookie_complete(&d->vd.tx); 1185 } 1186 } 1187 out: 1188 spin_unlock(&uc->vc.lock); 1189 1190 return IRQ_HANDLED; 1191 } 1192 1193 static irqreturn_t udma_udma_irq_handler(int irq, void *data) 1194 { 1195 struct udma_chan *uc = data; 1196 struct udma_desc *d; 1197 1198 spin_lock(&uc->vc.lock); 1199 d = uc->desc; 1200 if (d) { 1201 d->tr_idx = (d->tr_idx + 1) % d->sglen; 1202 1203 if (uc->cyclic) { 1204 vchan_cyclic_callback(&d->vd); 1205 } else { 1206 /* TODO: figure out the real amount of data */ 1207 uc->bcnt += d->residue; 1208 udma_start(uc); 1209 vchan_cookie_complete(&d->vd); 1210 } 1211 } 1212 1213 spin_unlock(&uc->vc.lock); 1214 1215 return IRQ_HANDLED; 1216 } 1217 1218 /** 1219 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows 1220 * @ud: UDMA device 1221 * @from: Start the search from this flow id number 1222 * @cnt: Number of consecutive flow ids to allocate 1223 * 1224 * Allocate range of RX flow ids for future use, those flows can be requested 1225 * only using explicit flow id number. if @from is set to -1 it will try to find 1226 * first free range. if @from is positive value it will force allocation only 1227 * of the specified range of flows. 1228 * 1229 * Returns -ENOMEM if can't find free range. 1230 * -EEXIST if requested range is busy. 1231 * -EINVAL if wrong input values passed. 1232 * Returns flow id on success. 1233 */ 1234 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) 1235 { 1236 int start, tmp_from; 1237 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS); 1238 1239 tmp_from = from; 1240 if (tmp_from < 0) 1241 tmp_from = ud->rchan_cnt; 1242 /* default flows can't be allocated and accessible only by id */ 1243 if (tmp_from < ud->rchan_cnt) 1244 return -EINVAL; 1245 1246 if (tmp_from + cnt > ud->rflow_cnt) 1247 return -EINVAL; 1248 1249 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated, 1250 ud->rflow_cnt); 1251 1252 start = bitmap_find_next_zero_area(tmp, 1253 ud->rflow_cnt, 1254 tmp_from, cnt, 0); 1255 if (start >= ud->rflow_cnt) 1256 return -ENOMEM; 1257 1258 if (from >= 0 && start != from) 1259 return -EEXIST; 1260 1261 bitmap_set(ud->rflow_gp_map_allocated, start, cnt); 1262 return start; 1263 } 1264 1265 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) 1266 { 1267 if (from < ud->rchan_cnt) 1268 return -EINVAL; 1269 if (from + cnt > ud->rflow_cnt) 1270 return -EINVAL; 1271 1272 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt); 1273 return 0; 1274 } 1275 1276 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id) 1277 { 1278 /* 1279 * Attempt to request rflow by ID can be made for any rflow 1280 * if not in use with assumption that caller knows what's doing. 1281 * TI-SCI FW will perform additional permission check ant way, it's 1282 * safe 1283 */ 1284 1285 if (id < 0 || id >= ud->rflow_cnt) 1286 return ERR_PTR(-ENOENT); 1287 1288 if (test_bit(id, ud->rflow_in_use)) 1289 return ERR_PTR(-ENOENT); 1290 1291 if (ud->rflow_gp_map) { 1292 /* GP rflow has to be allocated first */ 1293 if (!test_bit(id, ud->rflow_gp_map) && 1294 !test_bit(id, ud->rflow_gp_map_allocated)) 1295 return ERR_PTR(-EINVAL); 1296 } 1297 1298 dev_dbg(ud->dev, "get rflow%d\n", id); 1299 set_bit(id, ud->rflow_in_use); 1300 return &ud->rflows[id]; 1301 } 1302 1303 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow) 1304 { 1305 if (!test_bit(rflow->id, ud->rflow_in_use)) { 1306 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id); 1307 return; 1308 } 1309 1310 dev_dbg(ud->dev, "put rflow%d\n", rflow->id); 1311 clear_bit(rflow->id, ud->rflow_in_use); 1312 } 1313 1314 #define UDMA_RESERVE_RESOURCE(res) \ 1315 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \ 1316 enum udma_tp_level tpl, \ 1317 int id) \ 1318 { \ 1319 if (id >= 0) { \ 1320 if (test_bit(id, ud->res##_map)) { \ 1321 dev_err(ud->dev, "res##%d is in use\n", id); \ 1322 return ERR_PTR(-ENOENT); \ 1323 } \ 1324 } else { \ 1325 int start; \ 1326 \ 1327 if (tpl >= ud->res##_tpl.levels) \ 1328 tpl = ud->res##_tpl.levels - 1; \ 1329 \ 1330 start = ud->res##_tpl.start_idx[tpl]; \ 1331 \ 1332 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \ 1333 start); \ 1334 if (id == ud->res##_cnt) { \ 1335 return ERR_PTR(-ENOENT); \ 1336 } \ 1337 } \ 1338 \ 1339 set_bit(id, ud->res##_map); \ 1340 return &ud->res##s[id]; \ 1341 } 1342 1343 UDMA_RESERVE_RESOURCE(bchan); 1344 UDMA_RESERVE_RESOURCE(tchan); 1345 UDMA_RESERVE_RESOURCE(rchan); 1346 1347 static int bcdma_get_bchan(struct udma_chan *uc) 1348 { 1349 struct udma_dev *ud = uc->ud; 1350 enum udma_tp_level tpl; 1351 int ret; 1352 1353 if (uc->bchan) { 1354 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", 1355 uc->id, uc->bchan->id); 1356 return 0; 1357 } 1358 1359 /* 1360 * Use normal channels for peripherals, and highest TPL channel for 1361 * mem2mem 1362 */ 1363 if (uc->config.tr_trigger_type) 1364 tpl = 0; 1365 else 1366 tpl = ud->bchan_tpl.levels - 1; 1367 1368 uc->bchan = __udma_reserve_bchan(ud, tpl, -1); 1369 if (IS_ERR(uc->bchan)) { 1370 ret = PTR_ERR(uc->bchan); 1371 uc->bchan = NULL; 1372 return ret; 1373 } 1374 1375 uc->tchan = uc->bchan; 1376 1377 return 0; 1378 } 1379 1380 static int udma_get_tchan(struct udma_chan *uc) 1381 { 1382 struct udma_dev *ud = uc->ud; 1383 int ret; 1384 1385 if (uc->tchan) { 1386 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", 1387 uc->id, uc->tchan->id); 1388 return 0; 1389 } 1390 1391 /* 1392 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. 1393 * For PKTDMA mapped channels it is configured to a channel which must 1394 * be used to service the peripheral. 1395 */ 1396 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, 1397 uc->config.mapped_channel_id); 1398 if (IS_ERR(uc->tchan)) { 1399 ret = PTR_ERR(uc->tchan); 1400 uc->tchan = NULL; 1401 return ret; 1402 } 1403 1404 if (ud->tflow_cnt) { 1405 int tflow_id; 1406 1407 /* Only PKTDMA have support for tx flows */ 1408 if (uc->config.default_flow_id >= 0) 1409 tflow_id = uc->config.default_flow_id; 1410 else 1411 tflow_id = uc->tchan->id; 1412 1413 if (test_bit(tflow_id, ud->tflow_map)) { 1414 dev_err(ud->dev, "tflow%d is in use\n", tflow_id); 1415 clear_bit(uc->tchan->id, ud->tchan_map); 1416 uc->tchan = NULL; 1417 return -ENOENT; 1418 } 1419 1420 uc->tchan->tflow_id = tflow_id; 1421 set_bit(tflow_id, ud->tflow_map); 1422 } else { 1423 uc->tchan->tflow_id = -1; 1424 } 1425 1426 return 0; 1427 } 1428 1429 static int udma_get_rchan(struct udma_chan *uc) 1430 { 1431 struct udma_dev *ud = uc->ud; 1432 int ret; 1433 1434 if (uc->rchan) { 1435 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", 1436 uc->id, uc->rchan->id); 1437 return 0; 1438 } 1439 1440 /* 1441 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. 1442 * For PKTDMA mapped channels it is configured to a channel which must 1443 * be used to service the peripheral. 1444 */ 1445 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, 1446 uc->config.mapped_channel_id); 1447 if (IS_ERR(uc->rchan)) { 1448 ret = PTR_ERR(uc->rchan); 1449 uc->rchan = NULL; 1450 return ret; 1451 } 1452 1453 return 0; 1454 } 1455 1456 static int udma_get_chan_pair(struct udma_chan *uc) 1457 { 1458 struct udma_dev *ud = uc->ud; 1459 int chan_id, end; 1460 1461 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { 1462 dev_info(ud->dev, "chan%d: already have %d pair allocated\n", 1463 uc->id, uc->tchan->id); 1464 return 0; 1465 } 1466 1467 if (uc->tchan) { 1468 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", 1469 uc->id, uc->tchan->id); 1470 return -EBUSY; 1471 } else if (uc->rchan) { 1472 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", 1473 uc->id, uc->rchan->id); 1474 return -EBUSY; 1475 } 1476 1477 /* Can be optimized, but let's have it like this for now */ 1478 end = min(ud->tchan_cnt, ud->rchan_cnt); 1479 /* 1480 * Try to use the highest TPL channel pair for MEM_TO_MEM channels 1481 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan 1482 */ 1483 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1]; 1484 for (; chan_id < end; chan_id++) { 1485 if (!test_bit(chan_id, ud->tchan_map) && 1486 !test_bit(chan_id, ud->rchan_map)) 1487 break; 1488 } 1489 1490 if (chan_id == end) 1491 return -ENOENT; 1492 1493 set_bit(chan_id, ud->tchan_map); 1494 set_bit(chan_id, ud->rchan_map); 1495 uc->tchan = &ud->tchans[chan_id]; 1496 uc->rchan = &ud->rchans[chan_id]; 1497 1498 /* UDMA does not use tx flows */ 1499 uc->tchan->tflow_id = -1; 1500 1501 return 0; 1502 } 1503 1504 static int udma_get_rflow(struct udma_chan *uc, int flow_id) 1505 { 1506 struct udma_dev *ud = uc->ud; 1507 int ret; 1508 1509 if (!uc->rchan) { 1510 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); 1511 return -EINVAL; 1512 } 1513 1514 if (uc->rflow) { 1515 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", 1516 uc->id, uc->rflow->id); 1517 return 0; 1518 } 1519 1520 uc->rflow = __udma_get_rflow(ud, flow_id); 1521 if (IS_ERR(uc->rflow)) { 1522 ret = PTR_ERR(uc->rflow); 1523 uc->rflow = NULL; 1524 return ret; 1525 } 1526 1527 return 0; 1528 } 1529 1530 static void bcdma_put_bchan(struct udma_chan *uc) 1531 { 1532 struct udma_dev *ud = uc->ud; 1533 1534 if (uc->bchan) { 1535 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id, 1536 uc->bchan->id); 1537 clear_bit(uc->bchan->id, ud->bchan_map); 1538 uc->bchan = NULL; 1539 uc->tchan = NULL; 1540 } 1541 } 1542 1543 static void udma_put_rchan(struct udma_chan *uc) 1544 { 1545 struct udma_dev *ud = uc->ud; 1546 1547 if (uc->rchan) { 1548 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, 1549 uc->rchan->id); 1550 clear_bit(uc->rchan->id, ud->rchan_map); 1551 uc->rchan = NULL; 1552 } 1553 } 1554 1555 static void udma_put_tchan(struct udma_chan *uc) 1556 { 1557 struct udma_dev *ud = uc->ud; 1558 1559 if (uc->tchan) { 1560 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, 1561 uc->tchan->id); 1562 clear_bit(uc->tchan->id, ud->tchan_map); 1563 1564 if (uc->tchan->tflow_id >= 0) 1565 clear_bit(uc->tchan->tflow_id, ud->tflow_map); 1566 1567 uc->tchan = NULL; 1568 } 1569 } 1570 1571 static void udma_put_rflow(struct udma_chan *uc) 1572 { 1573 struct udma_dev *ud = uc->ud; 1574 1575 if (uc->rflow) { 1576 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, 1577 uc->rflow->id); 1578 __udma_put_rflow(ud, uc->rflow); 1579 uc->rflow = NULL; 1580 } 1581 } 1582 1583 static void bcdma_free_bchan_resources(struct udma_chan *uc) 1584 { 1585 if (!uc->bchan) 1586 return; 1587 1588 k3_ringacc_ring_free(uc->bchan->tc_ring); 1589 k3_ringacc_ring_free(uc->bchan->t_ring); 1590 uc->bchan->tc_ring = NULL; 1591 uc->bchan->t_ring = NULL; 1592 k3_configure_chan_coherency(&uc->vc.chan, 0); 1593 1594 bcdma_put_bchan(uc); 1595 } 1596 1597 static int bcdma_alloc_bchan_resources(struct udma_chan *uc) 1598 { 1599 struct k3_ring_cfg ring_cfg; 1600 struct udma_dev *ud = uc->ud; 1601 int ret; 1602 1603 ret = bcdma_get_bchan(uc); 1604 if (ret) 1605 return ret; 1606 1607 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1, 1608 &uc->bchan->t_ring, 1609 &uc->bchan->tc_ring); 1610 if (ret) { 1611 ret = -EBUSY; 1612 goto err_ring; 1613 } 1614 1615 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1616 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1617 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1618 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1619 1620 k3_configure_chan_coherency(&uc->vc.chan, ud->asel); 1621 ring_cfg.asel = ud->asel; 1622 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1623 1624 ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg); 1625 if (ret) 1626 goto err_ringcfg; 1627 1628 return 0; 1629 1630 err_ringcfg: 1631 k3_ringacc_ring_free(uc->bchan->tc_ring); 1632 uc->bchan->tc_ring = NULL; 1633 k3_ringacc_ring_free(uc->bchan->t_ring); 1634 uc->bchan->t_ring = NULL; 1635 k3_configure_chan_coherency(&uc->vc.chan, 0); 1636 err_ring: 1637 bcdma_put_bchan(uc); 1638 1639 return ret; 1640 } 1641 1642 static void udma_free_tx_resources(struct udma_chan *uc) 1643 { 1644 if (!uc->tchan) 1645 return; 1646 1647 k3_ringacc_ring_free(uc->tchan->t_ring); 1648 k3_ringacc_ring_free(uc->tchan->tc_ring); 1649 uc->tchan->t_ring = NULL; 1650 uc->tchan->tc_ring = NULL; 1651 1652 udma_put_tchan(uc); 1653 } 1654 1655 static int udma_alloc_tx_resources(struct udma_chan *uc) 1656 { 1657 struct k3_ring_cfg ring_cfg; 1658 struct udma_dev *ud = uc->ud; 1659 struct udma_tchan *tchan; 1660 int ring_idx, ret; 1661 1662 ret = udma_get_tchan(uc); 1663 if (ret) 1664 return ret; 1665 1666 tchan = uc->tchan; 1667 if (tchan->tflow_id >= 0) 1668 ring_idx = tchan->tflow_id; 1669 else 1670 ring_idx = ud->bchan_cnt + tchan->id; 1671 1672 ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1, 1673 &tchan->t_ring, 1674 &tchan->tc_ring); 1675 if (ret) { 1676 ret = -EBUSY; 1677 goto err_ring; 1678 } 1679 1680 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1681 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1682 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1683 if (ud->match_data->type == DMA_TYPE_UDMA) { 1684 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1685 } else { 1686 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1687 1688 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); 1689 ring_cfg.asel = uc->config.asel; 1690 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1691 } 1692 1693 ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg); 1694 ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg); 1695 1696 if (ret) 1697 goto err_ringcfg; 1698 1699 return 0; 1700 1701 err_ringcfg: 1702 k3_ringacc_ring_free(uc->tchan->tc_ring); 1703 uc->tchan->tc_ring = NULL; 1704 k3_ringacc_ring_free(uc->tchan->t_ring); 1705 uc->tchan->t_ring = NULL; 1706 err_ring: 1707 udma_put_tchan(uc); 1708 1709 return ret; 1710 } 1711 1712 static void udma_free_rx_resources(struct udma_chan *uc) 1713 { 1714 if (!uc->rchan) 1715 return; 1716 1717 if (uc->rflow) { 1718 struct udma_rflow *rflow = uc->rflow; 1719 1720 k3_ringacc_ring_free(rflow->fd_ring); 1721 k3_ringacc_ring_free(rflow->r_ring); 1722 rflow->fd_ring = NULL; 1723 rflow->r_ring = NULL; 1724 1725 udma_put_rflow(uc); 1726 } 1727 1728 udma_put_rchan(uc); 1729 } 1730 1731 static int udma_alloc_rx_resources(struct udma_chan *uc) 1732 { 1733 struct udma_dev *ud = uc->ud; 1734 struct k3_ring_cfg ring_cfg; 1735 struct udma_rflow *rflow; 1736 int fd_ring_id; 1737 int ret; 1738 1739 ret = udma_get_rchan(uc); 1740 if (ret) 1741 return ret; 1742 1743 /* For MEM_TO_MEM we don't need rflow or rings */ 1744 if (uc->config.dir == DMA_MEM_TO_MEM) 1745 return 0; 1746 1747 if (uc->config.default_flow_id >= 0) 1748 ret = udma_get_rflow(uc, uc->config.default_flow_id); 1749 else 1750 ret = udma_get_rflow(uc, uc->rchan->id); 1751 1752 if (ret) { 1753 ret = -EBUSY; 1754 goto err_rflow; 1755 } 1756 1757 rflow = uc->rflow; 1758 if (ud->tflow_cnt) 1759 fd_ring_id = ud->tflow_cnt + rflow->id; 1760 else 1761 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt + 1762 uc->rchan->id; 1763 1764 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1, 1765 &rflow->fd_ring, &rflow->r_ring); 1766 if (ret) { 1767 ret = -EBUSY; 1768 goto err_ring; 1769 } 1770 1771 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1772 1773 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1774 if (ud->match_data->type == DMA_TYPE_UDMA) { 1775 if (uc->config.pkt_mode) 1776 ring_cfg.size = SG_MAX_SEGMENTS; 1777 else 1778 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1779 1780 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1781 } else { 1782 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1783 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1784 1785 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); 1786 ring_cfg.asel = uc->config.asel; 1787 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1788 } 1789 1790 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); 1791 1792 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1793 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); 1794 1795 if (ret) 1796 goto err_ringcfg; 1797 1798 return 0; 1799 1800 err_ringcfg: 1801 k3_ringacc_ring_free(rflow->r_ring); 1802 rflow->r_ring = NULL; 1803 k3_ringacc_ring_free(rflow->fd_ring); 1804 rflow->fd_ring = NULL; 1805 err_ring: 1806 udma_put_rflow(uc); 1807 err_rflow: 1808 udma_put_rchan(uc); 1809 1810 return ret; 1811 } 1812 1813 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \ 1814 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1815 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID) 1816 1817 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \ 1818 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1819 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID) 1820 1821 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \ 1822 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID) 1823 1824 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \ 1825 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1826 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ 1827 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ 1828 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1829 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ 1830 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1831 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1832 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1833 1834 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \ 1835 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1836 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1837 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1838 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1839 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ 1840 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ 1841 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ 1842 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \ 1843 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1844 1845 static int udma_tisci_m2m_channel_config(struct udma_chan *uc) 1846 { 1847 struct udma_dev *ud = uc->ud; 1848 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1849 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1850 struct udma_tchan *tchan = uc->tchan; 1851 struct udma_rchan *rchan = uc->rchan; 1852 u8 burst_size = 0; 1853 int ret; 1854 u8 tpl; 1855 1856 /* Non synchronized - mem to mem type of transfer */ 1857 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1858 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1859 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 1860 1861 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { 1862 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id); 1863 1864 burst_size = ud->match_data->burst_size[tpl]; 1865 } 1866 1867 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; 1868 req_tx.nav_id = tisci_rm->tisci_dev_id; 1869 req_tx.index = tchan->id; 1870 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1871 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1872 req_tx.txcq_qnum = tc_ring; 1873 req_tx.tx_atype = ud->atype; 1874 if (burst_size) { 1875 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1876 req_tx.tx_burst_size = burst_size; 1877 } 1878 1879 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1880 if (ret) { 1881 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 1882 return ret; 1883 } 1884 1885 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; 1886 req_rx.nav_id = tisci_rm->tisci_dev_id; 1887 req_rx.index = rchan->id; 1888 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1889 req_rx.rxcq_qnum = tc_ring; 1890 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1891 req_rx.rx_atype = ud->atype; 1892 if (burst_size) { 1893 req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1894 req_rx.rx_burst_size = burst_size; 1895 } 1896 1897 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 1898 if (ret) 1899 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); 1900 1901 return ret; 1902 } 1903 1904 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc) 1905 { 1906 struct udma_dev *ud = uc->ud; 1907 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1908 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1909 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1910 struct udma_bchan *bchan = uc->bchan; 1911 u8 burst_size = 0; 1912 int ret; 1913 u8 tpl; 1914 1915 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { 1916 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id); 1917 1918 burst_size = ud->match_data->burst_size[tpl]; 1919 } 1920 1921 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS; 1922 req_tx.nav_id = tisci_rm->tisci_dev_id; 1923 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN; 1924 req_tx.index = bchan->id; 1925 if (burst_size) { 1926 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1927 req_tx.tx_burst_size = burst_size; 1928 } 1929 1930 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1931 if (ret) 1932 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret); 1933 1934 return ret; 1935 } 1936 1937 static int udma_tisci_tx_channel_config(struct udma_chan *uc) 1938 { 1939 struct udma_dev *ud = uc->ud; 1940 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1941 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1942 struct udma_tchan *tchan = uc->tchan; 1943 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1944 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1945 u32 mode, fetch_size; 1946 int ret; 1947 1948 if (uc->config.pkt_mode) { 1949 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 1950 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 1951 uc->config.psd_size, 0); 1952 } else { 1953 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 1954 fetch_size = sizeof(struct cppi5_desc_hdr_t); 1955 } 1956 1957 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; 1958 req_tx.nav_id = tisci_rm->tisci_dev_id; 1959 req_tx.index = tchan->id; 1960 req_tx.tx_chan_type = mode; 1961 req_tx.tx_supr_tdpkt = uc->config.notdpkt; 1962 req_tx.tx_fetch_size = fetch_size >> 2; 1963 req_tx.txcq_qnum = tc_ring; 1964 req_tx.tx_atype = uc->config.atype; 1965 if (uc->config.ep_type == PSIL_EP_PDMA_XY && 1966 ud->match_data->flags & UDMA_FLAG_TDTYPE) { 1967 /* wait for peer to complete the teardown for PDMAs */ 1968 req_tx.valid_params |= 1969 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; 1970 req_tx.tx_tdtype = 1; 1971 } 1972 1973 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1974 if (ret) 1975 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 1976 1977 return ret; 1978 } 1979 1980 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc) 1981 { 1982 struct udma_dev *ud = uc->ud; 1983 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1984 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1985 struct udma_tchan *tchan = uc->tchan; 1986 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1987 int ret; 1988 1989 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS; 1990 req_tx.nav_id = tisci_rm->tisci_dev_id; 1991 req_tx.index = tchan->id; 1992 req_tx.tx_supr_tdpkt = uc->config.notdpkt; 1993 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) { 1994 /* wait for peer to complete the teardown for PDMAs */ 1995 req_tx.valid_params |= 1996 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; 1997 req_tx.tx_tdtype = 1; 1998 } 1999 2000 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 2001 if (ret) 2002 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 2003 2004 return ret; 2005 } 2006 2007 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config 2008 2009 static int udma_tisci_rx_channel_config(struct udma_chan *uc) 2010 { 2011 struct udma_dev *ud = uc->ud; 2012 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2013 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2014 struct udma_rchan *rchan = uc->rchan; 2015 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); 2016 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2017 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2018 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 2019 u32 mode, fetch_size; 2020 int ret; 2021 2022 if (uc->config.pkt_mode) { 2023 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 2024 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 2025 uc->config.psd_size, 0); 2026 } else { 2027 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 2028 fetch_size = sizeof(struct cppi5_desc_hdr_t); 2029 } 2030 2031 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; 2032 req_rx.nav_id = tisci_rm->tisci_dev_id; 2033 req_rx.index = rchan->id; 2034 req_rx.rx_fetch_size = fetch_size >> 2; 2035 req_rx.rxcq_qnum = rx_ring; 2036 req_rx.rx_chan_type = mode; 2037 req_rx.rx_atype = uc->config.atype; 2038 2039 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2040 if (ret) { 2041 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 2042 return ret; 2043 } 2044 2045 flow_req.valid_params = 2046 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 2047 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 2048 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | 2049 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | 2050 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | 2051 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | 2052 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | 2053 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | 2054 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | 2055 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | 2056 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | 2057 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | 2058 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; 2059 2060 flow_req.nav_id = tisci_rm->tisci_dev_id; 2061 flow_req.flow_index = rchan->id; 2062 2063 if (uc->config.needs_epib) 2064 flow_req.rx_einfo_present = 1; 2065 else 2066 flow_req.rx_einfo_present = 0; 2067 if (uc->config.psd_size) 2068 flow_req.rx_psinfo_present = 1; 2069 else 2070 flow_req.rx_psinfo_present = 0; 2071 flow_req.rx_error_handling = 1; 2072 flow_req.rx_dest_qnum = rx_ring; 2073 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE; 2074 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG; 2075 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI; 2076 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO; 2077 flow_req.rx_fdq0_sz0_qnum = fd_ring; 2078 flow_req.rx_fdq1_qnum = fd_ring; 2079 flow_req.rx_fdq2_qnum = fd_ring; 2080 flow_req.rx_fdq3_qnum = fd_ring; 2081 2082 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 2083 2084 if (ret) 2085 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); 2086 2087 return 0; 2088 } 2089 2090 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc) 2091 { 2092 struct udma_dev *ud = uc->ud; 2093 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2094 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2095 struct udma_rchan *rchan = uc->rchan; 2096 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2097 int ret; 2098 2099 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2100 req_rx.nav_id = tisci_rm->tisci_dev_id; 2101 req_rx.index = rchan->id; 2102 2103 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2104 if (ret) 2105 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 2106 2107 return ret; 2108 } 2109 2110 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc) 2111 { 2112 struct udma_dev *ud = uc->ud; 2113 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2114 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2115 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2116 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 2117 int ret; 2118 2119 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2120 req_rx.nav_id = tisci_rm->tisci_dev_id; 2121 req_rx.index = uc->rchan->id; 2122 2123 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2124 if (ret) { 2125 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret); 2126 return ret; 2127 } 2128 2129 flow_req.valid_params = 2130 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 2131 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 2132 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID; 2133 2134 flow_req.nav_id = tisci_rm->tisci_dev_id; 2135 flow_req.flow_index = uc->rflow->id; 2136 2137 if (uc->config.needs_epib) 2138 flow_req.rx_einfo_present = 1; 2139 else 2140 flow_req.rx_einfo_present = 0; 2141 if (uc->config.psd_size) 2142 flow_req.rx_psinfo_present = 1; 2143 else 2144 flow_req.rx_psinfo_present = 0; 2145 flow_req.rx_error_handling = 1; 2146 2147 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 2148 2149 if (ret) 2150 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id, 2151 ret); 2152 2153 return ret; 2154 } 2155 2156 static int udma_alloc_chan_resources(struct dma_chan *chan) 2157 { 2158 struct udma_chan *uc = to_udma_chan(chan); 2159 struct udma_dev *ud = to_udma_dev(chan->device); 2160 const struct udma_soc_data *soc_data = ud->soc_data; 2161 struct k3_ring *irq_ring; 2162 u32 irq_udma_idx; 2163 int ret; 2164 2165 uc->dma_dev = ud->dev; 2166 2167 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { 2168 uc->use_dma_pool = true; 2169 /* in case of MEM_TO_MEM we have maximum of two TRs */ 2170 if (uc->config.dir == DMA_MEM_TO_MEM) { 2171 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2172 sizeof(struct cppi5_tr_type15_t), 2); 2173 uc->config.pkt_mode = false; 2174 } 2175 } 2176 2177 if (uc->use_dma_pool) { 2178 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2179 uc->config.hdesc_size, 2180 ud->desc_align, 2181 0); 2182 if (!uc->hdesc_pool) { 2183 dev_err(ud->ddev.dev, 2184 "Descriptor pool allocation failed\n"); 2185 uc->use_dma_pool = false; 2186 ret = -ENOMEM; 2187 goto err_cleanup; 2188 } 2189 } 2190 2191 /* 2192 * Make sure that the completion is in a known state: 2193 * No teardown, the channel is idle 2194 */ 2195 reinit_completion(&uc->teardown_completed); 2196 complete_all(&uc->teardown_completed); 2197 uc->state = UDMA_CHAN_IS_IDLE; 2198 2199 switch (uc->config.dir) { 2200 case DMA_MEM_TO_MEM: 2201 /* Non synchronized - mem to mem type of transfer */ 2202 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2203 uc->id); 2204 2205 ret = udma_get_chan_pair(uc); 2206 if (ret) 2207 goto err_cleanup; 2208 2209 ret = udma_alloc_tx_resources(uc); 2210 if (ret) { 2211 udma_put_rchan(uc); 2212 goto err_cleanup; 2213 } 2214 2215 ret = udma_alloc_rx_resources(uc); 2216 if (ret) { 2217 udma_free_tx_resources(uc); 2218 goto err_cleanup; 2219 } 2220 2221 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2222 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2223 K3_PSIL_DST_THREAD_ID_OFFSET; 2224 2225 irq_ring = uc->tchan->tc_ring; 2226 irq_udma_idx = uc->tchan->id; 2227 2228 ret = udma_tisci_m2m_channel_config(uc); 2229 break; 2230 case DMA_MEM_TO_DEV: 2231 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2232 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2233 uc->id); 2234 2235 ret = udma_alloc_tx_resources(uc); 2236 if (ret) 2237 goto err_cleanup; 2238 2239 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2240 uc->config.dst_thread = uc->config.remote_thread_id; 2241 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2242 2243 irq_ring = uc->tchan->tc_ring; 2244 irq_udma_idx = uc->tchan->id; 2245 2246 ret = udma_tisci_tx_channel_config(uc); 2247 break; 2248 case DMA_DEV_TO_MEM: 2249 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2250 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2251 uc->id); 2252 2253 ret = udma_alloc_rx_resources(uc); 2254 if (ret) 2255 goto err_cleanup; 2256 2257 uc->config.src_thread = uc->config.remote_thread_id; 2258 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2259 K3_PSIL_DST_THREAD_ID_OFFSET; 2260 2261 irq_ring = uc->rflow->r_ring; 2262 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id; 2263 2264 ret = udma_tisci_rx_channel_config(uc); 2265 break; 2266 default: 2267 /* Can not happen */ 2268 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2269 __func__, uc->id, uc->config.dir); 2270 ret = -EINVAL; 2271 goto err_cleanup; 2272 2273 } 2274 2275 /* check if the channel configuration was successful */ 2276 if (ret) 2277 goto err_res_free; 2278 2279 if (udma_is_chan_running(uc)) { 2280 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2281 udma_reset_chan(uc, false); 2282 if (udma_is_chan_running(uc)) { 2283 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2284 ret = -EBUSY; 2285 goto err_res_free; 2286 } 2287 } 2288 2289 /* PSI-L pairing */ 2290 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); 2291 if (ret) { 2292 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2293 uc->config.src_thread, uc->config.dst_thread); 2294 goto err_res_free; 2295 } 2296 2297 uc->psil_paired = true; 2298 2299 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); 2300 if (uc->irq_num_ring <= 0) { 2301 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2302 k3_ringacc_get_ring_id(irq_ring)); 2303 ret = -EINVAL; 2304 goto err_psi_free; 2305 } 2306 2307 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2308 IRQF_TRIGGER_HIGH, uc->name, uc); 2309 if (ret) { 2310 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2311 goto err_irq_free; 2312 } 2313 2314 /* Event from UDMA (TR events) only needed for slave TR mode channels */ 2315 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { 2316 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, 2317 irq_udma_idx); 2318 if (uc->irq_num_udma <= 0) { 2319 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", 2320 irq_udma_idx); 2321 free_irq(uc->irq_num_ring, uc); 2322 ret = -EINVAL; 2323 goto err_irq_free; 2324 } 2325 2326 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2327 uc->name, uc); 2328 if (ret) { 2329 dev_err(ud->dev, "chan%d: UDMA irq request failed\n", 2330 uc->id); 2331 free_irq(uc->irq_num_ring, uc); 2332 goto err_irq_free; 2333 } 2334 } else { 2335 uc->irq_num_udma = 0; 2336 } 2337 2338 udma_reset_rings(uc); 2339 2340 return 0; 2341 2342 err_irq_free: 2343 uc->irq_num_ring = 0; 2344 uc->irq_num_udma = 0; 2345 err_psi_free: 2346 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); 2347 uc->psil_paired = false; 2348 err_res_free: 2349 udma_free_tx_resources(uc); 2350 udma_free_rx_resources(uc); 2351 err_cleanup: 2352 udma_reset_uchan(uc); 2353 2354 if (uc->use_dma_pool) { 2355 dma_pool_destroy(uc->hdesc_pool); 2356 uc->use_dma_pool = false; 2357 } 2358 2359 return ret; 2360 } 2361 2362 static int bcdma_alloc_chan_resources(struct dma_chan *chan) 2363 { 2364 struct udma_chan *uc = to_udma_chan(chan); 2365 struct udma_dev *ud = to_udma_dev(chan->device); 2366 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 2367 u32 irq_udma_idx, irq_ring_idx; 2368 int ret; 2369 2370 /* Only TR mode is supported */ 2371 uc->config.pkt_mode = false; 2372 2373 /* 2374 * Make sure that the completion is in a known state: 2375 * No teardown, the channel is idle 2376 */ 2377 reinit_completion(&uc->teardown_completed); 2378 complete_all(&uc->teardown_completed); 2379 uc->state = UDMA_CHAN_IS_IDLE; 2380 2381 switch (uc->config.dir) { 2382 case DMA_MEM_TO_MEM: 2383 /* Non synchronized - mem to mem type of transfer */ 2384 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2385 uc->id); 2386 2387 ret = bcdma_alloc_bchan_resources(uc); 2388 if (ret) 2389 return ret; 2390 2391 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring; 2392 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data; 2393 2394 ret = bcdma_tisci_m2m_channel_config(uc); 2395 break; 2396 case DMA_MEM_TO_DEV: 2397 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2398 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2399 uc->id); 2400 2401 ret = udma_alloc_tx_resources(uc); 2402 if (ret) { 2403 uc->config.remote_thread_id = -1; 2404 return ret; 2405 } 2406 2407 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2408 uc->config.dst_thread = uc->config.remote_thread_id; 2409 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2410 2411 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring; 2412 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data; 2413 2414 ret = bcdma_tisci_tx_channel_config(uc); 2415 break; 2416 case DMA_DEV_TO_MEM: 2417 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2418 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2419 uc->id); 2420 2421 ret = udma_alloc_rx_resources(uc); 2422 if (ret) { 2423 uc->config.remote_thread_id = -1; 2424 return ret; 2425 } 2426 2427 uc->config.src_thread = uc->config.remote_thread_id; 2428 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2429 K3_PSIL_DST_THREAD_ID_OFFSET; 2430 2431 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring; 2432 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data; 2433 2434 ret = bcdma_tisci_rx_channel_config(uc); 2435 break; 2436 default: 2437 /* Can not happen */ 2438 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2439 __func__, uc->id, uc->config.dir); 2440 return -EINVAL; 2441 } 2442 2443 /* check if the channel configuration was successful */ 2444 if (ret) 2445 goto err_res_free; 2446 2447 if (udma_is_chan_running(uc)) { 2448 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2449 udma_reset_chan(uc, false); 2450 if (udma_is_chan_running(uc)) { 2451 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2452 ret = -EBUSY; 2453 goto err_res_free; 2454 } 2455 } 2456 2457 uc->dma_dev = dmaengine_get_dma_device(chan); 2458 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) { 2459 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2460 sizeof(struct cppi5_tr_type15_t), 2); 2461 2462 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2463 uc->config.hdesc_size, 2464 ud->desc_align, 2465 0); 2466 if (!uc->hdesc_pool) { 2467 dev_err(ud->ddev.dev, 2468 "Descriptor pool allocation failed\n"); 2469 uc->use_dma_pool = false; 2470 ret = -ENOMEM; 2471 goto err_res_free; 2472 } 2473 2474 uc->use_dma_pool = true; 2475 } else if (uc->config.dir != DMA_MEM_TO_MEM) { 2476 /* PSI-L pairing */ 2477 ret = navss_psil_pair(ud, uc->config.src_thread, 2478 uc->config.dst_thread); 2479 if (ret) { 2480 dev_err(ud->dev, 2481 "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2482 uc->config.src_thread, uc->config.dst_thread); 2483 goto err_res_free; 2484 } 2485 2486 uc->psil_paired = true; 2487 } 2488 2489 uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx); 2490 if (uc->irq_num_ring <= 0) { 2491 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2492 irq_ring_idx); 2493 ret = -EINVAL; 2494 goto err_psi_free; 2495 } 2496 2497 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2498 IRQF_TRIGGER_HIGH, uc->name, uc); 2499 if (ret) { 2500 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2501 goto err_irq_free; 2502 } 2503 2504 /* Event from BCDMA (TR events) only needed for slave channels */ 2505 if (is_slave_direction(uc->config.dir)) { 2506 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, 2507 irq_udma_idx); 2508 if (uc->irq_num_udma <= 0) { 2509 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n", 2510 irq_udma_idx); 2511 free_irq(uc->irq_num_ring, uc); 2512 ret = -EINVAL; 2513 goto err_irq_free; 2514 } 2515 2516 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2517 uc->name, uc); 2518 if (ret) { 2519 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n", 2520 uc->id); 2521 free_irq(uc->irq_num_ring, uc); 2522 goto err_irq_free; 2523 } 2524 } else { 2525 uc->irq_num_udma = 0; 2526 } 2527 2528 udma_reset_rings(uc); 2529 2530 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, 2531 udma_check_tx_completion); 2532 return 0; 2533 2534 err_irq_free: 2535 uc->irq_num_ring = 0; 2536 uc->irq_num_udma = 0; 2537 err_psi_free: 2538 if (uc->psil_paired) 2539 navss_psil_unpair(ud, uc->config.src_thread, 2540 uc->config.dst_thread); 2541 uc->psil_paired = false; 2542 err_res_free: 2543 bcdma_free_bchan_resources(uc); 2544 udma_free_tx_resources(uc); 2545 udma_free_rx_resources(uc); 2546 2547 udma_reset_uchan(uc); 2548 2549 if (uc->use_dma_pool) { 2550 dma_pool_destroy(uc->hdesc_pool); 2551 uc->use_dma_pool = false; 2552 } 2553 2554 return ret; 2555 } 2556 2557 static int bcdma_router_config(struct dma_chan *chan) 2558 { 2559 struct k3_event_route_data *router_data = chan->route_data; 2560 struct udma_chan *uc = to_udma_chan(chan); 2561 u32 trigger_event; 2562 2563 if (!uc->bchan) 2564 return -EINVAL; 2565 2566 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2) 2567 return -EINVAL; 2568 2569 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset; 2570 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1; 2571 2572 return router_data->set_event(router_data->priv, trigger_event); 2573 } 2574 2575 static int pktdma_alloc_chan_resources(struct dma_chan *chan) 2576 { 2577 struct udma_chan *uc = to_udma_chan(chan); 2578 struct udma_dev *ud = to_udma_dev(chan->device); 2579 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 2580 u32 irq_ring_idx; 2581 int ret; 2582 2583 /* 2584 * Make sure that the completion is in a known state: 2585 * No teardown, the channel is idle 2586 */ 2587 reinit_completion(&uc->teardown_completed); 2588 complete_all(&uc->teardown_completed); 2589 uc->state = UDMA_CHAN_IS_IDLE; 2590 2591 switch (uc->config.dir) { 2592 case DMA_MEM_TO_DEV: 2593 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2594 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2595 uc->id); 2596 2597 ret = udma_alloc_tx_resources(uc); 2598 if (ret) { 2599 uc->config.remote_thread_id = -1; 2600 return ret; 2601 } 2602 2603 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2604 uc->config.dst_thread = uc->config.remote_thread_id; 2605 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2606 2607 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow; 2608 2609 ret = pktdma_tisci_tx_channel_config(uc); 2610 break; 2611 case DMA_DEV_TO_MEM: 2612 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2613 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2614 uc->id); 2615 2616 ret = udma_alloc_rx_resources(uc); 2617 if (ret) { 2618 uc->config.remote_thread_id = -1; 2619 return ret; 2620 } 2621 2622 uc->config.src_thread = uc->config.remote_thread_id; 2623 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2624 K3_PSIL_DST_THREAD_ID_OFFSET; 2625 2626 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow; 2627 2628 ret = pktdma_tisci_rx_channel_config(uc); 2629 break; 2630 default: 2631 /* Can not happen */ 2632 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2633 __func__, uc->id, uc->config.dir); 2634 return -EINVAL; 2635 } 2636 2637 /* check if the channel configuration was successful */ 2638 if (ret) 2639 goto err_res_free; 2640 2641 if (udma_is_chan_running(uc)) { 2642 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2643 udma_reset_chan(uc, false); 2644 if (udma_is_chan_running(uc)) { 2645 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2646 ret = -EBUSY; 2647 goto err_res_free; 2648 } 2649 } 2650 2651 uc->dma_dev = dmaengine_get_dma_device(chan); 2652 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev, 2653 uc->config.hdesc_size, ud->desc_align, 2654 0); 2655 if (!uc->hdesc_pool) { 2656 dev_err(ud->ddev.dev, 2657 "Descriptor pool allocation failed\n"); 2658 uc->use_dma_pool = false; 2659 ret = -ENOMEM; 2660 goto err_res_free; 2661 } 2662 2663 uc->use_dma_pool = true; 2664 2665 /* PSI-L pairing */ 2666 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); 2667 if (ret) { 2668 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2669 uc->config.src_thread, uc->config.dst_thread); 2670 goto err_res_free; 2671 } 2672 2673 uc->psil_paired = true; 2674 2675 uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx); 2676 if (uc->irq_num_ring <= 0) { 2677 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2678 irq_ring_idx); 2679 ret = -EINVAL; 2680 goto err_psi_free; 2681 } 2682 2683 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2684 IRQF_TRIGGER_HIGH, uc->name, uc); 2685 if (ret) { 2686 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2687 goto err_irq_free; 2688 } 2689 2690 uc->irq_num_udma = 0; 2691 2692 udma_reset_rings(uc); 2693 2694 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, 2695 udma_check_tx_completion); 2696 2697 if (uc->tchan) 2698 dev_dbg(ud->dev, 2699 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n", 2700 uc->id, uc->tchan->id, uc->tchan->tflow_id, 2701 uc->config.remote_thread_id); 2702 else if (uc->rchan) 2703 dev_dbg(ud->dev, 2704 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n", 2705 uc->id, uc->rchan->id, uc->rflow->id, 2706 uc->config.remote_thread_id); 2707 return 0; 2708 2709 err_irq_free: 2710 uc->irq_num_ring = 0; 2711 err_psi_free: 2712 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); 2713 uc->psil_paired = false; 2714 err_res_free: 2715 udma_free_tx_resources(uc); 2716 udma_free_rx_resources(uc); 2717 2718 udma_reset_uchan(uc); 2719 2720 dma_pool_destroy(uc->hdesc_pool); 2721 uc->use_dma_pool = false; 2722 2723 return ret; 2724 } 2725 2726 static int udma_slave_config(struct dma_chan *chan, 2727 struct dma_slave_config *cfg) 2728 { 2729 struct udma_chan *uc = to_udma_chan(chan); 2730 2731 memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); 2732 2733 return 0; 2734 } 2735 2736 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, 2737 size_t tr_size, int tr_count, 2738 enum dma_transfer_direction dir) 2739 { 2740 struct udma_hwdesc *hwdesc; 2741 struct cppi5_desc_hdr_t *tr_desc; 2742 struct udma_desc *d; 2743 u32 reload_count = 0; 2744 u32 ring_id; 2745 2746 switch (tr_size) { 2747 case 16: 2748 case 32: 2749 case 64: 2750 case 128: 2751 break; 2752 default: 2753 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); 2754 return NULL; 2755 } 2756 2757 /* We have only one descriptor containing multiple TRs */ 2758 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); 2759 if (!d) 2760 return NULL; 2761 2762 d->sglen = tr_count; 2763 2764 d->hwdesc_count = 1; 2765 hwdesc = &d->hwdesc[0]; 2766 2767 /* Allocate memory for DMA ring descriptor */ 2768 if (uc->use_dma_pool) { 2769 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 2770 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 2771 GFP_NOWAIT, 2772 &hwdesc->cppi5_desc_paddr); 2773 } else { 2774 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 2775 tr_count); 2776 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, 2777 uc->ud->desc_align); 2778 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, 2779 hwdesc->cppi5_desc_size, 2780 &hwdesc->cppi5_desc_paddr, 2781 GFP_NOWAIT); 2782 } 2783 2784 if (!hwdesc->cppi5_desc_vaddr) { 2785 kfree(d); 2786 return NULL; 2787 } 2788 2789 /* Start of the TR req records */ 2790 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; 2791 /* Start address of the TR response array */ 2792 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; 2793 2794 tr_desc = hwdesc->cppi5_desc_vaddr; 2795 2796 if (uc->cyclic) 2797 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE; 2798 2799 if (dir == DMA_DEV_TO_MEM) 2800 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2801 else 2802 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 2803 2804 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count); 2805 cppi5_desc_set_pktids(tr_desc, uc->id, 2806 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 2807 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id); 2808 2809 return d; 2810 } 2811 2812 /** 2813 * udma_get_tr_counters - calculate TR counters for a given length 2814 * @len: Length of the trasnfer 2815 * @align_to: Preferred alignment 2816 * @tr0_cnt0: First TR icnt0 2817 * @tr0_cnt1: First TR icnt1 2818 * @tr1_cnt0: Second (if used) TR icnt0 2819 * 2820 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated 2821 * For len >= SZ_64K two TRs are used in a simple way: 2822 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1) 2823 * Second TR: the remaining length (tr1_cnt0) 2824 * 2825 * Returns the number of TRs the length needs (1 or 2) 2826 * -EINVAL if the length can not be supported 2827 */ 2828 static int udma_get_tr_counters(size_t len, unsigned long align_to, 2829 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0) 2830 { 2831 if (len < SZ_64K) { 2832 *tr0_cnt0 = len; 2833 *tr0_cnt1 = 1; 2834 2835 return 1; 2836 } 2837 2838 if (align_to > 3) 2839 align_to = 3; 2840 2841 realign: 2842 *tr0_cnt0 = SZ_64K - BIT(align_to); 2843 if (len / *tr0_cnt0 >= SZ_64K) { 2844 if (align_to) { 2845 align_to--; 2846 goto realign; 2847 } 2848 return -EINVAL; 2849 } 2850 2851 *tr0_cnt1 = len / *tr0_cnt0; 2852 *tr1_cnt0 = len % *tr0_cnt0; 2853 2854 return 2; 2855 } 2856 2857 static struct udma_desc * 2858 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, 2859 unsigned int sglen, enum dma_transfer_direction dir, 2860 unsigned long tx_flags, void *context) 2861 { 2862 struct scatterlist *sgent; 2863 struct udma_desc *d; 2864 struct cppi5_tr_type1_t *tr_req = NULL; 2865 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 2866 unsigned int i; 2867 size_t tr_size; 2868 int num_tr = 0; 2869 int tr_idx = 0; 2870 u64 asel; 2871 2872 /* estimate the number of TRs we will need */ 2873 for_each_sg(sgl, sgent, sglen, i) { 2874 if (sg_dma_len(sgent) < SZ_64K) 2875 num_tr++; 2876 else 2877 num_tr += 2; 2878 } 2879 2880 /* Now allocate and setup the descriptor. */ 2881 tr_size = sizeof(struct cppi5_tr_type1_t); 2882 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 2883 if (!d) 2884 return NULL; 2885 2886 d->sglen = sglen; 2887 2888 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 2889 asel = 0; 2890 else 2891 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 2892 2893 tr_req = d->hwdesc[0].tr_req_base; 2894 for_each_sg(sgl, sgent, sglen, i) { 2895 dma_addr_t sg_addr = sg_dma_address(sgent); 2896 2897 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr), 2898 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); 2899 if (num_tr < 0) { 2900 dev_err(uc->ud->dev, "size %u is not supported\n", 2901 sg_dma_len(sgent)); 2902 udma_free_hwdesc(uc, d); 2903 kfree(d); 2904 return NULL; 2905 } 2906 2907 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 2908 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2909 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); 2910 2911 sg_addr |= asel; 2912 tr_req[tr_idx].addr = sg_addr; 2913 tr_req[tr_idx].icnt0 = tr0_cnt0; 2914 tr_req[tr_idx].icnt1 = tr0_cnt1; 2915 tr_req[tr_idx].dim1 = tr0_cnt0; 2916 tr_idx++; 2917 2918 if (num_tr == 2) { 2919 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 2920 false, false, 2921 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2922 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 2923 CPPI5_TR_CSF_SUPR_EVT); 2924 2925 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0; 2926 tr_req[tr_idx].icnt0 = tr1_cnt0; 2927 tr_req[tr_idx].icnt1 = 1; 2928 tr_req[tr_idx].dim1 = tr1_cnt0; 2929 tr_idx++; 2930 } 2931 2932 d->residue += sg_dma_len(sgent); 2933 } 2934 2935 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, 2936 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 2937 2938 return d; 2939 } 2940 2941 static struct udma_desc * 2942 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl, 2943 unsigned int sglen, 2944 enum dma_transfer_direction dir, 2945 unsigned long tx_flags, void *context) 2946 { 2947 struct scatterlist *sgent; 2948 struct cppi5_tr_type15_t *tr_req = NULL; 2949 enum dma_slave_buswidth dev_width; 2950 u16 tr_cnt0, tr_cnt1; 2951 dma_addr_t dev_addr; 2952 struct udma_desc *d; 2953 unsigned int i; 2954 size_t tr_size, sg_len; 2955 int num_tr = 0; 2956 int tr_idx = 0; 2957 u32 burst, trigger_size, port_window; 2958 u64 asel; 2959 2960 if (dir == DMA_DEV_TO_MEM) { 2961 dev_addr = uc->cfg.src_addr; 2962 dev_width = uc->cfg.src_addr_width; 2963 burst = uc->cfg.src_maxburst; 2964 port_window = uc->cfg.src_port_window_size; 2965 } else if (dir == DMA_MEM_TO_DEV) { 2966 dev_addr = uc->cfg.dst_addr; 2967 dev_width = uc->cfg.dst_addr_width; 2968 burst = uc->cfg.dst_maxburst; 2969 port_window = uc->cfg.dst_port_window_size; 2970 } else { 2971 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 2972 return NULL; 2973 } 2974 2975 if (!burst) 2976 burst = 1; 2977 2978 if (port_window) { 2979 if (port_window != burst) { 2980 dev_err(uc->ud->dev, 2981 "The burst must be equal to port_window\n"); 2982 return NULL; 2983 } 2984 2985 tr_cnt0 = dev_width * port_window; 2986 tr_cnt1 = 1; 2987 } else { 2988 tr_cnt0 = dev_width; 2989 tr_cnt1 = burst; 2990 } 2991 trigger_size = tr_cnt0 * tr_cnt1; 2992 2993 /* estimate the number of TRs we will need */ 2994 for_each_sg(sgl, sgent, sglen, i) { 2995 sg_len = sg_dma_len(sgent); 2996 2997 if (sg_len % trigger_size) { 2998 dev_err(uc->ud->dev, 2999 "Not aligned SG entry (%zu for %u)\n", sg_len, 3000 trigger_size); 3001 return NULL; 3002 } 3003 3004 if (sg_len / trigger_size < SZ_64K) 3005 num_tr++; 3006 else 3007 num_tr += 2; 3008 } 3009 3010 /* Now allocate and setup the descriptor. */ 3011 tr_size = sizeof(struct cppi5_tr_type15_t); 3012 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 3013 if (!d) 3014 return NULL; 3015 3016 d->sglen = sglen; 3017 3018 if (uc->ud->match_data->type == DMA_TYPE_UDMA) { 3019 asel = 0; 3020 } else { 3021 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3022 dev_addr |= asel; 3023 } 3024 3025 tr_req = d->hwdesc[0].tr_req_base; 3026 for_each_sg(sgl, sgent, sglen, i) { 3027 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2; 3028 dma_addr_t sg_addr = sg_dma_address(sgent); 3029 3030 sg_len = sg_dma_len(sgent); 3031 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0, 3032 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2); 3033 if (num_tr < 0) { 3034 dev_err(uc->ud->dev, "size %zu is not supported\n", 3035 sg_len); 3036 udma_free_hwdesc(uc, d); 3037 kfree(d); 3038 return NULL; 3039 } 3040 3041 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false, 3042 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3043 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); 3044 cppi5_tr_set_trigger(&tr_req[tr_idx].flags, 3045 uc->config.tr_trigger_type, 3046 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0); 3047 3048 sg_addr |= asel; 3049 if (dir == DMA_DEV_TO_MEM) { 3050 tr_req[tr_idx].addr = dev_addr; 3051 tr_req[tr_idx].icnt0 = tr_cnt0; 3052 tr_req[tr_idx].icnt1 = tr_cnt1; 3053 tr_req[tr_idx].icnt2 = tr0_cnt2; 3054 tr_req[tr_idx].icnt3 = tr0_cnt3; 3055 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; 3056 3057 tr_req[tr_idx].daddr = sg_addr; 3058 tr_req[tr_idx].dicnt0 = tr_cnt0; 3059 tr_req[tr_idx].dicnt1 = tr_cnt1; 3060 tr_req[tr_idx].dicnt2 = tr0_cnt2; 3061 tr_req[tr_idx].dicnt3 = tr0_cnt3; 3062 tr_req[tr_idx].ddim1 = tr_cnt0; 3063 tr_req[tr_idx].ddim2 = trigger_size; 3064 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2; 3065 } else { 3066 tr_req[tr_idx].addr = sg_addr; 3067 tr_req[tr_idx].icnt0 = tr_cnt0; 3068 tr_req[tr_idx].icnt1 = tr_cnt1; 3069 tr_req[tr_idx].icnt2 = tr0_cnt2; 3070 tr_req[tr_idx].icnt3 = tr0_cnt3; 3071 tr_req[tr_idx].dim1 = tr_cnt0; 3072 tr_req[tr_idx].dim2 = trigger_size; 3073 tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2; 3074 3075 tr_req[tr_idx].daddr = dev_addr; 3076 tr_req[tr_idx].dicnt0 = tr_cnt0; 3077 tr_req[tr_idx].dicnt1 = tr_cnt1; 3078 tr_req[tr_idx].dicnt2 = tr0_cnt2; 3079 tr_req[tr_idx].dicnt3 = tr0_cnt3; 3080 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; 3081 } 3082 3083 tr_idx++; 3084 3085 if (num_tr == 2) { 3086 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, 3087 false, true, 3088 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3089 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3090 CPPI5_TR_CSF_SUPR_EVT); 3091 cppi5_tr_set_trigger(&tr_req[tr_idx].flags, 3092 uc->config.tr_trigger_type, 3093 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 3094 0, 0); 3095 3096 sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3; 3097 if (dir == DMA_DEV_TO_MEM) { 3098 tr_req[tr_idx].addr = dev_addr; 3099 tr_req[tr_idx].icnt0 = tr_cnt0; 3100 tr_req[tr_idx].icnt1 = tr_cnt1; 3101 tr_req[tr_idx].icnt2 = tr1_cnt2; 3102 tr_req[tr_idx].icnt3 = 1; 3103 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; 3104 3105 tr_req[tr_idx].daddr = sg_addr; 3106 tr_req[tr_idx].dicnt0 = tr_cnt0; 3107 tr_req[tr_idx].dicnt1 = tr_cnt1; 3108 tr_req[tr_idx].dicnt2 = tr1_cnt2; 3109 tr_req[tr_idx].dicnt3 = 1; 3110 tr_req[tr_idx].ddim1 = tr_cnt0; 3111 tr_req[tr_idx].ddim2 = trigger_size; 3112 } else { 3113 tr_req[tr_idx].addr = sg_addr; 3114 tr_req[tr_idx].icnt0 = tr_cnt0; 3115 tr_req[tr_idx].icnt1 = tr_cnt1; 3116 tr_req[tr_idx].icnt2 = tr1_cnt2; 3117 tr_req[tr_idx].icnt3 = 1; 3118 tr_req[tr_idx].dim1 = tr_cnt0; 3119 tr_req[tr_idx].dim2 = trigger_size; 3120 3121 tr_req[tr_idx].daddr = dev_addr; 3122 tr_req[tr_idx].dicnt0 = tr_cnt0; 3123 tr_req[tr_idx].dicnt1 = tr_cnt1; 3124 tr_req[tr_idx].dicnt2 = tr1_cnt2; 3125 tr_req[tr_idx].dicnt3 = 1; 3126 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; 3127 } 3128 tr_idx++; 3129 } 3130 3131 d->residue += sg_len; 3132 } 3133 3134 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, 3135 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 3136 3137 return d; 3138 } 3139 3140 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d, 3141 enum dma_slave_buswidth dev_width, 3142 u16 elcnt) 3143 { 3144 if (uc->config.ep_type != PSIL_EP_PDMA_XY) 3145 return 0; 3146 3147 /* Bus width translates to the element size (ES) */ 3148 switch (dev_width) { 3149 case DMA_SLAVE_BUSWIDTH_1_BYTE: 3150 d->static_tr.elsize = 0; 3151 break; 3152 case DMA_SLAVE_BUSWIDTH_2_BYTES: 3153 d->static_tr.elsize = 1; 3154 break; 3155 case DMA_SLAVE_BUSWIDTH_3_BYTES: 3156 d->static_tr.elsize = 2; 3157 break; 3158 case DMA_SLAVE_BUSWIDTH_4_BYTES: 3159 d->static_tr.elsize = 3; 3160 break; 3161 case DMA_SLAVE_BUSWIDTH_8_BYTES: 3162 d->static_tr.elsize = 4; 3163 break; 3164 default: /* not reached */ 3165 return -EINVAL; 3166 } 3167 3168 d->static_tr.elcnt = elcnt; 3169 3170 /* 3171 * PDMA must to close the packet when the channel is in packet mode. 3172 * For TR mode when the channel is not cyclic we also need PDMA to close 3173 * the packet otherwise the transfer will stall because PDMA holds on 3174 * the data it has received from the peripheral. 3175 */ 3176 if (uc->config.pkt_mode || !uc->cyclic) { 3177 unsigned int div = dev_width * elcnt; 3178 3179 if (uc->cyclic) 3180 d->static_tr.bstcnt = d->residue / d->sglen / div; 3181 else 3182 d->static_tr.bstcnt = d->residue / div; 3183 3184 if (uc->config.dir == DMA_DEV_TO_MEM && 3185 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) 3186 return -EINVAL; 3187 } else { 3188 d->static_tr.bstcnt = 0; 3189 } 3190 3191 return 0; 3192 } 3193 3194 static struct udma_desc * 3195 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl, 3196 unsigned int sglen, enum dma_transfer_direction dir, 3197 unsigned long tx_flags, void *context) 3198 { 3199 struct scatterlist *sgent; 3200 struct cppi5_host_desc_t *h_desc = NULL; 3201 struct udma_desc *d; 3202 u32 ring_id; 3203 unsigned int i; 3204 u64 asel; 3205 3206 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT); 3207 if (!d) 3208 return NULL; 3209 3210 d->sglen = sglen; 3211 d->hwdesc_count = sglen; 3212 3213 if (dir == DMA_DEV_TO_MEM) 3214 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 3215 else 3216 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 3217 3218 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 3219 asel = 0; 3220 else 3221 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3222 3223 for_each_sg(sgl, sgent, sglen, i) { 3224 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 3225 dma_addr_t sg_addr = sg_dma_address(sgent); 3226 struct cppi5_host_desc_t *desc; 3227 size_t sg_len = sg_dma_len(sgent); 3228 3229 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 3230 GFP_NOWAIT, 3231 &hwdesc->cppi5_desc_paddr); 3232 if (!hwdesc->cppi5_desc_vaddr) { 3233 dev_err(uc->ud->dev, 3234 "descriptor%d allocation failed\n", i); 3235 3236 udma_free_hwdesc(uc, d); 3237 kfree(d); 3238 return NULL; 3239 } 3240 3241 d->residue += sg_len; 3242 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 3243 desc = hwdesc->cppi5_desc_vaddr; 3244 3245 if (i == 0) { 3246 cppi5_hdesc_init(desc, 0, 0); 3247 /* Flow and Packed ID */ 3248 cppi5_desc_set_pktids(&desc->hdr, uc->id, 3249 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3250 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); 3251 } else { 3252 cppi5_hdesc_reset_hbdesc(desc); 3253 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); 3254 } 3255 3256 /* attach the sg buffer to the descriptor */ 3257 sg_addr |= asel; 3258 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len); 3259 3260 /* Attach link as host buffer descriptor */ 3261 if (h_desc) 3262 cppi5_hdesc_link_hbdesc(h_desc, 3263 hwdesc->cppi5_desc_paddr | asel); 3264 3265 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA || 3266 dir == DMA_MEM_TO_DEV) 3267 h_desc = desc; 3268 } 3269 3270 if (d->residue >= SZ_4M) { 3271 dev_err(uc->ud->dev, 3272 "%s: Transfer size %u is over the supported 4M range\n", 3273 __func__, d->residue); 3274 udma_free_hwdesc(uc, d); 3275 kfree(d); 3276 return NULL; 3277 } 3278 3279 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3280 cppi5_hdesc_set_pktlen(h_desc, d->residue); 3281 3282 return d; 3283 } 3284 3285 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc, 3286 void *data, size_t len) 3287 { 3288 struct udma_desc *d = to_udma_desc(desc); 3289 struct udma_chan *uc = to_udma_chan(desc->chan); 3290 struct cppi5_host_desc_t *h_desc; 3291 u32 psd_size = len; 3292 u32 flags = 0; 3293 3294 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3295 return -ENOTSUPP; 3296 3297 if (!data || len > uc->config.metadata_size) 3298 return -EINVAL; 3299 3300 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) 3301 return -EINVAL; 3302 3303 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3304 if (d->dir == DMA_MEM_TO_DEV) 3305 memcpy(h_desc->epib, data, len); 3306 3307 if (uc->config.needs_epib) 3308 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3309 3310 d->metadata = data; 3311 d->metadata_size = len; 3312 if (uc->config.needs_epib) 3313 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3314 3315 cppi5_hdesc_update_flags(h_desc, flags); 3316 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3317 3318 return 0; 3319 } 3320 3321 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, 3322 size_t *payload_len, size_t *max_len) 3323 { 3324 struct udma_desc *d = to_udma_desc(desc); 3325 struct udma_chan *uc = to_udma_chan(desc->chan); 3326 struct cppi5_host_desc_t *h_desc; 3327 3328 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3329 return ERR_PTR(-ENOTSUPP); 3330 3331 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3332 3333 *max_len = uc->config.metadata_size; 3334 3335 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? 3336 CPPI5_INFO0_HDESC_EPIB_SIZE : 0; 3337 *payload_len += cppi5_hdesc_get_psdata_size(h_desc); 3338 3339 return h_desc->epib; 3340 } 3341 3342 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, 3343 size_t payload_len) 3344 { 3345 struct udma_desc *d = to_udma_desc(desc); 3346 struct udma_chan *uc = to_udma_chan(desc->chan); 3347 struct cppi5_host_desc_t *h_desc; 3348 u32 psd_size = payload_len; 3349 u32 flags = 0; 3350 3351 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3352 return -ENOTSUPP; 3353 3354 if (payload_len > uc->config.metadata_size) 3355 return -EINVAL; 3356 3357 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) 3358 return -EINVAL; 3359 3360 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3361 3362 if (uc->config.needs_epib) { 3363 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3364 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3365 } 3366 3367 cppi5_hdesc_update_flags(h_desc, flags); 3368 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3369 3370 return 0; 3371 } 3372 3373 static struct dma_descriptor_metadata_ops metadata_ops = { 3374 .attach = udma_attach_metadata, 3375 .get_ptr = udma_get_metadata_ptr, 3376 .set_len = udma_set_metadata_len, 3377 }; 3378 3379 static struct dma_async_tx_descriptor * 3380 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 3381 unsigned int sglen, enum dma_transfer_direction dir, 3382 unsigned long tx_flags, void *context) 3383 { 3384 struct udma_chan *uc = to_udma_chan(chan); 3385 enum dma_slave_buswidth dev_width; 3386 struct udma_desc *d; 3387 u32 burst; 3388 3389 if (dir != uc->config.dir && 3390 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) { 3391 dev_err(chan->device->dev, 3392 "%s: chan%d is for %s, not supporting %s\n", 3393 __func__, uc->id, 3394 dmaengine_get_direction_text(uc->config.dir), 3395 dmaengine_get_direction_text(dir)); 3396 return NULL; 3397 } 3398 3399 if (dir == DMA_DEV_TO_MEM) { 3400 dev_width = uc->cfg.src_addr_width; 3401 burst = uc->cfg.src_maxburst; 3402 } else if (dir == DMA_MEM_TO_DEV) { 3403 dev_width = uc->cfg.dst_addr_width; 3404 burst = uc->cfg.dst_maxburst; 3405 } else { 3406 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); 3407 return NULL; 3408 } 3409 3410 if (!burst) 3411 burst = 1; 3412 3413 if (uc->config.pkt_mode) 3414 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, 3415 context); 3416 else if (is_slave_direction(uc->config.dir)) 3417 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, 3418 context); 3419 else 3420 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir, 3421 tx_flags, context); 3422 3423 if (!d) 3424 return NULL; 3425 3426 d->dir = dir; 3427 d->desc_idx = 0; 3428 d->tr_idx = 0; 3429 3430 /* static TR for remote PDMA */ 3431 if (udma_configure_statictr(uc, d, dev_width, burst)) { 3432 dev_err(uc->ud->dev, 3433 "%s: StaticTR Z is limited to maximum 4095 (%u)\n", 3434 __func__, d->static_tr.bstcnt); 3435 3436 udma_free_hwdesc(uc, d); 3437 kfree(d); 3438 return NULL; 3439 } 3440 3441 if (uc->config.metadata_size) 3442 d->vd.tx.metadata_ops = &metadata_ops; 3443 3444 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3445 } 3446 3447 static struct udma_desc * 3448 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, 3449 size_t buf_len, size_t period_len, 3450 enum dma_transfer_direction dir, unsigned long flags) 3451 { 3452 struct udma_desc *d; 3453 size_t tr_size, period_addr; 3454 struct cppi5_tr_type1_t *tr_req; 3455 unsigned int periods = buf_len / period_len; 3456 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 3457 unsigned int i; 3458 int num_tr; 3459 3460 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0, 3461 &tr0_cnt1, &tr1_cnt0); 3462 if (num_tr < 0) { 3463 dev_err(uc->ud->dev, "size %zu is not supported\n", 3464 period_len); 3465 return NULL; 3466 } 3467 3468 /* Now allocate and setup the descriptor. */ 3469 tr_size = sizeof(struct cppi5_tr_type1_t); 3470 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir); 3471 if (!d) 3472 return NULL; 3473 3474 tr_req = d->hwdesc[0].tr_req_base; 3475 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 3476 period_addr = buf_addr; 3477 else 3478 period_addr = buf_addr | 3479 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); 3480 3481 for (i = 0; i < periods; i++) { 3482 int tr_idx = i * num_tr; 3483 3484 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 3485 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3486 3487 tr_req[tr_idx].addr = period_addr; 3488 tr_req[tr_idx].icnt0 = tr0_cnt0; 3489 tr_req[tr_idx].icnt1 = tr0_cnt1; 3490 tr_req[tr_idx].dim1 = tr0_cnt0; 3491 3492 if (num_tr == 2) { 3493 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3494 CPPI5_TR_CSF_SUPR_EVT); 3495 tr_idx++; 3496 3497 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 3498 false, false, 3499 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3500 3501 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; 3502 tr_req[tr_idx].icnt0 = tr1_cnt0; 3503 tr_req[tr_idx].icnt1 = 1; 3504 tr_req[tr_idx].dim1 = tr1_cnt0; 3505 } 3506 3507 if (!(flags & DMA_PREP_INTERRUPT)) 3508 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3509 CPPI5_TR_CSF_SUPR_EVT); 3510 3511 period_addr += period_len; 3512 } 3513 3514 return d; 3515 } 3516 3517 static struct udma_desc * 3518 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, 3519 size_t buf_len, size_t period_len, 3520 enum dma_transfer_direction dir, unsigned long flags) 3521 { 3522 struct udma_desc *d; 3523 u32 ring_id; 3524 int i; 3525 int periods = buf_len / period_len; 3526 3527 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) 3528 return NULL; 3529 3530 if (period_len >= SZ_4M) 3531 return NULL; 3532 3533 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT); 3534 if (!d) 3535 return NULL; 3536 3537 d->hwdesc_count = periods; 3538 3539 /* TODO: re-check this... */ 3540 if (dir == DMA_DEV_TO_MEM) 3541 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 3542 else 3543 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 3544 3545 if (uc->ud->match_data->type != DMA_TYPE_UDMA) 3546 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3547 3548 for (i = 0; i < periods; i++) { 3549 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 3550 dma_addr_t period_addr = buf_addr + (period_len * i); 3551 struct cppi5_host_desc_t *h_desc; 3552 3553 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 3554 GFP_NOWAIT, 3555 &hwdesc->cppi5_desc_paddr); 3556 if (!hwdesc->cppi5_desc_vaddr) { 3557 dev_err(uc->ud->dev, 3558 "descriptor%d allocation failed\n", i); 3559 3560 udma_free_hwdesc(uc, d); 3561 kfree(d); 3562 return NULL; 3563 } 3564 3565 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 3566 h_desc = hwdesc->cppi5_desc_vaddr; 3567 3568 cppi5_hdesc_init(h_desc, 0, 0); 3569 cppi5_hdesc_set_pktlen(h_desc, period_len); 3570 3571 /* Flow and Packed ID */ 3572 cppi5_desc_set_pktids(&h_desc->hdr, uc->id, 3573 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3574 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); 3575 3576 /* attach each period to a new descriptor */ 3577 cppi5_hdesc_attach_buf(h_desc, 3578 period_addr, period_len, 3579 period_addr, period_len); 3580 } 3581 3582 return d; 3583 } 3584 3585 static struct dma_async_tx_descriptor * 3586 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 3587 size_t period_len, enum dma_transfer_direction dir, 3588 unsigned long flags) 3589 { 3590 struct udma_chan *uc = to_udma_chan(chan); 3591 enum dma_slave_buswidth dev_width; 3592 struct udma_desc *d; 3593 u32 burst; 3594 3595 if (dir != uc->config.dir) { 3596 dev_err(chan->device->dev, 3597 "%s: chan%d is for %s, not supporting %s\n", 3598 __func__, uc->id, 3599 dmaengine_get_direction_text(uc->config.dir), 3600 dmaengine_get_direction_text(dir)); 3601 return NULL; 3602 } 3603 3604 uc->cyclic = true; 3605 3606 if (dir == DMA_DEV_TO_MEM) { 3607 dev_width = uc->cfg.src_addr_width; 3608 burst = uc->cfg.src_maxburst; 3609 } else if (dir == DMA_MEM_TO_DEV) { 3610 dev_width = uc->cfg.dst_addr_width; 3611 burst = uc->cfg.dst_maxburst; 3612 } else { 3613 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 3614 return NULL; 3615 } 3616 3617 if (!burst) 3618 burst = 1; 3619 3620 if (uc->config.pkt_mode) 3621 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, 3622 dir, flags); 3623 else 3624 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, 3625 dir, flags); 3626 3627 if (!d) 3628 return NULL; 3629 3630 d->sglen = buf_len / period_len; 3631 3632 d->dir = dir; 3633 d->residue = buf_len; 3634 3635 /* static TR for remote PDMA */ 3636 if (udma_configure_statictr(uc, d, dev_width, burst)) { 3637 dev_err(uc->ud->dev, 3638 "%s: StaticTR Z is limited to maximum 4095 (%u)\n", 3639 __func__, d->static_tr.bstcnt); 3640 3641 udma_free_hwdesc(uc, d); 3642 kfree(d); 3643 return NULL; 3644 } 3645 3646 if (uc->config.metadata_size) 3647 d->vd.tx.metadata_ops = &metadata_ops; 3648 3649 return vchan_tx_prep(&uc->vc, &d->vd, flags); 3650 } 3651 3652 static struct dma_async_tx_descriptor * 3653 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 3654 size_t len, unsigned long tx_flags) 3655 { 3656 struct udma_chan *uc = to_udma_chan(chan); 3657 struct udma_desc *d; 3658 struct cppi5_tr_type15_t *tr_req; 3659 int num_tr; 3660 size_t tr_size = sizeof(struct cppi5_tr_type15_t); 3661 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 3662 3663 if (uc->config.dir != DMA_MEM_TO_MEM) { 3664 dev_err(chan->device->dev, 3665 "%s: chan%d is for %s, not supporting %s\n", 3666 __func__, uc->id, 3667 dmaengine_get_direction_text(uc->config.dir), 3668 dmaengine_get_direction_text(DMA_MEM_TO_MEM)); 3669 return NULL; 3670 } 3671 3672 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0, 3673 &tr0_cnt1, &tr1_cnt0); 3674 if (num_tr < 0) { 3675 dev_err(uc->ud->dev, "size %zu is not supported\n", 3676 len); 3677 return NULL; 3678 } 3679 3680 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); 3681 if (!d) 3682 return NULL; 3683 3684 d->dir = DMA_MEM_TO_MEM; 3685 d->desc_idx = 0; 3686 d->tr_idx = 0; 3687 d->residue = len; 3688 3689 if (uc->ud->match_data->type != DMA_TYPE_UDMA) { 3690 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3691 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3692 } 3693 3694 tr_req = d->hwdesc[0].tr_req_base; 3695 3696 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, 3697 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3698 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); 3699 3700 tr_req[0].addr = src; 3701 tr_req[0].icnt0 = tr0_cnt0; 3702 tr_req[0].icnt1 = tr0_cnt1; 3703 tr_req[0].icnt2 = 1; 3704 tr_req[0].icnt3 = 1; 3705 tr_req[0].dim1 = tr0_cnt0; 3706 3707 tr_req[0].daddr = dest; 3708 tr_req[0].dicnt0 = tr0_cnt0; 3709 tr_req[0].dicnt1 = tr0_cnt1; 3710 tr_req[0].dicnt2 = 1; 3711 tr_req[0].dicnt3 = 1; 3712 tr_req[0].ddim1 = tr0_cnt0; 3713 3714 if (num_tr == 2) { 3715 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, 3716 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3717 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); 3718 3719 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; 3720 tr_req[1].icnt0 = tr1_cnt0; 3721 tr_req[1].icnt1 = 1; 3722 tr_req[1].icnt2 = 1; 3723 tr_req[1].icnt3 = 1; 3724 3725 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; 3726 tr_req[1].dicnt0 = tr1_cnt0; 3727 tr_req[1].dicnt1 = 1; 3728 tr_req[1].dicnt2 = 1; 3729 tr_req[1].dicnt3 = 1; 3730 } 3731 3732 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, 3733 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 3734 3735 if (uc->config.metadata_size) 3736 d->vd.tx.metadata_ops = &metadata_ops; 3737 3738 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3739 } 3740 3741 static void udma_issue_pending(struct dma_chan *chan) 3742 { 3743 struct udma_chan *uc = to_udma_chan(chan); 3744 unsigned long flags; 3745 3746 spin_lock_irqsave(&uc->vc.lock, flags); 3747 3748 /* If we have something pending and no active descriptor, then */ 3749 if (vchan_issue_pending(&uc->vc) && !uc->desc) { 3750 /* 3751 * start a descriptor if the channel is NOT [marked as 3752 * terminating _and_ it is still running (teardown has not 3753 * completed yet)]. 3754 */ 3755 if (!(uc->state == UDMA_CHAN_IS_TERMINATING && 3756 udma_is_chan_running(uc))) 3757 udma_start(uc); 3758 } 3759 3760 spin_unlock_irqrestore(&uc->vc.lock, flags); 3761 } 3762 3763 static enum dma_status udma_tx_status(struct dma_chan *chan, 3764 dma_cookie_t cookie, 3765 struct dma_tx_state *txstate) 3766 { 3767 struct udma_chan *uc = to_udma_chan(chan); 3768 enum dma_status ret; 3769 unsigned long flags; 3770 3771 spin_lock_irqsave(&uc->vc.lock, flags); 3772 3773 ret = dma_cookie_status(chan, cookie, txstate); 3774 3775 if (!udma_is_chan_running(uc)) 3776 ret = DMA_COMPLETE; 3777 3778 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) 3779 ret = DMA_PAUSED; 3780 3781 if (ret == DMA_COMPLETE || !txstate) 3782 goto out; 3783 3784 if (uc->desc && uc->desc->vd.tx.cookie == cookie) { 3785 u32 peer_bcnt = 0; 3786 u32 bcnt = 0; 3787 u32 residue = uc->desc->residue; 3788 u32 delay = 0; 3789 3790 if (uc->desc->dir == DMA_MEM_TO_DEV) { 3791 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 3792 3793 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3794 peer_bcnt = udma_tchanrt_read(uc, 3795 UDMA_CHAN_RT_PEER_BCNT_REG); 3796 3797 if (bcnt > peer_bcnt) 3798 delay = bcnt - peer_bcnt; 3799 } 3800 } else if (uc->desc->dir == DMA_DEV_TO_MEM) { 3801 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3802 3803 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3804 peer_bcnt = udma_rchanrt_read(uc, 3805 UDMA_CHAN_RT_PEER_BCNT_REG); 3806 3807 if (peer_bcnt > bcnt) 3808 delay = peer_bcnt - bcnt; 3809 } 3810 } else { 3811 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3812 } 3813 3814 bcnt -= uc->bcnt; 3815 if (bcnt && !(bcnt % uc->desc->residue)) 3816 residue = 0; 3817 else 3818 residue -= bcnt % uc->desc->residue; 3819 3820 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { 3821 ret = DMA_COMPLETE; 3822 delay = 0; 3823 } 3824 3825 dma_set_residue(txstate, residue); 3826 dma_set_in_flight_bytes(txstate, delay); 3827 3828 } else { 3829 ret = DMA_COMPLETE; 3830 } 3831 3832 out: 3833 spin_unlock_irqrestore(&uc->vc.lock, flags); 3834 return ret; 3835 } 3836 3837 static int udma_pause(struct dma_chan *chan) 3838 { 3839 struct udma_chan *uc = to_udma_chan(chan); 3840 3841 /* pause the channel */ 3842 switch (uc->config.dir) { 3843 case DMA_DEV_TO_MEM: 3844 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3845 UDMA_PEER_RT_EN_PAUSE, 3846 UDMA_PEER_RT_EN_PAUSE); 3847 break; 3848 case DMA_MEM_TO_DEV: 3849 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3850 UDMA_PEER_RT_EN_PAUSE, 3851 UDMA_PEER_RT_EN_PAUSE); 3852 break; 3853 case DMA_MEM_TO_MEM: 3854 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3855 UDMA_CHAN_RT_CTL_PAUSE, 3856 UDMA_CHAN_RT_CTL_PAUSE); 3857 break; 3858 default: 3859 return -EINVAL; 3860 } 3861 3862 return 0; 3863 } 3864 3865 static int udma_resume(struct dma_chan *chan) 3866 { 3867 struct udma_chan *uc = to_udma_chan(chan); 3868 3869 /* resume the channel */ 3870 switch (uc->config.dir) { 3871 case DMA_DEV_TO_MEM: 3872 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3873 UDMA_PEER_RT_EN_PAUSE, 0); 3874 3875 break; 3876 case DMA_MEM_TO_DEV: 3877 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3878 UDMA_PEER_RT_EN_PAUSE, 0); 3879 break; 3880 case DMA_MEM_TO_MEM: 3881 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3882 UDMA_CHAN_RT_CTL_PAUSE, 0); 3883 break; 3884 default: 3885 return -EINVAL; 3886 } 3887 3888 return 0; 3889 } 3890 3891 static int udma_terminate_all(struct dma_chan *chan) 3892 { 3893 struct udma_chan *uc = to_udma_chan(chan); 3894 unsigned long flags; 3895 LIST_HEAD(head); 3896 3897 spin_lock_irqsave(&uc->vc.lock, flags); 3898 3899 if (udma_is_chan_running(uc)) 3900 udma_stop(uc); 3901 3902 if (uc->desc) { 3903 uc->terminated_desc = uc->desc; 3904 uc->desc = NULL; 3905 uc->terminated_desc->terminated = true; 3906 cancel_delayed_work(&uc->tx_drain.work); 3907 } 3908 3909 uc->paused = false; 3910 3911 vchan_get_all_descriptors(&uc->vc, &head); 3912 spin_unlock_irqrestore(&uc->vc.lock, flags); 3913 vchan_dma_desc_free_list(&uc->vc, &head); 3914 3915 return 0; 3916 } 3917 3918 static void udma_synchronize(struct dma_chan *chan) 3919 { 3920 struct udma_chan *uc = to_udma_chan(chan); 3921 unsigned long timeout = msecs_to_jiffies(1000); 3922 3923 vchan_synchronize(&uc->vc); 3924 3925 if (uc->state == UDMA_CHAN_IS_TERMINATING) { 3926 timeout = wait_for_completion_timeout(&uc->teardown_completed, 3927 timeout); 3928 if (!timeout) { 3929 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", 3930 uc->id); 3931 udma_dump_chan_stdata(uc); 3932 udma_reset_chan(uc, true); 3933 } 3934 } 3935 3936 udma_reset_chan(uc, false); 3937 if (udma_is_chan_running(uc)) 3938 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); 3939 3940 cancel_delayed_work_sync(&uc->tx_drain.work); 3941 udma_reset_rings(uc); 3942 } 3943 3944 static void udma_desc_pre_callback(struct virt_dma_chan *vc, 3945 struct virt_dma_desc *vd, 3946 struct dmaengine_result *result) 3947 { 3948 struct udma_chan *uc = to_udma_chan(&vc->chan); 3949 struct udma_desc *d; 3950 3951 if (!vd) 3952 return; 3953 3954 d = to_udma_desc(&vd->tx); 3955 3956 if (d->metadata_size) 3957 udma_fetch_epib(uc, d); 3958 3959 /* Provide residue information for the client */ 3960 if (result) { 3961 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); 3962 3963 if (cppi5_desc_get_type(desc_vaddr) == 3964 CPPI5_INFO0_DESC_TYPE_VAL_HOST) { 3965 result->residue = d->residue - 3966 cppi5_hdesc_get_pktlen(desc_vaddr); 3967 if (result->residue) 3968 result->result = DMA_TRANS_ABORTED; 3969 else 3970 result->result = DMA_TRANS_NOERROR; 3971 } else { 3972 result->residue = 0; 3973 result->result = DMA_TRANS_NOERROR; 3974 } 3975 } 3976 } 3977 3978 /* 3979 * This tasklet handles the completion of a DMA descriptor by 3980 * calling its callback and freeing it. 3981 */ 3982 static void udma_vchan_complete(struct tasklet_struct *t) 3983 { 3984 struct virt_dma_chan *vc = from_tasklet(vc, t, task); 3985 struct virt_dma_desc *vd, *_vd; 3986 struct dmaengine_desc_callback cb; 3987 LIST_HEAD(head); 3988 3989 spin_lock_irq(&vc->lock); 3990 list_splice_tail_init(&vc->desc_completed, &head); 3991 vd = vc->cyclic; 3992 if (vd) { 3993 vc->cyclic = NULL; 3994 dmaengine_desc_get_callback(&vd->tx, &cb); 3995 } else { 3996 memset(&cb, 0, sizeof(cb)); 3997 } 3998 spin_unlock_irq(&vc->lock); 3999 4000 udma_desc_pre_callback(vc, vd, NULL); 4001 dmaengine_desc_callback_invoke(&cb, NULL); 4002 4003 list_for_each_entry_safe(vd, _vd, &head, node) { 4004 struct dmaengine_result result; 4005 4006 dmaengine_desc_get_callback(&vd->tx, &cb); 4007 4008 list_del(&vd->node); 4009 4010 udma_desc_pre_callback(vc, vd, &result); 4011 dmaengine_desc_callback_invoke(&cb, &result); 4012 4013 vchan_vdesc_fini(vd); 4014 } 4015 } 4016 4017 static void udma_free_chan_resources(struct dma_chan *chan) 4018 { 4019 struct udma_chan *uc = to_udma_chan(chan); 4020 struct udma_dev *ud = to_udma_dev(chan->device); 4021 4022 udma_terminate_all(chan); 4023 if (uc->terminated_desc) { 4024 udma_reset_chan(uc, false); 4025 udma_reset_rings(uc); 4026 } 4027 4028 cancel_delayed_work_sync(&uc->tx_drain.work); 4029 4030 if (uc->irq_num_ring > 0) { 4031 free_irq(uc->irq_num_ring, uc); 4032 4033 uc->irq_num_ring = 0; 4034 } 4035 if (uc->irq_num_udma > 0) { 4036 free_irq(uc->irq_num_udma, uc); 4037 4038 uc->irq_num_udma = 0; 4039 } 4040 4041 /* Release PSI-L pairing */ 4042 if (uc->psil_paired) { 4043 navss_psil_unpair(ud, uc->config.src_thread, 4044 uc->config.dst_thread); 4045 uc->psil_paired = false; 4046 } 4047 4048 vchan_free_chan_resources(&uc->vc); 4049 tasklet_kill(&uc->vc.task); 4050 4051 bcdma_free_bchan_resources(uc); 4052 udma_free_tx_resources(uc); 4053 udma_free_rx_resources(uc); 4054 udma_reset_uchan(uc); 4055 4056 if (uc->use_dma_pool) { 4057 dma_pool_destroy(uc->hdesc_pool); 4058 uc->use_dma_pool = false; 4059 } 4060 } 4061 4062 static struct platform_driver udma_driver; 4063 static struct platform_driver bcdma_driver; 4064 static struct platform_driver pktdma_driver; 4065 4066 struct udma_filter_param { 4067 int remote_thread_id; 4068 u32 atype; 4069 u32 asel; 4070 u32 tr_trigger_type; 4071 }; 4072 4073 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) 4074 { 4075 struct udma_chan_config *ucc; 4076 struct psil_endpoint_config *ep_config; 4077 struct udma_filter_param *filter_param; 4078 struct udma_chan *uc; 4079 struct udma_dev *ud; 4080 4081 if (chan->device->dev->driver != &udma_driver.driver && 4082 chan->device->dev->driver != &bcdma_driver.driver && 4083 chan->device->dev->driver != &pktdma_driver.driver) 4084 return false; 4085 4086 uc = to_udma_chan(chan); 4087 ucc = &uc->config; 4088 ud = uc->ud; 4089 filter_param = param; 4090 4091 if (filter_param->atype > 2) { 4092 dev_err(ud->dev, "Invalid channel atype: %u\n", 4093 filter_param->atype); 4094 return false; 4095 } 4096 4097 if (filter_param->asel > 15) { 4098 dev_err(ud->dev, "Invalid channel asel: %u\n", 4099 filter_param->asel); 4100 return false; 4101 } 4102 4103 ucc->remote_thread_id = filter_param->remote_thread_id; 4104 ucc->atype = filter_param->atype; 4105 ucc->asel = filter_param->asel; 4106 ucc->tr_trigger_type = filter_param->tr_trigger_type; 4107 4108 if (ucc->tr_trigger_type) { 4109 ucc->dir = DMA_MEM_TO_MEM; 4110 goto triggered_bchan; 4111 } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) { 4112 ucc->dir = DMA_MEM_TO_DEV; 4113 } else { 4114 ucc->dir = DMA_DEV_TO_MEM; 4115 } 4116 4117 ep_config = psil_get_ep_config(ucc->remote_thread_id); 4118 if (IS_ERR(ep_config)) { 4119 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", 4120 ucc->remote_thread_id); 4121 ucc->dir = DMA_MEM_TO_MEM; 4122 ucc->remote_thread_id = -1; 4123 ucc->atype = 0; 4124 ucc->asel = 0; 4125 return false; 4126 } 4127 4128 if (ud->match_data->type == DMA_TYPE_BCDMA && 4129 ep_config->pkt_mode) { 4130 dev_err(ud->dev, 4131 "Only TR mode is supported (psi-l thread 0x%04x)\n", 4132 ucc->remote_thread_id); 4133 ucc->dir = DMA_MEM_TO_MEM; 4134 ucc->remote_thread_id = -1; 4135 ucc->atype = 0; 4136 ucc->asel = 0; 4137 return false; 4138 } 4139 4140 ucc->pkt_mode = ep_config->pkt_mode; 4141 ucc->channel_tpl = ep_config->channel_tpl; 4142 ucc->notdpkt = ep_config->notdpkt; 4143 ucc->ep_type = ep_config->ep_type; 4144 4145 if (ud->match_data->type == DMA_TYPE_PKTDMA && 4146 ep_config->mapped_channel_id >= 0) { 4147 ucc->mapped_channel_id = ep_config->mapped_channel_id; 4148 ucc->default_flow_id = ep_config->default_flow_id; 4149 } else { 4150 ucc->mapped_channel_id = -1; 4151 ucc->default_flow_id = -1; 4152 } 4153 4154 if (ucc->ep_type != PSIL_EP_NATIVE) { 4155 const struct udma_match_data *match_data = ud->match_data; 4156 4157 if (match_data->flags & UDMA_FLAG_PDMA_ACC32) 4158 ucc->enable_acc32 = ep_config->pdma_acc32; 4159 if (match_data->flags & UDMA_FLAG_PDMA_BURST) 4160 ucc->enable_burst = ep_config->pdma_burst; 4161 } 4162 4163 ucc->needs_epib = ep_config->needs_epib; 4164 ucc->psd_size = ep_config->psd_size; 4165 ucc->metadata_size = 4166 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + 4167 ucc->psd_size; 4168 4169 if (ucc->pkt_mode) 4170 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + 4171 ucc->metadata_size, ud->desc_align); 4172 4173 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, 4174 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); 4175 4176 return true; 4177 4178 triggered_bchan: 4179 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id, 4180 ucc->tr_trigger_type); 4181 4182 return true; 4183 4184 } 4185 4186 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, 4187 struct of_dma *ofdma) 4188 { 4189 struct udma_dev *ud = ofdma->of_dma_data; 4190 dma_cap_mask_t mask = ud->ddev.cap_mask; 4191 struct udma_filter_param filter_param; 4192 struct dma_chan *chan; 4193 4194 if (ud->match_data->type == DMA_TYPE_BCDMA) { 4195 if (dma_spec->args_count != 3) 4196 return NULL; 4197 4198 filter_param.tr_trigger_type = dma_spec->args[0]; 4199 filter_param.remote_thread_id = dma_spec->args[1]; 4200 filter_param.asel = dma_spec->args[2]; 4201 filter_param.atype = 0; 4202 } else { 4203 if (dma_spec->args_count != 1 && dma_spec->args_count != 2) 4204 return NULL; 4205 4206 filter_param.remote_thread_id = dma_spec->args[0]; 4207 filter_param.tr_trigger_type = 0; 4208 if (dma_spec->args_count == 2) { 4209 if (ud->match_data->type == DMA_TYPE_UDMA) { 4210 filter_param.atype = dma_spec->args[1]; 4211 filter_param.asel = 0; 4212 } else { 4213 filter_param.atype = 0; 4214 filter_param.asel = dma_spec->args[1]; 4215 } 4216 } else { 4217 filter_param.atype = 0; 4218 filter_param.asel = 0; 4219 } 4220 } 4221 4222 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param, 4223 ofdma->of_node); 4224 if (!chan) { 4225 dev_err(ud->dev, "get channel fail in %s.\n", __func__); 4226 return ERR_PTR(-EINVAL); 4227 } 4228 4229 return chan; 4230 } 4231 4232 static struct udma_match_data am654_main_data = { 4233 .type = DMA_TYPE_UDMA, 4234 .psil_base = 0x1000, 4235 .enable_memcpy_support = true, 4236 .statictr_z_mask = GENMASK(11, 0), 4237 .burst_size = { 4238 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4239 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 4240 0, /* No UH Channels */ 4241 }, 4242 }; 4243 4244 static struct udma_match_data am654_mcu_data = { 4245 .type = DMA_TYPE_UDMA, 4246 .psil_base = 0x6000, 4247 .enable_memcpy_support = false, 4248 .statictr_z_mask = GENMASK(11, 0), 4249 .burst_size = { 4250 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4251 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 4252 0, /* No UH Channels */ 4253 }, 4254 }; 4255 4256 static struct udma_match_data j721e_main_data = { 4257 .type = DMA_TYPE_UDMA, 4258 .psil_base = 0x1000, 4259 .enable_memcpy_support = true, 4260 .flags = UDMA_FLAGS_J7_CLASS, 4261 .statictr_z_mask = GENMASK(23, 0), 4262 .burst_size = { 4263 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4264 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */ 4265 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */ 4266 }, 4267 }; 4268 4269 static struct udma_match_data j721e_mcu_data = { 4270 .type = DMA_TYPE_UDMA, 4271 .psil_base = 0x6000, 4272 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ 4273 .flags = UDMA_FLAGS_J7_CLASS, 4274 .statictr_z_mask = GENMASK(23, 0), 4275 .burst_size = { 4276 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4277 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */ 4278 0, /* No UH Channels */ 4279 }, 4280 }; 4281 4282 static struct udma_match_data am64_bcdma_data = { 4283 .type = DMA_TYPE_BCDMA, 4284 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */ 4285 .enable_memcpy_support = true, /* Supported via bchan */ 4286 .flags = UDMA_FLAGS_J7_CLASS, 4287 .statictr_z_mask = GENMASK(23, 0), 4288 .burst_size = { 4289 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4290 0, /* No H Channels */ 4291 0, /* No UH Channels */ 4292 }, 4293 }; 4294 4295 static struct udma_match_data am64_pktdma_data = { 4296 .type = DMA_TYPE_PKTDMA, 4297 .psil_base = 0x1000, 4298 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */ 4299 .flags = UDMA_FLAGS_J7_CLASS, 4300 .statictr_z_mask = GENMASK(23, 0), 4301 .burst_size = { 4302 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4303 0, /* No H Channels */ 4304 0, /* No UH Channels */ 4305 }, 4306 }; 4307 4308 static const struct of_device_id udma_of_match[] = { 4309 { 4310 .compatible = "ti,am654-navss-main-udmap", 4311 .data = &am654_main_data, 4312 }, 4313 { 4314 .compatible = "ti,am654-navss-mcu-udmap", 4315 .data = &am654_mcu_data, 4316 }, { 4317 .compatible = "ti,j721e-navss-main-udmap", 4318 .data = &j721e_main_data, 4319 }, { 4320 .compatible = "ti,j721e-navss-mcu-udmap", 4321 .data = &j721e_mcu_data, 4322 }, 4323 { /* Sentinel */ }, 4324 }; 4325 4326 static const struct of_device_id bcdma_of_match[] = { 4327 { 4328 .compatible = "ti,am64-dmss-bcdma", 4329 .data = &am64_bcdma_data, 4330 }, 4331 { /* Sentinel */ }, 4332 }; 4333 4334 static const struct of_device_id pktdma_of_match[] = { 4335 { 4336 .compatible = "ti,am64-dmss-pktdma", 4337 .data = &am64_pktdma_data, 4338 }, 4339 { /* Sentinel */ }, 4340 }; 4341 4342 static struct udma_soc_data am654_soc_data = { 4343 .oes = { 4344 .udma_rchan = 0x200, 4345 }, 4346 }; 4347 4348 static struct udma_soc_data j721e_soc_data = { 4349 .oes = { 4350 .udma_rchan = 0x400, 4351 }, 4352 }; 4353 4354 static struct udma_soc_data j7200_soc_data = { 4355 .oes = { 4356 .udma_rchan = 0x80, 4357 }, 4358 }; 4359 4360 static struct udma_soc_data am64_soc_data = { 4361 .oes = { 4362 .bcdma_bchan_data = 0x2200, 4363 .bcdma_bchan_ring = 0x2400, 4364 .bcdma_tchan_data = 0x2800, 4365 .bcdma_tchan_ring = 0x2a00, 4366 .bcdma_rchan_data = 0x2e00, 4367 .bcdma_rchan_ring = 0x3000, 4368 .pktdma_tchan_flow = 0x1200, 4369 .pktdma_rchan_flow = 0x1600, 4370 }, 4371 .bcdma_trigger_event_offset = 0xc400, 4372 }; 4373 4374 static const struct soc_device_attribute k3_soc_devices[] = { 4375 { .family = "AM65X", .data = &am654_soc_data }, 4376 { .family = "J721E", .data = &j721e_soc_data }, 4377 { .family = "J7200", .data = &j7200_soc_data }, 4378 { .family = "AM64X", .data = &am64_soc_data }, 4379 { /* sentinel */ } 4380 }; 4381 4382 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) 4383 { 4384 u32 cap2, cap3, cap4; 4385 int i; 4386 4387 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]); 4388 if (IS_ERR(ud->mmrs[MMR_GCFG])) 4389 return PTR_ERR(ud->mmrs[MMR_GCFG]); 4390 4391 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); 4392 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4393 4394 switch (ud->match_data->type) { 4395 case DMA_TYPE_UDMA: 4396 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); 4397 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); 4398 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); 4399 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); 4400 break; 4401 case DMA_TYPE_BCDMA: 4402 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2); 4403 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); 4404 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); 4405 ud->rflow_cnt = ud->rchan_cnt; 4406 break; 4407 case DMA_TYPE_PKTDMA: 4408 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30); 4409 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); 4410 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); 4411 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); 4412 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4); 4413 break; 4414 default: 4415 return -EINVAL; 4416 } 4417 4418 for (i = 1; i < MMR_LAST; i++) { 4419 if (i == MMR_BCHANRT && ud->bchan_cnt == 0) 4420 continue; 4421 if (i == MMR_TCHANRT && ud->tchan_cnt == 0) 4422 continue; 4423 if (i == MMR_RCHANRT && ud->rchan_cnt == 0) 4424 continue; 4425 4426 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]); 4427 if (IS_ERR(ud->mmrs[i])) 4428 return PTR_ERR(ud->mmrs[i]); 4429 } 4430 4431 return 0; 4432 } 4433 4434 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map, 4435 struct ti_sci_resource_desc *rm_desc, 4436 char *name) 4437 { 4438 bitmap_clear(map, rm_desc->start, rm_desc->num); 4439 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec); 4440 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name, 4441 rm_desc->start, rm_desc->num, rm_desc->start_sec, 4442 rm_desc->num_sec); 4443 } 4444 4445 static const char * const range_names[] = { 4446 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan", 4447 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan", 4448 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan", 4449 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow", 4450 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow", 4451 }; 4452 4453 static int udma_setup_resources(struct udma_dev *ud) 4454 { 4455 int ret, i, j; 4456 struct device *dev = ud->dev; 4457 struct ti_sci_resource *rm_res, irq_res; 4458 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4459 u32 cap3; 4460 4461 /* Set up the throughput level start indexes */ 4462 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4463 if (of_device_is_compatible(dev->of_node, 4464 "ti,am654-navss-main-udmap")) { 4465 ud->tchan_tpl.levels = 2; 4466 ud->tchan_tpl.start_idx[0] = 8; 4467 } else if (of_device_is_compatible(dev->of_node, 4468 "ti,am654-navss-mcu-udmap")) { 4469 ud->tchan_tpl.levels = 2; 4470 ud->tchan_tpl.start_idx[0] = 2; 4471 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) { 4472 ud->tchan_tpl.levels = 3; 4473 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); 4474 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4475 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { 4476 ud->tchan_tpl.levels = 2; 4477 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4478 } else { 4479 ud->tchan_tpl.levels = 1; 4480 } 4481 4482 ud->rchan_tpl.levels = ud->tchan_tpl.levels; 4483 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; 4484 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; 4485 4486 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4487 sizeof(unsigned long), GFP_KERNEL); 4488 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4489 GFP_KERNEL); 4490 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4491 sizeof(unsigned long), GFP_KERNEL); 4492 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4493 GFP_KERNEL); 4494 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), 4495 sizeof(unsigned long), 4496 GFP_KERNEL); 4497 ud->rflow_gp_map_allocated = devm_kcalloc(dev, 4498 BITS_TO_LONGS(ud->rflow_cnt), 4499 sizeof(unsigned long), 4500 GFP_KERNEL); 4501 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), 4502 sizeof(unsigned long), 4503 GFP_KERNEL); 4504 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), 4505 GFP_KERNEL); 4506 4507 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || 4508 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || 4509 !ud->rflows || !ud->rflow_in_use) 4510 return -ENOMEM; 4511 4512 /* 4513 * RX flows with the same Ids as RX channels are reserved to be used 4514 * as default flows if remote HW can't generate flow_ids. Those 4515 * RX flows can be requested only explicitly by id. 4516 */ 4517 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); 4518 4519 /* by default no GP rflows are assigned to Linux */ 4520 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); 4521 4522 /* Get resource ranges from tisci */ 4523 for (i = 0; i < RM_RANGE_LAST; i++) { 4524 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW) 4525 continue; 4526 4527 tisci_rm->rm_ranges[i] = 4528 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4529 tisci_rm->tisci_dev_id, 4530 (char *)range_names[i]); 4531 } 4532 4533 /* tchan ranges */ 4534 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4535 if (IS_ERR(rm_res)) { 4536 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4537 } else { 4538 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4539 for (i = 0; i < rm_res->sets; i++) 4540 udma_mark_resource_ranges(ud, ud->tchan_map, 4541 &rm_res->desc[i], "tchan"); 4542 } 4543 irq_res.sets = rm_res->sets; 4544 4545 /* rchan and matching default flow ranges */ 4546 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4547 if (IS_ERR(rm_res)) { 4548 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4549 } else { 4550 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4551 for (i = 0; i < rm_res->sets; i++) 4552 udma_mark_resource_ranges(ud, ud->rchan_map, 4553 &rm_res->desc[i], "rchan"); 4554 } 4555 4556 irq_res.sets += rm_res->sets; 4557 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4558 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4559 for (i = 0; i < rm_res->sets; i++) { 4560 irq_res.desc[i].start = rm_res->desc[i].start; 4561 irq_res.desc[i].num = rm_res->desc[i].num; 4562 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; 4563 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 4564 } 4565 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4566 for (j = 0; j < rm_res->sets; j++, i++) { 4567 if (rm_res->desc[j].num) { 4568 irq_res.desc[i].start = rm_res->desc[j].start + 4569 ud->soc_data->oes.udma_rchan; 4570 irq_res.desc[i].num = rm_res->desc[j].num; 4571 } 4572 if (rm_res->desc[j].num_sec) { 4573 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4574 ud->soc_data->oes.udma_rchan; 4575 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4576 } 4577 } 4578 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4579 kfree(irq_res.desc); 4580 if (ret) { 4581 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4582 return ret; 4583 } 4584 4585 /* GP rflow ranges */ 4586 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4587 if (IS_ERR(rm_res)) { 4588 /* all gp flows are assigned exclusively to Linux */ 4589 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, 4590 ud->rflow_cnt - ud->rchan_cnt); 4591 } else { 4592 for (i = 0; i < rm_res->sets; i++) 4593 udma_mark_resource_ranges(ud, ud->rflow_gp_map, 4594 &rm_res->desc[i], "gp-rflow"); 4595 } 4596 4597 return 0; 4598 } 4599 4600 static int bcdma_setup_resources(struct udma_dev *ud) 4601 { 4602 int ret, i, j; 4603 struct device *dev = ud->dev; 4604 struct ti_sci_resource *rm_res, irq_res; 4605 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4606 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 4607 u32 cap; 4608 4609 /* Set up the throughput level start indexes */ 4610 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4611 if (BCDMA_CAP3_UBCHAN_CNT(cap)) { 4612 ud->bchan_tpl.levels = 3; 4613 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap); 4614 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); 4615 } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) { 4616 ud->bchan_tpl.levels = 2; 4617 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); 4618 } else { 4619 ud->bchan_tpl.levels = 1; 4620 } 4621 4622 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30); 4623 if (BCDMA_CAP4_URCHAN_CNT(cap)) { 4624 ud->rchan_tpl.levels = 3; 4625 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap); 4626 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); 4627 } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) { 4628 ud->rchan_tpl.levels = 2; 4629 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); 4630 } else { 4631 ud->rchan_tpl.levels = 1; 4632 } 4633 4634 if (BCDMA_CAP4_UTCHAN_CNT(cap)) { 4635 ud->tchan_tpl.levels = 3; 4636 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap); 4637 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); 4638 } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) { 4639 ud->tchan_tpl.levels = 2; 4640 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); 4641 } else { 4642 ud->tchan_tpl.levels = 1; 4643 } 4644 4645 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt), 4646 sizeof(unsigned long), GFP_KERNEL); 4647 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans), 4648 GFP_KERNEL); 4649 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4650 sizeof(unsigned long), GFP_KERNEL); 4651 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4652 GFP_KERNEL); 4653 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4654 sizeof(unsigned long), GFP_KERNEL); 4655 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4656 GFP_KERNEL); 4657 /* BCDMA do not really have flows, but the driver expect it */ 4658 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt), 4659 sizeof(unsigned long), 4660 GFP_KERNEL); 4661 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows), 4662 GFP_KERNEL); 4663 4664 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map || 4665 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans || 4666 !ud->rflows) 4667 return -ENOMEM; 4668 4669 /* Get resource ranges from tisci */ 4670 for (i = 0; i < RM_RANGE_LAST; i++) { 4671 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW) 4672 continue; 4673 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0) 4674 continue; 4675 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0) 4676 continue; 4677 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0) 4678 continue; 4679 4680 tisci_rm->rm_ranges[i] = 4681 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4682 tisci_rm->tisci_dev_id, 4683 (char *)range_names[i]); 4684 } 4685 4686 irq_res.sets = 0; 4687 4688 /* bchan ranges */ 4689 if (ud->bchan_cnt) { 4690 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4691 if (IS_ERR(rm_res)) { 4692 bitmap_zero(ud->bchan_map, ud->bchan_cnt); 4693 } else { 4694 bitmap_fill(ud->bchan_map, ud->bchan_cnt); 4695 for (i = 0; i < rm_res->sets; i++) 4696 udma_mark_resource_ranges(ud, ud->bchan_map, 4697 &rm_res->desc[i], 4698 "bchan"); 4699 } 4700 irq_res.sets += rm_res->sets; 4701 } 4702 4703 /* tchan ranges */ 4704 if (ud->tchan_cnt) { 4705 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4706 if (IS_ERR(rm_res)) { 4707 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4708 } else { 4709 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4710 for (i = 0; i < rm_res->sets; i++) 4711 udma_mark_resource_ranges(ud, ud->tchan_map, 4712 &rm_res->desc[i], 4713 "tchan"); 4714 } 4715 irq_res.sets += rm_res->sets * 2; 4716 } 4717 4718 /* rchan ranges */ 4719 if (ud->rchan_cnt) { 4720 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4721 if (IS_ERR(rm_res)) { 4722 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4723 } else { 4724 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4725 for (i = 0; i < rm_res->sets; i++) 4726 udma_mark_resource_ranges(ud, ud->rchan_map, 4727 &rm_res->desc[i], 4728 "rchan"); 4729 } 4730 irq_res.sets += rm_res->sets * 2; 4731 } 4732 4733 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4734 if (ud->bchan_cnt) { 4735 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4736 for (i = 0; i < rm_res->sets; i++) { 4737 irq_res.desc[i].start = rm_res->desc[i].start + 4738 oes->bcdma_bchan_ring; 4739 irq_res.desc[i].num = rm_res->desc[i].num; 4740 } 4741 } 4742 if (ud->tchan_cnt) { 4743 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4744 for (j = 0; j < rm_res->sets; j++, i += 2) { 4745 irq_res.desc[i].start = rm_res->desc[j].start + 4746 oes->bcdma_tchan_data; 4747 irq_res.desc[i].num = rm_res->desc[j].num; 4748 4749 irq_res.desc[i + 1].start = rm_res->desc[j].start + 4750 oes->bcdma_tchan_ring; 4751 irq_res.desc[i + 1].num = rm_res->desc[j].num; 4752 } 4753 } 4754 if (ud->rchan_cnt) { 4755 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4756 for (j = 0; j < rm_res->sets; j++, i += 2) { 4757 irq_res.desc[i].start = rm_res->desc[j].start + 4758 oes->bcdma_rchan_data; 4759 irq_res.desc[i].num = rm_res->desc[j].num; 4760 4761 irq_res.desc[i + 1].start = rm_res->desc[j].start + 4762 oes->bcdma_rchan_ring; 4763 irq_res.desc[i + 1].num = rm_res->desc[j].num; 4764 } 4765 } 4766 4767 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4768 kfree(irq_res.desc); 4769 if (ret) { 4770 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4771 return ret; 4772 } 4773 4774 return 0; 4775 } 4776 4777 static int pktdma_setup_resources(struct udma_dev *ud) 4778 { 4779 int ret, i, j; 4780 struct device *dev = ud->dev; 4781 struct ti_sci_resource *rm_res, irq_res; 4782 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4783 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 4784 u32 cap3; 4785 4786 /* Set up the throughput level start indexes */ 4787 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4788 if (UDMA_CAP3_UCHAN_CNT(cap3)) { 4789 ud->tchan_tpl.levels = 3; 4790 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); 4791 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4792 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { 4793 ud->tchan_tpl.levels = 2; 4794 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4795 } else { 4796 ud->tchan_tpl.levels = 1; 4797 } 4798 4799 ud->rchan_tpl.levels = ud->tchan_tpl.levels; 4800 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; 4801 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; 4802 4803 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4804 sizeof(unsigned long), GFP_KERNEL); 4805 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4806 GFP_KERNEL); 4807 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4808 sizeof(unsigned long), GFP_KERNEL); 4809 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4810 GFP_KERNEL); 4811 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), 4812 sizeof(unsigned long), 4813 GFP_KERNEL); 4814 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), 4815 GFP_KERNEL); 4816 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt), 4817 sizeof(unsigned long), GFP_KERNEL); 4818 4819 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans || 4820 !ud->rchans || !ud->rflows || !ud->rflow_in_use) 4821 return -ENOMEM; 4822 4823 /* Get resource ranges from tisci */ 4824 for (i = 0; i < RM_RANGE_LAST; i++) { 4825 if (i == RM_RANGE_BCHAN) 4826 continue; 4827 4828 tisci_rm->rm_ranges[i] = 4829 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4830 tisci_rm->tisci_dev_id, 4831 (char *)range_names[i]); 4832 } 4833 4834 /* tchan ranges */ 4835 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4836 if (IS_ERR(rm_res)) { 4837 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4838 } else { 4839 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4840 for (i = 0; i < rm_res->sets; i++) 4841 udma_mark_resource_ranges(ud, ud->tchan_map, 4842 &rm_res->desc[i], "tchan"); 4843 } 4844 4845 /* rchan ranges */ 4846 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4847 if (IS_ERR(rm_res)) { 4848 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4849 } else { 4850 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4851 for (i = 0; i < rm_res->sets; i++) 4852 udma_mark_resource_ranges(ud, ud->rchan_map, 4853 &rm_res->desc[i], "rchan"); 4854 } 4855 4856 /* rflow ranges */ 4857 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4858 if (IS_ERR(rm_res)) { 4859 /* all rflows are assigned exclusively to Linux */ 4860 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); 4861 } else { 4862 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); 4863 for (i = 0; i < rm_res->sets; i++) 4864 udma_mark_resource_ranges(ud, ud->rflow_in_use, 4865 &rm_res->desc[i], "rflow"); 4866 } 4867 irq_res.sets = rm_res->sets; 4868 4869 /* tflow ranges */ 4870 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4871 if (IS_ERR(rm_res)) { 4872 /* all tflows are assigned exclusively to Linux */ 4873 bitmap_zero(ud->tflow_map, ud->tflow_cnt); 4874 } else { 4875 bitmap_fill(ud->tflow_map, ud->tflow_cnt); 4876 for (i = 0; i < rm_res->sets; i++) 4877 udma_mark_resource_ranges(ud, ud->tflow_map, 4878 &rm_res->desc[i], "tflow"); 4879 } 4880 irq_res.sets += rm_res->sets; 4881 4882 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4883 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4884 for (i = 0; i < rm_res->sets; i++) { 4885 irq_res.desc[i].start = rm_res->desc[i].start + 4886 oes->pktdma_tchan_flow; 4887 irq_res.desc[i].num = rm_res->desc[i].num; 4888 } 4889 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4890 for (j = 0; j < rm_res->sets; j++, i++) { 4891 irq_res.desc[i].start = rm_res->desc[j].start + 4892 oes->pktdma_rchan_flow; 4893 irq_res.desc[i].num = rm_res->desc[j].num; 4894 } 4895 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4896 kfree(irq_res.desc); 4897 if (ret) { 4898 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4899 return ret; 4900 } 4901 4902 return 0; 4903 } 4904 4905 static int setup_resources(struct udma_dev *ud) 4906 { 4907 struct device *dev = ud->dev; 4908 int ch_count, ret; 4909 4910 switch (ud->match_data->type) { 4911 case DMA_TYPE_UDMA: 4912 ret = udma_setup_resources(ud); 4913 break; 4914 case DMA_TYPE_BCDMA: 4915 ret = bcdma_setup_resources(ud); 4916 break; 4917 case DMA_TYPE_PKTDMA: 4918 ret = pktdma_setup_resources(ud); 4919 break; 4920 default: 4921 return -EINVAL; 4922 } 4923 4924 if (ret) 4925 return ret; 4926 4927 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt; 4928 if (ud->bchan_cnt) 4929 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt); 4930 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); 4931 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); 4932 if (!ch_count) 4933 return -ENODEV; 4934 4935 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), 4936 GFP_KERNEL); 4937 if (!ud->channels) 4938 return -ENOMEM; 4939 4940 switch (ud->match_data->type) { 4941 case DMA_TYPE_UDMA: 4942 dev_info(dev, 4943 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", 4944 ch_count, 4945 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 4946 ud->tchan_cnt), 4947 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 4948 ud->rchan_cnt), 4949 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, 4950 ud->rflow_cnt)); 4951 break; 4952 case DMA_TYPE_BCDMA: 4953 dev_info(dev, 4954 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n", 4955 ch_count, 4956 ud->bchan_cnt - bitmap_weight(ud->bchan_map, 4957 ud->bchan_cnt), 4958 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 4959 ud->tchan_cnt), 4960 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 4961 ud->rchan_cnt)); 4962 break; 4963 case DMA_TYPE_PKTDMA: 4964 dev_info(dev, 4965 "Channels: %d (tchan: %u, rchan: %u)\n", 4966 ch_count, 4967 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 4968 ud->tchan_cnt), 4969 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 4970 ud->rchan_cnt)); 4971 break; 4972 default: 4973 break; 4974 } 4975 4976 return ch_count; 4977 } 4978 4979 static int udma_setup_rx_flush(struct udma_dev *ud) 4980 { 4981 struct udma_rx_flush *rx_flush = &ud->rx_flush; 4982 struct cppi5_desc_hdr_t *tr_desc; 4983 struct cppi5_tr_type1_t *tr_req; 4984 struct cppi5_host_desc_t *desc; 4985 struct device *dev = ud->dev; 4986 struct udma_hwdesc *hwdesc; 4987 size_t tr_size; 4988 4989 /* Allocate 1K buffer for discarded data on RX channel teardown */ 4990 rx_flush->buffer_size = SZ_1K; 4991 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, 4992 GFP_KERNEL); 4993 if (!rx_flush->buffer_vaddr) 4994 return -ENOMEM; 4995 4996 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, 4997 rx_flush->buffer_size, 4998 DMA_TO_DEVICE); 4999 if (dma_mapping_error(dev, rx_flush->buffer_paddr)) 5000 return -ENOMEM; 5001 5002 /* Set up descriptor to be used for TR mode */ 5003 hwdesc = &rx_flush->hwdescs[0]; 5004 tr_size = sizeof(struct cppi5_tr_type1_t); 5005 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); 5006 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, 5007 ud->desc_align); 5008 5009 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 5010 GFP_KERNEL); 5011 if (!hwdesc->cppi5_desc_vaddr) 5012 return -ENOMEM; 5013 5014 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 5015 hwdesc->cppi5_desc_size, 5016 DMA_TO_DEVICE); 5017 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 5018 return -ENOMEM; 5019 5020 /* Start of the TR req records */ 5021 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; 5022 /* Start address of the TR response array */ 5023 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; 5024 5025 tr_desc = hwdesc->cppi5_desc_vaddr; 5026 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0); 5027 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 5028 cppi5_desc_set_retpolicy(tr_desc, 0, 0); 5029 5030 tr_req = hwdesc->tr_req_base; 5031 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, 5032 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 5033 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); 5034 5035 tr_req->addr = rx_flush->buffer_paddr; 5036 tr_req->icnt0 = rx_flush->buffer_size; 5037 tr_req->icnt1 = 1; 5038 5039 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 5040 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 5041 5042 /* Set up descriptor to be used for packet mode */ 5043 hwdesc = &rx_flush->hwdescs[1]; 5044 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + 5045 CPPI5_INFO0_HDESC_EPIB_SIZE + 5046 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE, 5047 ud->desc_align); 5048 5049 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 5050 GFP_KERNEL); 5051 if (!hwdesc->cppi5_desc_vaddr) 5052 return -ENOMEM; 5053 5054 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 5055 hwdesc->cppi5_desc_size, 5056 DMA_TO_DEVICE); 5057 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 5058 return -ENOMEM; 5059 5060 desc = hwdesc->cppi5_desc_vaddr; 5061 cppi5_hdesc_init(desc, 0, 0); 5062 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 5063 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); 5064 5065 cppi5_hdesc_attach_buf(desc, 5066 rx_flush->buffer_paddr, rx_flush->buffer_size, 5067 rx_flush->buffer_paddr, rx_flush->buffer_size); 5068 5069 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 5070 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 5071 return 0; 5072 } 5073 5074 #ifdef CONFIG_DEBUG_FS 5075 static void udma_dbg_summary_show_chan(struct seq_file *s, 5076 struct dma_chan *chan) 5077 { 5078 struct udma_chan *uc = to_udma_chan(chan); 5079 struct udma_chan_config *ucc = &uc->config; 5080 5081 seq_printf(s, " %-13s| %s", dma_chan_name(chan), 5082 chan->dbg_client_name ?: "in-use"); 5083 if (ucc->tr_trigger_type) 5084 seq_puts(s, " (triggered, "); 5085 else 5086 seq_printf(s, " (%s, ", 5087 dmaengine_get_direction_text(uc->config.dir)); 5088 5089 switch (uc->config.dir) { 5090 case DMA_MEM_TO_MEM: 5091 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) { 5092 seq_printf(s, "bchan%d)\n", uc->bchan->id); 5093 return; 5094 } 5095 5096 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id, 5097 ucc->src_thread, ucc->dst_thread); 5098 break; 5099 case DMA_DEV_TO_MEM: 5100 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id, 5101 ucc->src_thread, ucc->dst_thread); 5102 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) 5103 seq_printf(s, "rflow%d, ", uc->rflow->id); 5104 break; 5105 case DMA_MEM_TO_DEV: 5106 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id, 5107 ucc->src_thread, ucc->dst_thread); 5108 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) 5109 seq_printf(s, "tflow%d, ", uc->tchan->tflow_id); 5110 break; 5111 default: 5112 seq_printf(s, ")\n"); 5113 return; 5114 } 5115 5116 if (ucc->ep_type == PSIL_EP_NATIVE) { 5117 seq_printf(s, "PSI-L Native"); 5118 if (ucc->metadata_size) { 5119 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : ""); 5120 if (ucc->psd_size) 5121 seq_printf(s, " PSDsize:%u", ucc->psd_size); 5122 seq_printf(s, " ]"); 5123 } 5124 } else { 5125 seq_printf(s, "PDMA"); 5126 if (ucc->enable_acc32 || ucc->enable_burst) 5127 seq_printf(s, "[%s%s ]", 5128 ucc->enable_acc32 ? " ACC32" : "", 5129 ucc->enable_burst ? " BURST" : ""); 5130 } 5131 5132 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode"); 5133 } 5134 5135 static void udma_dbg_summary_show(struct seq_file *s, 5136 struct dma_device *dma_dev) 5137 { 5138 struct dma_chan *chan; 5139 5140 list_for_each_entry(chan, &dma_dev->channels, device_node) { 5141 if (chan->client_count) 5142 udma_dbg_summary_show_chan(s, chan); 5143 } 5144 } 5145 #endif /* CONFIG_DEBUG_FS */ 5146 5147 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud) 5148 { 5149 const struct udma_match_data *match_data = ud->match_data; 5150 u8 tpl; 5151 5152 if (!match_data->enable_memcpy_support) 5153 return DMAENGINE_ALIGN_8_BYTES; 5154 5155 /* Get the highest TPL level the device supports for memcpy */ 5156 if (ud->bchan_cnt) 5157 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0); 5158 else if (ud->tchan_cnt) 5159 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0); 5160 else 5161 return DMAENGINE_ALIGN_8_BYTES; 5162 5163 switch (match_data->burst_size[tpl]) { 5164 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES: 5165 return DMAENGINE_ALIGN_256_BYTES; 5166 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES: 5167 return DMAENGINE_ALIGN_128_BYTES; 5168 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES: 5169 fallthrough; 5170 default: 5171 return DMAENGINE_ALIGN_64_BYTES; 5172 } 5173 } 5174 5175 #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 5176 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 5177 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 5178 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 5179 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 5180 5181 static int udma_probe(struct platform_device *pdev) 5182 { 5183 struct device_node *navss_node = pdev->dev.parent->of_node; 5184 const struct soc_device_attribute *soc; 5185 struct device *dev = &pdev->dev; 5186 struct udma_dev *ud; 5187 const struct of_device_id *match; 5188 int i, ret; 5189 int ch_count; 5190 5191 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); 5192 if (ret) 5193 dev_err(dev, "failed to set dma mask stuff\n"); 5194 5195 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); 5196 if (!ud) 5197 return -ENOMEM; 5198 5199 match = of_match_node(udma_of_match, dev->of_node); 5200 if (!match) 5201 match = of_match_node(bcdma_of_match, dev->of_node); 5202 if (!match) { 5203 match = of_match_node(pktdma_of_match, dev->of_node); 5204 if (!match) { 5205 dev_err(dev, "No compatible match found\n"); 5206 return -ENODEV; 5207 } 5208 } 5209 ud->match_data = match->data; 5210 5211 soc = soc_device_match(k3_soc_devices); 5212 if (!soc) { 5213 dev_err(dev, "No compatible SoC found\n"); 5214 return -ENODEV; 5215 } 5216 ud->soc_data = soc->data; 5217 5218 ret = udma_get_mmrs(pdev, ud); 5219 if (ret) 5220 return ret; 5221 5222 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); 5223 if (IS_ERR(ud->tisci_rm.tisci)) 5224 return PTR_ERR(ud->tisci_rm.tisci); 5225 5226 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", 5227 &ud->tisci_rm.tisci_dev_id); 5228 if (ret) { 5229 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); 5230 return ret; 5231 } 5232 pdev->id = ud->tisci_rm.tisci_dev_id; 5233 5234 ret = of_property_read_u32(navss_node, "ti,sci-dev-id", 5235 &ud->tisci_rm.tisci_navss_dev_id); 5236 if (ret) { 5237 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); 5238 return ret; 5239 } 5240 5241 if (ud->match_data->type == DMA_TYPE_UDMA) { 5242 ret = of_property_read_u32(dev->of_node, "ti,udma-atype", 5243 &ud->atype); 5244 if (!ret && ud->atype > 2) { 5245 dev_err(dev, "Invalid atype: %u\n", ud->atype); 5246 return -EINVAL; 5247 } 5248 } else { 5249 ret = of_property_read_u32(dev->of_node, "ti,asel", 5250 &ud->asel); 5251 if (!ret && ud->asel > 15) { 5252 dev_err(dev, "Invalid asel: %u\n", ud->asel); 5253 return -EINVAL; 5254 } 5255 } 5256 5257 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; 5258 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; 5259 5260 if (ud->match_data->type == DMA_TYPE_UDMA) { 5261 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); 5262 } else { 5263 struct k3_ringacc_init_data ring_init_data; 5264 5265 ring_init_data.tisci = ud->tisci_rm.tisci; 5266 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id; 5267 if (ud->match_data->type == DMA_TYPE_BCDMA) { 5268 ring_init_data.num_rings = ud->bchan_cnt + 5269 ud->tchan_cnt + 5270 ud->rchan_cnt; 5271 } else { 5272 ring_init_data.num_rings = ud->rflow_cnt + 5273 ud->tflow_cnt; 5274 } 5275 5276 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data); 5277 } 5278 5279 if (IS_ERR(ud->ringacc)) 5280 return PTR_ERR(ud->ringacc); 5281 5282 dev->msi_domain = of_msi_get_domain(dev, dev->of_node, 5283 DOMAIN_BUS_TI_SCI_INTA_MSI); 5284 if (!dev->msi_domain) { 5285 dev_err(dev, "Failed to get MSI domain\n"); 5286 return -EPROBE_DEFER; 5287 } 5288 5289 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); 5290 /* cyclic operation is not supported via PKTDMA */ 5291 if (ud->match_data->type != DMA_TYPE_PKTDMA) { 5292 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); 5293 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; 5294 } 5295 5296 ud->ddev.device_config = udma_slave_config; 5297 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; 5298 ud->ddev.device_issue_pending = udma_issue_pending; 5299 ud->ddev.device_tx_status = udma_tx_status; 5300 ud->ddev.device_pause = udma_pause; 5301 ud->ddev.device_resume = udma_resume; 5302 ud->ddev.device_terminate_all = udma_terminate_all; 5303 ud->ddev.device_synchronize = udma_synchronize; 5304 #ifdef CONFIG_DEBUG_FS 5305 ud->ddev.dbg_summary_show = udma_dbg_summary_show; 5306 #endif 5307 5308 switch (ud->match_data->type) { 5309 case DMA_TYPE_UDMA: 5310 ud->ddev.device_alloc_chan_resources = 5311 udma_alloc_chan_resources; 5312 break; 5313 case DMA_TYPE_BCDMA: 5314 ud->ddev.device_alloc_chan_resources = 5315 bcdma_alloc_chan_resources; 5316 ud->ddev.device_router_config = bcdma_router_config; 5317 break; 5318 case DMA_TYPE_PKTDMA: 5319 ud->ddev.device_alloc_chan_resources = 5320 pktdma_alloc_chan_resources; 5321 break; 5322 default: 5323 return -EINVAL; 5324 } 5325 ud->ddev.device_free_chan_resources = udma_free_chan_resources; 5326 5327 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; 5328 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; 5329 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 5330 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 5331 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | 5332 DESC_METADATA_ENGINE; 5333 if (ud->match_data->enable_memcpy_support && 5334 !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) { 5335 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); 5336 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; 5337 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); 5338 } 5339 5340 ud->ddev.dev = dev; 5341 ud->dev = dev; 5342 ud->psil_base = ud->match_data->psil_base; 5343 5344 INIT_LIST_HEAD(&ud->ddev.channels); 5345 INIT_LIST_HEAD(&ud->desc_to_purge); 5346 5347 ch_count = setup_resources(ud); 5348 if (ch_count <= 0) 5349 return ch_count; 5350 5351 spin_lock_init(&ud->lock); 5352 INIT_WORK(&ud->purge_work, udma_purge_desc_work); 5353 5354 ud->desc_align = 64; 5355 if (ud->desc_align < dma_get_cache_alignment()) 5356 ud->desc_align = dma_get_cache_alignment(); 5357 5358 ret = udma_setup_rx_flush(ud); 5359 if (ret) 5360 return ret; 5361 5362 for (i = 0; i < ud->bchan_cnt; i++) { 5363 struct udma_bchan *bchan = &ud->bchans[i]; 5364 5365 bchan->id = i; 5366 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000; 5367 } 5368 5369 for (i = 0; i < ud->tchan_cnt; i++) { 5370 struct udma_tchan *tchan = &ud->tchans[i]; 5371 5372 tchan->id = i; 5373 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; 5374 } 5375 5376 for (i = 0; i < ud->rchan_cnt; i++) { 5377 struct udma_rchan *rchan = &ud->rchans[i]; 5378 5379 rchan->id = i; 5380 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; 5381 } 5382 5383 for (i = 0; i < ud->rflow_cnt; i++) { 5384 struct udma_rflow *rflow = &ud->rflows[i]; 5385 5386 rflow->id = i; 5387 } 5388 5389 for (i = 0; i < ch_count; i++) { 5390 struct udma_chan *uc = &ud->channels[i]; 5391 5392 uc->ud = ud; 5393 uc->vc.desc_free = udma_desc_free; 5394 uc->id = i; 5395 uc->bchan = NULL; 5396 uc->tchan = NULL; 5397 uc->rchan = NULL; 5398 uc->config.remote_thread_id = -1; 5399 uc->config.mapped_channel_id = -1; 5400 uc->config.default_flow_id = -1; 5401 uc->config.dir = DMA_MEM_TO_MEM; 5402 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", 5403 dev_name(dev), i); 5404 5405 vchan_init(&uc->vc, &ud->ddev); 5406 /* Use custom vchan completion handling */ 5407 tasklet_setup(&uc->vc.task, udma_vchan_complete); 5408 init_completion(&uc->teardown_completed); 5409 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); 5410 } 5411 5412 /* Configure the copy_align to the maximum burst size the device supports */ 5413 ud->ddev.copy_align = udma_get_copy_align(ud); 5414 5415 ret = dma_async_device_register(&ud->ddev); 5416 if (ret) { 5417 dev_err(dev, "failed to register slave DMA engine: %d\n", ret); 5418 return ret; 5419 } 5420 5421 platform_set_drvdata(pdev, ud); 5422 5423 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); 5424 if (ret) { 5425 dev_err(dev, "failed to register of_dma controller\n"); 5426 dma_async_device_unregister(&ud->ddev); 5427 } 5428 5429 return ret; 5430 } 5431 5432 static struct platform_driver udma_driver = { 5433 .driver = { 5434 .name = "ti-udma", 5435 .of_match_table = udma_of_match, 5436 .suppress_bind_attrs = true, 5437 }, 5438 .probe = udma_probe, 5439 }; 5440 builtin_platform_driver(udma_driver); 5441 5442 static struct platform_driver bcdma_driver = { 5443 .driver = { 5444 .name = "ti-bcdma", 5445 .of_match_table = bcdma_of_match, 5446 .suppress_bind_attrs = true, 5447 }, 5448 .probe = udma_probe, 5449 }; 5450 builtin_platform_driver(bcdma_driver); 5451 5452 static struct platform_driver pktdma_driver = { 5453 .driver = { 5454 .name = "ti-pktdma", 5455 .of_match_table = pktdma_of_match, 5456 .suppress_bind_attrs = true, 5457 }, 5458 .probe = udma_probe, 5459 }; 5460 builtin_platform_driver(pktdma_driver); 5461 5462 /* Private interfaces to UDMA */ 5463 #include "k3-udma-private.c" 5464