// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-udma.h"
#include "k3-psil-priv.h"

struct udma_static_tr {
	u8 elsize; /* RPSTR0 */
	u16 elcnt; /* RPSTR0 */
	u16 bstcnt; /* RPSTR1 */
};

#define K3_UDMA_MAX_RFLOWS		1024
#define K3_UDMA_DEFAULT_RING_SIZE	16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE		0
#define UDMA_RFLOW_SRCTAG_CFG_TAG	1
#define UDMA_RFLOW_SRCTAG_FLOW_ID	2
#define UDMA_RFLOW_SRCTAG_SRC_TAG	4

#define UDMA_RFLOW_DSTTAG_NONE		0
#define UDMA_RFLOW_DSTTAG_CFG_TAG	1
#define UDMA_RFLOW_DSTTAG_FLOW_ID	2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5

struct udma_chan;

enum k3_dma_type {
	DMA_TYPE_UDMA = 0,
	DMA_TYPE_BCDMA,
	DMA_TYPE_PKTDMA,
};

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_BCHANRT,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	[MMR_GCFG] = "gcfg",
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_ring *t_ring; /* Transmit ring */
	struct k3_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */

};

#define udma_bchan udma_tchan

struct udma_rflow {
	int id;
	struct k3_ring *fd_ring; /* Free Descriptor ring */
	struct k3_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
};

struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32	BIT(0)
#define UDMA_FLAG_PDMA_BURST	BIT(1)
#define UDMA_FLAG_TDTYPE	BIT(2)
#define UDMA_FLAG_BURST_SIZE	BIT(3)
#define UDMA_FLAGS_J7_CLASS	(UDMA_FLAG_PDMA_ACC32 | \
				 UDMA_FLAG_PDMA_BURST | \
				 UDMA_FLAG_TDTYPE | \
				 UDMA_FLAG_BURST_SIZE)

struct udma_match_data {
	enum k3_dma_type type;
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	u8 burst_size[3];
};

struct udma_soc_data {
	struct udma_oes_offsets oes;
	u32 bcdma_trigger_event_offset;
};

struct udma_hwdesc {
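	/* CPPI5 descriptor backing memory (from the channel's dma_pool or a coherent allocation) */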
size_t cppi5_desc_size; 146 void *cppi5_desc_vaddr; 147 dma_addr_t cppi5_desc_paddr; 148 149 /* TR descriptor internal pointers */ 150 void *tr_req_base; 151 struct cppi5_tr_resp_t *tr_resp_base; 152 }; 153 154 struct udma_rx_flush { 155 struct udma_hwdesc hwdescs[2]; 156 157 size_t buffer_size; 158 void *buffer_vaddr; 159 dma_addr_t buffer_paddr; 160 }; 161 162 struct udma_tpl { 163 u8 levels; 164 u32 start_idx[3]; 165 }; 166 167 struct udma_dev { 168 struct dma_device ddev; 169 struct device *dev; 170 void __iomem *mmrs[MMR_LAST]; 171 const struct udma_match_data *match_data; 172 const struct udma_soc_data *soc_data; 173 174 struct udma_tpl bchan_tpl; 175 struct udma_tpl tchan_tpl; 176 struct udma_tpl rchan_tpl; 177 178 size_t desc_align; /* alignment to use for descriptors */ 179 180 struct udma_tisci_rm tisci_rm; 181 182 struct k3_ringacc *ringacc; 183 184 struct work_struct purge_work; 185 struct list_head desc_to_purge; 186 spinlock_t lock; 187 188 struct udma_rx_flush rx_flush; 189 190 int bchan_cnt; 191 int tchan_cnt; 192 int echan_cnt; 193 int rchan_cnt; 194 int rflow_cnt; 195 int tflow_cnt; 196 unsigned long *bchan_map; 197 unsigned long *tchan_map; 198 unsigned long *rchan_map; 199 unsigned long *rflow_gp_map; 200 unsigned long *rflow_gp_map_allocated; 201 unsigned long *rflow_in_use; 202 unsigned long *tflow_map; 203 204 struct udma_bchan *bchans; 205 struct udma_tchan *tchans; 206 struct udma_rchan *rchans; 207 struct udma_rflow *rflows; 208 209 struct udma_chan *channels; 210 u32 psil_base; 211 u32 atype; 212 u32 asel; 213 }; 214 215 struct udma_desc { 216 struct virt_dma_desc vd; 217 218 bool terminated; 219 220 enum dma_transfer_direction dir; 221 222 struct udma_static_tr static_tr; 223 u32 residue; 224 225 unsigned int sglen; 226 unsigned int desc_idx; /* Only used for cyclic in packet mode */ 227 unsigned int tr_idx; 228 229 u32 metadata_size; 230 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ 231 232 unsigned int hwdesc_count; 233 struct udma_hwdesc hwdesc[]; 234 }; 235 236 enum udma_chan_state { 237 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ 238 UDMA_CHAN_IS_ACTIVE, /* Normal operation */ 239 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ 240 }; 241 242 struct udma_tx_drain { 243 struct delayed_work work; 244 ktime_t tstamp; 245 u32 residue; 246 }; 247 248 struct udma_chan_config { 249 bool pkt_mode; /* TR or packet */ 250 bool needs_epib; /* EPIB is needed for the communication or not */ 251 u32 psd_size; /* size of Protocol Specific Data */ 252 u32 metadata_size; /* (needs_epib ? 
				16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	bool notdpkt; /* Suppress sending TDC packet */
	int remote_thread_id;
	u32 atype;
	u32 asel;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	bool enable_acc32;
	bool enable_burst;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	u32 tr_trigger_type;
	unsigned long tx_flags;

	/* PKTDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_transfer_direction dir;
};

struct udma_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
	struct udma_dev *ud;
	struct device *dma_dev;
	struct udma_desc *desc;
	struct udma_desc *terminated_desc;
	struct udma_static_tr static_tr;
	char *name;

	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	bool psil_paired;

	int irq_num_ring;
	int irq_num_udma;

	bool cyclic;
	bool paused;

	enum udma_chan_state state;
	struct completion teardown_completed;

	struct udma_tx_drain tx_drain;

	/* Channel configuration parameters */
	struct udma_chan_config config;

	/* dmapool for packet mode descriptors */
	bool use_dma_pool;
	struct dma_pool *hdesc_pool;

	u32 id;
};

static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
	return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
	return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct udma_desc, vd.tx);
}

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = readl(base + reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		writel(tmp, base + reg);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->tchan)
		return 0;
	return udma_read(uc->tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->tchan)
		return;
	udma_write(uc->tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->tchan)
		return;
	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->rchan)
		return 0;
	return udma_read(uc->rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->rchan)
		return;
	udma_write(uc->rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->rchan)
		return;
	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
397 } 398 399 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) 400 { 401 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 402 403 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 404 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, 405 tisci_rm->tisci_navss_dev_id, 406 src_thread, dst_thread); 407 } 408 409 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, 410 u32 dst_thread) 411 { 412 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 413 414 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 415 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, 416 tisci_rm->tisci_navss_dev_id, 417 src_thread, dst_thread); 418 } 419 420 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel) 421 { 422 struct device *chan_dev = &chan->dev->device; 423 424 if (asel == 0) { 425 /* No special handling for the channel */ 426 chan->dev->chan_dma_dev = false; 427 428 chan_dev->dma_coherent = false; 429 chan_dev->dma_parms = NULL; 430 } else if (asel == 14 || asel == 15) { 431 chan->dev->chan_dma_dev = true; 432 433 chan_dev->dma_coherent = true; 434 dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48)); 435 chan_dev->dma_parms = chan_dev->parent->dma_parms; 436 } else { 437 dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel); 438 439 chan_dev->dma_coherent = false; 440 chan_dev->dma_parms = NULL; 441 } 442 } 443 444 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id) 445 { 446 int i; 447 448 for (i = 0; i < tpl_map->levels; i++) { 449 if (chan_id >= tpl_map->start_idx[i]) 450 return i; 451 } 452 453 return 0; 454 } 455 456 static void udma_reset_uchan(struct udma_chan *uc) 457 { 458 memset(&uc->config, 0, sizeof(uc->config)); 459 uc->config.remote_thread_id = -1; 460 uc->config.mapped_channel_id = -1; 461 uc->config.default_flow_id = -1; 462 uc->state = UDMA_CHAN_IS_IDLE; 463 } 464 465 static void udma_dump_chan_stdata(struct udma_chan *uc) 466 { 467 struct device *dev = uc->ud->dev; 468 u32 offset; 469 int i; 470 471 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { 472 dev_dbg(dev, "TCHAN State data:\n"); 473 for (i = 0; i < 32; i++) { 474 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 475 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, 476 udma_tchanrt_read(uc, offset)); 477 } 478 } 479 480 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { 481 dev_dbg(dev, "RCHAN State data:\n"); 482 for (i = 0; i < 32; i++) { 483 offset = UDMA_CHAN_RT_STDATA_REG + i * 4; 484 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, 485 udma_rchanrt_read(uc, offset)); 486 } 487 } 488 } 489 490 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, 491 int idx) 492 { 493 return d->hwdesc[idx].cppi5_desc_paddr; 494 } 495 496 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) 497 { 498 return d->hwdesc[idx].cppi5_desc_vaddr; 499 } 500 501 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, 502 dma_addr_t paddr) 503 { 504 struct udma_desc *d = uc->terminated_desc; 505 506 if (d) { 507 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 508 d->desc_idx); 509 510 if (desc_paddr != paddr) 511 d = NULL; 512 } 513 514 if (!d) { 515 d = uc->desc; 516 if (d) { 517 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 518 d->desc_idx); 519 520 if (desc_paddr != paddr) 521 d = NULL; 522 } 523 } 524 525 return d; 526 } 527 528 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) 529 { 530 if (uc->use_dma_pool) { 531 int i; 532 
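		/* packet-mode descriptors come from the channel's dma_pool; return each allocated hwdesc */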
533 for (i = 0; i < d->hwdesc_count; i++) { 534 if (!d->hwdesc[i].cppi5_desc_vaddr) 535 continue; 536 537 dma_pool_free(uc->hdesc_pool, 538 d->hwdesc[i].cppi5_desc_vaddr, 539 d->hwdesc[i].cppi5_desc_paddr); 540 541 d->hwdesc[i].cppi5_desc_vaddr = NULL; 542 } 543 } else if (d->hwdesc[0].cppi5_desc_vaddr) { 544 dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size, 545 d->hwdesc[0].cppi5_desc_vaddr, 546 d->hwdesc[0].cppi5_desc_paddr); 547 548 d->hwdesc[0].cppi5_desc_vaddr = NULL; 549 } 550 } 551 552 static void udma_purge_desc_work(struct work_struct *work) 553 { 554 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); 555 struct virt_dma_desc *vd, *_vd; 556 unsigned long flags; 557 LIST_HEAD(head); 558 559 spin_lock_irqsave(&ud->lock, flags); 560 list_splice_tail_init(&ud->desc_to_purge, &head); 561 spin_unlock_irqrestore(&ud->lock, flags); 562 563 list_for_each_entry_safe(vd, _vd, &head, node) { 564 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 565 struct udma_desc *d = to_udma_desc(&vd->tx); 566 567 udma_free_hwdesc(uc, d); 568 list_del(&vd->node); 569 kfree(d); 570 } 571 572 /* If more to purge, schedule the work again */ 573 if (!list_empty(&ud->desc_to_purge)) 574 schedule_work(&ud->purge_work); 575 } 576 577 static void udma_desc_free(struct virt_dma_desc *vd) 578 { 579 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); 580 struct udma_chan *uc = to_udma_chan(vd->tx.chan); 581 struct udma_desc *d = to_udma_desc(&vd->tx); 582 unsigned long flags; 583 584 if (uc->terminated_desc == d) 585 uc->terminated_desc = NULL; 586 587 if (uc->use_dma_pool) { 588 udma_free_hwdesc(uc, d); 589 kfree(d); 590 return; 591 } 592 593 spin_lock_irqsave(&ud->lock, flags); 594 list_add_tail(&vd->node, &ud->desc_to_purge); 595 spin_unlock_irqrestore(&ud->lock, flags); 596 597 schedule_work(&ud->purge_work); 598 } 599 600 static bool udma_is_chan_running(struct udma_chan *uc) 601 { 602 u32 trt_ctl = 0; 603 u32 rrt_ctl = 0; 604 605 if (uc->tchan) 606 trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 607 if (uc->rchan) 608 rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 609 610 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) 611 return true; 612 613 return false; 614 } 615 616 static bool udma_is_chan_paused(struct udma_chan *uc) 617 { 618 u32 val, pause_mask; 619 620 switch (uc->config.dir) { 621 case DMA_DEV_TO_MEM: 622 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 623 pause_mask = UDMA_PEER_RT_EN_PAUSE; 624 break; 625 case DMA_MEM_TO_DEV: 626 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG); 627 pause_mask = UDMA_PEER_RT_EN_PAUSE; 628 break; 629 case DMA_MEM_TO_MEM: 630 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG); 631 pause_mask = UDMA_CHAN_RT_CTL_PAUSE; 632 break; 633 default: 634 return false; 635 } 636 637 if (val & pause_mask) 638 return true; 639 640 return false; 641 } 642 643 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc) 644 { 645 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; 646 } 647 648 static int udma_push_to_ring(struct udma_chan *uc, int idx) 649 { 650 struct udma_desc *d = uc->desc; 651 struct k3_ring *ring = NULL; 652 dma_addr_t paddr; 653 654 switch (uc->config.dir) { 655 case DMA_DEV_TO_MEM: 656 ring = uc->rflow->fd_ring; 657 break; 658 case DMA_MEM_TO_DEV: 659 case DMA_MEM_TO_MEM: 660 ring = uc->tchan->t_ring; 661 break; 662 default: 663 return -EINVAL; 664 } 665 666 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ 667 if (idx == 
-1) { 668 paddr = udma_get_rx_flush_hwdesc_paddr(uc); 669 } else { 670 paddr = udma_curr_cppi5_desc_paddr(d, idx); 671 672 wmb(); /* Ensure that writes are not moved over this point */ 673 } 674 675 return k3_ringacc_ring_push(ring, &paddr); 676 } 677 678 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr) 679 { 680 if (uc->config.dir != DMA_DEV_TO_MEM) 681 return false; 682 683 if (addr == udma_get_rx_flush_hwdesc_paddr(uc)) 684 return true; 685 686 return false; 687 } 688 689 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) 690 { 691 struct k3_ring *ring = NULL; 692 int ret; 693 694 switch (uc->config.dir) { 695 case DMA_DEV_TO_MEM: 696 ring = uc->rflow->r_ring; 697 break; 698 case DMA_MEM_TO_DEV: 699 case DMA_MEM_TO_MEM: 700 ring = uc->tchan->tc_ring; 701 break; 702 default: 703 return -ENOENT; 704 } 705 706 ret = k3_ringacc_ring_pop(ring, addr); 707 if (ret) 708 return ret; 709 710 rmb(); /* Ensure that reads are not moved before this point */ 711 712 /* Teardown completion */ 713 if (cppi5_desc_is_tdcm(*addr)) 714 return 0; 715 716 /* Check for flush descriptor */ 717 if (udma_desc_is_rx_flush(uc, *addr)) 718 return -ENOENT; 719 720 return 0; 721 } 722 723 static void udma_reset_rings(struct udma_chan *uc) 724 { 725 struct k3_ring *ring1 = NULL; 726 struct k3_ring *ring2 = NULL; 727 728 switch (uc->config.dir) { 729 case DMA_DEV_TO_MEM: 730 if (uc->rchan) { 731 ring1 = uc->rflow->fd_ring; 732 ring2 = uc->rflow->r_ring; 733 } 734 break; 735 case DMA_MEM_TO_DEV: 736 case DMA_MEM_TO_MEM: 737 if (uc->tchan) { 738 ring1 = uc->tchan->t_ring; 739 ring2 = uc->tchan->tc_ring; 740 } 741 break; 742 default: 743 break; 744 } 745 746 if (ring1) 747 k3_ringacc_ring_reset_dma(ring1, 748 k3_ringacc_ring_get_occ(ring1)); 749 if (ring2) 750 k3_ringacc_ring_reset(ring2); 751 752 /* make sure we are not leaking memory by stalled descriptor */ 753 if (uc->terminated_desc) { 754 udma_desc_free(&uc->terminated_desc->vd); 755 uc->terminated_desc = NULL; 756 } 757 } 758 759 static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val) 760 { 761 if (uc->desc->dir == DMA_DEV_TO_MEM) { 762 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 763 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 764 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 765 } else { 766 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 767 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 768 if (!uc->bchan) 769 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 770 } 771 } 772 773 static void udma_reset_counters(struct udma_chan *uc) 774 { 775 u32 val; 776 777 if (uc->tchan) { 778 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 779 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 780 781 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 782 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 783 784 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 785 udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 786 787 if (!uc->bchan) { 788 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 789 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 790 } 791 } 792 793 if (uc->rchan) { 794 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 795 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 796 797 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 798 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 799 800 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG); 801 udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val); 802 803 val = 
udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 804 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 805 } 806 } 807 808 static int udma_reset_chan(struct udma_chan *uc, bool hard) 809 { 810 switch (uc->config.dir) { 811 case DMA_DEV_TO_MEM: 812 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 813 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 814 break; 815 case DMA_MEM_TO_DEV: 816 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 817 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0); 818 break; 819 case DMA_MEM_TO_MEM: 820 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 821 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0); 822 break; 823 default: 824 return -EINVAL; 825 } 826 827 /* Reset all counters */ 828 udma_reset_counters(uc); 829 830 /* Hard reset: re-initialize the channel to reset */ 831 if (hard) { 832 struct udma_chan_config ucc_backup; 833 int ret; 834 835 memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); 836 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); 837 838 /* restore the channel configuration */ 839 memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); 840 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); 841 if (ret) 842 return ret; 843 844 /* 845 * Setting forced teardown after forced reset helps recovering 846 * the rchan. 847 */ 848 if (uc->config.dir == DMA_DEV_TO_MEM) 849 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 850 UDMA_CHAN_RT_CTL_EN | 851 UDMA_CHAN_RT_CTL_TDOWN | 852 UDMA_CHAN_RT_CTL_FTDOWN); 853 } 854 uc->state = UDMA_CHAN_IS_IDLE; 855 856 return 0; 857 } 858 859 static void udma_start_desc(struct udma_chan *uc) 860 { 861 struct udma_chan_config *ucc = &uc->config; 862 863 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode && 864 (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { 865 int i; 866 867 /* 868 * UDMA only: Push all descriptors to ring for packet mode 869 * cyclic or RX 870 * PKTDMA supports pre-linked descriptor and cyclic is not 871 * supported 872 */ 873 for (i = 0; i < uc->desc->sglen; i++) 874 udma_push_to_ring(uc, i); 875 } else { 876 udma_push_to_ring(uc, 0); 877 } 878 } 879 880 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) 881 { 882 /* Only PDMAs have staticTR */ 883 if (uc->config.ep_type == PSIL_EP_NATIVE) 884 return false; 885 886 /* Check if the staticTR configuration has changed for TX */ 887 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) 888 return true; 889 890 return false; 891 } 892 893 static int udma_start(struct udma_chan *uc) 894 { 895 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); 896 897 if (!vd) { 898 uc->desc = NULL; 899 return -ENOENT; 900 } 901 902 list_del(&vd->node); 903 904 uc->desc = to_udma_desc(&vd->tx); 905 906 /* Channel is already running and does not need reconfiguration */ 907 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { 908 udma_start_desc(uc); 909 goto out; 910 } 911 912 /* Make sure that we clear the teardown bit, if it is set */ 913 udma_reset_chan(uc, false); 914 915 /* Push descriptors before we start the channel */ 916 udma_start_desc(uc); 917 918 switch (uc->desc->dir) { 919 case DMA_DEV_TO_MEM: 920 /* Config remote TR */ 921 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { 922 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 923 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 924 const struct udma_match_data *match_data = 925 uc->ud->match_data; 926 927 if (uc->config.enable_acc32) 928 val |= PDMA_STATIC_TR_XY_ACC32; 929 if (uc->config.enable_burst) 930 val |= 
PDMA_STATIC_TR_XY_BURST; 931 932 udma_rchanrt_write(uc, 933 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 934 val); 935 936 udma_rchanrt_write(uc, 937 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG, 938 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, 939 match_data->statictr_z_mask)); 940 941 /* save the current staticTR configuration */ 942 memcpy(&uc->static_tr, &uc->desc->static_tr, 943 sizeof(uc->static_tr)); 944 } 945 946 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 947 UDMA_CHAN_RT_CTL_EN); 948 949 /* Enable remote */ 950 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 951 UDMA_PEER_RT_EN_ENABLE); 952 953 break; 954 case DMA_MEM_TO_DEV: 955 /* Config remote TR */ 956 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { 957 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | 958 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); 959 960 if (uc->config.enable_acc32) 961 val |= PDMA_STATIC_TR_XY_ACC32; 962 if (uc->config.enable_burst) 963 val |= PDMA_STATIC_TR_XY_BURST; 964 965 udma_tchanrt_write(uc, 966 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG, 967 val); 968 969 /* save the current staticTR configuration */ 970 memcpy(&uc->static_tr, &uc->desc->static_tr, 971 sizeof(uc->static_tr)); 972 } 973 974 /* Enable remote */ 975 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 976 UDMA_PEER_RT_EN_ENABLE); 977 978 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 979 UDMA_CHAN_RT_CTL_EN); 980 981 break; 982 case DMA_MEM_TO_MEM: 983 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 984 UDMA_CHAN_RT_CTL_EN); 985 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 986 UDMA_CHAN_RT_CTL_EN); 987 988 break; 989 default: 990 return -EINVAL; 991 } 992 993 uc->state = UDMA_CHAN_IS_ACTIVE; 994 out: 995 996 return 0; 997 } 998 999 static int udma_stop(struct udma_chan *uc) 1000 { 1001 enum udma_chan_state old_state = uc->state; 1002 1003 uc->state = UDMA_CHAN_IS_TERMINATING; 1004 reinit_completion(&uc->teardown_completed); 1005 1006 switch (uc->config.dir) { 1007 case DMA_DEV_TO_MEM: 1008 if (!uc->cyclic && !uc->desc) 1009 udma_push_to_ring(uc, -1); 1010 1011 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 1012 UDMA_PEER_RT_EN_ENABLE | 1013 UDMA_PEER_RT_EN_TEARDOWN); 1014 break; 1015 case DMA_MEM_TO_DEV: 1016 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 1017 UDMA_PEER_RT_EN_ENABLE | 1018 UDMA_PEER_RT_EN_FLUSH); 1019 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 1020 UDMA_CHAN_RT_CTL_EN | 1021 UDMA_CHAN_RT_CTL_TDOWN); 1022 break; 1023 case DMA_MEM_TO_MEM: 1024 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 1025 UDMA_CHAN_RT_CTL_EN | 1026 UDMA_CHAN_RT_CTL_TDOWN); 1027 break; 1028 default: 1029 uc->state = old_state; 1030 complete_all(&uc->teardown_completed); 1031 return -EINVAL; 1032 } 1033 1034 return 0; 1035 } 1036 1037 static void udma_cyclic_packet_elapsed(struct udma_chan *uc) 1038 { 1039 struct udma_desc *d = uc->desc; 1040 struct cppi5_host_desc_t *h_desc; 1041 1042 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; 1043 cppi5_hdesc_reset_to_original(h_desc); 1044 udma_push_to_ring(uc, d->desc_idx); 1045 d->desc_idx = (d->desc_idx + 1) % d->sglen; 1046 } 1047 1048 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) 1049 { 1050 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; 1051 1052 memcpy(d->metadata, h_desc->epib, d->metadata_size); 1053 } 1054 1055 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) 1056 { 1057 u32 peer_bcnt, bcnt; 1058 1059 /* 1060 * Only TX towards PDMA is affected. 
1061 * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer 1062 * completion calculation, consumer must ensure that there is no stale 1063 * data in DMA fabric in this case. 1064 */ 1065 if (uc->config.ep_type == PSIL_EP_NATIVE || 1066 uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT)) 1067 return true; 1068 1069 peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); 1070 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 1071 1072 /* Transfer is incomplete, store current residue and time stamp */ 1073 if (peer_bcnt < bcnt) { 1074 uc->tx_drain.residue = bcnt - peer_bcnt; 1075 uc->tx_drain.tstamp = ktime_get(); 1076 return false; 1077 } 1078 1079 return true; 1080 } 1081 1082 static void udma_check_tx_completion(struct work_struct *work) 1083 { 1084 struct udma_chan *uc = container_of(work, typeof(*uc), 1085 tx_drain.work.work); 1086 bool desc_done = true; 1087 u32 residue_diff; 1088 ktime_t time_diff; 1089 unsigned long delay; 1090 1091 while (1) { 1092 if (uc->desc) { 1093 /* Get previous residue and time stamp */ 1094 residue_diff = uc->tx_drain.residue; 1095 time_diff = uc->tx_drain.tstamp; 1096 /* 1097 * Get current residue and time stamp or see if 1098 * transfer is complete 1099 */ 1100 desc_done = udma_is_desc_really_done(uc, uc->desc); 1101 } 1102 1103 if (!desc_done) { 1104 /* 1105 * Find the time delta and residue delta w.r.t 1106 * previous poll 1107 */ 1108 time_diff = ktime_sub(uc->tx_drain.tstamp, 1109 time_diff) + 1; 1110 residue_diff -= uc->tx_drain.residue; 1111 if (residue_diff) { 1112 /* 1113 * Try to guess when we should check 1114 * next time by calculating rate at 1115 * which data is being drained at the 1116 * peer device 1117 */ 1118 delay = (time_diff / residue_diff) * 1119 uc->tx_drain.residue; 1120 } else { 1121 /* No progress, check again in 1 second */ 1122 schedule_delayed_work(&uc->tx_drain.work, HZ); 1123 break; 1124 } 1125 1126 usleep_range(ktime_to_us(delay), 1127 ktime_to_us(delay) + 10); 1128 continue; 1129 } 1130 1131 if (uc->desc) { 1132 struct udma_desc *d = uc->desc; 1133 1134 udma_decrement_byte_counters(uc, d->residue); 1135 udma_start(uc); 1136 vchan_cookie_complete(&d->vd); 1137 break; 1138 } 1139 1140 break; 1141 } 1142 } 1143 1144 static irqreturn_t udma_ring_irq_handler(int irq, void *data) 1145 { 1146 struct udma_chan *uc = data; 1147 struct udma_desc *d; 1148 dma_addr_t paddr = 0; 1149 1150 if (udma_pop_from_ring(uc, &paddr) || !paddr) 1151 return IRQ_HANDLED; 1152 1153 spin_lock(&uc->vc.lock); 1154 1155 /* Teardown completion message */ 1156 if (cppi5_desc_is_tdcm(paddr)) { 1157 complete_all(&uc->teardown_completed); 1158 1159 if (uc->terminated_desc) { 1160 udma_desc_free(&uc->terminated_desc->vd); 1161 uc->terminated_desc = NULL; 1162 } 1163 1164 if (!uc->desc) 1165 udma_start(uc); 1166 1167 goto out; 1168 } 1169 1170 d = udma_udma_desc_from_paddr(uc, paddr); 1171 1172 if (d) { 1173 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, 1174 d->desc_idx); 1175 if (desc_paddr != paddr) { 1176 dev_err(uc->ud->dev, "not matching descriptors!\n"); 1177 goto out; 1178 } 1179 1180 if (d == uc->desc) { 1181 /* active descriptor */ 1182 if (uc->cyclic) { 1183 udma_cyclic_packet_elapsed(uc); 1184 vchan_cyclic_callback(&d->vd); 1185 } else { 1186 if (udma_is_desc_really_done(uc, d)) { 1187 udma_decrement_byte_counters(uc, d->residue); 1188 udma_start(uc); 1189 vchan_cookie_complete(&d->vd); 1190 } else { 1191 schedule_delayed_work(&uc->tx_drain.work, 1192 0); 1193 } 1194 } 1195 } else { 1196 
/* 1197 * terminated descriptor, mark the descriptor as 1198 * completed to update the channel's cookie marker 1199 */ 1200 dma_cookie_complete(&d->vd.tx); 1201 } 1202 } 1203 out: 1204 spin_unlock(&uc->vc.lock); 1205 1206 return IRQ_HANDLED; 1207 } 1208 1209 static irqreturn_t udma_udma_irq_handler(int irq, void *data) 1210 { 1211 struct udma_chan *uc = data; 1212 struct udma_desc *d; 1213 1214 spin_lock(&uc->vc.lock); 1215 d = uc->desc; 1216 if (d) { 1217 d->tr_idx = (d->tr_idx + 1) % d->sglen; 1218 1219 if (uc->cyclic) { 1220 vchan_cyclic_callback(&d->vd); 1221 } else { 1222 /* TODO: figure out the real amount of data */ 1223 udma_decrement_byte_counters(uc, d->residue); 1224 udma_start(uc); 1225 vchan_cookie_complete(&d->vd); 1226 } 1227 } 1228 1229 spin_unlock(&uc->vc.lock); 1230 1231 return IRQ_HANDLED; 1232 } 1233 1234 /** 1235 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows 1236 * @ud: UDMA device 1237 * @from: Start the search from this flow id number 1238 * @cnt: Number of consecutive flow ids to allocate 1239 * 1240 * Allocate range of RX flow ids for future use, those flows can be requested 1241 * only using explicit flow id number. if @from is set to -1 it will try to find 1242 * first free range. if @from is positive value it will force allocation only 1243 * of the specified range of flows. 1244 * 1245 * Returns -ENOMEM if can't find free range. 1246 * -EEXIST if requested range is busy. 1247 * -EINVAL if wrong input values passed. 1248 * Returns flow id on success. 1249 */ 1250 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) 1251 { 1252 int start, tmp_from; 1253 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS); 1254 1255 tmp_from = from; 1256 if (tmp_from < 0) 1257 tmp_from = ud->rchan_cnt; 1258 /* default flows can't be allocated and accessible only by id */ 1259 if (tmp_from < ud->rchan_cnt) 1260 return -EINVAL; 1261 1262 if (tmp_from + cnt > ud->rflow_cnt) 1263 return -EINVAL; 1264 1265 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated, 1266 ud->rflow_cnt); 1267 1268 start = bitmap_find_next_zero_area(tmp, 1269 ud->rflow_cnt, 1270 tmp_from, cnt, 0); 1271 if (start >= ud->rflow_cnt) 1272 return -ENOMEM; 1273 1274 if (from >= 0 && start != from) 1275 return -EEXIST; 1276 1277 bitmap_set(ud->rflow_gp_map_allocated, start, cnt); 1278 return start; 1279 } 1280 1281 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) 1282 { 1283 if (from < ud->rchan_cnt) 1284 return -EINVAL; 1285 if (from + cnt > ud->rflow_cnt) 1286 return -EINVAL; 1287 1288 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt); 1289 return 0; 1290 } 1291 1292 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id) 1293 { 1294 /* 1295 * Attempt to request rflow by ID can be made for any rflow 1296 * if not in use with assumption that caller knows what's doing. 
1297 * TI-SCI FW will perform additional permission check ant way, it's 1298 * safe 1299 */ 1300 1301 if (id < 0 || id >= ud->rflow_cnt) 1302 return ERR_PTR(-ENOENT); 1303 1304 if (test_bit(id, ud->rflow_in_use)) 1305 return ERR_PTR(-ENOENT); 1306 1307 if (ud->rflow_gp_map) { 1308 /* GP rflow has to be allocated first */ 1309 if (!test_bit(id, ud->rflow_gp_map) && 1310 !test_bit(id, ud->rflow_gp_map_allocated)) 1311 return ERR_PTR(-EINVAL); 1312 } 1313 1314 dev_dbg(ud->dev, "get rflow%d\n", id); 1315 set_bit(id, ud->rflow_in_use); 1316 return &ud->rflows[id]; 1317 } 1318 1319 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow) 1320 { 1321 if (!test_bit(rflow->id, ud->rflow_in_use)) { 1322 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id); 1323 return; 1324 } 1325 1326 dev_dbg(ud->dev, "put rflow%d\n", rflow->id); 1327 clear_bit(rflow->id, ud->rflow_in_use); 1328 } 1329 1330 #define UDMA_RESERVE_RESOURCE(res) \ 1331 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \ 1332 enum udma_tp_level tpl, \ 1333 int id) \ 1334 { \ 1335 if (id >= 0) { \ 1336 if (test_bit(id, ud->res##_map)) { \ 1337 dev_err(ud->dev, "res##%d is in use\n", id); \ 1338 return ERR_PTR(-ENOENT); \ 1339 } \ 1340 } else { \ 1341 int start; \ 1342 \ 1343 if (tpl >= ud->res##_tpl.levels) \ 1344 tpl = ud->res##_tpl.levels - 1; \ 1345 \ 1346 start = ud->res##_tpl.start_idx[tpl]; \ 1347 \ 1348 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \ 1349 start); \ 1350 if (id == ud->res##_cnt) { \ 1351 return ERR_PTR(-ENOENT); \ 1352 } \ 1353 } \ 1354 \ 1355 set_bit(id, ud->res##_map); \ 1356 return &ud->res##s[id]; \ 1357 } 1358 1359 UDMA_RESERVE_RESOURCE(bchan); 1360 UDMA_RESERVE_RESOURCE(tchan); 1361 UDMA_RESERVE_RESOURCE(rchan); 1362 1363 static int bcdma_get_bchan(struct udma_chan *uc) 1364 { 1365 struct udma_dev *ud = uc->ud; 1366 enum udma_tp_level tpl; 1367 int ret; 1368 1369 if (uc->bchan) { 1370 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", 1371 uc->id, uc->bchan->id); 1372 return 0; 1373 } 1374 1375 /* 1376 * Use normal channels for peripherals, and highest TPL channel for 1377 * mem2mem 1378 */ 1379 if (uc->config.tr_trigger_type) 1380 tpl = 0; 1381 else 1382 tpl = ud->bchan_tpl.levels - 1; 1383 1384 uc->bchan = __udma_reserve_bchan(ud, tpl, -1); 1385 if (IS_ERR(uc->bchan)) { 1386 ret = PTR_ERR(uc->bchan); 1387 uc->bchan = NULL; 1388 return ret; 1389 } 1390 1391 uc->tchan = uc->bchan; 1392 1393 return 0; 1394 } 1395 1396 static int udma_get_tchan(struct udma_chan *uc) 1397 { 1398 struct udma_dev *ud = uc->ud; 1399 int ret; 1400 1401 if (uc->tchan) { 1402 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", 1403 uc->id, uc->tchan->id); 1404 return 0; 1405 } 1406 1407 /* 1408 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. 1409 * For PKTDMA mapped channels it is configured to a channel which must 1410 * be used to service the peripheral. 
1411 */ 1412 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, 1413 uc->config.mapped_channel_id); 1414 if (IS_ERR(uc->tchan)) { 1415 ret = PTR_ERR(uc->tchan); 1416 uc->tchan = NULL; 1417 return ret; 1418 } 1419 1420 if (ud->tflow_cnt) { 1421 int tflow_id; 1422 1423 /* Only PKTDMA have support for tx flows */ 1424 if (uc->config.default_flow_id >= 0) 1425 tflow_id = uc->config.default_flow_id; 1426 else 1427 tflow_id = uc->tchan->id; 1428 1429 if (test_bit(tflow_id, ud->tflow_map)) { 1430 dev_err(ud->dev, "tflow%d is in use\n", tflow_id); 1431 clear_bit(uc->tchan->id, ud->tchan_map); 1432 uc->tchan = NULL; 1433 return -ENOENT; 1434 } 1435 1436 uc->tchan->tflow_id = tflow_id; 1437 set_bit(tflow_id, ud->tflow_map); 1438 } else { 1439 uc->tchan->tflow_id = -1; 1440 } 1441 1442 return 0; 1443 } 1444 1445 static int udma_get_rchan(struct udma_chan *uc) 1446 { 1447 struct udma_dev *ud = uc->ud; 1448 int ret; 1449 1450 if (uc->rchan) { 1451 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", 1452 uc->id, uc->rchan->id); 1453 return 0; 1454 } 1455 1456 /* 1457 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. 1458 * For PKTDMA mapped channels it is configured to a channel which must 1459 * be used to service the peripheral. 1460 */ 1461 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, 1462 uc->config.mapped_channel_id); 1463 if (IS_ERR(uc->rchan)) { 1464 ret = PTR_ERR(uc->rchan); 1465 uc->rchan = NULL; 1466 return ret; 1467 } 1468 1469 return 0; 1470 } 1471 1472 static int udma_get_chan_pair(struct udma_chan *uc) 1473 { 1474 struct udma_dev *ud = uc->ud; 1475 int chan_id, end; 1476 1477 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { 1478 dev_info(ud->dev, "chan%d: already have %d pair allocated\n", 1479 uc->id, uc->tchan->id); 1480 return 0; 1481 } 1482 1483 if (uc->tchan) { 1484 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", 1485 uc->id, uc->tchan->id); 1486 return -EBUSY; 1487 } else if (uc->rchan) { 1488 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", 1489 uc->id, uc->rchan->id); 1490 return -EBUSY; 1491 } 1492 1493 /* Can be optimized, but let's have it like this for now */ 1494 end = min(ud->tchan_cnt, ud->rchan_cnt); 1495 /* 1496 * Try to use the highest TPL channel pair for MEM_TO_MEM channels 1497 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan 1498 */ 1499 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1]; 1500 for (; chan_id < end; chan_id++) { 1501 if (!test_bit(chan_id, ud->tchan_map) && 1502 !test_bit(chan_id, ud->rchan_map)) 1503 break; 1504 } 1505 1506 if (chan_id == end) 1507 return -ENOENT; 1508 1509 set_bit(chan_id, ud->tchan_map); 1510 set_bit(chan_id, ud->rchan_map); 1511 uc->tchan = &ud->tchans[chan_id]; 1512 uc->rchan = &ud->rchans[chan_id]; 1513 1514 /* UDMA does not use tx flows */ 1515 uc->tchan->tflow_id = -1; 1516 1517 return 0; 1518 } 1519 1520 static int udma_get_rflow(struct udma_chan *uc, int flow_id) 1521 { 1522 struct udma_dev *ud = uc->ud; 1523 int ret; 1524 1525 if (!uc->rchan) { 1526 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); 1527 return -EINVAL; 1528 } 1529 1530 if (uc->rflow) { 1531 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", 1532 uc->id, uc->rflow->id); 1533 return 0; 1534 } 1535 1536 uc->rflow = __udma_get_rflow(ud, flow_id); 1537 if (IS_ERR(uc->rflow)) { 1538 ret = PTR_ERR(uc->rflow); 1539 uc->rflow = NULL; 1540 return ret; 1541 } 1542 1543 return 0; 1544 } 1545 1546 static void 
bcdma_put_bchan(struct udma_chan *uc) 1547 { 1548 struct udma_dev *ud = uc->ud; 1549 1550 if (uc->bchan) { 1551 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id, 1552 uc->bchan->id); 1553 clear_bit(uc->bchan->id, ud->bchan_map); 1554 uc->bchan = NULL; 1555 uc->tchan = NULL; 1556 } 1557 } 1558 1559 static void udma_put_rchan(struct udma_chan *uc) 1560 { 1561 struct udma_dev *ud = uc->ud; 1562 1563 if (uc->rchan) { 1564 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, 1565 uc->rchan->id); 1566 clear_bit(uc->rchan->id, ud->rchan_map); 1567 uc->rchan = NULL; 1568 } 1569 } 1570 1571 static void udma_put_tchan(struct udma_chan *uc) 1572 { 1573 struct udma_dev *ud = uc->ud; 1574 1575 if (uc->tchan) { 1576 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, 1577 uc->tchan->id); 1578 clear_bit(uc->tchan->id, ud->tchan_map); 1579 1580 if (uc->tchan->tflow_id >= 0) 1581 clear_bit(uc->tchan->tflow_id, ud->tflow_map); 1582 1583 uc->tchan = NULL; 1584 } 1585 } 1586 1587 static void udma_put_rflow(struct udma_chan *uc) 1588 { 1589 struct udma_dev *ud = uc->ud; 1590 1591 if (uc->rflow) { 1592 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, 1593 uc->rflow->id); 1594 __udma_put_rflow(ud, uc->rflow); 1595 uc->rflow = NULL; 1596 } 1597 } 1598 1599 static void bcdma_free_bchan_resources(struct udma_chan *uc) 1600 { 1601 if (!uc->bchan) 1602 return; 1603 1604 k3_ringacc_ring_free(uc->bchan->tc_ring); 1605 k3_ringacc_ring_free(uc->bchan->t_ring); 1606 uc->bchan->tc_ring = NULL; 1607 uc->bchan->t_ring = NULL; 1608 k3_configure_chan_coherency(&uc->vc.chan, 0); 1609 1610 bcdma_put_bchan(uc); 1611 } 1612 1613 static int bcdma_alloc_bchan_resources(struct udma_chan *uc) 1614 { 1615 struct k3_ring_cfg ring_cfg; 1616 struct udma_dev *ud = uc->ud; 1617 int ret; 1618 1619 ret = bcdma_get_bchan(uc); 1620 if (ret) 1621 return ret; 1622 1623 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1, 1624 &uc->bchan->t_ring, 1625 &uc->bchan->tc_ring); 1626 if (ret) { 1627 ret = -EBUSY; 1628 goto err_ring; 1629 } 1630 1631 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1632 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1633 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1634 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1635 1636 k3_configure_chan_coherency(&uc->vc.chan, ud->asel); 1637 ring_cfg.asel = ud->asel; 1638 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1639 1640 ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg); 1641 if (ret) 1642 goto err_ringcfg; 1643 1644 return 0; 1645 1646 err_ringcfg: 1647 k3_ringacc_ring_free(uc->bchan->tc_ring); 1648 uc->bchan->tc_ring = NULL; 1649 k3_ringacc_ring_free(uc->bchan->t_ring); 1650 uc->bchan->t_ring = NULL; 1651 k3_configure_chan_coherency(&uc->vc.chan, 0); 1652 err_ring: 1653 bcdma_put_bchan(uc); 1654 1655 return ret; 1656 } 1657 1658 static void udma_free_tx_resources(struct udma_chan *uc) 1659 { 1660 if (!uc->tchan) 1661 return; 1662 1663 k3_ringacc_ring_free(uc->tchan->t_ring); 1664 k3_ringacc_ring_free(uc->tchan->tc_ring); 1665 uc->tchan->t_ring = NULL; 1666 uc->tchan->tc_ring = NULL; 1667 1668 udma_put_tchan(uc); 1669 } 1670 1671 static int udma_alloc_tx_resources(struct udma_chan *uc) 1672 { 1673 struct k3_ring_cfg ring_cfg; 1674 struct udma_dev *ud = uc->ud; 1675 struct udma_tchan *tchan; 1676 int ring_idx, ret; 1677 1678 ret = udma_get_tchan(uc); 1679 if (ret) 1680 return ret; 1681 1682 tchan = uc->tchan; 1683 if (tchan->tflow_id >= 0) 1684 ring_idx = tchan->tflow_id; 1685 else 1686 ring_idx = ud->bchan_cnt + tchan->id; 1687 1688 ret = 
k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1, 1689 &tchan->t_ring, 1690 &tchan->tc_ring); 1691 if (ret) { 1692 ret = -EBUSY; 1693 goto err_ring; 1694 } 1695 1696 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1697 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1698 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1699 if (ud->match_data->type == DMA_TYPE_UDMA) { 1700 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1701 } else { 1702 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1703 1704 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); 1705 ring_cfg.asel = uc->config.asel; 1706 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1707 } 1708 1709 ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg); 1710 ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg); 1711 1712 if (ret) 1713 goto err_ringcfg; 1714 1715 return 0; 1716 1717 err_ringcfg: 1718 k3_ringacc_ring_free(uc->tchan->tc_ring); 1719 uc->tchan->tc_ring = NULL; 1720 k3_ringacc_ring_free(uc->tchan->t_ring); 1721 uc->tchan->t_ring = NULL; 1722 err_ring: 1723 udma_put_tchan(uc); 1724 1725 return ret; 1726 } 1727 1728 static void udma_free_rx_resources(struct udma_chan *uc) 1729 { 1730 if (!uc->rchan) 1731 return; 1732 1733 if (uc->rflow) { 1734 struct udma_rflow *rflow = uc->rflow; 1735 1736 k3_ringacc_ring_free(rflow->fd_ring); 1737 k3_ringacc_ring_free(rflow->r_ring); 1738 rflow->fd_ring = NULL; 1739 rflow->r_ring = NULL; 1740 1741 udma_put_rflow(uc); 1742 } 1743 1744 udma_put_rchan(uc); 1745 } 1746 1747 static int udma_alloc_rx_resources(struct udma_chan *uc) 1748 { 1749 struct udma_dev *ud = uc->ud; 1750 struct k3_ring_cfg ring_cfg; 1751 struct udma_rflow *rflow; 1752 int fd_ring_id; 1753 int ret; 1754 1755 ret = udma_get_rchan(uc); 1756 if (ret) 1757 return ret; 1758 1759 /* For MEM_TO_MEM we don't need rflow or rings */ 1760 if (uc->config.dir == DMA_MEM_TO_MEM) 1761 return 0; 1762 1763 if (uc->config.default_flow_id >= 0) 1764 ret = udma_get_rflow(uc, uc->config.default_flow_id); 1765 else 1766 ret = udma_get_rflow(uc, uc->rchan->id); 1767 1768 if (ret) { 1769 ret = -EBUSY; 1770 goto err_rflow; 1771 } 1772 1773 rflow = uc->rflow; 1774 if (ud->tflow_cnt) 1775 fd_ring_id = ud->tflow_cnt + rflow->id; 1776 else 1777 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt + 1778 uc->rchan->id; 1779 1780 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1, 1781 &rflow->fd_ring, &rflow->r_ring); 1782 if (ret) { 1783 ret = -EBUSY; 1784 goto err_ring; 1785 } 1786 1787 memset(&ring_cfg, 0, sizeof(ring_cfg)); 1788 1789 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; 1790 if (ud->match_data->type == DMA_TYPE_UDMA) { 1791 if (uc->config.pkt_mode) 1792 ring_cfg.size = SG_MAX_SEGMENTS; 1793 else 1794 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1795 1796 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; 1797 } else { 1798 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1799 ring_cfg.mode = K3_RINGACC_RING_MODE_RING; 1800 1801 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); 1802 ring_cfg.asel = uc->config.asel; 1803 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); 1804 } 1805 1806 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); 1807 1808 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; 1809 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); 1810 1811 if (ret) 1812 goto err_ringcfg; 1813 1814 return 0; 1815 1816 err_ringcfg: 1817 k3_ringacc_ring_free(rflow->r_ring); 1818 rflow->r_ring = NULL; 1819 k3_ringacc_ring_free(rflow->fd_ring); 1820 rflow->fd_ring = NULL; 1821 err_ring: 1822 udma_put_rflow(uc); 1823 
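	/* err_rflow: the rflow was never reserved, only the rchan needs to be released */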
err_rflow: 1824 udma_put_rchan(uc); 1825 1826 return ret; 1827 } 1828 1829 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \ 1830 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1831 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID) 1832 1833 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \ 1834 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1835 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID) 1836 1837 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \ 1838 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID) 1839 1840 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \ 1841 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1842 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ 1843 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ 1844 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1845 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ 1846 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1847 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1848 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1849 1850 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \ 1851 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ 1852 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ 1853 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ 1854 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ 1855 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ 1856 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ 1857 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ 1858 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \ 1859 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID) 1860 1861 static int udma_tisci_m2m_channel_config(struct udma_chan *uc) 1862 { 1863 struct udma_dev *ud = uc->ud; 1864 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1865 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1866 struct udma_tchan *tchan = uc->tchan; 1867 struct udma_rchan *rchan = uc->rchan; 1868 u8 burst_size = 0; 1869 int ret; 1870 u8 tpl; 1871 1872 /* Non synchronized - mem to mem type of transfer */ 1873 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1874 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1875 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 1876 1877 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { 1878 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id); 1879 1880 burst_size = ud->match_data->burst_size[tpl]; 1881 } 1882 1883 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; 1884 req_tx.nav_id = tisci_rm->tisci_dev_id; 1885 req_tx.index = tchan->id; 1886 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1887 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1888 req_tx.txcq_qnum = tc_ring; 1889 req_tx.tx_atype = ud->atype; 1890 if (burst_size) { 1891 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1892 req_tx.tx_burst_size = burst_size; 1893 } 1894 1895 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1896 if (ret) { 1897 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 1898 return ret; 1899 } 1900 1901 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; 1902 req_rx.nav_id = tisci_rm->tisci_dev_id; 1903 req_rx.index = rchan->id; 1904 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; 1905 req_rx.rxcq_qnum = tc_ring; 1906 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; 1907 req_rx.rx_atype = ud->atype; 1908 if (burst_size) { 1909 req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1910 req_rx.rx_burst_size = burst_size; 1911 } 1912 1913 ret = 
tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 1914 if (ret) 1915 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); 1916 1917 return ret; 1918 } 1919 1920 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc) 1921 { 1922 struct udma_dev *ud = uc->ud; 1923 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1924 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1925 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1926 struct udma_bchan *bchan = uc->bchan; 1927 u8 burst_size = 0; 1928 int ret; 1929 u8 tpl; 1930 1931 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { 1932 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id); 1933 1934 burst_size = ud->match_data->burst_size[tpl]; 1935 } 1936 1937 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS; 1938 req_tx.nav_id = tisci_rm->tisci_dev_id; 1939 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN; 1940 req_tx.index = bchan->id; 1941 if (burst_size) { 1942 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID; 1943 req_tx.tx_burst_size = burst_size; 1944 } 1945 1946 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1947 if (ret) 1948 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret); 1949 1950 return ret; 1951 } 1952 1953 static int udma_tisci_tx_channel_config(struct udma_chan *uc) 1954 { 1955 struct udma_dev *ud = uc->ud; 1956 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 1957 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 1958 struct udma_tchan *tchan = uc->tchan; 1959 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); 1960 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 1961 u32 mode, fetch_size; 1962 int ret; 1963 1964 if (uc->config.pkt_mode) { 1965 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 1966 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 1967 uc->config.psd_size, 0); 1968 } else { 1969 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 1970 fetch_size = sizeof(struct cppi5_desc_hdr_t); 1971 } 1972 1973 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS; 1974 req_tx.nav_id = tisci_rm->tisci_dev_id; 1975 req_tx.index = tchan->id; 1976 req_tx.tx_chan_type = mode; 1977 req_tx.tx_supr_tdpkt = uc->config.notdpkt; 1978 req_tx.tx_fetch_size = fetch_size >> 2; 1979 req_tx.txcq_qnum = tc_ring; 1980 req_tx.tx_atype = uc->config.atype; 1981 if (uc->config.ep_type == PSIL_EP_PDMA_XY && 1982 ud->match_data->flags & UDMA_FLAG_TDTYPE) { 1983 /* wait for peer to complete the teardown for PDMAs */ 1984 req_tx.valid_params |= 1985 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; 1986 req_tx.tx_tdtype = 1; 1987 } 1988 1989 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 1990 if (ret) 1991 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 1992 1993 return ret; 1994 } 1995 1996 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc) 1997 { 1998 struct udma_dev *ud = uc->ud; 1999 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2000 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2001 struct udma_tchan *tchan = uc->tchan; 2002 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; 2003 int ret; 2004 2005 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS; 2006 req_tx.nav_id = tisci_rm->tisci_dev_id; 2007 req_tx.index = tchan->id; 2008 req_tx.tx_supr_tdpkt = uc->config.notdpkt; 2009 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) { 2010 /* wait for peer to complete the teardown for PDMAs */ 2011 req_tx.valid_params |= 2012 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID; 2013 
req_tx.tx_tdtype = 1; 2014 } 2015 2016 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); 2017 if (ret) 2018 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); 2019 2020 return ret; 2021 } 2022 2023 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config 2024 2025 static int udma_tisci_rx_channel_config(struct udma_chan *uc) 2026 { 2027 struct udma_dev *ud = uc->ud; 2028 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2029 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2030 struct udma_rchan *rchan = uc->rchan; 2031 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); 2032 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2033 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2034 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 2035 u32 mode, fetch_size; 2036 int ret; 2037 2038 if (uc->config.pkt_mode) { 2039 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; 2040 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, 2041 uc->config.psd_size, 0); 2042 } else { 2043 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; 2044 fetch_size = sizeof(struct cppi5_desc_hdr_t); 2045 } 2046 2047 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS; 2048 req_rx.nav_id = tisci_rm->tisci_dev_id; 2049 req_rx.index = rchan->id; 2050 req_rx.rx_fetch_size = fetch_size >> 2; 2051 req_rx.rxcq_qnum = rx_ring; 2052 req_rx.rx_chan_type = mode; 2053 req_rx.rx_atype = uc->config.atype; 2054 2055 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2056 if (ret) { 2057 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 2058 return ret; 2059 } 2060 2061 flow_req.valid_params = 2062 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 2063 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 2064 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | 2065 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | 2066 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | 2067 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | 2068 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | 2069 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | 2070 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | 2071 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | 2072 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | 2073 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | 2074 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; 2075 2076 flow_req.nav_id = tisci_rm->tisci_dev_id; 2077 flow_req.flow_index = rchan->id; 2078 2079 if (uc->config.needs_epib) 2080 flow_req.rx_einfo_present = 1; 2081 else 2082 flow_req.rx_einfo_present = 0; 2083 if (uc->config.psd_size) 2084 flow_req.rx_psinfo_present = 1; 2085 else 2086 flow_req.rx_psinfo_present = 0; 2087 flow_req.rx_error_handling = 1; 2088 flow_req.rx_dest_qnum = rx_ring; 2089 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE; 2090 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG; 2091 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI; 2092 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO; 2093 flow_req.rx_fdq0_sz0_qnum = fd_ring; 2094 flow_req.rx_fdq1_qnum = fd_ring; 2095 flow_req.rx_fdq2_qnum = fd_ring; 2096 flow_req.rx_fdq3_qnum = fd_ring; 2097 2098 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 2099 2100 if (ret) 2101 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); 2102 2103 return 0; 2104 } 2105 2106 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc) 2107 { 2108 struct udma_dev *ud = uc->ud; 2109 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2110 const 
struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2111 struct udma_rchan *rchan = uc->rchan; 2112 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2113 int ret; 2114 2115 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2116 req_rx.nav_id = tisci_rm->tisci_dev_id; 2117 req_rx.index = rchan->id; 2118 2119 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2120 if (ret) 2121 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); 2122 2123 return ret; 2124 } 2125 2126 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc) 2127 { 2128 struct udma_dev *ud = uc->ud; 2129 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 2130 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; 2131 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; 2132 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; 2133 int ret; 2134 2135 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS; 2136 req_rx.nav_id = tisci_rm->tisci_dev_id; 2137 req_rx.index = uc->rchan->id; 2138 2139 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); 2140 if (ret) { 2141 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret); 2142 return ret; 2143 } 2144 2145 flow_req.valid_params = 2146 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | 2147 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | 2148 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID; 2149 2150 flow_req.nav_id = tisci_rm->tisci_dev_id; 2151 flow_req.flow_index = uc->rflow->id; 2152 2153 if (uc->config.needs_epib) 2154 flow_req.rx_einfo_present = 1; 2155 else 2156 flow_req.rx_einfo_present = 0; 2157 if (uc->config.psd_size) 2158 flow_req.rx_psinfo_present = 1; 2159 else 2160 flow_req.rx_psinfo_present = 0; 2161 flow_req.rx_error_handling = 1; 2162 2163 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); 2164 2165 if (ret) 2166 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id, 2167 ret); 2168 2169 return ret; 2170 } 2171 2172 static int udma_alloc_chan_resources(struct dma_chan *chan) 2173 { 2174 struct udma_chan *uc = to_udma_chan(chan); 2175 struct udma_dev *ud = to_udma_dev(chan->device); 2176 const struct udma_soc_data *soc_data = ud->soc_data; 2177 struct k3_ring *irq_ring; 2178 u32 irq_udma_idx; 2179 int ret; 2180 2181 uc->dma_dev = ud->dev; 2182 2183 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { 2184 uc->use_dma_pool = true; 2185 /* in case of MEM_TO_MEM we have maximum of two TRs */ 2186 if (uc->config.dir == DMA_MEM_TO_MEM) { 2187 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2188 sizeof(struct cppi5_tr_type15_t), 2); 2189 uc->config.pkt_mode = false; 2190 } 2191 } 2192 2193 if (uc->use_dma_pool) { 2194 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2195 uc->config.hdesc_size, 2196 ud->desc_align, 2197 0); 2198 if (!uc->hdesc_pool) { 2199 dev_err(ud->ddev.dev, 2200 "Descriptor pool allocation failed\n"); 2201 uc->use_dma_pool = false; 2202 ret = -ENOMEM; 2203 goto err_cleanup; 2204 } 2205 } 2206 2207 /* 2208 * Make sure that the completion is in a known state: 2209 * No teardown, the channel is idle 2210 */ 2211 reinit_completion(&uc->teardown_completed); 2212 complete_all(&uc->teardown_completed); 2213 uc->state = UDMA_CHAN_IS_IDLE; 2214 2215 switch (uc->config.dir) { 2216 case DMA_MEM_TO_MEM: 2217 /* Non synchronized - mem to mem type of transfer */ 2218 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2219 uc->id); 2220 2221 ret = udma_get_chan_pair(uc); 2222 if (ret) 2223 goto err_cleanup; 2224 2225 ret = 
udma_alloc_tx_resources(uc); 2226 if (ret) { 2227 udma_put_rchan(uc); 2228 goto err_cleanup; 2229 } 2230 2231 ret = udma_alloc_rx_resources(uc); 2232 if (ret) { 2233 udma_free_tx_resources(uc); 2234 goto err_cleanup; 2235 } 2236 2237 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2238 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2239 K3_PSIL_DST_THREAD_ID_OFFSET; 2240 2241 irq_ring = uc->tchan->tc_ring; 2242 irq_udma_idx = uc->tchan->id; 2243 2244 ret = udma_tisci_m2m_channel_config(uc); 2245 break; 2246 case DMA_MEM_TO_DEV: 2247 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2248 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2249 uc->id); 2250 2251 ret = udma_alloc_tx_resources(uc); 2252 if (ret) 2253 goto err_cleanup; 2254 2255 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2256 uc->config.dst_thread = uc->config.remote_thread_id; 2257 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2258 2259 irq_ring = uc->tchan->tc_ring; 2260 irq_udma_idx = uc->tchan->id; 2261 2262 ret = udma_tisci_tx_channel_config(uc); 2263 break; 2264 case DMA_DEV_TO_MEM: 2265 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2266 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2267 uc->id); 2268 2269 ret = udma_alloc_rx_resources(uc); 2270 if (ret) 2271 goto err_cleanup; 2272 2273 uc->config.src_thread = uc->config.remote_thread_id; 2274 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2275 K3_PSIL_DST_THREAD_ID_OFFSET; 2276 2277 irq_ring = uc->rflow->r_ring; 2278 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id; 2279 2280 ret = udma_tisci_rx_channel_config(uc); 2281 break; 2282 default: 2283 /* Can not happen */ 2284 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2285 __func__, uc->id, uc->config.dir); 2286 ret = -EINVAL; 2287 goto err_cleanup; 2288 2289 } 2290 2291 /* check if the channel configuration was successful */ 2292 if (ret) 2293 goto err_res_free; 2294 2295 if (udma_is_chan_running(uc)) { 2296 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2297 udma_reset_chan(uc, false); 2298 if (udma_is_chan_running(uc)) { 2299 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2300 ret = -EBUSY; 2301 goto err_res_free; 2302 } 2303 } 2304 2305 /* PSI-L pairing */ 2306 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); 2307 if (ret) { 2308 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2309 uc->config.src_thread, uc->config.dst_thread); 2310 goto err_res_free; 2311 } 2312 2313 uc->psil_paired = true; 2314 2315 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); 2316 if (uc->irq_num_ring <= 0) { 2317 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2318 k3_ringacc_get_ring_id(irq_ring)); 2319 ret = -EINVAL; 2320 goto err_psi_free; 2321 } 2322 2323 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2324 IRQF_TRIGGER_HIGH, uc->name, uc); 2325 if (ret) { 2326 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2327 goto err_irq_free; 2328 } 2329 2330 /* Event from UDMA (TR events) only needed for slave TR mode channels */ 2331 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { 2332 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); 2333 if (uc->irq_num_udma <= 0) { 2334 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", 2335 irq_udma_idx); 2336 free_irq(uc->irq_num_ring, uc); 2337 ret = -EINVAL; 2338 goto err_irq_free; 2339 } 2340 2341 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2342 
uc->name, uc); 2343 if (ret) { 2344 dev_err(ud->dev, "chan%d: UDMA irq request failed\n", 2345 uc->id); 2346 free_irq(uc->irq_num_ring, uc); 2347 goto err_irq_free; 2348 } 2349 } else { 2350 uc->irq_num_udma = 0; 2351 } 2352 2353 udma_reset_rings(uc); 2354 2355 return 0; 2356 2357 err_irq_free: 2358 uc->irq_num_ring = 0; 2359 uc->irq_num_udma = 0; 2360 err_psi_free: 2361 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); 2362 uc->psil_paired = false; 2363 err_res_free: 2364 udma_free_tx_resources(uc); 2365 udma_free_rx_resources(uc); 2366 err_cleanup: 2367 udma_reset_uchan(uc); 2368 2369 if (uc->use_dma_pool) { 2370 dma_pool_destroy(uc->hdesc_pool); 2371 uc->use_dma_pool = false; 2372 } 2373 2374 return ret; 2375 } 2376 2377 static int bcdma_alloc_chan_resources(struct dma_chan *chan) 2378 { 2379 struct udma_chan *uc = to_udma_chan(chan); 2380 struct udma_dev *ud = to_udma_dev(chan->device); 2381 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 2382 u32 irq_udma_idx, irq_ring_idx; 2383 int ret; 2384 2385 /* Only TR mode is supported */ 2386 uc->config.pkt_mode = false; 2387 2388 /* 2389 * Make sure that the completion is in a known state: 2390 * No teardown, the channel is idle 2391 */ 2392 reinit_completion(&uc->teardown_completed); 2393 complete_all(&uc->teardown_completed); 2394 uc->state = UDMA_CHAN_IS_IDLE; 2395 2396 switch (uc->config.dir) { 2397 case DMA_MEM_TO_MEM: 2398 /* Non synchronized - mem to mem type of transfer */ 2399 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, 2400 uc->id); 2401 2402 ret = bcdma_alloc_bchan_resources(uc); 2403 if (ret) 2404 return ret; 2405 2406 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring; 2407 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data; 2408 2409 ret = bcdma_tisci_m2m_channel_config(uc); 2410 break; 2411 case DMA_MEM_TO_DEV: 2412 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2413 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2414 uc->id); 2415 2416 ret = udma_alloc_tx_resources(uc); 2417 if (ret) { 2418 uc->config.remote_thread_id = -1; 2419 return ret; 2420 } 2421 2422 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2423 uc->config.dst_thread = uc->config.remote_thread_id; 2424 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2425 2426 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring; 2427 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data; 2428 2429 ret = bcdma_tisci_tx_channel_config(uc); 2430 break; 2431 case DMA_DEV_TO_MEM: 2432 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2433 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2434 uc->id); 2435 2436 ret = udma_alloc_rx_resources(uc); 2437 if (ret) { 2438 uc->config.remote_thread_id = -1; 2439 return ret; 2440 } 2441 2442 uc->config.src_thread = uc->config.remote_thread_id; 2443 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2444 K3_PSIL_DST_THREAD_ID_OFFSET; 2445 2446 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring; 2447 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data; 2448 2449 ret = bcdma_tisci_rx_channel_config(uc); 2450 break; 2451 default: 2452 /* Can not happen */ 2453 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2454 __func__, uc->id, uc->config.dir); 2455 return -EINVAL; 2456 } 2457 2458 /* check if the channel configuration was successful */ 2459 if (ret) 2460 goto err_res_free; 2461 2462 if (udma_is_chan_running(uc)) { 2463 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2464 udma_reset_chan(uc, false); 2465 if 
(udma_is_chan_running(uc)) { 2466 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2467 ret = -EBUSY; 2468 goto err_res_free; 2469 } 2470 } 2471 2472 uc->dma_dev = dmaengine_get_dma_device(chan); 2473 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) { 2474 uc->config.hdesc_size = cppi5_trdesc_calc_size( 2475 sizeof(struct cppi5_tr_type15_t), 2); 2476 2477 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, 2478 uc->config.hdesc_size, 2479 ud->desc_align, 2480 0); 2481 if (!uc->hdesc_pool) { 2482 dev_err(ud->ddev.dev, 2483 "Descriptor pool allocation failed\n"); 2484 uc->use_dma_pool = false; 2485 ret = -ENOMEM; 2486 goto err_res_free; 2487 } 2488 2489 uc->use_dma_pool = true; 2490 } else if (uc->config.dir != DMA_MEM_TO_MEM) { 2491 /* PSI-L pairing */ 2492 ret = navss_psil_pair(ud, uc->config.src_thread, 2493 uc->config.dst_thread); 2494 if (ret) { 2495 dev_err(ud->dev, 2496 "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2497 uc->config.src_thread, uc->config.dst_thread); 2498 goto err_res_free; 2499 } 2500 2501 uc->psil_paired = true; 2502 } 2503 2504 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); 2505 if (uc->irq_num_ring <= 0) { 2506 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2507 irq_ring_idx); 2508 ret = -EINVAL; 2509 goto err_psi_free; 2510 } 2511 2512 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2513 IRQF_TRIGGER_HIGH, uc->name, uc); 2514 if (ret) { 2515 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2516 goto err_irq_free; 2517 } 2518 2519 /* Event from BCDMA (TR events) only needed for slave channels */ 2520 if (is_slave_direction(uc->config.dir)) { 2521 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); 2522 if (uc->irq_num_udma <= 0) { 2523 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n", 2524 irq_udma_idx); 2525 free_irq(uc->irq_num_ring, uc); 2526 ret = -EINVAL; 2527 goto err_irq_free; 2528 } 2529 2530 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, 2531 uc->name, uc); 2532 if (ret) { 2533 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n", 2534 uc->id); 2535 free_irq(uc->irq_num_ring, uc); 2536 goto err_irq_free; 2537 } 2538 } else { 2539 uc->irq_num_udma = 0; 2540 } 2541 2542 udma_reset_rings(uc); 2543 2544 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, 2545 udma_check_tx_completion); 2546 return 0; 2547 2548 err_irq_free: 2549 uc->irq_num_ring = 0; 2550 uc->irq_num_udma = 0; 2551 err_psi_free: 2552 if (uc->psil_paired) 2553 navss_psil_unpair(ud, uc->config.src_thread, 2554 uc->config.dst_thread); 2555 uc->psil_paired = false; 2556 err_res_free: 2557 bcdma_free_bchan_resources(uc); 2558 udma_free_tx_resources(uc); 2559 udma_free_rx_resources(uc); 2560 2561 udma_reset_uchan(uc); 2562 2563 if (uc->use_dma_pool) { 2564 dma_pool_destroy(uc->hdesc_pool); 2565 uc->use_dma_pool = false; 2566 } 2567 2568 return ret; 2569 } 2570 2571 static int bcdma_router_config(struct dma_chan *chan) 2572 { 2573 struct k3_event_route_data *router_data = chan->route_data; 2574 struct udma_chan *uc = to_udma_chan(chan); 2575 u32 trigger_event; 2576 2577 if (!uc->bchan) 2578 return -EINVAL; 2579 2580 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2) 2581 return -EINVAL; 2582 2583 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset; 2584 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1; 2585 2586 return router_data->set_event(router_data->priv, trigger_event); 2587 } 2588 2589 static int pktdma_alloc_chan_resources(struct dma_chan *chan) 2590 { 
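	/*
	 * PKTDMA channel setup: the channel is backed by either a
	 * tchan + tflow (TX) or an rchan + rflow (RX); MEM_TO_MEM is
	 * rejected. The completion interrupt is taken from the mapped flow
	 * (flow id + the SoC's pktdma_*chan_flow output event offset) and
	 * no separate TR event IRQ is requested, so irq_num_udma is left
	 * at 0. Descriptors always come from the per-channel hdesc_pool
	 * created further down.
	 */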
2591 struct udma_chan *uc = to_udma_chan(chan); 2592 struct udma_dev *ud = to_udma_dev(chan->device); 2593 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 2594 u32 irq_ring_idx; 2595 int ret; 2596 2597 /* 2598 * Make sure that the completion is in a known state: 2599 * No teardown, the channel is idle 2600 */ 2601 reinit_completion(&uc->teardown_completed); 2602 complete_all(&uc->teardown_completed); 2603 uc->state = UDMA_CHAN_IS_IDLE; 2604 2605 switch (uc->config.dir) { 2606 case DMA_MEM_TO_DEV: 2607 /* Slave transfer synchronized - mem to dev (TX) trasnfer */ 2608 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, 2609 uc->id); 2610 2611 ret = udma_alloc_tx_resources(uc); 2612 if (ret) { 2613 uc->config.remote_thread_id = -1; 2614 return ret; 2615 } 2616 2617 uc->config.src_thread = ud->psil_base + uc->tchan->id; 2618 uc->config.dst_thread = uc->config.remote_thread_id; 2619 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; 2620 2621 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow; 2622 2623 ret = pktdma_tisci_tx_channel_config(uc); 2624 break; 2625 case DMA_DEV_TO_MEM: 2626 /* Slave transfer synchronized - dev to mem (RX) trasnfer */ 2627 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, 2628 uc->id); 2629 2630 ret = udma_alloc_rx_resources(uc); 2631 if (ret) { 2632 uc->config.remote_thread_id = -1; 2633 return ret; 2634 } 2635 2636 uc->config.src_thread = uc->config.remote_thread_id; 2637 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 2638 K3_PSIL_DST_THREAD_ID_OFFSET; 2639 2640 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow; 2641 2642 ret = pktdma_tisci_rx_channel_config(uc); 2643 break; 2644 default: 2645 /* Can not happen */ 2646 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", 2647 __func__, uc->id, uc->config.dir); 2648 return -EINVAL; 2649 } 2650 2651 /* check if the channel configuration was successful */ 2652 if (ret) 2653 goto err_res_free; 2654 2655 if (udma_is_chan_running(uc)) { 2656 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); 2657 udma_reset_chan(uc, false); 2658 if (udma_is_chan_running(uc)) { 2659 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); 2660 ret = -EBUSY; 2661 goto err_res_free; 2662 } 2663 } 2664 2665 uc->dma_dev = dmaengine_get_dma_device(chan); 2666 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev, 2667 uc->config.hdesc_size, ud->desc_align, 2668 0); 2669 if (!uc->hdesc_pool) { 2670 dev_err(ud->ddev.dev, 2671 "Descriptor pool allocation failed\n"); 2672 uc->use_dma_pool = false; 2673 ret = -ENOMEM; 2674 goto err_res_free; 2675 } 2676 2677 uc->use_dma_pool = true; 2678 2679 /* PSI-L pairing */ 2680 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); 2681 if (ret) { 2682 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", 2683 uc->config.src_thread, uc->config.dst_thread); 2684 goto err_res_free; 2685 } 2686 2687 uc->psil_paired = true; 2688 2689 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); 2690 if (uc->irq_num_ring <= 0) { 2691 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", 2692 irq_ring_idx); 2693 ret = -EINVAL; 2694 goto err_psi_free; 2695 } 2696 2697 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, 2698 IRQF_TRIGGER_HIGH, uc->name, uc); 2699 if (ret) { 2700 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); 2701 goto err_irq_free; 2702 } 2703 2704 uc->irq_num_udma = 0; 2705 2706 udma_reset_rings(uc); 2707 2708 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, 2709 udma_check_tx_completion); 2710 
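	/* Only one side is set up: tchan + tflow for TX, rchan + rflow for RX */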
2711 if (uc->tchan) 2712 dev_dbg(ud->dev, 2713 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n", 2714 uc->id, uc->tchan->id, uc->tchan->tflow_id, 2715 uc->config.remote_thread_id); 2716 else if (uc->rchan) 2717 dev_dbg(ud->dev, 2718 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n", 2719 uc->id, uc->rchan->id, uc->rflow->id, 2720 uc->config.remote_thread_id); 2721 return 0; 2722 2723 err_irq_free: 2724 uc->irq_num_ring = 0; 2725 err_psi_free: 2726 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); 2727 uc->psil_paired = false; 2728 err_res_free: 2729 udma_free_tx_resources(uc); 2730 udma_free_rx_resources(uc); 2731 2732 udma_reset_uchan(uc); 2733 2734 dma_pool_destroy(uc->hdesc_pool); 2735 uc->use_dma_pool = false; 2736 2737 return ret; 2738 } 2739 2740 static int udma_slave_config(struct dma_chan *chan, 2741 struct dma_slave_config *cfg) 2742 { 2743 struct udma_chan *uc = to_udma_chan(chan); 2744 2745 memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); 2746 2747 return 0; 2748 } 2749 2750 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, 2751 size_t tr_size, int tr_count, 2752 enum dma_transfer_direction dir) 2753 { 2754 struct udma_hwdesc *hwdesc; 2755 struct cppi5_desc_hdr_t *tr_desc; 2756 struct udma_desc *d; 2757 u32 reload_count = 0; 2758 u32 ring_id; 2759 2760 switch (tr_size) { 2761 case 16: 2762 case 32: 2763 case 64: 2764 case 128: 2765 break; 2766 default: 2767 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); 2768 return NULL; 2769 } 2770 2771 /* We have only one descriptor containing multiple TRs */ 2772 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); 2773 if (!d) 2774 return NULL; 2775 2776 d->sglen = tr_count; 2777 2778 d->hwdesc_count = 1; 2779 hwdesc = &d->hwdesc[0]; 2780 2781 /* Allocate memory for DMA ring descriptor */ 2782 if (uc->use_dma_pool) { 2783 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 2784 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 2785 GFP_NOWAIT, 2786 &hwdesc->cppi5_desc_paddr); 2787 } else { 2788 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 2789 tr_count); 2790 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, 2791 uc->ud->desc_align); 2792 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, 2793 hwdesc->cppi5_desc_size, 2794 &hwdesc->cppi5_desc_paddr, 2795 GFP_NOWAIT); 2796 } 2797 2798 if (!hwdesc->cppi5_desc_vaddr) { 2799 kfree(d); 2800 return NULL; 2801 } 2802 2803 /* Start of the TR req records */ 2804 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; 2805 /* Start address of the TR response array */ 2806 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; 2807 2808 tr_desc = hwdesc->cppi5_desc_vaddr; 2809 2810 if (uc->cyclic) 2811 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE; 2812 2813 if (dir == DMA_DEV_TO_MEM) 2814 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 2815 else 2816 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 2817 2818 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count); 2819 cppi5_desc_set_pktids(tr_desc, uc->id, 2820 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 2821 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id); 2822 2823 return d; 2824 } 2825 2826 /** 2827 * udma_get_tr_counters - calculate TR counters for a given length 2828 * @len: Length of the trasnfer 2829 * @align_to: Preferred alignment 2830 * @tr0_cnt0: First TR icnt0 2831 * @tr0_cnt1: First TR icnt1 2832 * @tr1_cnt0: Second (if used) TR icnt0 2833 * 2834 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated 2835 * For len >= 
SZ_64K two TRs are used in a simple way: 2836 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1) 2837 * Second TR: the remaining length (tr1_cnt0) 2838 * 2839 * Returns the number of TRs the length needs (1 or 2) 2840 * -EINVAL if the length can not be supported 2841 */ 2842 static int udma_get_tr_counters(size_t len, unsigned long align_to, 2843 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0) 2844 { 2845 if (len < SZ_64K) { 2846 *tr0_cnt0 = len; 2847 *tr0_cnt1 = 1; 2848 2849 return 1; 2850 } 2851 2852 if (align_to > 3) 2853 align_to = 3; 2854 2855 realign: 2856 *tr0_cnt0 = SZ_64K - BIT(align_to); 2857 if (len / *tr0_cnt0 >= SZ_64K) { 2858 if (align_to) { 2859 align_to--; 2860 goto realign; 2861 } 2862 return -EINVAL; 2863 } 2864 2865 *tr0_cnt1 = len / *tr0_cnt0; 2866 *tr1_cnt0 = len % *tr0_cnt0; 2867 2868 return 2; 2869 } 2870 2871 static struct udma_desc * 2872 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, 2873 unsigned int sglen, enum dma_transfer_direction dir, 2874 unsigned long tx_flags, void *context) 2875 { 2876 struct scatterlist *sgent; 2877 struct udma_desc *d; 2878 struct cppi5_tr_type1_t *tr_req = NULL; 2879 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 2880 unsigned int i; 2881 size_t tr_size; 2882 int num_tr = 0; 2883 int tr_idx = 0; 2884 u64 asel; 2885 2886 /* estimate the number of TRs we will need */ 2887 for_each_sg(sgl, sgent, sglen, i) { 2888 if (sg_dma_len(sgent) < SZ_64K) 2889 num_tr++; 2890 else 2891 num_tr += 2; 2892 } 2893 2894 /* Now allocate and setup the descriptor. */ 2895 tr_size = sizeof(struct cppi5_tr_type1_t); 2896 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 2897 if (!d) 2898 return NULL; 2899 2900 d->sglen = sglen; 2901 2902 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 2903 asel = 0; 2904 else 2905 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 2906 2907 tr_req = d->hwdesc[0].tr_req_base; 2908 for_each_sg(sgl, sgent, sglen, i) { 2909 dma_addr_t sg_addr = sg_dma_address(sgent); 2910 2911 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr), 2912 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0); 2913 if (num_tr < 0) { 2914 dev_err(uc->ud->dev, "size %u is not supported\n", 2915 sg_dma_len(sgent)); 2916 udma_free_hwdesc(uc, d); 2917 kfree(d); 2918 return NULL; 2919 } 2920 2921 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 2922 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2923 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); 2924 2925 sg_addr |= asel; 2926 tr_req[tr_idx].addr = sg_addr; 2927 tr_req[tr_idx].icnt0 = tr0_cnt0; 2928 tr_req[tr_idx].icnt1 = tr0_cnt1; 2929 tr_req[tr_idx].dim1 = tr0_cnt0; 2930 tr_idx++; 2931 2932 if (num_tr == 2) { 2933 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 2934 false, false, 2935 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 2936 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 2937 CPPI5_TR_CSF_SUPR_EVT); 2938 2939 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0; 2940 tr_req[tr_idx].icnt0 = tr1_cnt0; 2941 tr_req[tr_idx].icnt1 = 1; 2942 tr_req[tr_idx].dim1 = tr1_cnt0; 2943 tr_idx++; 2944 } 2945 2946 d->residue += sg_dma_len(sgent); 2947 } 2948 2949 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, 2950 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 2951 2952 return d; 2953 } 2954 2955 static struct udma_desc * 2956 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl, 2957 unsigned int sglen, 2958 enum dma_transfer_direction dir, 2959 unsigned long tx_flags, void *context) 2960 { 2961 struct scatterlist *sgent; 2962 struct cppi5_tr_type15_t *tr_req = NULL; 2963 enum 
dma_slave_buswidth dev_width; 2964 u16 tr_cnt0, tr_cnt1; 2965 dma_addr_t dev_addr; 2966 struct udma_desc *d; 2967 unsigned int i; 2968 size_t tr_size, sg_len; 2969 int num_tr = 0; 2970 int tr_idx = 0; 2971 u32 burst, trigger_size, port_window; 2972 u64 asel; 2973 2974 if (dir == DMA_DEV_TO_MEM) { 2975 dev_addr = uc->cfg.src_addr; 2976 dev_width = uc->cfg.src_addr_width; 2977 burst = uc->cfg.src_maxburst; 2978 port_window = uc->cfg.src_port_window_size; 2979 } else if (dir == DMA_MEM_TO_DEV) { 2980 dev_addr = uc->cfg.dst_addr; 2981 dev_width = uc->cfg.dst_addr_width; 2982 burst = uc->cfg.dst_maxburst; 2983 port_window = uc->cfg.dst_port_window_size; 2984 } else { 2985 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 2986 return NULL; 2987 } 2988 2989 if (!burst) 2990 burst = 1; 2991 2992 if (port_window) { 2993 if (port_window != burst) { 2994 dev_err(uc->ud->dev, 2995 "The burst must be equal to port_window\n"); 2996 return NULL; 2997 } 2998 2999 tr_cnt0 = dev_width * port_window; 3000 tr_cnt1 = 1; 3001 } else { 3002 tr_cnt0 = dev_width; 3003 tr_cnt1 = burst; 3004 } 3005 trigger_size = tr_cnt0 * tr_cnt1; 3006 3007 /* estimate the number of TRs we will need */ 3008 for_each_sg(sgl, sgent, sglen, i) { 3009 sg_len = sg_dma_len(sgent); 3010 3011 if (sg_len % trigger_size) { 3012 dev_err(uc->ud->dev, 3013 "Not aligned SG entry (%zu for %u)\n", sg_len, 3014 trigger_size); 3015 return NULL; 3016 } 3017 3018 if (sg_len / trigger_size < SZ_64K) 3019 num_tr++; 3020 else 3021 num_tr += 2; 3022 } 3023 3024 /* Now allocate and setup the descriptor. */ 3025 tr_size = sizeof(struct cppi5_tr_type15_t); 3026 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir); 3027 if (!d) 3028 return NULL; 3029 3030 d->sglen = sglen; 3031 3032 if (uc->ud->match_data->type == DMA_TYPE_UDMA) { 3033 asel = 0; 3034 } else { 3035 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3036 dev_addr |= asel; 3037 } 3038 3039 tr_req = d->hwdesc[0].tr_req_base; 3040 for_each_sg(sgl, sgent, sglen, i) { 3041 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2; 3042 dma_addr_t sg_addr = sg_dma_address(sgent); 3043 3044 sg_len = sg_dma_len(sgent); 3045 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0, 3046 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2); 3047 if (num_tr < 0) { 3048 dev_err(uc->ud->dev, "size %zu is not supported\n", 3049 sg_len); 3050 udma_free_hwdesc(uc, d); 3051 kfree(d); 3052 return NULL; 3053 } 3054 3055 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false, 3056 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3057 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); 3058 cppi5_tr_set_trigger(&tr_req[tr_idx].flags, 3059 uc->config.tr_trigger_type, 3060 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0); 3061 3062 sg_addr |= asel; 3063 if (dir == DMA_DEV_TO_MEM) { 3064 tr_req[tr_idx].addr = dev_addr; 3065 tr_req[tr_idx].icnt0 = tr_cnt0; 3066 tr_req[tr_idx].icnt1 = tr_cnt1; 3067 tr_req[tr_idx].icnt2 = tr0_cnt2; 3068 tr_req[tr_idx].icnt3 = tr0_cnt3; 3069 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; 3070 3071 tr_req[tr_idx].daddr = sg_addr; 3072 tr_req[tr_idx].dicnt0 = tr_cnt0; 3073 tr_req[tr_idx].dicnt1 = tr_cnt1; 3074 tr_req[tr_idx].dicnt2 = tr0_cnt2; 3075 tr_req[tr_idx].dicnt3 = tr0_cnt3; 3076 tr_req[tr_idx].ddim1 = tr_cnt0; 3077 tr_req[tr_idx].ddim2 = trigger_size; 3078 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2; 3079 } else { 3080 tr_req[tr_idx].addr = sg_addr; 3081 tr_req[tr_idx].icnt0 = tr_cnt0; 3082 tr_req[tr_idx].icnt1 = tr_cnt1; 3083 tr_req[tr_idx].icnt2 = tr0_cnt2; 3084 tr_req[tr_idx].icnt3 = tr0_cnt3; 3085 tr_req[tr_idx].dim1 = 
tr_cnt0; 3086 tr_req[tr_idx].dim2 = trigger_size; 3087 tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2; 3088 3089 tr_req[tr_idx].daddr = dev_addr; 3090 tr_req[tr_idx].dicnt0 = tr_cnt0; 3091 tr_req[tr_idx].dicnt1 = tr_cnt1; 3092 tr_req[tr_idx].dicnt2 = tr0_cnt2; 3093 tr_req[tr_idx].dicnt3 = tr0_cnt3; 3094 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; 3095 } 3096 3097 tr_idx++; 3098 3099 if (num_tr == 2) { 3100 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, 3101 false, true, 3102 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3103 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3104 CPPI5_TR_CSF_SUPR_EVT); 3105 cppi5_tr_set_trigger(&tr_req[tr_idx].flags, 3106 uc->config.tr_trigger_type, 3107 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 3108 0, 0); 3109 3110 sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3; 3111 if (dir == DMA_DEV_TO_MEM) { 3112 tr_req[tr_idx].addr = dev_addr; 3113 tr_req[tr_idx].icnt0 = tr_cnt0; 3114 tr_req[tr_idx].icnt1 = tr_cnt1; 3115 tr_req[tr_idx].icnt2 = tr1_cnt2; 3116 tr_req[tr_idx].icnt3 = 1; 3117 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; 3118 3119 tr_req[tr_idx].daddr = sg_addr; 3120 tr_req[tr_idx].dicnt0 = tr_cnt0; 3121 tr_req[tr_idx].dicnt1 = tr_cnt1; 3122 tr_req[tr_idx].dicnt2 = tr1_cnt2; 3123 tr_req[tr_idx].dicnt3 = 1; 3124 tr_req[tr_idx].ddim1 = tr_cnt0; 3125 tr_req[tr_idx].ddim2 = trigger_size; 3126 } else { 3127 tr_req[tr_idx].addr = sg_addr; 3128 tr_req[tr_idx].icnt0 = tr_cnt0; 3129 tr_req[tr_idx].icnt1 = tr_cnt1; 3130 tr_req[tr_idx].icnt2 = tr1_cnt2; 3131 tr_req[tr_idx].icnt3 = 1; 3132 tr_req[tr_idx].dim1 = tr_cnt0; 3133 tr_req[tr_idx].dim2 = trigger_size; 3134 3135 tr_req[tr_idx].daddr = dev_addr; 3136 tr_req[tr_idx].dicnt0 = tr_cnt0; 3137 tr_req[tr_idx].dicnt1 = tr_cnt1; 3138 tr_req[tr_idx].dicnt2 = tr1_cnt2; 3139 tr_req[tr_idx].dicnt3 = 1; 3140 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; 3141 } 3142 tr_idx++; 3143 } 3144 3145 d->residue += sg_len; 3146 } 3147 3148 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, 3149 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 3150 3151 return d; 3152 } 3153 3154 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d, 3155 enum dma_slave_buswidth dev_width, 3156 u16 elcnt) 3157 { 3158 if (uc->config.ep_type != PSIL_EP_PDMA_XY) 3159 return 0; 3160 3161 /* Bus width translates to the element size (ES) */ 3162 switch (dev_width) { 3163 case DMA_SLAVE_BUSWIDTH_1_BYTE: 3164 d->static_tr.elsize = 0; 3165 break; 3166 case DMA_SLAVE_BUSWIDTH_2_BYTES: 3167 d->static_tr.elsize = 1; 3168 break; 3169 case DMA_SLAVE_BUSWIDTH_3_BYTES: 3170 d->static_tr.elsize = 2; 3171 break; 3172 case DMA_SLAVE_BUSWIDTH_4_BYTES: 3173 d->static_tr.elsize = 3; 3174 break; 3175 case DMA_SLAVE_BUSWIDTH_8_BYTES: 3176 d->static_tr.elsize = 4; 3177 break; 3178 default: /* not reached */ 3179 return -EINVAL; 3180 } 3181 3182 d->static_tr.elcnt = elcnt; 3183 3184 /* 3185 * PDMA must to close the packet when the channel is in packet mode. 3186 * For TR mode when the channel is not cyclic we also need PDMA to close 3187 * the packet otherwise the transfer will stall because PDMA holds on 3188 * the data it has received from the peripheral. 
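	 *
	 * Illustrative example (assumed values): a non-cyclic DEV_TO_MEM
	 * transfer with a residue of 1024 bytes, a 4 byte bus width and a
	 * burst (elcnt) of 8 gives div = 4 * 8 = 32 and
	 * bstcnt = 1024 / 32 = 32 bursts. When a cyclic packet mode
	 * transfer gets here, the residue is first divided by the number
	 * of periods (sglen), so bstcnt describes a single period. For
	 * DEV_TO_MEM the result must also fit into the SoC's
	 * statictr_z_mask, otherwise -EINVAL is returned.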
3189 */ 3190 if (uc->config.pkt_mode || !uc->cyclic) { 3191 unsigned int div = dev_width * elcnt; 3192 3193 if (uc->cyclic) 3194 d->static_tr.bstcnt = d->residue / d->sglen / div; 3195 else 3196 d->static_tr.bstcnt = d->residue / div; 3197 3198 if (uc->config.dir == DMA_DEV_TO_MEM && 3199 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) 3200 return -EINVAL; 3201 } else { 3202 d->static_tr.bstcnt = 0; 3203 } 3204 3205 return 0; 3206 } 3207 3208 static struct udma_desc * 3209 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl, 3210 unsigned int sglen, enum dma_transfer_direction dir, 3211 unsigned long tx_flags, void *context) 3212 { 3213 struct scatterlist *sgent; 3214 struct cppi5_host_desc_t *h_desc = NULL; 3215 struct udma_desc *d; 3216 u32 ring_id; 3217 unsigned int i; 3218 u64 asel; 3219 3220 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT); 3221 if (!d) 3222 return NULL; 3223 3224 d->sglen = sglen; 3225 d->hwdesc_count = sglen; 3226 3227 if (dir == DMA_DEV_TO_MEM) 3228 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 3229 else 3230 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 3231 3232 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 3233 asel = 0; 3234 else 3235 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3236 3237 for_each_sg(sgl, sgent, sglen, i) { 3238 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 3239 dma_addr_t sg_addr = sg_dma_address(sgent); 3240 struct cppi5_host_desc_t *desc; 3241 size_t sg_len = sg_dma_len(sgent); 3242 3243 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 3244 GFP_NOWAIT, 3245 &hwdesc->cppi5_desc_paddr); 3246 if (!hwdesc->cppi5_desc_vaddr) { 3247 dev_err(uc->ud->dev, 3248 "descriptor%d allocation failed\n", i); 3249 3250 udma_free_hwdesc(uc, d); 3251 kfree(d); 3252 return NULL; 3253 } 3254 3255 d->residue += sg_len; 3256 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 3257 desc = hwdesc->cppi5_desc_vaddr; 3258 3259 if (i == 0) { 3260 cppi5_hdesc_init(desc, 0, 0); 3261 /* Flow and Packed ID */ 3262 cppi5_desc_set_pktids(&desc->hdr, uc->id, 3263 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3264 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); 3265 } else { 3266 cppi5_hdesc_reset_hbdesc(desc); 3267 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); 3268 } 3269 3270 /* attach the sg buffer to the descriptor */ 3271 sg_addr |= asel; 3272 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len); 3273 3274 /* Attach link as host buffer descriptor */ 3275 if (h_desc) 3276 cppi5_hdesc_link_hbdesc(h_desc, 3277 hwdesc->cppi5_desc_paddr | asel); 3278 3279 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA || 3280 dir == DMA_MEM_TO_DEV) 3281 h_desc = desc; 3282 } 3283 3284 if (d->residue >= SZ_4M) { 3285 dev_err(uc->ud->dev, 3286 "%s: Transfer size %u is over the supported 4M range\n", 3287 __func__, d->residue); 3288 udma_free_hwdesc(uc, d); 3289 kfree(d); 3290 return NULL; 3291 } 3292 3293 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3294 cppi5_hdesc_set_pktlen(h_desc, d->residue); 3295 3296 return d; 3297 } 3298 3299 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc, 3300 void *data, size_t len) 3301 { 3302 struct udma_desc *d = to_udma_desc(desc); 3303 struct udma_chan *uc = to_udma_chan(desc->chan); 3304 struct cppi5_host_desc_t *h_desc; 3305 u32 psd_size = len; 3306 u32 flags = 0; 3307 3308 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3309 return -ENOTSUPP; 3310 3311 if (!data || len > uc->config.metadata_size) 3312 return -EINVAL; 3313 3314 if (uc->config.needs_epib && len < 
CPPI5_INFO0_HDESC_EPIB_SIZE) 3315 return -EINVAL; 3316 3317 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3318 if (d->dir == DMA_MEM_TO_DEV) 3319 memcpy(h_desc->epib, data, len); 3320 3321 if (uc->config.needs_epib) 3322 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3323 3324 d->metadata = data; 3325 d->metadata_size = len; 3326 if (uc->config.needs_epib) 3327 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3328 3329 cppi5_hdesc_update_flags(h_desc, flags); 3330 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3331 3332 return 0; 3333 } 3334 3335 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, 3336 size_t *payload_len, size_t *max_len) 3337 { 3338 struct udma_desc *d = to_udma_desc(desc); 3339 struct udma_chan *uc = to_udma_chan(desc->chan); 3340 struct cppi5_host_desc_t *h_desc; 3341 3342 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3343 return ERR_PTR(-ENOTSUPP); 3344 3345 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3346 3347 *max_len = uc->config.metadata_size; 3348 3349 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? 3350 CPPI5_INFO0_HDESC_EPIB_SIZE : 0; 3351 *payload_len += cppi5_hdesc_get_psdata_size(h_desc); 3352 3353 return h_desc->epib; 3354 } 3355 3356 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, 3357 size_t payload_len) 3358 { 3359 struct udma_desc *d = to_udma_desc(desc); 3360 struct udma_chan *uc = to_udma_chan(desc->chan); 3361 struct cppi5_host_desc_t *h_desc; 3362 u32 psd_size = payload_len; 3363 u32 flags = 0; 3364 3365 if (!uc->config.pkt_mode || !uc->config.metadata_size) 3366 return -ENOTSUPP; 3367 3368 if (payload_len > uc->config.metadata_size) 3369 return -EINVAL; 3370 3371 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) 3372 return -EINVAL; 3373 3374 h_desc = d->hwdesc[0].cppi5_desc_vaddr; 3375 3376 if (uc->config.needs_epib) { 3377 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; 3378 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; 3379 } 3380 3381 cppi5_hdesc_update_flags(h_desc, flags); 3382 cppi5_hdesc_update_psdata_size(h_desc, psd_size); 3383 3384 return 0; 3385 } 3386 3387 static struct dma_descriptor_metadata_ops metadata_ops = { 3388 .attach = udma_attach_metadata, 3389 .get_ptr = udma_get_metadata_ptr, 3390 .set_len = udma_set_metadata_len, 3391 }; 3392 3393 static struct dma_async_tx_descriptor * 3394 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 3395 unsigned int sglen, enum dma_transfer_direction dir, 3396 unsigned long tx_flags, void *context) 3397 { 3398 struct udma_chan *uc = to_udma_chan(chan); 3399 enum dma_slave_buswidth dev_width; 3400 struct udma_desc *d; 3401 u32 burst; 3402 3403 if (dir != uc->config.dir && 3404 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) { 3405 dev_err(chan->device->dev, 3406 "%s: chan%d is for %s, not supporting %s\n", 3407 __func__, uc->id, 3408 dmaengine_get_direction_text(uc->config.dir), 3409 dmaengine_get_direction_text(dir)); 3410 return NULL; 3411 } 3412 3413 if (dir == DMA_DEV_TO_MEM) { 3414 dev_width = uc->cfg.src_addr_width; 3415 burst = uc->cfg.src_maxburst; 3416 } else if (dir == DMA_MEM_TO_DEV) { 3417 dev_width = uc->cfg.dst_addr_width; 3418 burst = uc->cfg.dst_maxburst; 3419 } else { 3420 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); 3421 return NULL; 3422 } 3423 3424 if (!burst) 3425 burst = 1; 3426 3427 uc->config.tx_flags = tx_flags; 3428 3429 if (uc->config.pkt_mode) 3430 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, 3431 context); 3432 else if (is_slave_direction(uc->config.dir)) 
3433 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, 3434 context); 3435 else 3436 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir, 3437 tx_flags, context); 3438 3439 if (!d) 3440 return NULL; 3441 3442 d->dir = dir; 3443 d->desc_idx = 0; 3444 d->tr_idx = 0; 3445 3446 /* static TR for remote PDMA */ 3447 if (udma_configure_statictr(uc, d, dev_width, burst)) { 3448 dev_err(uc->ud->dev, 3449 "%s: StaticTR Z is limited to maximum 4095 (%u)\n", 3450 __func__, d->static_tr.bstcnt); 3451 3452 udma_free_hwdesc(uc, d); 3453 kfree(d); 3454 return NULL; 3455 } 3456 3457 if (uc->config.metadata_size) 3458 d->vd.tx.metadata_ops = &metadata_ops; 3459 3460 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3461 } 3462 3463 static struct udma_desc * 3464 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, 3465 size_t buf_len, size_t period_len, 3466 enum dma_transfer_direction dir, unsigned long flags) 3467 { 3468 struct udma_desc *d; 3469 size_t tr_size, period_addr; 3470 struct cppi5_tr_type1_t *tr_req; 3471 unsigned int periods = buf_len / period_len; 3472 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 3473 unsigned int i; 3474 int num_tr; 3475 3476 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0, 3477 &tr0_cnt1, &tr1_cnt0); 3478 if (num_tr < 0) { 3479 dev_err(uc->ud->dev, "size %zu is not supported\n", 3480 period_len); 3481 return NULL; 3482 } 3483 3484 /* Now allocate and setup the descriptor. */ 3485 tr_size = sizeof(struct cppi5_tr_type1_t); 3486 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir); 3487 if (!d) 3488 return NULL; 3489 3490 tr_req = d->hwdesc[0].tr_req_base; 3491 if (uc->ud->match_data->type == DMA_TYPE_UDMA) 3492 period_addr = buf_addr; 3493 else 3494 period_addr = buf_addr | 3495 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); 3496 3497 for (i = 0; i < periods; i++) { 3498 int tr_idx = i * num_tr; 3499 3500 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, 3501 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3502 3503 tr_req[tr_idx].addr = period_addr; 3504 tr_req[tr_idx].icnt0 = tr0_cnt0; 3505 tr_req[tr_idx].icnt1 = tr0_cnt1; 3506 tr_req[tr_idx].dim1 = tr0_cnt0; 3507 3508 if (num_tr == 2) { 3509 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3510 CPPI5_TR_CSF_SUPR_EVT); 3511 tr_idx++; 3512 3513 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, 3514 false, false, 3515 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3516 3517 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0; 3518 tr_req[tr_idx].icnt0 = tr1_cnt0; 3519 tr_req[tr_idx].icnt1 = 1; 3520 tr_req[tr_idx].dim1 = tr1_cnt0; 3521 } 3522 3523 if (!(flags & DMA_PREP_INTERRUPT)) 3524 cppi5_tr_csf_set(&tr_req[tr_idx].flags, 3525 CPPI5_TR_CSF_SUPR_EVT); 3526 3527 period_addr += period_len; 3528 } 3529 3530 return d; 3531 } 3532 3533 static struct udma_desc * 3534 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, 3535 size_t buf_len, size_t period_len, 3536 enum dma_transfer_direction dir, unsigned long flags) 3537 { 3538 struct udma_desc *d; 3539 u32 ring_id; 3540 int i; 3541 int periods = buf_len / period_len; 3542 3543 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) 3544 return NULL; 3545 3546 if (period_len >= SZ_4M) 3547 return NULL; 3548 3549 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT); 3550 if (!d) 3551 return NULL; 3552 3553 d->hwdesc_count = periods; 3554 3555 /* TODO: re-check this... 
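	 * Note that the ring selected here is also programmed as the
	 * descriptor return queue for every per-period host descriptor via
	 * cppi5_desc_set_retpolicy() below.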
*/ 3556 if (dir == DMA_DEV_TO_MEM) 3557 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); 3558 else 3559 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); 3560 3561 if (uc->ud->match_data->type != DMA_TYPE_UDMA) 3562 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; 3563 3564 for (i = 0; i < periods; i++) { 3565 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; 3566 dma_addr_t period_addr = buf_addr + (period_len * i); 3567 struct cppi5_host_desc_t *h_desc; 3568 3569 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, 3570 GFP_NOWAIT, 3571 &hwdesc->cppi5_desc_paddr); 3572 if (!hwdesc->cppi5_desc_vaddr) { 3573 dev_err(uc->ud->dev, 3574 "descriptor%d allocation failed\n", i); 3575 3576 udma_free_hwdesc(uc, d); 3577 kfree(d); 3578 return NULL; 3579 } 3580 3581 hwdesc->cppi5_desc_size = uc->config.hdesc_size; 3582 h_desc = hwdesc->cppi5_desc_vaddr; 3583 3584 cppi5_hdesc_init(h_desc, 0, 0); 3585 cppi5_hdesc_set_pktlen(h_desc, period_len); 3586 3587 /* Flow and Packed ID */ 3588 cppi5_desc_set_pktids(&h_desc->hdr, uc->id, 3589 CPPI5_INFO1_DESC_FLOWID_DEFAULT); 3590 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); 3591 3592 /* attach each period to a new descriptor */ 3593 cppi5_hdesc_attach_buf(h_desc, 3594 period_addr, period_len, 3595 period_addr, period_len); 3596 } 3597 3598 return d; 3599 } 3600 3601 static struct dma_async_tx_descriptor * 3602 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 3603 size_t period_len, enum dma_transfer_direction dir, 3604 unsigned long flags) 3605 { 3606 struct udma_chan *uc = to_udma_chan(chan); 3607 enum dma_slave_buswidth dev_width; 3608 struct udma_desc *d; 3609 u32 burst; 3610 3611 if (dir != uc->config.dir) { 3612 dev_err(chan->device->dev, 3613 "%s: chan%d is for %s, not supporting %s\n", 3614 __func__, uc->id, 3615 dmaengine_get_direction_text(uc->config.dir), 3616 dmaengine_get_direction_text(dir)); 3617 return NULL; 3618 } 3619 3620 uc->cyclic = true; 3621 3622 if (dir == DMA_DEV_TO_MEM) { 3623 dev_width = uc->cfg.src_addr_width; 3624 burst = uc->cfg.src_maxburst; 3625 } else if (dir == DMA_MEM_TO_DEV) { 3626 dev_width = uc->cfg.dst_addr_width; 3627 burst = uc->cfg.dst_maxburst; 3628 } else { 3629 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); 3630 return NULL; 3631 } 3632 3633 if (!burst) 3634 burst = 1; 3635 3636 if (uc->config.pkt_mode) 3637 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, 3638 dir, flags); 3639 else 3640 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, 3641 dir, flags); 3642 3643 if (!d) 3644 return NULL; 3645 3646 d->sglen = buf_len / period_len; 3647 3648 d->dir = dir; 3649 d->residue = buf_len; 3650 3651 /* static TR for remote PDMA */ 3652 if (udma_configure_statictr(uc, d, dev_width, burst)) { 3653 dev_err(uc->ud->dev, 3654 "%s: StaticTR Z is limited to maximum 4095 (%u)\n", 3655 __func__, d->static_tr.bstcnt); 3656 3657 udma_free_hwdesc(uc, d); 3658 kfree(d); 3659 return NULL; 3660 } 3661 3662 if (uc->config.metadata_size) 3663 d->vd.tx.metadata_ops = &metadata_ops; 3664 3665 return vchan_tx_prep(&uc->vc, &d->vd, flags); 3666 } 3667 3668 static struct dma_async_tx_descriptor * 3669 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 3670 size_t len, unsigned long tx_flags) 3671 { 3672 struct udma_chan *uc = to_udma_chan(chan); 3673 struct udma_desc *d; 3674 struct cppi5_tr_type15_t *tr_req; 3675 int num_tr; 3676 size_t tr_size = sizeof(struct cppi5_tr_type15_t); 3677 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; 3678 3679 
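	/*
	 * Illustrative example (assumed values) of the split performed by
	 * udma_get_tr_counters() below: copying len = 200000 bytes with src
	 * and dest at least 8 byte aligned gives an effective alignment of
	 * 3 bits, so tr0_cnt0 = SZ_64K - 8 = 65528, tr0_cnt1 = 3 and
	 * tr1_cnt0 = 3416. The first type15 TR then moves
	 * 3 * 65528 = 196584 bytes and the second TR moves the remaining
	 * 3416 bytes.
	 */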
if (uc->config.dir != DMA_MEM_TO_MEM) { 3680 dev_err(chan->device->dev, 3681 "%s: chan%d is for %s, not supporting %s\n", 3682 __func__, uc->id, 3683 dmaengine_get_direction_text(uc->config.dir), 3684 dmaengine_get_direction_text(DMA_MEM_TO_MEM)); 3685 return NULL; 3686 } 3687 3688 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0, 3689 &tr0_cnt1, &tr1_cnt0); 3690 if (num_tr < 0) { 3691 dev_err(uc->ud->dev, "size %zu is not supported\n", 3692 len); 3693 return NULL; 3694 } 3695 3696 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); 3697 if (!d) 3698 return NULL; 3699 3700 d->dir = DMA_MEM_TO_MEM; 3701 d->desc_idx = 0; 3702 d->tr_idx = 0; 3703 d->residue = len; 3704 3705 if (uc->ud->match_data->type != DMA_TYPE_UDMA) { 3706 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3707 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; 3708 } 3709 3710 tr_req = d->hwdesc[0].tr_req_base; 3711 3712 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, 3713 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3714 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); 3715 3716 tr_req[0].addr = src; 3717 tr_req[0].icnt0 = tr0_cnt0; 3718 tr_req[0].icnt1 = tr0_cnt1; 3719 tr_req[0].icnt2 = 1; 3720 tr_req[0].icnt3 = 1; 3721 tr_req[0].dim1 = tr0_cnt0; 3722 3723 tr_req[0].daddr = dest; 3724 tr_req[0].dicnt0 = tr0_cnt0; 3725 tr_req[0].dicnt1 = tr0_cnt1; 3726 tr_req[0].dicnt2 = 1; 3727 tr_req[0].dicnt3 = 1; 3728 tr_req[0].ddim1 = tr0_cnt0; 3729 3730 if (num_tr == 2) { 3731 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, 3732 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 3733 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); 3734 3735 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; 3736 tr_req[1].icnt0 = tr1_cnt0; 3737 tr_req[1].icnt1 = 1; 3738 tr_req[1].icnt2 = 1; 3739 tr_req[1].icnt3 = 1; 3740 3741 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; 3742 tr_req[1].dicnt0 = tr1_cnt0; 3743 tr_req[1].dicnt1 = 1; 3744 tr_req[1].dicnt2 = 1; 3745 tr_req[1].dicnt3 = 1; 3746 } 3747 3748 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, 3749 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP); 3750 3751 if (uc->config.metadata_size) 3752 d->vd.tx.metadata_ops = &metadata_ops; 3753 3754 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); 3755 } 3756 3757 static void udma_issue_pending(struct dma_chan *chan) 3758 { 3759 struct udma_chan *uc = to_udma_chan(chan); 3760 unsigned long flags; 3761 3762 spin_lock_irqsave(&uc->vc.lock, flags); 3763 3764 /* If we have something pending and no active descriptor, then */ 3765 if (vchan_issue_pending(&uc->vc) && !uc->desc) { 3766 /* 3767 * start a descriptor if the channel is NOT [marked as 3768 * terminating _and_ it is still running (teardown has not 3769 * completed yet)]. 
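		 * In other words, start right away unless the channel is
		 * marked terminating and the hardware is still busy with the
		 * teardown; once it has stopped (or if it was never
		 * terminating) the new descriptor can be started here.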
3770 */ 3771 if (!(uc->state == UDMA_CHAN_IS_TERMINATING && 3772 udma_is_chan_running(uc))) 3773 udma_start(uc); 3774 } 3775 3776 spin_unlock_irqrestore(&uc->vc.lock, flags); 3777 } 3778 3779 static enum dma_status udma_tx_status(struct dma_chan *chan, 3780 dma_cookie_t cookie, 3781 struct dma_tx_state *txstate) 3782 { 3783 struct udma_chan *uc = to_udma_chan(chan); 3784 enum dma_status ret; 3785 unsigned long flags; 3786 3787 spin_lock_irqsave(&uc->vc.lock, flags); 3788 3789 ret = dma_cookie_status(chan, cookie, txstate); 3790 3791 if (!udma_is_chan_running(uc)) 3792 ret = DMA_COMPLETE; 3793 3794 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) 3795 ret = DMA_PAUSED; 3796 3797 if (ret == DMA_COMPLETE || !txstate) 3798 goto out; 3799 3800 if (uc->desc && uc->desc->vd.tx.cookie == cookie) { 3801 u32 peer_bcnt = 0; 3802 u32 bcnt = 0; 3803 u32 residue = uc->desc->residue; 3804 u32 delay = 0; 3805 3806 if (uc->desc->dir == DMA_MEM_TO_DEV) { 3807 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG); 3808 3809 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3810 peer_bcnt = udma_tchanrt_read(uc, 3811 UDMA_CHAN_RT_PEER_BCNT_REG); 3812 3813 if (bcnt > peer_bcnt) 3814 delay = bcnt - peer_bcnt; 3815 } 3816 } else if (uc->desc->dir == DMA_DEV_TO_MEM) { 3817 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3818 3819 if (uc->config.ep_type != PSIL_EP_NATIVE) { 3820 peer_bcnt = udma_rchanrt_read(uc, 3821 UDMA_CHAN_RT_PEER_BCNT_REG); 3822 3823 if (peer_bcnt > bcnt) 3824 delay = peer_bcnt - bcnt; 3825 } 3826 } else { 3827 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); 3828 } 3829 3830 if (bcnt && !(bcnt % uc->desc->residue)) 3831 residue = 0; 3832 else 3833 residue -= bcnt % uc->desc->residue; 3834 3835 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { 3836 ret = DMA_COMPLETE; 3837 delay = 0; 3838 } 3839 3840 dma_set_residue(txstate, residue); 3841 dma_set_in_flight_bytes(txstate, delay); 3842 3843 } else { 3844 ret = DMA_COMPLETE; 3845 } 3846 3847 out: 3848 spin_unlock_irqrestore(&uc->vc.lock, flags); 3849 return ret; 3850 } 3851 3852 static int udma_pause(struct dma_chan *chan) 3853 { 3854 struct udma_chan *uc = to_udma_chan(chan); 3855 3856 /* pause the channel */ 3857 switch (uc->config.dir) { 3858 case DMA_DEV_TO_MEM: 3859 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3860 UDMA_PEER_RT_EN_PAUSE, 3861 UDMA_PEER_RT_EN_PAUSE); 3862 break; 3863 case DMA_MEM_TO_DEV: 3864 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3865 UDMA_PEER_RT_EN_PAUSE, 3866 UDMA_PEER_RT_EN_PAUSE); 3867 break; 3868 case DMA_MEM_TO_MEM: 3869 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3870 UDMA_CHAN_RT_CTL_PAUSE, 3871 UDMA_CHAN_RT_CTL_PAUSE); 3872 break; 3873 default: 3874 return -EINVAL; 3875 } 3876 3877 return 0; 3878 } 3879 3880 static int udma_resume(struct dma_chan *chan) 3881 { 3882 struct udma_chan *uc = to_udma_chan(chan); 3883 3884 /* resume the channel */ 3885 switch (uc->config.dir) { 3886 case DMA_DEV_TO_MEM: 3887 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3888 UDMA_PEER_RT_EN_PAUSE, 0); 3889 3890 break; 3891 case DMA_MEM_TO_DEV: 3892 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 3893 UDMA_PEER_RT_EN_PAUSE, 0); 3894 break; 3895 case DMA_MEM_TO_MEM: 3896 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG, 3897 UDMA_CHAN_RT_CTL_PAUSE, 0); 3898 break; 3899 default: 3900 return -EINVAL; 3901 } 3902 3903 return 0; 3904 } 3905 3906 static int udma_terminate_all(struct dma_chan *chan) 3907 { 3908 struct udma_chan *uc = to_udma_chan(chan); 3909 
unsigned long flags; 3910 LIST_HEAD(head); 3911 3912 spin_lock_irqsave(&uc->vc.lock, flags); 3913 3914 if (udma_is_chan_running(uc)) 3915 udma_stop(uc); 3916 3917 if (uc->desc) { 3918 uc->terminated_desc = uc->desc; 3919 uc->desc = NULL; 3920 uc->terminated_desc->terminated = true; 3921 cancel_delayed_work(&uc->tx_drain.work); 3922 } 3923 3924 uc->paused = false; 3925 3926 vchan_get_all_descriptors(&uc->vc, &head); 3927 spin_unlock_irqrestore(&uc->vc.lock, flags); 3928 vchan_dma_desc_free_list(&uc->vc, &head); 3929 3930 return 0; 3931 } 3932 3933 static void udma_synchronize(struct dma_chan *chan) 3934 { 3935 struct udma_chan *uc = to_udma_chan(chan); 3936 unsigned long timeout = msecs_to_jiffies(1000); 3937 3938 vchan_synchronize(&uc->vc); 3939 3940 if (uc->state == UDMA_CHAN_IS_TERMINATING) { 3941 timeout = wait_for_completion_timeout(&uc->teardown_completed, 3942 timeout); 3943 if (!timeout) { 3944 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", 3945 uc->id); 3946 udma_dump_chan_stdata(uc); 3947 udma_reset_chan(uc, true); 3948 } 3949 } 3950 3951 udma_reset_chan(uc, false); 3952 if (udma_is_chan_running(uc)) 3953 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); 3954 3955 cancel_delayed_work_sync(&uc->tx_drain.work); 3956 udma_reset_rings(uc); 3957 } 3958 3959 static void udma_desc_pre_callback(struct virt_dma_chan *vc, 3960 struct virt_dma_desc *vd, 3961 struct dmaengine_result *result) 3962 { 3963 struct udma_chan *uc = to_udma_chan(&vc->chan); 3964 struct udma_desc *d; 3965 3966 if (!vd) 3967 return; 3968 3969 d = to_udma_desc(&vd->tx); 3970 3971 if (d->metadata_size) 3972 udma_fetch_epib(uc, d); 3973 3974 /* Provide residue information for the client */ 3975 if (result) { 3976 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); 3977 3978 if (cppi5_desc_get_type(desc_vaddr) == 3979 CPPI5_INFO0_DESC_TYPE_VAL_HOST) { 3980 result->residue = d->residue - 3981 cppi5_hdesc_get_pktlen(desc_vaddr); 3982 if (result->residue) 3983 result->result = DMA_TRANS_ABORTED; 3984 else 3985 result->result = DMA_TRANS_NOERROR; 3986 } else { 3987 result->residue = 0; 3988 result->result = DMA_TRANS_NOERROR; 3989 } 3990 } 3991 } 3992 3993 /* 3994 * This tasklet handles the completion of a DMA descriptor by 3995 * calling its callback and freeing it. 
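 *
 * A cyclic descriptor (vc->cyclic) only has its callback invoked and is not
 * freed here, while every descriptor on the completed list is removed from
 * the list, reported with its dmaengine_result and finalized with
 * vchan_vdesc_fini().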
3996 */ 3997 static void udma_vchan_complete(struct tasklet_struct *t) 3998 { 3999 struct virt_dma_chan *vc = from_tasklet(vc, t, task); 4000 struct virt_dma_desc *vd, *_vd; 4001 struct dmaengine_desc_callback cb; 4002 LIST_HEAD(head); 4003 4004 spin_lock_irq(&vc->lock); 4005 list_splice_tail_init(&vc->desc_completed, &head); 4006 vd = vc->cyclic; 4007 if (vd) { 4008 vc->cyclic = NULL; 4009 dmaengine_desc_get_callback(&vd->tx, &cb); 4010 } else { 4011 memset(&cb, 0, sizeof(cb)); 4012 } 4013 spin_unlock_irq(&vc->lock); 4014 4015 udma_desc_pre_callback(vc, vd, NULL); 4016 dmaengine_desc_callback_invoke(&cb, NULL); 4017 4018 list_for_each_entry_safe(vd, _vd, &head, node) { 4019 struct dmaengine_result result; 4020 4021 dmaengine_desc_get_callback(&vd->tx, &cb); 4022 4023 list_del(&vd->node); 4024 4025 udma_desc_pre_callback(vc, vd, &result); 4026 dmaengine_desc_callback_invoke(&cb, &result); 4027 4028 vchan_vdesc_fini(vd); 4029 } 4030 } 4031 4032 static void udma_free_chan_resources(struct dma_chan *chan) 4033 { 4034 struct udma_chan *uc = to_udma_chan(chan); 4035 struct udma_dev *ud = to_udma_dev(chan->device); 4036 4037 udma_terminate_all(chan); 4038 if (uc->terminated_desc) { 4039 udma_reset_chan(uc, false); 4040 udma_reset_rings(uc); 4041 } 4042 4043 cancel_delayed_work_sync(&uc->tx_drain.work); 4044 4045 if (uc->irq_num_ring > 0) { 4046 free_irq(uc->irq_num_ring, uc); 4047 4048 uc->irq_num_ring = 0; 4049 } 4050 if (uc->irq_num_udma > 0) { 4051 free_irq(uc->irq_num_udma, uc); 4052 4053 uc->irq_num_udma = 0; 4054 } 4055 4056 /* Release PSI-L pairing */ 4057 if (uc->psil_paired) { 4058 navss_psil_unpair(ud, uc->config.src_thread, 4059 uc->config.dst_thread); 4060 uc->psil_paired = false; 4061 } 4062 4063 vchan_free_chan_resources(&uc->vc); 4064 tasklet_kill(&uc->vc.task); 4065 4066 bcdma_free_bchan_resources(uc); 4067 udma_free_tx_resources(uc); 4068 udma_free_rx_resources(uc); 4069 udma_reset_uchan(uc); 4070 4071 if (uc->use_dma_pool) { 4072 dma_pool_destroy(uc->hdesc_pool); 4073 uc->use_dma_pool = false; 4074 } 4075 } 4076 4077 static struct platform_driver udma_driver; 4078 static struct platform_driver bcdma_driver; 4079 static struct platform_driver pktdma_driver; 4080 4081 struct udma_filter_param { 4082 int remote_thread_id; 4083 u32 atype; 4084 u32 asel; 4085 u32 tr_trigger_type; 4086 }; 4087 4088 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) 4089 { 4090 struct udma_chan_config *ucc; 4091 struct psil_endpoint_config *ep_config; 4092 struct udma_filter_param *filter_param; 4093 struct udma_chan *uc; 4094 struct udma_dev *ud; 4095 4096 if (chan->device->dev->driver != &udma_driver.driver && 4097 chan->device->dev->driver != &bcdma_driver.driver && 4098 chan->device->dev->driver != &pktdma_driver.driver) 4099 return false; 4100 4101 uc = to_udma_chan(chan); 4102 ucc = &uc->config; 4103 ud = uc->ud; 4104 filter_param = param; 4105 4106 if (filter_param->atype > 2) { 4107 dev_err(ud->dev, "Invalid channel atype: %u\n", 4108 filter_param->atype); 4109 return false; 4110 } 4111 4112 if (filter_param->asel > 15) { 4113 dev_err(ud->dev, "Invalid channel asel: %u\n", 4114 filter_param->asel); 4115 return false; 4116 } 4117 4118 ucc->remote_thread_id = filter_param->remote_thread_id; 4119 ucc->atype = filter_param->atype; 4120 ucc->asel = filter_param->asel; 4121 ucc->tr_trigger_type = filter_param->tr_trigger_type; 4122 4123 if (ucc->tr_trigger_type) { 4124 ucc->dir = DMA_MEM_TO_MEM; 4125 goto triggered_bchan; 4126 } else if (ucc->remote_thread_id & 
K3_PSIL_DST_THREAD_ID_OFFSET) { 4127 ucc->dir = DMA_MEM_TO_DEV; 4128 } else { 4129 ucc->dir = DMA_DEV_TO_MEM; 4130 } 4131 4132 ep_config = psil_get_ep_config(ucc->remote_thread_id); 4133 if (IS_ERR(ep_config)) { 4134 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", 4135 ucc->remote_thread_id); 4136 ucc->dir = DMA_MEM_TO_MEM; 4137 ucc->remote_thread_id = -1; 4138 ucc->atype = 0; 4139 ucc->asel = 0; 4140 return false; 4141 } 4142 4143 if (ud->match_data->type == DMA_TYPE_BCDMA && 4144 ep_config->pkt_mode) { 4145 dev_err(ud->dev, 4146 "Only TR mode is supported (psi-l thread 0x%04x)\n", 4147 ucc->remote_thread_id); 4148 ucc->dir = DMA_MEM_TO_MEM; 4149 ucc->remote_thread_id = -1; 4150 ucc->atype = 0; 4151 ucc->asel = 0; 4152 return false; 4153 } 4154 4155 ucc->pkt_mode = ep_config->pkt_mode; 4156 ucc->channel_tpl = ep_config->channel_tpl; 4157 ucc->notdpkt = ep_config->notdpkt; 4158 ucc->ep_type = ep_config->ep_type; 4159 4160 if (ud->match_data->type == DMA_TYPE_PKTDMA && 4161 ep_config->mapped_channel_id >= 0) { 4162 ucc->mapped_channel_id = ep_config->mapped_channel_id; 4163 ucc->default_flow_id = ep_config->default_flow_id; 4164 } else { 4165 ucc->mapped_channel_id = -1; 4166 ucc->default_flow_id = -1; 4167 } 4168 4169 if (ucc->ep_type != PSIL_EP_NATIVE) { 4170 const struct udma_match_data *match_data = ud->match_data; 4171 4172 if (match_data->flags & UDMA_FLAG_PDMA_ACC32) 4173 ucc->enable_acc32 = ep_config->pdma_acc32; 4174 if (match_data->flags & UDMA_FLAG_PDMA_BURST) 4175 ucc->enable_burst = ep_config->pdma_burst; 4176 } 4177 4178 ucc->needs_epib = ep_config->needs_epib; 4179 ucc->psd_size = ep_config->psd_size; 4180 ucc->metadata_size = 4181 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + 4182 ucc->psd_size; 4183 4184 if (ucc->pkt_mode) 4185 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + 4186 ucc->metadata_size, ud->desc_align); 4187 4188 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, 4189 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); 4190 4191 return true; 4192 4193 triggered_bchan: 4194 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id, 4195 ucc->tr_trigger_type); 4196 4197 return true; 4198 4199 } 4200 4201 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, 4202 struct of_dma *ofdma) 4203 { 4204 struct udma_dev *ud = ofdma->of_dma_data; 4205 dma_cap_mask_t mask = ud->ddev.cap_mask; 4206 struct udma_filter_param filter_param; 4207 struct dma_chan *chan; 4208 4209 if (ud->match_data->type == DMA_TYPE_BCDMA) { 4210 if (dma_spec->args_count != 3) 4211 return NULL; 4212 4213 filter_param.tr_trigger_type = dma_spec->args[0]; 4214 filter_param.remote_thread_id = dma_spec->args[1]; 4215 filter_param.asel = dma_spec->args[2]; 4216 filter_param.atype = 0; 4217 } else { 4218 if (dma_spec->args_count != 1 && dma_spec->args_count != 2) 4219 return NULL; 4220 4221 filter_param.remote_thread_id = dma_spec->args[0]; 4222 filter_param.tr_trigger_type = 0; 4223 if (dma_spec->args_count == 2) { 4224 if (ud->match_data->type == DMA_TYPE_UDMA) { 4225 filter_param.atype = dma_spec->args[1]; 4226 filter_param.asel = 0; 4227 } else { 4228 filter_param.atype = 0; 4229 filter_param.asel = dma_spec->args[1]; 4230 } 4231 } else { 4232 filter_param.atype = 0; 4233 filter_param.asel = 0; 4234 } 4235 } 4236 4237 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param, 4238 ofdma->of_node); 4239 if (!chan) { 4240 dev_err(ud->dev, "get channel fail in %s.\n", __func__); 4241 return 
ERR_PTR(-EINVAL); 4242 } 4243 4244 return chan; 4245 } 4246 4247 static struct udma_match_data am654_main_data = { 4248 .type = DMA_TYPE_UDMA, 4249 .psil_base = 0x1000, 4250 .enable_memcpy_support = true, 4251 .statictr_z_mask = GENMASK(11, 0), 4252 .burst_size = { 4253 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4254 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 4255 0, /* No UH Channels */ 4256 }, 4257 }; 4258 4259 static struct udma_match_data am654_mcu_data = { 4260 .type = DMA_TYPE_UDMA, 4261 .psil_base = 0x6000, 4262 .enable_memcpy_support = false, 4263 .statictr_z_mask = GENMASK(11, 0), 4264 .burst_size = { 4265 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4266 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */ 4267 0, /* No UH Channels */ 4268 }, 4269 }; 4270 4271 static struct udma_match_data j721e_main_data = { 4272 .type = DMA_TYPE_UDMA, 4273 .psil_base = 0x1000, 4274 .enable_memcpy_support = true, 4275 .flags = UDMA_FLAGS_J7_CLASS, 4276 .statictr_z_mask = GENMASK(23, 0), 4277 .burst_size = { 4278 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4279 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */ 4280 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */ 4281 }, 4282 }; 4283 4284 static struct udma_match_data j721e_mcu_data = { 4285 .type = DMA_TYPE_UDMA, 4286 .psil_base = 0x6000, 4287 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ 4288 .flags = UDMA_FLAGS_J7_CLASS, 4289 .statictr_z_mask = GENMASK(23, 0), 4290 .burst_size = { 4291 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4292 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */ 4293 0, /* No UH Channels */ 4294 }, 4295 }; 4296 4297 static struct udma_match_data am64_bcdma_data = { 4298 .type = DMA_TYPE_BCDMA, 4299 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */ 4300 .enable_memcpy_support = true, /* Supported via bchan */ 4301 .flags = UDMA_FLAGS_J7_CLASS, 4302 .statictr_z_mask = GENMASK(23, 0), 4303 .burst_size = { 4304 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4305 0, /* No H Channels */ 4306 0, /* No UH Channels */ 4307 }, 4308 }; 4309 4310 static struct udma_match_data am64_pktdma_data = { 4311 .type = DMA_TYPE_PKTDMA, 4312 .psil_base = 0x1000, 4313 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */ 4314 .flags = UDMA_FLAGS_J7_CLASS, 4315 .statictr_z_mask = GENMASK(23, 0), 4316 .burst_size = { 4317 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */ 4318 0, /* No H Channels */ 4319 0, /* No UH Channels */ 4320 }, 4321 }; 4322 4323 static const struct of_device_id udma_of_match[] = { 4324 { 4325 .compatible = "ti,am654-navss-main-udmap", 4326 .data = &am654_main_data, 4327 }, 4328 { 4329 .compatible = "ti,am654-navss-mcu-udmap", 4330 .data = &am654_mcu_data, 4331 }, { 4332 .compatible = "ti,j721e-navss-main-udmap", 4333 .data = &j721e_main_data, 4334 }, { 4335 .compatible = "ti,j721e-navss-mcu-udmap", 4336 .data = &j721e_mcu_data, 4337 }, 4338 { /* Sentinel */ }, 4339 }; 4340 4341 static const struct of_device_id bcdma_of_match[] = { 4342 { 4343 .compatible = "ti,am64-dmss-bcdma", 4344 .data = &am64_bcdma_data, 4345 }, 4346 { /* Sentinel */ }, 4347 }; 4348 4349 static const struct of_device_id pktdma_of_match[] = { 4350 { 4351 .compatible = "ti,am64-dmss-pktdma", 4352 .data = &am64_pktdma_data, 4353 }, 4354 { /* Sentinel */ }, 4355 }; 4356 4357 static struct udma_soc_data am654_soc_data = { 4358 .oes = 
{ 4359 .udma_rchan = 0x200, 4360 }, 4361 }; 4362 4363 static struct udma_soc_data j721e_soc_data = { 4364 .oes = { 4365 .udma_rchan = 0x400, 4366 }, 4367 }; 4368 4369 static struct udma_soc_data j7200_soc_data = { 4370 .oes = { 4371 .udma_rchan = 0x80, 4372 }, 4373 }; 4374 4375 static struct udma_soc_data am64_soc_data = { 4376 .oes = { 4377 .bcdma_bchan_data = 0x2200, 4378 .bcdma_bchan_ring = 0x2400, 4379 .bcdma_tchan_data = 0x2800, 4380 .bcdma_tchan_ring = 0x2a00, 4381 .bcdma_rchan_data = 0x2e00, 4382 .bcdma_rchan_ring = 0x3000, 4383 .pktdma_tchan_flow = 0x1200, 4384 .pktdma_rchan_flow = 0x1600, 4385 }, 4386 .bcdma_trigger_event_offset = 0xc400, 4387 }; 4388 4389 static const struct soc_device_attribute k3_soc_devices[] = { 4390 { .family = "AM65X", .data = &am654_soc_data }, 4391 { .family = "J721E", .data = &j721e_soc_data }, 4392 { .family = "J7200", .data = &j7200_soc_data }, 4393 { .family = "AM64X", .data = &am64_soc_data }, 4394 { .family = "J721S2", .data = &j721e_soc_data}, 4395 { .family = "AM62X", .data = &am64_soc_data }, 4396 { /* sentinel */ } 4397 }; 4398 4399 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) 4400 { 4401 u32 cap2, cap3, cap4; 4402 int i; 4403 4404 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]); 4405 if (IS_ERR(ud->mmrs[MMR_GCFG])) 4406 return PTR_ERR(ud->mmrs[MMR_GCFG]); 4407 4408 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); 4409 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4410 4411 switch (ud->match_data->type) { 4412 case DMA_TYPE_UDMA: 4413 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); 4414 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); 4415 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); 4416 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); 4417 break; 4418 case DMA_TYPE_BCDMA: 4419 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2); 4420 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); 4421 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); 4422 ud->rflow_cnt = ud->rchan_cnt; 4423 break; 4424 case DMA_TYPE_PKTDMA: 4425 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30); 4426 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); 4427 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); 4428 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); 4429 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4); 4430 break; 4431 default: 4432 return -EINVAL; 4433 } 4434 4435 for (i = 1; i < MMR_LAST; i++) { 4436 if (i == MMR_BCHANRT && ud->bchan_cnt == 0) 4437 continue; 4438 if (i == MMR_TCHANRT && ud->tchan_cnt == 0) 4439 continue; 4440 if (i == MMR_RCHANRT && ud->rchan_cnt == 0) 4441 continue; 4442 4443 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]); 4444 if (IS_ERR(ud->mmrs[i])) 4445 return PTR_ERR(ud->mmrs[i]); 4446 } 4447 4448 return 0; 4449 } 4450 4451 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map, 4452 struct ti_sci_resource_desc *rm_desc, 4453 char *name) 4454 { 4455 bitmap_clear(map, rm_desc->start, rm_desc->num); 4456 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec); 4457 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name, 4458 rm_desc->start, rm_desc->num, rm_desc->start_sec, 4459 rm_desc->num_sec); 4460 } 4461 4462 static const char * const range_names[] = { 4463 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan", 4464 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan", 4465 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan", 4466 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow", 4467 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow", 4468 }; 4469 4470 static int udma_setup_resources(struct udma_dev *ud) 4471 { 4472 int ret, i, 
j; 4473 struct device *dev = ud->dev; 4474 struct ti_sci_resource *rm_res, irq_res; 4475 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4476 u32 cap3; 4477 4478 /* Set up the throughput level start indexes */ 4479 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4480 if (of_device_is_compatible(dev->of_node, 4481 "ti,am654-navss-main-udmap")) { 4482 ud->tchan_tpl.levels = 2; 4483 ud->tchan_tpl.start_idx[0] = 8; 4484 } else if (of_device_is_compatible(dev->of_node, 4485 "ti,am654-navss-mcu-udmap")) { 4486 ud->tchan_tpl.levels = 2; 4487 ud->tchan_tpl.start_idx[0] = 2; 4488 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) { 4489 ud->tchan_tpl.levels = 3; 4490 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); 4491 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4492 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { 4493 ud->tchan_tpl.levels = 2; 4494 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4495 } else { 4496 ud->tchan_tpl.levels = 1; 4497 } 4498 4499 ud->rchan_tpl.levels = ud->tchan_tpl.levels; 4500 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; 4501 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; 4502 4503 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4504 sizeof(unsigned long), GFP_KERNEL); 4505 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4506 GFP_KERNEL); 4507 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4508 sizeof(unsigned long), GFP_KERNEL); 4509 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4510 GFP_KERNEL); 4511 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), 4512 sizeof(unsigned long), 4513 GFP_KERNEL); 4514 ud->rflow_gp_map_allocated = devm_kcalloc(dev, 4515 BITS_TO_LONGS(ud->rflow_cnt), 4516 sizeof(unsigned long), 4517 GFP_KERNEL); 4518 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), 4519 sizeof(unsigned long), 4520 GFP_KERNEL); 4521 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), 4522 GFP_KERNEL); 4523 4524 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || 4525 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || 4526 !ud->rflows || !ud->rflow_in_use) 4527 return -ENOMEM; 4528 4529 /* 4530 * RX flows with the same Ids as RX channels are reserved to be used 4531 * as default flows if remote HW can't generate flow_ids. Those 4532 * RX flows can be requested only explicitly by id. 
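 * The first rchan_cnt bits of rflow_gp_map_allocated are set below to
 * reserve those default flows, and rflow_gp_map starts out fully set;
 * bits are cleared in the GP rflow range handling further down to mark
 * the flows Linux is allowed to use.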
4533 */ 4534 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); 4535 4536 /* by default no GP rflows are assigned to Linux */ 4537 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); 4538 4539 /* Get resource ranges from tisci */ 4540 for (i = 0; i < RM_RANGE_LAST; i++) { 4541 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW) 4542 continue; 4543 4544 tisci_rm->rm_ranges[i] = 4545 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4546 tisci_rm->tisci_dev_id, 4547 (char *)range_names[i]); 4548 } 4549 4550 /* tchan ranges */ 4551 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4552 if (IS_ERR(rm_res)) { 4553 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4554 irq_res.sets = 1; 4555 } else { 4556 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4557 for (i = 0; i < rm_res->sets; i++) 4558 udma_mark_resource_ranges(ud, ud->tchan_map, 4559 &rm_res->desc[i], "tchan"); 4560 irq_res.sets = rm_res->sets; 4561 } 4562 4563 /* rchan and matching default flow ranges */ 4564 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4565 if (IS_ERR(rm_res)) { 4566 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4567 irq_res.sets++; 4568 } else { 4569 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4570 for (i = 0; i < rm_res->sets; i++) 4571 udma_mark_resource_ranges(ud, ud->rchan_map, 4572 &rm_res->desc[i], "rchan"); 4573 irq_res.sets += rm_res->sets; 4574 } 4575 4576 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4577 if (!irq_res.desc) 4578 return -ENOMEM; 4579 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4580 if (IS_ERR(rm_res)) { 4581 irq_res.desc[0].start = 0; 4582 irq_res.desc[0].num = ud->tchan_cnt; 4583 i = 1; 4584 } else { 4585 for (i = 0; i < rm_res->sets; i++) { 4586 irq_res.desc[i].start = rm_res->desc[i].start; 4587 irq_res.desc[i].num = rm_res->desc[i].num; 4588 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; 4589 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; 4590 } 4591 } 4592 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4593 if (IS_ERR(rm_res)) { 4594 irq_res.desc[i].start = 0; 4595 irq_res.desc[i].num = ud->rchan_cnt; 4596 } else { 4597 for (j = 0; j < rm_res->sets; j++, i++) { 4598 if (rm_res->desc[j].num) { 4599 irq_res.desc[i].start = rm_res->desc[j].start + 4600 ud->soc_data->oes.udma_rchan; 4601 irq_res.desc[i].num = rm_res->desc[j].num; 4602 } 4603 if (rm_res->desc[j].num_sec) { 4604 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + 4605 ud->soc_data->oes.udma_rchan; 4606 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; 4607 } 4608 } 4609 } 4610 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4611 kfree(irq_res.desc); 4612 if (ret) { 4613 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4614 return ret; 4615 } 4616 4617 /* GP rflow ranges */ 4618 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4619 if (IS_ERR(rm_res)) { 4620 /* all gp flows are assigned exclusively to Linux */ 4621 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, 4622 ud->rflow_cnt - ud->rchan_cnt); 4623 } else { 4624 for (i = 0; i < rm_res->sets; i++) 4625 udma_mark_resource_ranges(ud, ud->rflow_gp_map, 4626 &rm_res->desc[i], "gp-rflow"); 4627 } 4628 4629 return 0; 4630 } 4631 4632 static int bcdma_setup_resources(struct udma_dev *ud) 4633 { 4634 int ret, i, j; 4635 struct device *dev = ud->dev; 4636 struct ti_sci_resource *rm_res, irq_res; 4637 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4638 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 4639 u32 cap; 4640 4641 /* Set up the throughput level start indexes */ 4642 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4643 if 
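/* UBCHAN/HBCHAN counts in CAP3 decide how many bchan TPL levels exist */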
(BCDMA_CAP3_UBCHAN_CNT(cap)) { 4644 ud->bchan_tpl.levels = 3; 4645 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap); 4646 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); 4647 } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) { 4648 ud->bchan_tpl.levels = 2; 4649 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); 4650 } else { 4651 ud->bchan_tpl.levels = 1; 4652 } 4653 4654 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30); 4655 if (BCDMA_CAP4_URCHAN_CNT(cap)) { 4656 ud->rchan_tpl.levels = 3; 4657 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap); 4658 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); 4659 } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) { 4660 ud->rchan_tpl.levels = 2; 4661 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); 4662 } else { 4663 ud->rchan_tpl.levels = 1; 4664 } 4665 4666 if (BCDMA_CAP4_UTCHAN_CNT(cap)) { 4667 ud->tchan_tpl.levels = 3; 4668 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap); 4669 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); 4670 } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) { 4671 ud->tchan_tpl.levels = 2; 4672 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); 4673 } else { 4674 ud->tchan_tpl.levels = 1; 4675 } 4676 4677 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt), 4678 sizeof(unsigned long), GFP_KERNEL); 4679 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans), 4680 GFP_KERNEL); 4681 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4682 sizeof(unsigned long), GFP_KERNEL); 4683 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4684 GFP_KERNEL); 4685 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4686 sizeof(unsigned long), GFP_KERNEL); 4687 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4688 GFP_KERNEL); 4689 /* BCDMA does not really have flows, but the driver expects them */ 4690 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt), 4691 sizeof(unsigned long), 4692 GFP_KERNEL); 4693 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows), 4694 GFP_KERNEL); 4695 4696 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map || 4697 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans || 4698 !ud->rflows) 4699 return -ENOMEM; 4700 4701 /* Get resource ranges from tisci */ 4702 for (i = 0; i < RM_RANGE_LAST; i++) { 4703 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW) 4704 continue; 4705 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0) 4706 continue; 4707 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0) 4708 continue; 4709 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0) 4710 continue; 4711 4712 tisci_rm->rm_ranges[i] = 4713 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4714 tisci_rm->tisci_dev_id, 4715 (char *)range_names[i]); 4716 } 4717 4718 irq_res.sets = 0; 4719 4720 /* bchan ranges */ 4721 if (ud->bchan_cnt) { 4722 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4723 if (IS_ERR(rm_res)) { 4724 bitmap_zero(ud->bchan_map, ud->bchan_cnt); 4725 irq_res.sets++; 4726 } else { 4727 bitmap_fill(ud->bchan_map, ud->bchan_cnt); 4728 for (i = 0; i < rm_res->sets; i++) 4729 udma_mark_resource_ranges(ud, ud->bchan_map, 4730 &rm_res->desc[i], 4731 "bchan"); 4732 irq_res.sets += rm_res->sets; 4733 } 4734 } 4735 4736 /* tchan ranges */ 4737 if (ud->tchan_cnt) { 4738 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4739 if (IS_ERR(rm_res)) { 4740 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4741 irq_res.sets += 2; 4742 } else { 4743 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4744 for (i = 0; i <
rm_res->sets; i++) 4745 udma_mark_resource_ranges(ud, ud->tchan_map, 4746 &rm_res->desc[i], 4747 "tchan"); 4748 irq_res.sets += rm_res->sets * 2; 4749 } 4750 } 4751 4752 /* rchan ranges */ 4753 if (ud->rchan_cnt) { 4754 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4755 if (IS_ERR(rm_res)) { 4756 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4757 irq_res.sets += 2; 4758 } else { 4759 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4760 for (i = 0; i < rm_res->sets; i++) 4761 udma_mark_resource_ranges(ud, ud->rchan_map, 4762 &rm_res->desc[i], 4763 "rchan"); 4764 irq_res.sets += rm_res->sets * 2; 4765 } 4766 } 4767 4768 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4769 if (!irq_res.desc) 4770 return -ENOMEM; 4771 if (ud->bchan_cnt) { 4772 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; 4773 if (IS_ERR(rm_res)) { 4774 irq_res.desc[0].start = oes->bcdma_bchan_ring; 4775 irq_res.desc[0].num = ud->bchan_cnt; 4776 i = 1; 4777 } else { 4778 for (i = 0; i < rm_res->sets; i++) { 4779 irq_res.desc[i].start = rm_res->desc[i].start + 4780 oes->bcdma_bchan_ring; 4781 irq_res.desc[i].num = rm_res->desc[i].num; 4782 } 4783 } 4784 } 4785 if (ud->tchan_cnt) { 4786 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4787 if (IS_ERR(rm_res)) { 4788 irq_res.desc[i].start = oes->bcdma_tchan_data; 4789 irq_res.desc[i].num = ud->tchan_cnt; 4790 irq_res.desc[i + 1].start = oes->bcdma_tchan_ring; 4791 irq_res.desc[i + 1].num = ud->tchan_cnt; 4792 i += 2; 4793 } else { 4794 for (j = 0; j < rm_res->sets; j++, i += 2) { 4795 irq_res.desc[i].start = rm_res->desc[j].start + 4796 oes->bcdma_tchan_data; 4797 irq_res.desc[i].num = rm_res->desc[j].num; 4798 4799 irq_res.desc[i + 1].start = rm_res->desc[j].start + 4800 oes->bcdma_tchan_ring; 4801 irq_res.desc[i + 1].num = rm_res->desc[j].num; 4802 } 4803 } 4804 } 4805 if (ud->rchan_cnt) { 4806 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4807 if (IS_ERR(rm_res)) { 4808 irq_res.desc[i].start = oes->bcdma_rchan_data; 4809 irq_res.desc[i].num = ud->rchan_cnt; 4810 irq_res.desc[i + 1].start = oes->bcdma_rchan_ring; 4811 irq_res.desc[i + 1].num = ud->rchan_cnt; 4812 i += 2; 4813 } else { 4814 for (j = 0; j < rm_res->sets; j++, i += 2) { 4815 irq_res.desc[i].start = rm_res->desc[j].start + 4816 oes->bcdma_rchan_data; 4817 irq_res.desc[i].num = rm_res->desc[j].num; 4818 4819 irq_res.desc[i + 1].start = rm_res->desc[j].start + 4820 oes->bcdma_rchan_ring; 4821 irq_res.desc[i + 1].num = rm_res->desc[j].num; 4822 } 4823 } 4824 } 4825 4826 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4827 kfree(irq_res.desc); 4828 if (ret) { 4829 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4830 return ret; 4831 } 4832 4833 return 0; 4834 } 4835 4836 static int pktdma_setup_resources(struct udma_dev *ud) 4837 { 4838 int ret, i, j; 4839 struct device *dev = ud->dev; 4840 struct ti_sci_resource *rm_res, irq_res; 4841 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; 4842 const struct udma_oes_offsets *oes = &ud->soc_data->oes; 4843 u32 cap3; 4844 4845 /* Set up the throughput level start indexes */ 4846 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); 4847 if (UDMA_CAP3_UCHAN_CNT(cap3)) { 4848 ud->tchan_tpl.levels = 3; 4849 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); 4850 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4851 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) { 4852 ud->tchan_tpl.levels = 2; 4853 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); 4854 } else { 4855 ud->tchan_tpl.levels = 1; 4856 } 4857 4858 ud->rchan_tpl.levels = 
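/* PKTDMA rchans mirror the tchan throughput-level layout */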
ud->tchan_tpl.levels; 4859 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; 4860 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; 4861 4862 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), 4863 sizeof(unsigned long), GFP_KERNEL); 4864 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), 4865 GFP_KERNEL); 4866 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), 4867 sizeof(unsigned long), GFP_KERNEL); 4868 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), 4869 GFP_KERNEL); 4870 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), 4871 sizeof(unsigned long), 4872 GFP_KERNEL); 4873 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), 4874 GFP_KERNEL); 4875 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt), 4876 sizeof(unsigned long), GFP_KERNEL); 4877 4878 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans || 4879 !ud->rchans || !ud->rflows || !ud->rflow_in_use) 4880 return -ENOMEM; 4881 4882 /* Get resource ranges from tisci */ 4883 for (i = 0; i < RM_RANGE_LAST; i++) { 4884 if (i == RM_RANGE_BCHAN) 4885 continue; 4886 4887 tisci_rm->rm_ranges[i] = 4888 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, 4889 tisci_rm->tisci_dev_id, 4890 (char *)range_names[i]); 4891 } 4892 4893 /* tchan ranges */ 4894 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; 4895 if (IS_ERR(rm_res)) { 4896 bitmap_zero(ud->tchan_map, ud->tchan_cnt); 4897 } else { 4898 bitmap_fill(ud->tchan_map, ud->tchan_cnt); 4899 for (i = 0; i < rm_res->sets; i++) 4900 udma_mark_resource_ranges(ud, ud->tchan_map, 4901 &rm_res->desc[i], "tchan"); 4902 } 4903 4904 /* rchan ranges */ 4905 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; 4906 if (IS_ERR(rm_res)) { 4907 bitmap_zero(ud->rchan_map, ud->rchan_cnt); 4908 } else { 4909 bitmap_fill(ud->rchan_map, ud->rchan_cnt); 4910 for (i = 0; i < rm_res->sets; i++) 4911 udma_mark_resource_ranges(ud, ud->rchan_map, 4912 &rm_res->desc[i], "rchan"); 4913 } 4914 4915 /* rflow ranges */ 4916 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4917 if (IS_ERR(rm_res)) { 4918 /* all rflows are assigned exclusively to Linux */ 4919 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); 4920 irq_res.sets = 1; 4921 } else { 4922 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); 4923 for (i = 0; i < rm_res->sets; i++) 4924 udma_mark_resource_ranges(ud, ud->rflow_in_use, 4925 &rm_res->desc[i], "rflow"); 4926 irq_res.sets = rm_res->sets; 4927 } 4928 4929 /* tflow ranges */ 4930 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4931 if (IS_ERR(rm_res)) { 4932 /* all tflows are assigned exclusively to Linux */ 4933 bitmap_zero(ud->tflow_map, ud->tflow_cnt); 4934 irq_res.sets++; 4935 } else { 4936 bitmap_fill(ud->tflow_map, ud->tflow_cnt); 4937 for (i = 0; i < rm_res->sets; i++) 4938 udma_mark_resource_ranges(ud, ud->tflow_map, 4939 &rm_res->desc[i], "tflow"); 4940 irq_res.sets += rm_res->sets; 4941 } 4942 4943 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); 4944 if (!irq_res.desc) 4945 return -ENOMEM; 4946 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; 4947 if (IS_ERR(rm_res)) { 4948 irq_res.desc[0].start = oes->pktdma_tchan_flow; 4949 irq_res.desc[0].num = ud->tflow_cnt; 4950 i = 1; 4951 } else { 4952 for (i = 0; i < rm_res->sets; i++) { 4953 irq_res.desc[i].start = rm_res->desc[i].start + 4954 oes->pktdma_tchan_flow; 4955 irq_res.desc[i].num = rm_res->desc[i].num; 4956 } 4957 } 4958 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; 4959 if (IS_ERR(rm_res)) { 4960 
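/* no rflow range from TISCI: register events for every rflow */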
irq_res.desc[i].start = oes->pktdma_rchan_flow; 4961 irq_res.desc[i].num = ud->rflow_cnt; 4962 } else { 4963 for (j = 0; j < rm_res->sets; j++, i++) { 4964 irq_res.desc[i].start = rm_res->desc[j].start + 4965 oes->pktdma_rchan_flow; 4966 irq_res.desc[i].num = rm_res->desc[j].num; 4967 } 4968 } 4969 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); 4970 kfree(irq_res.desc); 4971 if (ret) { 4972 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); 4973 return ret; 4974 } 4975 4976 return 0; 4977 } 4978 4979 static int setup_resources(struct udma_dev *ud) 4980 { 4981 struct device *dev = ud->dev; 4982 int ch_count, ret; 4983 4984 switch (ud->match_data->type) { 4985 case DMA_TYPE_UDMA: 4986 ret = udma_setup_resources(ud); 4987 break; 4988 case DMA_TYPE_BCDMA: 4989 ret = bcdma_setup_resources(ud); 4990 break; 4991 case DMA_TYPE_PKTDMA: 4992 ret = pktdma_setup_resources(ud); 4993 break; 4994 default: 4995 return -EINVAL; 4996 } 4997 4998 if (ret) 4999 return ret; 5000 5001 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt; 5002 if (ud->bchan_cnt) 5003 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt); 5004 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); 5005 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); 5006 if (!ch_count) 5007 return -ENODEV; 5008 5009 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), 5010 GFP_KERNEL); 5011 if (!ud->channels) 5012 return -ENOMEM; 5013 5014 switch (ud->match_data->type) { 5015 case DMA_TYPE_UDMA: 5016 dev_info(dev, 5017 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", 5018 ch_count, 5019 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 5020 ud->tchan_cnt), 5021 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 5022 ud->rchan_cnt), 5023 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, 5024 ud->rflow_cnt)); 5025 break; 5026 case DMA_TYPE_BCDMA: 5027 dev_info(dev, 5028 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n", 5029 ch_count, 5030 ud->bchan_cnt - bitmap_weight(ud->bchan_map, 5031 ud->bchan_cnt), 5032 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 5033 ud->tchan_cnt), 5034 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 5035 ud->rchan_cnt)); 5036 break; 5037 case DMA_TYPE_PKTDMA: 5038 dev_info(dev, 5039 "Channels: %d (tchan: %u, rchan: %u)\n", 5040 ch_count, 5041 ud->tchan_cnt - bitmap_weight(ud->tchan_map, 5042 ud->tchan_cnt), 5043 ud->rchan_cnt - bitmap_weight(ud->rchan_map, 5044 ud->rchan_cnt)); 5045 break; 5046 default: 5047 break; 5048 } 5049 5050 return ch_count; 5051 } 5052 5053 static int udma_setup_rx_flush(struct udma_dev *ud) 5054 { 5055 struct udma_rx_flush *rx_flush = &ud->rx_flush; 5056 struct cppi5_desc_hdr_t *tr_desc; 5057 struct cppi5_tr_type1_t *tr_req; 5058 struct cppi5_host_desc_t *desc; 5059 struct device *dev = ud->dev; 5060 struct udma_hwdesc *hwdesc; 5061 size_t tr_size; 5062 5063 /* Allocate 1K buffer for discarded data on RX channel teardown */ 5064 rx_flush->buffer_size = SZ_1K; 5065 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, 5066 GFP_KERNEL); 5067 if (!rx_flush->buffer_vaddr) 5068 return -ENOMEM; 5069 5070 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, 5071 rx_flush->buffer_size, 5072 DMA_TO_DEVICE); 5073 if (dma_mapping_error(dev, rx_flush->buffer_paddr)) 5074 return -ENOMEM; 5075 5076 /* Set up descriptor to be used for TR mode */ 5077 hwdesc = &rx_flush->hwdescs[0]; 5078 tr_size = sizeof(struct cppi5_tr_type1_t); 5079 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); 5080 hwdesc->cppi5_desc_size = 
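/* round up to the descriptor alignment chosen at probe time */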
ALIGN(hwdesc->cppi5_desc_size, 5081 ud->desc_align); 5082 5083 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 5084 GFP_KERNEL); 5085 if (!hwdesc->cppi5_desc_vaddr) 5086 return -ENOMEM; 5087 5088 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 5089 hwdesc->cppi5_desc_size, 5090 DMA_TO_DEVICE); 5091 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 5092 return -ENOMEM; 5093 5094 /* Start of the TR req records */ 5095 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; 5096 /* Start address of the TR response array */ 5097 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; 5098 5099 tr_desc = hwdesc->cppi5_desc_vaddr; 5100 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0); 5101 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 5102 cppi5_desc_set_retpolicy(tr_desc, 0, 0); 5103 5104 tr_req = hwdesc->tr_req_base; 5105 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, 5106 CPPI5_TR_EVENT_SIZE_COMPLETION, 0); 5107 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); 5108 5109 tr_req->addr = rx_flush->buffer_paddr; 5110 tr_req->icnt0 = rx_flush->buffer_size; 5111 tr_req->icnt1 = 1; 5112 5113 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 5114 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 5115 5116 /* Set up descriptor to be used for packet mode */ 5117 hwdesc = &rx_flush->hwdescs[1]; 5118 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + 5119 CPPI5_INFO0_HDESC_EPIB_SIZE + 5120 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE, 5121 ud->desc_align); 5122 5123 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, 5124 GFP_KERNEL); 5125 if (!hwdesc->cppi5_desc_vaddr) 5126 return -ENOMEM; 5127 5128 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, 5129 hwdesc->cppi5_desc_size, 5130 DMA_TO_DEVICE); 5131 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) 5132 return -ENOMEM; 5133 5134 desc = hwdesc->cppi5_desc_vaddr; 5135 cppi5_hdesc_init(desc, 0, 0); 5136 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); 5137 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); 5138 5139 cppi5_hdesc_attach_buf(desc, 5140 rx_flush->buffer_paddr, rx_flush->buffer_size, 5141 rx_flush->buffer_paddr, rx_flush->buffer_size); 5142 5143 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, 5144 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); 5145 return 0; 5146 } 5147 5148 #ifdef CONFIG_DEBUG_FS 5149 static void udma_dbg_summary_show_chan(struct seq_file *s, 5150 struct dma_chan *chan) 5151 { 5152 struct udma_chan *uc = to_udma_chan(chan); 5153 struct udma_chan_config *ucc = &uc->config; 5154 5155 seq_printf(s, " %-13s| %s", dma_chan_name(chan), 5156 chan->dbg_client_name ?: "in-use"); 5157 if (ucc->tr_trigger_type) 5158 seq_puts(s, " (triggered, "); 5159 else 5160 seq_printf(s, " (%s, ", 5161 dmaengine_get_direction_text(uc->config.dir)); 5162 5163 switch (uc->config.dir) { 5164 case DMA_MEM_TO_MEM: 5165 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) { 5166 seq_printf(s, "bchan%d)\n", uc->bchan->id); 5167 return; 5168 } 5169 5170 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id, 5171 ucc->src_thread, ucc->dst_thread); 5172 break; 5173 case DMA_DEV_TO_MEM: 5174 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id, 5175 ucc->src_thread, ucc->dst_thread); 5176 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) 5177 seq_printf(s, "rflow%d, ", uc->rflow->id); 5178 break; 5179 case DMA_MEM_TO_DEV: 5180 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id, 5181 
ucc->src_thread, ucc->dst_thread); 5182 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) 5183 seq_printf(s, "tflow%d, ", uc->tchan->tflow_id); 5184 break; 5185 default: 5186 seq_printf(s, ")\n"); 5187 return; 5188 } 5189 5190 if (ucc->ep_type == PSIL_EP_NATIVE) { 5191 seq_printf(s, "PSI-L Native"); 5192 if (ucc->metadata_size) { 5193 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : ""); 5194 if (ucc->psd_size) 5195 seq_printf(s, " PSDsize:%u", ucc->psd_size); 5196 seq_printf(s, " ]"); 5197 } 5198 } else { 5199 seq_printf(s, "PDMA"); 5200 if (ucc->enable_acc32 || ucc->enable_burst) 5201 seq_printf(s, "[%s%s ]", 5202 ucc->enable_acc32 ? " ACC32" : "", 5203 ucc->enable_burst ? " BURST" : ""); 5204 } 5205 5206 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode"); 5207 } 5208 5209 static void udma_dbg_summary_show(struct seq_file *s, 5210 struct dma_device *dma_dev) 5211 { 5212 struct dma_chan *chan; 5213 5214 list_for_each_entry(chan, &dma_dev->channels, device_node) { 5215 if (chan->client_count) 5216 udma_dbg_summary_show_chan(s, chan); 5217 } 5218 } 5219 #endif /* CONFIG_DEBUG_FS */ 5220 5221 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud) 5222 { 5223 const struct udma_match_data *match_data = ud->match_data; 5224 u8 tpl; 5225 5226 if (!match_data->enable_memcpy_support) 5227 return DMAENGINE_ALIGN_8_BYTES; 5228 5229 /* Get the highest TPL level the device supports for memcpy */ 5230 if (ud->bchan_cnt) 5231 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0); 5232 else if (ud->tchan_cnt) 5233 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0); 5234 else 5235 return DMAENGINE_ALIGN_8_BYTES; 5236 5237 switch (match_data->burst_size[tpl]) { 5238 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES: 5239 return DMAENGINE_ALIGN_256_BYTES; 5240 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES: 5241 return DMAENGINE_ALIGN_128_BYTES; 5242 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES: 5243 fallthrough; 5244 default: 5245 return DMAENGINE_ALIGN_64_BYTES; 5246 } 5247 } 5248 5249 #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 5250 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 5251 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 5252 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 5253 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 5254 5255 static int udma_probe(struct platform_device *pdev) 5256 { 5257 struct device_node *navss_node = pdev->dev.parent->of_node; 5258 const struct soc_device_attribute *soc; 5259 struct device *dev = &pdev->dev; 5260 struct udma_dev *ud; 5261 const struct of_device_id *match; 5262 int i, ret; 5263 int ch_count; 5264 5265 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); 5266 if (ret) 5267 dev_err(dev, "failed to set dma mask stuff\n"); 5268 5269 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); 5270 if (!ud) 5271 return -ENOMEM; 5272 5273 match = of_match_node(udma_of_match, dev->of_node); 5274 if (!match) 5275 match = of_match_node(bcdma_of_match, dev->of_node); 5276 if (!match) { 5277 match = of_match_node(pktdma_of_match, dev->of_node); 5278 if (!match) { 5279 dev_err(dev, "No compatible match found\n"); 5280 return -ENODEV; 5281 } 5282 } 5283 ud->match_data = match->data; 5284 5285 soc = soc_device_match(k3_soc_devices); 5286 if (!soc) { 5287 dev_err(dev, "No compatible SoC found\n"); 5288 return -ENODEV; 5289 } 5290 ud->soc_data = soc->data; 5291 5292 ret = udma_get_mmrs(pdev, ud); 5293 if (ret) 5294 return ret; 5295 5296 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); 5297 if (IS_ERR(ud->tisci_rm.tisci)) 5298 return 
PTR_ERR(ud->tisci_rm.tisci); 5299 5300 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", 5301 &ud->tisci_rm.tisci_dev_id); 5302 if (ret) { 5303 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); 5304 return ret; 5305 } 5306 pdev->id = ud->tisci_rm.tisci_dev_id; 5307 5308 ret = of_property_read_u32(navss_node, "ti,sci-dev-id", 5309 &ud->tisci_rm.tisci_navss_dev_id); 5310 if (ret) { 5311 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); 5312 return ret; 5313 } 5314 5315 if (ud->match_data->type == DMA_TYPE_UDMA) { 5316 ret = of_property_read_u32(dev->of_node, "ti,udma-atype", 5317 &ud->atype); 5318 if (!ret && ud->atype > 2) { 5319 dev_err(dev, "Invalid atype: %u\n", ud->atype); 5320 return -EINVAL; 5321 } 5322 } else { 5323 ret = of_property_read_u32(dev->of_node, "ti,asel", 5324 &ud->asel); 5325 if (!ret && ud->asel > 15) { 5326 dev_err(dev, "Invalid asel: %u\n", ud->asel); 5327 return -EINVAL; 5328 } 5329 } 5330 5331 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; 5332 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; 5333 5334 if (ud->match_data->type == DMA_TYPE_UDMA) { 5335 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); 5336 } else { 5337 struct k3_ringacc_init_data ring_init_data; 5338 5339 ring_init_data.tisci = ud->tisci_rm.tisci; 5340 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id; 5341 if (ud->match_data->type == DMA_TYPE_BCDMA) { 5342 ring_init_data.num_rings = ud->bchan_cnt + 5343 ud->tchan_cnt + 5344 ud->rchan_cnt; 5345 } else { 5346 ring_init_data.num_rings = ud->rflow_cnt + 5347 ud->tflow_cnt; 5348 } 5349 5350 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data); 5351 } 5352 5353 if (IS_ERR(ud->ringacc)) 5354 return PTR_ERR(ud->ringacc); 5355 5356 dev->msi.domain = of_msi_get_domain(dev, dev->of_node, 5357 DOMAIN_BUS_TI_SCI_INTA_MSI); 5358 if (!dev->msi.domain) { 5359 dev_err(dev, "Failed to get MSI domain\n"); 5360 return -EPROBE_DEFER; 5361 } 5362 5363 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); 5364 /* cyclic operation is not supported via PKTDMA */ 5365 if (ud->match_data->type != DMA_TYPE_PKTDMA) { 5366 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); 5367 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; 5368 } 5369 5370 ud->ddev.device_config = udma_slave_config; 5371 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; 5372 ud->ddev.device_issue_pending = udma_issue_pending; 5373 ud->ddev.device_tx_status = udma_tx_status; 5374 ud->ddev.device_pause = udma_pause; 5375 ud->ddev.device_resume = udma_resume; 5376 ud->ddev.device_terminate_all = udma_terminate_all; 5377 ud->ddev.device_synchronize = udma_synchronize; 5378 #ifdef CONFIG_DEBUG_FS 5379 ud->ddev.dbg_summary_show = udma_dbg_summary_show; 5380 #endif 5381 5382 switch (ud->match_data->type) { 5383 case DMA_TYPE_UDMA: 5384 ud->ddev.device_alloc_chan_resources = 5385 udma_alloc_chan_resources; 5386 break; 5387 case DMA_TYPE_BCDMA: 5388 ud->ddev.device_alloc_chan_resources = 5389 bcdma_alloc_chan_resources; 5390 ud->ddev.device_router_config = bcdma_router_config; 5391 break; 5392 case DMA_TYPE_PKTDMA: 5393 ud->ddev.device_alloc_chan_resources = 5394 pktdma_alloc_chan_resources; 5395 break; 5396 default: 5397 return -EINVAL; 5398 } 5399 ud->ddev.device_free_chan_resources = udma_free_chan_resources; 5400 5401 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; 5402 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; 5403 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 5404 ud->ddev.residue_granularity = 
DMA_RESIDUE_GRANULARITY_BURST; 5405 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | 5406 DESC_METADATA_ENGINE; 5407 if (ud->match_data->enable_memcpy_support && 5408 !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) { 5409 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); 5410 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; 5411 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); 5412 } 5413 5414 ud->ddev.dev = dev; 5415 ud->dev = dev; 5416 ud->psil_base = ud->match_data->psil_base; 5417 5418 INIT_LIST_HEAD(&ud->ddev.channels); 5419 INIT_LIST_HEAD(&ud->desc_to_purge); 5420 5421 ch_count = setup_resources(ud); 5422 if (ch_count <= 0) 5423 return ch_count; 5424 5425 spin_lock_init(&ud->lock); 5426 INIT_WORK(&ud->purge_work, udma_purge_desc_work); 5427 5428 ud->desc_align = 64; 5429 if (ud->desc_align < dma_get_cache_alignment()) 5430 ud->desc_align = dma_get_cache_alignment(); 5431 5432 ret = udma_setup_rx_flush(ud); 5433 if (ret) 5434 return ret; 5435 5436 for (i = 0; i < ud->bchan_cnt; i++) { 5437 struct udma_bchan *bchan = &ud->bchans[i]; 5438 5439 bchan->id = i; 5440 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000; 5441 } 5442 5443 for (i = 0; i < ud->tchan_cnt; i++) { 5444 struct udma_tchan *tchan = &ud->tchans[i]; 5445 5446 tchan->id = i; 5447 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; 5448 } 5449 5450 for (i = 0; i < ud->rchan_cnt; i++) { 5451 struct udma_rchan *rchan = &ud->rchans[i]; 5452 5453 rchan->id = i; 5454 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; 5455 } 5456 5457 for (i = 0; i < ud->rflow_cnt; i++) { 5458 struct udma_rflow *rflow = &ud->rflows[i]; 5459 5460 rflow->id = i; 5461 } 5462 5463 for (i = 0; i < ch_count; i++) { 5464 struct udma_chan *uc = &ud->channels[i]; 5465 5466 uc->ud = ud; 5467 uc->vc.desc_free = udma_desc_free; 5468 uc->id = i; 5469 uc->bchan = NULL; 5470 uc->tchan = NULL; 5471 uc->rchan = NULL; 5472 uc->config.remote_thread_id = -1; 5473 uc->config.mapped_channel_id = -1; 5474 uc->config.default_flow_id = -1; 5475 uc->config.dir = DMA_MEM_TO_MEM; 5476 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", 5477 dev_name(dev), i); 5478 5479 vchan_init(&uc->vc, &ud->ddev); 5480 /* Use custom vchan completion handling */ 5481 tasklet_setup(&uc->vc.task, udma_vchan_complete); 5482 init_completion(&uc->teardown_completed); 5483 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); 5484 } 5485 5486 /* Configure the copy_align to the maximum burst size the device supports */ 5487 ud->ddev.copy_align = udma_get_copy_align(ud); 5488 5489 ret = dma_async_device_register(&ud->ddev); 5490 if (ret) { 5491 dev_err(dev, "failed to register slave DMA engine: %d\n", ret); 5492 return ret; 5493 } 5494 5495 platform_set_drvdata(pdev, ud); 5496 5497 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); 5498 if (ret) { 5499 dev_err(dev, "failed to register of_dma controller\n"); 5500 dma_async_device_unregister(&ud->ddev); 5501 } 5502 5503 return ret; 5504 } 5505 5506 static struct platform_driver udma_driver = { 5507 .driver = { 5508 .name = "ti-udma", 5509 .of_match_table = udma_of_match, 5510 .suppress_bind_attrs = true, 5511 }, 5512 .probe = udma_probe, 5513 }; 5514 builtin_platform_driver(udma_driver); 5515 5516 static struct platform_driver bcdma_driver = { 5517 .driver = { 5518 .name = "ti-bcdma", 5519 .of_match_table = bcdma_of_match, 5520 .suppress_bind_attrs = true, 5521 }, 5522 .probe = udma_probe, 5523 }; 5524 builtin_platform_driver(bcdma_driver); 5525 5526 static struct platform_driver pktdma_driver = { 
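/* PKTDMA shares udma_probe(); the match data selects the PKTDMA-specific paths */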
5527 .driver = { 5528 .name = "ti-pktdma", 5529 .of_match_table = pktdma_of_match, 5530 .suppress_bind_attrs = true, 5531 }, 5532 .probe = udma_probe, 5533 }; 5534 builtin_platform_driver(pktdma_driver); 5535 5536 /* Private interfaces to UDMA */ 5537 #include "k3-udma-private.c" 5538